Mirror of https://github.com/aljazceru/Auto-GPT.git (synced 2026-01-22 07:24:27 +01:00)
Fix broken tests (casualties from the past few days)
@@ -57,7 +57,6 @@ def config(
     config.plugins_dir = "tests/unit/data/test_plugins"
     config.plugins_config_file = temp_plugins_config_file
 
-    # HACK: this is necessary to ensure PLAIN_OUTPUT takes effect
     config.plain_output = True
     configure_logging(config, Path(__file__).parent / "logs")
 
@@ -95,7 +94,6 @@ def agent(config: Config) -> Agent:
     )
 
     command_registry = CommandRegistry()
-    ai_config.command_registry = command_registry
     config.memory_backend = "json_file"
     memory_json_file = get_memory(config)
     memory_json_file.clear()
@@ -11,7 +11,7 @@ def test_agent_initialization(agent: Agent):
 def test_execute_command_plugin(agent: Agent):
     """Test that executing a command that came from a plugin works as expected"""
     command_name = "check_plan"
-    agent.ai_config.prompt_generator.add_command(
+    agent.prompt_generator.add_command(
         command_name,
         "Read the plan.md with the next goals to achieve",
         {},
@@ -55,8 +55,6 @@ def test_ai_config_file_not_exists(workspace):
     assert ai_config.ai_role == ""
     assert ai_config.ai_goals == []
     assert ai_config.api_budget == 0.0
-    assert ai_config.prompt_generator is None
-    assert ai_config.command_registry is None
 
 
 def test_ai_config_file_is_empty(workspace):
@@ -70,5 +68,3 @@ def test_ai_config_file_is_empty(workspace):
     assert ai_config.ai_role == ""
     assert ai_config.ai_goals == []
     assert ai_config.api_budget == 0.0
-    assert ai_config.prompt_generator is None
-    assert ai_config.command_registry is None
@@ -1,10 +1,16 @@
+from __future__ import annotations
+
 import os
 import shutil
 import sys
 from pathlib import Path
+from typing import TYPE_CHECKING
 
 import pytest
 
+if TYPE_CHECKING:
+    from autogpt.agents import Agent, BaseAgent
+
 from autogpt.models.command import Command, CommandParameter
 from autogpt.models.command_registry import CommandRegistry
 
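The new import block above uses Python's standard pattern for type-only imports: with `from __future__ import annotations` every annotation is evaluated lazily, so names imported under `if TYPE_CHECKING:` are visible to type checkers without creating a runtime (and potentially circular) import. A minimal self-contained sketch of the same pattern, with a made-up `heavy_module` standing in for the real dependency:

from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Only seen by static type checkers; never executed at runtime.
    from heavy_module import ExpensiveClient  # hypothetical module


def describe(client: ExpensiveClient) -> str:
    # The annotation stays a string at runtime, so no import is needed
    # until mypy/pyright resolves it.
    return f"client={client!r}"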
@@ -14,7 +20,7 @@ PARAMETERS = [
 ]
 
 
-def example_command_method(arg1: int, arg2: str) -> str:
+def example_command_method(arg1: int, arg2: str, agent: BaseAgent) -> str:
     """Example function for testing the Command class."""
     # This function is static because it is not used by any other test cases.
     return f"{arg1} - {arg2}"
@@ -47,16 +53,16 @@ def example_command():
     )
 
 
-def test_command_call(example_command: Command):
+def test_command_call(example_command: Command, agent: Agent):
     """Test that Command(*args) calls and returns the result of method(*args)."""
-    result = example_command(arg1=1, arg2="test")
+    result = example_command(arg1=1, arg2="test", agent=agent)
     assert result == "1 - test"
 
 
-def test_command_call_with_invalid_arguments(example_command: Command):
+def test_command_call_with_invalid_arguments(example_command: Command, agent: Agent):
     """Test that calling a Command object with invalid arguments raises a TypeError."""
     with pytest.raises(TypeError):
-        example_command(arg1="invalid", does_not_exist="test")
+        example_command(arg1="invalid", does_not_exist="test", agent=agent)
 
 
 def test_register_command(example_command: Command):
@@ -148,7 +154,7 @@ def test_get_nonexistent_command():
     assert "nonexistent_command" not in registry
 
 
-def test_call_command():
+def test_call_command(agent: Agent):
     """Test that a command can be called through the registry."""
     registry = CommandRegistry()
     cmd = Command(
@@ -159,17 +165,17 @@ def test_call_command():
     )
 
     registry.register(cmd)
-    result = registry.call("example", arg1=1, arg2="test")
+    result = registry.call("example", arg1=1, arg2="test", agent=agent)
 
     assert result == "1 - test"
 
 
-def test_call_nonexistent_command():
+def test_call_nonexistent_command(agent: Agent):
     """Test that attempting to call a nonexistent command raises a KeyError."""
     registry = CommandRegistry()
 
     with pytest.raises(KeyError):
-        registry.call("nonexistent_command", arg1=1, arg2="test")
+        registry.call("nonexistent_command", arg1=1, arg2="test", agent=agent)
 
 
 def test_import_mock_commands_module():
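The recurring change in this file is that command callables and CommandRegistry.call now receive the agent as an explicit keyword argument. A rough, self-contained sketch of that calling convention (not Auto-GPT's actual Command/CommandRegistry implementation, only the shape the updated tests assume):

from typing import Any, Callable


class FakeAgent:
    """Stand-in for the real Agent; only used to show the call shape."""


class SimpleCommand:
    def __init__(self, name: str, method: Callable[..., Any]) -> None:
        self.name = name
        self.method = method

    def __call__(self, *args: Any, agent: FakeAgent, **kwargs: Any) -> Any:
        # The agent is threaded through to the underlying command method.
        return self.method(*args, agent=agent, **kwargs)


class SimpleRegistry:
    def __init__(self) -> None:
        self.commands: dict[str, SimpleCommand] = {}

    def register(self, cmd: SimpleCommand) -> None:
        self.commands[cmd.name] = cmd

    def call(self, name: str, **kwargs: Any) -> Any:
        # Raises KeyError for unknown commands, mirroring the test expectation.
        return self.commands[name](**kwargs)


def example(arg1: int, arg2: str, agent: FakeAgent) -> str:
    return f"{arg1} - {arg2}"


registry = SimpleRegistry()
registry.register(SimpleCommand("example", example))
assert registry.call("example", arg1=1, arg2="test", agent=FakeAgent()) == "1 - test"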
@@ -18,7 +18,7 @@ def test_clone_auto_gpt_repository(workspace, mock_clone_from, agent: Agent):
     repo = "github.com/Significant-Gravitas/Auto-GPT.git"
     scheme = "https://"
     url = scheme + repo
-    clone_path = str(workspace.get_path("auto-gpt-repo"))
+    clone_path = workspace.get_path("auto-gpt-repo")
 
     expected_output = f"Cloned {url} to {clone_path}"
 
@@ -33,7 +33,7 @@ def test_clone_auto_gpt_repository(workspace, mock_clone_from, agent: Agent):
 
 def test_clone_repository_error(workspace, mock_clone_from, agent: Agent):
     url = "https://github.com/this-repository/does-not-exist.git"
-    clone_path = str(workspace.get_path("does-not-exist"))
+    clone_path = workspace.get_path("does-not-exist")
 
     mock_clone_from.side_effect = GitCommandError(
         "clone", "fatal: repository not found", ""
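These tests rely on a mock_clone_from fixture that is not part of this diff. A plausible sketch of such a fixture, assuming GitPython's Repo.clone_from is the call being stubbed (the fixture body here is illustrative, not taken from the repository):

from unittest import mock

import pytest
from git import Repo


@pytest.fixture
def mock_clone_from():
    # Replace Repo.clone_from for the duration of a test so no real clone or
    # network access happens; tests can then set .return_value / .side_effect.
    with mock.patch.object(Repo, "clone_from") as clone_from:
        yield clone_from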
@@ -31,7 +31,7 @@ def error_factory(error_instance, error_count, retry_count, warn_user=True):
     return RaisesError()
 
 
-def test_retry_open_api_no_error(capsys):
+def test_retry_open_api_no_error(caplog: pytest.LogCaptureFixture):
     """Tests the retry functionality with no errors expected"""
 
     @openai.retry_api()
@@ -41,9 +41,9 @@ def test_retry_open_api_no_error(capsys):
     result = f()
     assert result == 1
 
-    output = capsys.readouterr()
-    assert output.out == ""
-    assert output.err == ""
+    output = caplog.text
+    assert output == ""
+    assert output == ""
 
 
 @pytest.mark.parametrize(
@@ -51,7 +51,9 @@ def test_retry_open_api_no_error(capsys):
     [(2, 10, False), (2, 2, False), (10, 2, True), (3, 2, True), (1, 0, True)],
     ids=["passing", "passing_edge", "failing", "failing_edge", "failing_no_retries"],
 )
-def test_retry_open_api_passing(capsys, error, error_count, retry_count, failure):
+def test_retry_open_api_passing(
+    caplog: pytest.LogCaptureFixture, error, error_count, retry_count, failure
+):
     """Tests the retry with simulated errors [RateLimitError, ServiceUnavailableError, APIError], but should ulimately pass"""
     call_count = min(error_count, retry_count) + 1
 
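For readers less familiar with the pytest idiom above: @pytest.mark.parametrize takes a comma-separated argname string, a list of value tuples, and optional ids that label the generated test cases. A small self-contained example in the same spirit (names and the pass/fail rule are illustrative, not the project's actual logic):

import pytest


@pytest.mark.parametrize(
    "error_count, retry_count, failure",
    [(2, 10, False), (10, 2, True)],
    ids=["passing", "failing"],
)
def test_should_fail_flag(error_count, retry_count, failure):
    # The run "fails" when more errors are simulated than retries allowed.
    assert failure == (error_count > retry_count)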
@@ -65,20 +67,20 @@ def test_retry_open_api_passing(capsys, error, error_count, retry_count, failure
 
     assert raises.count == call_count
 
-    output = capsys.readouterr()
+    output = caplog.text
 
     if error_count and retry_count:
         if type(error) == RateLimitError:
-            assert "Reached rate limit" in output.out
-            assert "Please double check" in output.out
+            assert "Reached rate limit" in output
+            assert "Please double check" in output
         if type(error) == ServiceUnavailableError:
-            assert "The OpenAI API engine is currently overloaded" in output.out
-            assert "Please double check" in output.out
+            assert "The OpenAI API engine is currently overloaded" in output
+            assert "Please double check" in output
     else:
-        assert output.out == ""
+        assert output == ""
 
 
-def test_retry_open_api_rate_limit_no_warn(capsys):
+def test_retry_open_api_rate_limit_no_warn(caplog: pytest.LogCaptureFixture):
     """Tests the retry logic with a rate limit error"""
     error_count = 2
     retry_count = 10
@@ -89,13 +91,13 @@ def test_retry_open_api_rate_limit_no_warn(capsys):
     assert result == call_count
     assert raises.count == call_count
 
-    output = capsys.readouterr()
+    output = caplog.text
 
-    assert "Reached rate limit" in output.out
-    assert "Please double check" not in output.out
+    assert "Reached rate limit" in output
+    assert "Please double check" not in output
 
 
-def test_retry_open_api_service_unavairable_no_warn(capsys):
+def test_retry_open_api_service_unavairable_no_warn(caplog: pytest.LogCaptureFixture):
     """Tests the retry logic with a service unavairable error"""
     error_count = 2
     retry_count = 10
@@ -108,13 +110,13 @@ def test_retry_open_api_service_unavairable_no_warn(capsys):
     assert result == call_count
     assert raises.count == call_count
 
-    output = capsys.readouterr()
+    output = caplog.text
 
-    assert "The OpenAI API engine is currently overloaded" in output.out
-    assert "Please double check" not in output.out
+    assert "The OpenAI API engine is currently overloaded" in output
+    assert "Please double check" not in output
 
 
-def test_retry_openapi_other_api_error(capsys):
+def test_retry_openapi_other_api_error(caplog: pytest.LogCaptureFixture):
     """Tests the Retry logic with a non rate limit error such as HTTP500"""
     error_count = 2
     retry_count = 10
@@ -126,5 +128,5 @@ def test_retry_openapi_other_api_error(capsys):
     call_count = 1
     assert raises.count == call_count
 
-    output = capsys.readouterr()
-    assert output.out == ""
+    output = caplog.text
+    assert output == ""
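The common thread in this file is swapping pytest's capsys fixture (captured stdout/stderr) for caplog (captured log records), consistent with the configure_logging(...) setup visible in the config fixture hunk near the top of this diff: once retry warnings go through the logging module instead of print, caplog.text is the place to assert on them. A minimal self-contained illustration of the caplog fixture (the warn_on_retry helper is made up for the example):

import logging

import pytest

logger = logging.getLogger(__name__)


def warn_on_retry(attempt: int) -> None:
    # Hypothetical helper that logs instead of printing.
    logger.warning("Reached rate limit, retrying (attempt %d)", attempt)


def test_warning_is_logged(caplog: pytest.LogCaptureFixture):
    with caplog.at_level(logging.WARNING):
        warn_on_retry(1)
    # caplog.text concatenates all captured log records.
    assert "Reached rate limit" in caplog.text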