mirror of https://github.com/aljazceru/Auto-GPT.git
synced 2026-01-08 00:34:20 +01:00
Rearrange tests & fix CI (#4596)
* Rearrange tests into unit/integration/challenge categories
* Fix linting + `tests.challenges` imports
* Fix obscured duplicate test in test_url_validation.py
* Move VCR conftest to tests.vcr
* Specify tests to run & their order (unit -> integration -> challenges) in CI
* Fail Docker CI when tests fail
* Fix import & linting errors in tests
* Fix `get_text_summary`
* Fix linting errors
* Clean up pytest args in CI
* Remove bogus tests from GoCodeo
This commit is contained in:
committed by GitHub
parent 8a881f70a3
commit dafbd11686
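The test ordering called out in the commit message (unit -> integration -> challenges) can be pictured with the short sketch below. This is only an illustrative sketch: the actual CI workflow file is not part of this diff, and the exact pytest invocation used there may differ.

# Illustrative sketch only; the real CI workflow is not shown in this diff,
# and its exact pytest arguments may differ.
import sys

import pytest

# Run the suites in the order the commit message describes:
# unit -> integration -> challenges.
sys.exit(pytest.main(["tests/unit", "tests/integration", "tests/challenges"]))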
45 tests/unit/test_agent.py Normal file
@@ -0,0 +1,45 @@
from unittest.mock import MagicMock

import pytest

from autogpt.agent import Agent
from autogpt.config import AIConfig


@pytest.fixture
def agent():
    ai_name = "Test AI"
    memory = MagicMock()
    next_action_count = 0
    command_registry = MagicMock()
    config = AIConfig()
    system_prompt = "System prompt"
    triggering_prompt = "Triggering prompt"
    workspace_directory = "workspace_directory"

    agent = Agent(
        ai_name,
        memory,
        next_action_count,
        command_registry,
        config,
        system_prompt,
        triggering_prompt,
        workspace_directory,
    )
    return agent


def test_agent_initialization(agent: Agent):
    assert agent.ai_name == "Test AI"
    assert agent.memory == agent.memory
    assert agent.history.messages == []
    assert agent.next_action_count == 0
    assert agent.command_registry == agent.command_registry
    assert agent.config == agent.config
    assert agent.system_prompt == "System prompt"
    assert agent.triggering_prompt == "Triggering prompt"


# More test methods can be added for specific agent interactions
# For example, mocking chat_with_ai and testing the agent's interaction loop
64 tests/unit/test_agent_manager.py Normal file
@@ -0,0 +1,64 @@
|
||||
import pytest
|
||||
|
||||
from autogpt.agent.agent_manager import AgentManager
|
||||
from autogpt.llm.chat import create_chat_completion
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def agent_manager():
|
||||
# Hack, real gross. Singletons are not good times.
|
||||
yield AgentManager()
|
||||
del AgentManager._instances[AgentManager]
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def task():
|
||||
return "translate English to French"
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def prompt():
|
||||
return "Translate the following English text to French: 'Hello, how are you?'"
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def model():
|
||||
return "gpt-3.5-turbo"
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def mock_create_chat_completion(mocker):
|
||||
mock_create_chat_completion = mocker.patch(
|
||||
"autogpt.agent.agent_manager.create_chat_completion",
|
||||
wraps=create_chat_completion,
|
||||
)
|
||||
mock_create_chat_completion.return_value = "irrelevant"
|
||||
return mock_create_chat_completion
|
||||
|
||||
|
||||
def test_create_agent(agent_manager: AgentManager, task, prompt, model):
|
||||
key, agent_reply = agent_manager.create_agent(task, prompt, model)
|
||||
assert isinstance(key, int)
|
||||
assert isinstance(agent_reply, str)
|
||||
assert key in agent_manager.agents
|
||||
|
||||
|
||||
def test_message_agent(agent_manager: AgentManager, task, prompt, model):
|
||||
key, _ = agent_manager.create_agent(task, prompt, model)
|
||||
user_message = "Please translate 'Good morning' to French."
|
||||
agent_reply = agent_manager.message_agent(key, user_message)
|
||||
assert isinstance(agent_reply, str)
|
||||
|
||||
|
||||
def test_list_agents(agent_manager: AgentManager, task, prompt, model):
|
||||
key, _ = agent_manager.create_agent(task, prompt, model)
|
||||
agents_list = agent_manager.list_agents()
|
||||
assert isinstance(agents_list, list)
|
||||
assert (key, task) in agents_list
|
||||
|
||||
|
||||
def test_delete_agent(agent_manager: AgentManager, task, prompt, model):
|
||||
key, _ = agent_manager.create_agent(task, prompt, model)
|
||||
success = agent_manager.delete_agent(key)
|
||||
assert success
|
||||
assert key not in agent_manager.agents
|
||||
74 tests/unit/test_ai_config.py Normal file
@@ -0,0 +1,74 @@
|
||||
from autogpt.config.ai_config import AIConfig
|
||||
|
||||
"""
|
||||
Test cases for the AIConfig class, which handles loading the AI configuration
|
||||
settings from a YAML file.
|
||||
"""
|
||||
|
||||
|
||||
def test_goals_are_always_lists_of_strings(tmp_path):
|
||||
"""Test if the goals attribute is always a list of strings."""
|
||||
|
||||
yaml_content = """
|
||||
ai_goals:
|
||||
- Goal 1: Make a sandwich
|
||||
- Goal 2, Eat the sandwich
|
||||
- Goal 3 - Go to sleep
|
||||
- "Goal 4: Wake up"
|
||||
ai_name: McFamished
|
||||
ai_role: A hungry AI
|
||||
api_budget: 0.0
|
||||
"""
|
||||
config_file = tmp_path / "ai_settings.yaml"
|
||||
config_file.write_text(yaml_content)
|
||||
|
||||
ai_config = AIConfig.load(config_file)
|
||||
|
||||
assert len(ai_config.ai_goals) == 4
|
||||
assert ai_config.ai_goals[0] == "Goal 1: Make a sandwich"
|
||||
assert ai_config.ai_goals[1] == "Goal 2, Eat the sandwich"
|
||||
assert ai_config.ai_goals[2] == "Goal 3 - Go to sleep"
|
||||
assert ai_config.ai_goals[3] == "Goal 4: Wake up"
|
||||
|
||||
config_file.write_text("")
|
||||
ai_config.save(config_file)
|
||||
|
||||
yaml_content2 = """ai_goals:
|
||||
- 'Goal 1: Make a sandwich'
|
||||
- Goal 2, Eat the sandwich
|
||||
- Goal 3 - Go to sleep
|
||||
- 'Goal 4: Wake up'
|
||||
ai_name: McFamished
|
||||
ai_role: A hungry AI
|
||||
api_budget: 0.0
|
||||
"""
|
||||
assert config_file.read_text() == yaml_content2
|
||||
|
||||
|
||||
def test_ai_config_file_not_exists(workspace):
|
||||
"""Test if file does not exist."""
|
||||
|
||||
config_file = workspace.get_path("ai_settings.yaml")
|
||||
|
||||
ai_config = AIConfig.load(str(config_file))
|
||||
assert ai_config.ai_name == ""
|
||||
assert ai_config.ai_role == ""
|
||||
assert ai_config.ai_goals == []
|
||||
assert ai_config.api_budget == 0.0
|
||||
assert ai_config.prompt_generator is None
|
||||
assert ai_config.command_registry is None
|
||||
|
||||
|
||||
def test_ai_config_file_is_empty(workspace):
|
||||
"""Test if file does not exist."""
|
||||
|
||||
config_file = workspace.get_path("ai_settings.yaml")
|
||||
config_file.write_text("")
|
||||
|
||||
ai_config = AIConfig.load(str(config_file))
|
||||
assert ai_config.ai_name == ""
|
||||
assert ai_config.ai_role == ""
|
||||
assert ai_config.ai_goals == []
|
||||
assert ai_config.api_budget == 0.0
|
||||
assert ai_config.prompt_generator is None
|
||||
assert ai_config.command_registry is None
|
||||
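A note on what test_goals_are_always_lists_of_strings above is guarding against: an unquoted YAML list item such as `- Goal 1: Make a sandwich` parses as a one-key mapping rather than a string, so AIConfig has to coerce such entries back into plain strings. The snippet below is a self-contained illustration of that YAML behaviour, not Auto-GPT code.

# Self-contained illustration (not Auto-GPT code): the unquoted item parses
# as a mapping, while the quoted item stays a plain string.
import yaml

parsed = yaml.safe_load("- Goal 1: Make a sandwich\n- 'Goal 4: Wake up'")
assert parsed == [{"Goal 1": "Make a sandwich"}, "Goal 4: Wake up"]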
130 tests/unit/test_api_manager.py Normal file
@@ -0,0 +1,130 @@
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from autogpt.llm.api_manager import COSTS, ApiManager
|
||||
|
||||
api_manager = ApiManager()
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def reset_api_manager():
|
||||
api_manager.reset()
|
||||
yield
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def mock_costs():
|
||||
with patch.dict(
|
||||
COSTS,
|
||||
{
|
||||
"gpt-3.5-turbo": {"prompt": 0.002, "completion": 0.002},
|
||||
"text-embedding-ada-002": {"prompt": 0.0004, "completion": 0},
|
||||
},
|
||||
clear=True,
|
||||
):
|
||||
yield
|
||||
|
||||
|
||||
class TestApiManager:
|
||||
@staticmethod
|
||||
def test_create_chat_completion_debug_mode(caplog):
|
||||
"""Test if debug mode logs response."""
|
||||
api_manager_debug = ApiManager(debug=True)
|
||||
messages = [
|
||||
{"role": "system", "content": "You are a helpful assistant."},
|
||||
{"role": "user", "content": "Who won the world series in 2020?"},
|
||||
]
|
||||
model = "gpt-3.5-turbo"
|
||||
|
||||
with patch("openai.ChatCompletion.create") as mock_create:
|
||||
mock_response = MagicMock()
|
||||
del mock_response.error
|
||||
mock_response.usage.prompt_tokens = 10
|
||||
mock_response.usage.completion_tokens = 20
|
||||
mock_create.return_value = mock_response
|
||||
|
||||
api_manager_debug.create_chat_completion(messages, model=model)
|
||||
|
||||
assert "Response" in caplog.text
|
||||
|
||||
@staticmethod
|
||||
def test_create_chat_completion_empty_messages():
|
||||
"""Test if empty messages result in zero tokens and cost."""
|
||||
messages = []
|
||||
model = "gpt-3.5-turbo"
|
||||
|
||||
with patch("openai.ChatCompletion.create") as mock_create:
|
||||
mock_response = MagicMock()
|
||||
del mock_response.error
|
||||
mock_response.usage.prompt_tokens = 0
|
||||
mock_response.usage.completion_tokens = 0
|
||||
mock_create.return_value = mock_response
|
||||
|
||||
api_manager.create_chat_completion(messages, model=model)
|
||||
|
||||
assert api_manager.get_total_prompt_tokens() == 0
|
||||
assert api_manager.get_total_completion_tokens() == 0
|
||||
assert api_manager.get_total_cost() == 0
|
||||
|
||||
@staticmethod
|
||||
def test_create_chat_completion_valid_inputs():
|
||||
"""Test if valid inputs result in correct tokens and cost."""
|
||||
messages = [
|
||||
{"role": "system", "content": "You are a helpful assistant."},
|
||||
{"role": "user", "content": "Who won the world series in 2020?"},
|
||||
]
|
||||
model = "gpt-3.5-turbo"
|
||||
|
||||
with patch("openai.ChatCompletion.create") as mock_create:
|
||||
mock_response = MagicMock()
|
||||
del mock_response.error
|
||||
mock_response.usage.prompt_tokens = 10
|
||||
mock_response.usage.completion_tokens = 20
|
||||
mock_create.return_value = mock_response
|
||||
|
||||
api_manager.create_chat_completion(messages, model=model)
|
||||
|
||||
assert api_manager.get_total_prompt_tokens() == 10
|
||||
assert api_manager.get_total_completion_tokens() == 20
|
||||
assert api_manager.get_total_cost() == (10 * 0.002 + 20 * 0.002) / 1000
|
||||
|
||||
def test_getter_methods(self):
|
||||
"""Test the getter methods for total tokens, cost, and budget."""
|
||||
api_manager.update_cost(60, 120, "gpt-3.5-turbo")
|
||||
api_manager.set_total_budget(10.0)
|
||||
assert api_manager.get_total_prompt_tokens() == 60
|
||||
assert api_manager.get_total_completion_tokens() == 120
|
||||
assert api_manager.get_total_cost() == (60 * 0.002 + 120 * 0.002) / 1000
|
||||
assert api_manager.get_total_budget() == 10.0
|
||||
|
||||
@staticmethod
|
||||
def test_set_total_budget():
|
||||
"""Test if setting the total budget works correctly."""
|
||||
total_budget = 10.0
|
||||
api_manager.set_total_budget(total_budget)
|
||||
|
||||
assert api_manager.get_total_budget() == total_budget
|
||||
|
||||
@staticmethod
|
||||
def test_update_cost():
|
||||
"""Test if updating the cost works correctly."""
|
||||
prompt_tokens = 50
|
||||
completion_tokens = 100
|
||||
model = "gpt-3.5-turbo"
|
||||
|
||||
api_manager.update_cost(prompt_tokens, completion_tokens, model)
|
||||
|
||||
assert api_manager.get_total_prompt_tokens() == 50
|
||||
assert api_manager.get_total_completion_tokens() == 100
|
||||
assert api_manager.get_total_cost() == (50 * 0.002 + 100 * 0.002) / 1000
|
||||
|
||||
@staticmethod
|
||||
def test_get_models():
|
||||
"""Test if getting models works correctly."""
|
||||
with patch("openai.Model.list") as mock_list_models:
|
||||
mock_list_models.return_value = {"data": [{"id": "gpt-3.5-turbo"}]}
|
||||
result = api_manager.get_models()
|
||||
|
||||
assert result[0]["id"] == "gpt-3.5-turbo"
|
||||
assert api_manager.models[0]["id"] == "gpt-3.5-turbo"
|
||||
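As an aside, the cost assertions in the file above all assume per-1000-token pricing using the mocked COSTS values. A small worked example of that arithmetic, using the numbers from test_getter_methods (60 prompt and 120 completion tokens at 0.002 each), is sketched below; it is not part of the commit.

# Worked example (not part of the commit) of the per-1000-token cost
# arithmetic the assertions above rely on, with the mocked gpt-3.5-turbo prices.
import math

prompt_tokens, completion_tokens = 60, 120
prompt_price = completion_price = 0.002  # values patched into COSTS above
total_cost = (prompt_tokens * prompt_price + completion_tokens * completion_price) / 1000
assert math.isclose(total_cost, 0.00036)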
205 tests/unit/test_commands.py Normal file
@@ -0,0 +1,205 @@
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
from autogpt.commands.command import Command, CommandRegistry
|
||||
|
||||
SIGNATURE = "(arg1: int, arg2: str) -> str"
|
||||
|
||||
|
||||
class TestCommand:
|
||||
"""Test cases for the Command class."""
|
||||
|
||||
@staticmethod
|
||||
def example_command_method(arg1: int, arg2: str) -> str:
|
||||
"""Example function for testing the Command class."""
|
||||
# This method is static because it does not depend on any instance state.
|
||||
return f"{arg1} - {arg2}"
|
||||
|
||||
def test_command_creation(self):
|
||||
"""Test that a Command object can be created with the correct attributes."""
|
||||
cmd = Command(
|
||||
name="example",
|
||||
description="Example command",
|
||||
method=self.example_command_method,
|
||||
signature=SIGNATURE,
|
||||
)
|
||||
|
||||
assert cmd.name == "example"
|
||||
assert cmd.description == "Example command"
|
||||
assert cmd.method == self.example_command_method
|
||||
assert cmd.signature == "(arg1: int, arg2: str) -> str"
|
||||
|
||||
def test_command_call(self):
|
||||
"""Test that Command(*args) calls and returns the result of method(*args)."""
|
||||
# Create a Command object with the example_command_method.
|
||||
cmd = Command(
|
||||
name="example",
|
||||
description="Example command",
|
||||
method=self.example_command_method,
|
||||
)
|
||||
result = cmd(arg1=1, arg2="test")
|
||||
assert result == "1 - test"
|
||||
|
||||
def test_command_call_with_invalid_arguments(self):
|
||||
"""Test that calling a Command object with invalid arguments raises a TypeError."""
|
||||
cmd = Command(
|
||||
name="example",
|
||||
description="Example command",
|
||||
method=self.example_command_method,
|
||||
signature=SIGNATURE,
|
||||
)
|
||||
with pytest.raises(TypeError):
|
||||
cmd(arg1="invalid", does_not_exist="test")
|
||||
|
||||
def test_command_custom_signature(self):
|
||||
custom_signature = "custom_arg1: int, custom_arg2: str"
|
||||
cmd = Command(
|
||||
name="example",
|
||||
description="Example command",
|
||||
method=self.example_command_method,
|
||||
signature=custom_signature,
|
||||
)
|
||||
|
||||
assert cmd.signature == custom_signature
|
||||
|
||||
|
||||
class TestCommandRegistry:
|
||||
@staticmethod
|
||||
def example_command_method(arg1: int, arg2: str) -> str:
|
||||
return f"{arg1} - {arg2}"
|
||||
|
||||
def test_register_command(self):
|
||||
"""Test that a command can be registered to the registry."""
|
||||
registry = CommandRegistry()
|
||||
cmd = Command(
|
||||
name="example",
|
||||
description="Example command",
|
||||
method=self.example_command_method,
|
||||
signature=SIGNATURE,
|
||||
)
|
||||
|
||||
registry.register(cmd)
|
||||
|
||||
assert cmd.name in registry.commands
|
||||
assert registry.commands[cmd.name] == cmd
|
||||
|
||||
def test_unregister_command(self):
|
||||
"""Test that a command can be unregistered from the registry."""
|
||||
registry = CommandRegistry()
|
||||
cmd = Command(
|
||||
name="example",
|
||||
description="Example command",
|
||||
method=self.example_command_method,
|
||||
signature=SIGNATURE,
|
||||
)
|
||||
|
||||
registry.register(cmd)
|
||||
registry.unregister(cmd.name)
|
||||
|
||||
assert cmd.name not in registry.commands
|
||||
|
||||
def test_get_command(self):
|
||||
"""Test that a command can be retrieved from the registry."""
|
||||
registry = CommandRegistry()
|
||||
cmd = Command(
|
||||
name="example",
|
||||
description="Example command",
|
||||
method=self.example_command_method,
|
||||
signature=SIGNATURE,
|
||||
)
|
||||
|
||||
registry.register(cmd)
|
||||
retrieved_cmd = registry.get_command(cmd.name)
|
||||
|
||||
assert retrieved_cmd == cmd
|
||||
|
||||
def test_get_nonexistent_command(self):
|
||||
"""Test that attempting to get a nonexistent command raises a KeyError."""
|
||||
registry = CommandRegistry()
|
||||
|
||||
with pytest.raises(KeyError):
|
||||
registry.get_command("nonexistent_command")
|
||||
|
||||
def test_call_command(self):
|
||||
"""Test that a command can be called through the registry."""
|
||||
registry = CommandRegistry()
|
||||
cmd = Command(
|
||||
name="example",
|
||||
description="Example command",
|
||||
method=self.example_command_method,
|
||||
signature=SIGNATURE,
|
||||
)
|
||||
|
||||
registry.register(cmd)
|
||||
result = registry.call("example", arg1=1, arg2="test")
|
||||
|
||||
assert result == "1 - test"
|
||||
|
||||
def test_call_nonexistent_command(self):
|
||||
"""Test that attempting to call a nonexistent command raises a KeyError."""
|
||||
registry = CommandRegistry()
|
||||
|
||||
with pytest.raises(KeyError):
|
||||
registry.call("nonexistent_command", arg1=1, arg2="test")
|
||||
|
||||
def test_get_command_prompt(self):
|
||||
"""Test that the command prompt is correctly formatted."""
|
||||
registry = CommandRegistry()
|
||||
cmd = Command(
|
||||
name="example",
|
||||
description="Example command",
|
||||
method=self.example_command_method,
|
||||
signature=SIGNATURE,
|
||||
)
|
||||
|
||||
registry.register(cmd)
|
||||
command_prompt = registry.command_prompt()
|
||||
|
||||
assert f"(arg1: int, arg2: str)" in command_prompt
|
||||
|
||||
def test_import_mock_commands_module(self):
|
||||
"""Test that the registry can import a module with mock command plugins."""
|
||||
registry = CommandRegistry()
|
||||
mock_commands_module = "tests.mocks.mock_commands"
|
||||
|
||||
registry.import_commands(mock_commands_module)
|
||||
|
||||
assert "function_based" in registry.commands
|
||||
assert registry.commands["function_based"].name == "function_based"
|
||||
assert (
|
||||
registry.commands["function_based"].description
|
||||
== "Function-based test command"
|
||||
)
|
||||
|
||||
def test_import_temp_command_file_module(self, tmp_path):
|
||||
"""
|
||||
Test that the registry can import a command plugins module from a temp file.
|
||||
Args:
|
||||
tmp_path (pathlib.Path): Path to a temporary directory.
|
||||
"""
|
||||
registry = CommandRegistry()
|
||||
|
||||
# Create a temp command file
|
||||
src = Path(os.getcwd()) / "tests/mocks/mock_commands.py"
|
||||
temp_commands_file = tmp_path / "mock_commands.py"
|
||||
shutil.copyfile(src, temp_commands_file)
|
||||
|
||||
# Add the temp directory to sys.path to make the module importable
|
||||
sys.path.append(str(tmp_path))
|
||||
|
||||
temp_commands_module = "mock_commands"
|
||||
registry.import_commands(temp_commands_module)
|
||||
|
||||
# Remove the temp directory from sys.path
|
||||
sys.path.remove(str(tmp_path))
|
||||
|
||||
assert "function_based" in registry.commands
|
||||
assert registry.commands["function_based"].name == "function_based"
|
||||
assert (
|
||||
registry.commands["function_based"].description
|
||||
== "Function-based test command"
|
||||
)
|
||||
233 tests/unit/test_config.py Normal file
@@ -0,0 +1,233 @@
|
||||
"""
|
||||
Test cases for the Config class, which handles the configuration settings
|
||||
for the AI and ensures it behaves as a singleton.
|
||||
"""
|
||||
from unittest import mock
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
from autogpt.config.config import Config
|
||||
from autogpt.configurator import GPT_3_MODEL, GPT_4_MODEL, create_config
|
||||
from autogpt.workspace.workspace import Workspace
|
||||
|
||||
|
||||
def test_initial_values(config: Config):
|
||||
"""
|
||||
Test if the initial values of the Config class attributes are set correctly.
|
||||
"""
|
||||
assert config.debug_mode == False
|
||||
assert config.continuous_mode == False
|
||||
assert config.speak_mode == False
|
||||
assert config.fast_llm_model == "gpt-3.5-turbo"
|
||||
assert config.smart_llm_model == "gpt-4"
|
||||
assert config.fast_token_limit == 4000
|
||||
assert config.smart_token_limit == 8000
|
||||
|
||||
|
||||
def test_set_continuous_mode(config: Config):
|
||||
"""
|
||||
Test if the set_continuous_mode() method updates the continuous_mode attribute.
|
||||
"""
|
||||
# Store continuous mode to reset it after the test
|
||||
continuous_mode = config.continuous_mode
|
||||
|
||||
config.set_continuous_mode(True)
|
||||
assert config.continuous_mode == True
|
||||
|
||||
# Reset continuous mode
|
||||
config.set_continuous_mode(continuous_mode)
|
||||
|
||||
|
||||
def test_set_speak_mode(config: Config):
|
||||
"""
|
||||
Test if the set_speak_mode() method updates the speak_mode attribute.
|
||||
"""
|
||||
# Store speak mode to reset it after the test
|
||||
speak_mode = config.speak_mode
|
||||
|
||||
config.set_speak_mode(True)
|
||||
assert config.speak_mode == True
|
||||
|
||||
# Reset speak mode
|
||||
config.set_speak_mode(speak_mode)
|
||||
|
||||
|
||||
def test_set_fast_llm_model(config: Config):
|
||||
"""
|
||||
Test if the set_fast_llm_model() method updates the fast_llm_model attribute.
|
||||
"""
|
||||
# Store model name to reset it after the test
|
||||
fast_llm_model = config.fast_llm_model
|
||||
|
||||
config.set_fast_llm_model("gpt-3.5-turbo-test")
|
||||
assert config.fast_llm_model == "gpt-3.5-turbo-test"
|
||||
|
||||
# Reset model name
|
||||
config.set_fast_llm_model(fast_llm_model)
|
||||
|
||||
|
||||
def test_set_smart_llm_model(config: Config):
|
||||
"""
|
||||
Test if the set_smart_llm_model() method updates the smart_llm_model attribute.
|
||||
"""
|
||||
# Store model name to reset it after the test
|
||||
smart_llm_model = config.smart_llm_model
|
||||
|
||||
config.set_smart_llm_model("gpt-4-test")
|
||||
assert config.smart_llm_model == "gpt-4-test"
|
||||
|
||||
# Reset model name
|
||||
config.set_smart_llm_model(smart_llm_model)
|
||||
|
||||
|
||||
def test_set_fast_token_limit(config: Config):
|
||||
"""
|
||||
Test if the set_fast_token_limit() method updates the fast_token_limit attribute.
|
||||
"""
|
||||
# Store token limit to reset it after the test
|
||||
fast_token_limit = config.fast_token_limit
|
||||
|
||||
config.set_fast_token_limit(5000)
|
||||
assert config.fast_token_limit == 5000
|
||||
|
||||
# Reset token limit
|
||||
config.set_fast_token_limit(fast_token_limit)
|
||||
|
||||
|
||||
def test_set_smart_token_limit(config: Config):
|
||||
"""
|
||||
Test if the set_smart_token_limit() method updates the smart_token_limit attribute.
|
||||
"""
|
||||
# Store token limit to reset it after the test
|
||||
smart_token_limit = config.smart_token_limit
|
||||
|
||||
config.set_smart_token_limit(9000)
|
||||
assert config.smart_token_limit == 9000
|
||||
|
||||
# Reset token limit
|
||||
config.set_smart_token_limit(smart_token_limit)
|
||||
|
||||
|
||||
def test_set_debug_mode(config: Config):
|
||||
"""
|
||||
Test if the set_debug_mode() method updates the debug_mode attribute.
|
||||
"""
|
||||
# Store debug mode to reset it after the test
|
||||
debug_mode = config.debug_mode
|
||||
|
||||
config.set_debug_mode(True)
|
||||
assert config.debug_mode == True
|
||||
|
||||
# Reset debug mode
|
||||
config.set_debug_mode(debug_mode)
|
||||
|
||||
|
||||
@patch("openai.Model.list")
|
||||
def test_smart_and_fast_llm_models_set_to_gpt4(mock_list_models, config: Config):
|
||||
"""
|
||||
Test if models update to gpt-3.5-turbo if both are set to gpt-4.
|
||||
"""
|
||||
fast_llm_model = config.fast_llm_model
|
||||
smart_llm_model = config.smart_llm_model
|
||||
|
||||
config.fast_llm_model = "gpt-4"
|
||||
config.smart_llm_model = "gpt-4"
|
||||
|
||||
mock_list_models.return_value = {"data": [{"id": "gpt-3.5-turbo"}]}
|
||||
|
||||
create_config(
|
||||
config=config,
|
||||
continuous=False,
|
||||
continuous_limit=False,
|
||||
ai_settings_file="",
|
||||
prompt_settings_file="",
|
||||
skip_reprompt=False,
|
||||
speak=False,
|
||||
debug=False,
|
||||
gpt3only=False,
|
||||
gpt4only=False,
|
||||
memory_type="",
|
||||
browser_name="",
|
||||
allow_downloads=False,
|
||||
skip_news=False,
|
||||
)
|
||||
|
||||
assert config.fast_llm_model == "gpt-3.5-turbo"
|
||||
assert config.smart_llm_model == "gpt-3.5-turbo"
|
||||
|
||||
# Reset config
|
||||
config.set_fast_llm_model(fast_llm_model)
|
||||
config.set_smart_llm_model(smart_llm_model)
|
||||
|
||||
|
||||
def test_missing_azure_config(config: Config, workspace: Workspace):
|
||||
config_file = workspace.get_path("azure_config.yaml")
|
||||
with pytest.raises(FileNotFoundError):
|
||||
config.load_azure_config(str(config_file))
|
||||
|
||||
config_file.write_text("")
|
||||
config.load_azure_config(str(config_file))
|
||||
|
||||
assert config.openai_api_type == "azure"
|
||||
assert config.openai_api_base == ""
|
||||
assert config.openai_api_version == "2023-03-15-preview"
|
||||
assert config.azure_model_to_deployment_id_map == {}
|
||||
|
||||
|
||||
def test_create_config_gpt4only(config: Config) -> None:
|
||||
fast_llm_model = config.fast_llm_model
|
||||
smart_llm_model = config.smart_llm_model
|
||||
with mock.patch("autogpt.llm.api_manager.ApiManager.get_models") as mock_get_models:
|
||||
mock_get_models.return_value = [{"id": GPT_4_MODEL}]
|
||||
create_config(
|
||||
config=config,
|
||||
continuous=False,
|
||||
continuous_limit=None,
|
||||
ai_settings_file=None,
|
||||
prompt_settings_file=None,
|
||||
skip_reprompt=False,
|
||||
speak=False,
|
||||
debug=False,
|
||||
gpt3only=False,
|
||||
gpt4only=True,
|
||||
memory_type=None,
|
||||
browser_name=None,
|
||||
allow_downloads=False,
|
||||
skip_news=False,
|
||||
)
|
||||
assert config.fast_llm_model == GPT_4_MODEL
|
||||
assert config.smart_llm_model == GPT_4_MODEL
|
||||
|
||||
# Reset config
|
||||
config.set_fast_llm_model(fast_llm_model)
|
||||
config.set_smart_llm_model(smart_llm_model)
|
||||
|
||||
|
||||
def test_create_config_gpt3only(config: Config) -> None:
|
||||
fast_llm_model = config.fast_llm_model
|
||||
smart_llm_model = config.smart_llm_model
|
||||
with mock.patch("autogpt.llm.api_manager.ApiManager.get_models") as mock_get_models:
|
||||
mock_get_models.return_value = [{"id": GPT_3_MODEL}]
|
||||
create_config(
|
||||
config=config,
|
||||
continuous=False,
|
||||
continuous_limit=None,
|
||||
ai_settings_file=None,
|
||||
prompt_settings_file=None,
|
||||
skip_reprompt=False,
|
||||
speak=False,
|
||||
debug=False,
|
||||
gpt3only=True,
|
||||
gpt4only=False,
|
||||
memory_type=None,
|
||||
browser_name=None,
|
||||
allow_downloads=False,
|
||||
skip_news=False,
|
||||
)
|
||||
assert config.fast_llm_model == GPT_3_MODEL
|
||||
assert config.smart_llm_model == GPT_3_MODEL
|
||||
|
||||
# Reset config
|
||||
config.set_fast_llm_model(fast_llm_model)
|
||||
config.set_smart_llm_model(smart_llm_model)
|
||||
42 tests/unit/test_git_commands.py Normal file
@@ -0,0 +1,42 @@
import pytest
from git.exc import GitCommandError
from git.repo.base import Repo

from autogpt.commands.git_operations import clone_repository


@pytest.fixture
def mock_clone_from(mocker):
    return mocker.patch.object(Repo, "clone_from")


def test_clone_auto_gpt_repository(workspace, mock_clone_from, config):
    mock_clone_from.return_value = None

    repo = "github.com/Significant-Gravitas/Auto-GPT.git"
    scheme = "https://"
    url = scheme + repo
    clone_path = str(workspace.get_path("auto-gpt-repo"))

    expected_output = f"Cloned {url} to {clone_path}"

    clone_result = clone_repository(url=url, clone_path=clone_path, config=config)

    assert clone_result == expected_output
    mock_clone_from.assert_called_once_with(
        url=f"{scheme}{config.github_username}:{config.github_api_key}@{repo}",
        to_path=clone_path,
    )


def test_clone_repository_error(workspace, mock_clone_from, config):
    url = "https://github.com/this-repository/does-not-exist.git"
    clone_path = str(workspace.get_path("does-not-exist"))

    mock_clone_from.side_effect = GitCommandError(
        "clone", "fatal: repository not found", ""
    )

    result = clone_repository(url=url, clone_path=clone_path, config=config)

    assert "Error: " in result
134 tests/unit/test_google_search.py Normal file
@@ -0,0 +1,134 @@
|
||||
import json
|
||||
|
||||
import pytest
|
||||
from googleapiclient.errors import HttpError
|
||||
|
||||
from autogpt.commands.google_search import (
|
||||
google_official_search,
|
||||
google_search,
|
||||
safe_google_results,
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"query, expected_output",
|
||||
[("test", "test"), (["test1", "test2"], '["test1", "test2"]')],
|
||||
)
|
||||
def test_safe_google_results(query, expected_output):
|
||||
result = safe_google_results(query)
|
||||
assert isinstance(result, str)
|
||||
assert result == expected_output
|
||||
|
||||
|
||||
def test_safe_google_results_invalid_input():
|
||||
with pytest.raises(AttributeError):
|
||||
safe_google_results(123)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"query, num_results, expected_output, return_value",
|
||||
[
|
||||
(
|
||||
"test",
|
||||
1,
|
||||
'[\n {\n "title": "Result 1",\n "link": "https://example.com/result1"\n }\n]',
|
||||
[{"title": "Result 1", "link": "https://example.com/result1"}],
|
||||
),
|
||||
("", 1, "[]", []),
|
||||
("no results", 1, "[]", []),
|
||||
],
|
||||
)
|
||||
def test_google_search(
|
||||
query, num_results, expected_output, return_value, mocker, config
|
||||
):
|
||||
mock_ddg = mocker.Mock()
|
||||
mock_ddg.return_value = return_value
|
||||
|
||||
mocker.patch("autogpt.commands.google_search.DDGS.text", mock_ddg)
|
||||
actual_output = google_search(query, config, num_results=num_results)
|
||||
expected_output = safe_google_results(expected_output)
|
||||
assert actual_output == expected_output
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_googleapiclient(mocker):
|
||||
mock_build = mocker.patch("googleapiclient.discovery.build")
|
||||
mock_service = mocker.Mock()
|
||||
mock_build.return_value = mock_service
|
||||
return mock_service.cse().list().execute().get
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"query, num_results, search_results, expected_output",
|
||||
[
|
||||
(
|
||||
"test",
|
||||
3,
|
||||
[
|
||||
{"link": "http://example.com/result1"},
|
||||
{"link": "http://example.com/result2"},
|
||||
{"link": "http://example.com/result3"},
|
||||
],
|
||||
[
|
||||
"http://example.com/result1",
|
||||
"http://example.com/result2",
|
||||
"http://example.com/result3",
|
||||
],
|
||||
),
|
||||
("", 3, [], []),
|
||||
],
|
||||
)
|
||||
def test_google_official_search(
|
||||
query, num_results, expected_output, search_results, mock_googleapiclient, config
|
||||
):
|
||||
mock_googleapiclient.return_value = search_results
|
||||
actual_output = google_official_search(query, config, num_results=num_results)
|
||||
assert actual_output == safe_google_results(expected_output)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"query, num_results, expected_output, http_code, error_msg",
|
||||
[
|
||||
(
|
||||
"invalid query",
|
||||
3,
|
||||
"Error: <HttpError 400 when requesting https://www.googleapis.com/customsearch/v1?q=invalid+query&cx "
|
||||
'returned "Invalid Value". Details: "Invalid Value">',
|
||||
400,
|
||||
"Invalid Value",
|
||||
),
|
||||
(
|
||||
"invalid API key",
|
||||
3,
|
||||
"Error: The provided Google API key is invalid or missing.",
|
||||
403,
|
||||
"invalid API key",
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_google_official_search_errors(
|
||||
query,
|
||||
num_results,
|
||||
expected_output,
|
||||
mock_googleapiclient,
|
||||
http_code,
|
||||
error_msg,
|
||||
config,
|
||||
):
|
||||
class resp:
|
||||
def __init__(self, _status, _reason):
|
||||
self.status = _status
|
||||
self.reason = _reason
|
||||
|
||||
response_content = {
|
||||
"error": {"code": http_code, "message": error_msg, "reason": "backendError"}
|
||||
}
|
||||
error = HttpError(
|
||||
resp=resp(http_code, error_msg),
|
||||
content=str.encode(json.dumps(response_content)),
|
||||
uri="https://www.googleapis.com/customsearch/v1?q=invalid+query&cx",
|
||||
)
|
||||
|
||||
mock_googleapiclient.side_effect = error
|
||||
actual_output = google_official_search(query, config, num_results=num_results)
|
||||
assert actual_output == safe_google_results(expected_output)
|
||||
28 tests/unit/test_logs.py Normal file
@@ -0,0 +1,28 @@
import pytest

from autogpt.logs import remove_color_codes


@pytest.mark.parametrize(
    "raw_text, clean_text",
    [
        (
            "COMMAND = \x1b[36mbrowse_website\x1b[0m ARGUMENTS = \x1b[36m{'url': 'https://www.google.com', 'question': 'What is the capital of France?'}\x1b[0m",
            "COMMAND = browse_website ARGUMENTS = {'url': 'https://www.google.com', 'question': 'What is the capital of France?'}",
        ),
        (
            "{'Schaue dir meine Projekte auf github () an, als auch meine Webseiten': 'https://github.com/Significant-Gravitas/Auto-GPT, https://discord.gg/autogpt und https://twitter.com/SigGravitas'}",
            "{'Schaue dir meine Projekte auf github () an, als auch meine Webseiten': 'https://github.com/Significant-Gravitas/Auto-GPT, https://discord.gg/autogpt und https://twitter.com/SigGravitas'}",
        ),
        ("", ""),
        ("hello", "hello"),
        ("hello\x1B[31m world", "hello world"),
        ("\x1B[36mHello,\x1B[32m World!", "Hello, World!"),
        (
            "\x1B[1m\x1B[31mError:\x1B[0m\x1B[31m file not found",
            "Error: file not found",
        ),
    ],
)
def test_remove_color_codes(raw_text, clean_text):
    assert remove_color_codes(raw_text) == clean_text
24 tests/unit/test_make_agent.py Normal file
@@ -0,0 +1,24 @@
from unittest.mock import MagicMock

from pytest_mock import MockerFixture

from autogpt.app import list_agents, start_agent
from autogpt.config import Config


def test_make_agent(config: Config, mocker: MockerFixture) -> None:
    """Test that an agent can be created"""
    mock = mocker.patch("openai.ChatCompletion.create")

    response = MagicMock()
    # del response.error
    response.choices[0].messages[0].content = "Test message"
    response.usage.prompt_tokens = 1
    response.usage.completion_tokens = 1
    mock.return_value = response
    start_agent("Test Agent", "chat", "Hello, how are you?", config, "gpt-3.5-turbo")
    agents = list_agents(config)
    assert "List of agents:\n0: chat" == agents
    start_agent("Test Agent 2", "write", "Hello, how are you?", config, "gpt-3.5-turbo")
    agents = list_agents(config)
    assert "List of agents:\n0: chat\n1: write" == agents
48 tests/unit/test_prompt_config.py Normal file
@@ -0,0 +1,48 @@
from autogpt.config.prompt_config import PromptConfig

"""
Test cases for the PromptConfig class, which handles loading the Prompts configuration
settings from a YAML file.
"""


def test_prompt_config_loading(tmp_path):
    """Test if the prompt configuration loads correctly"""

    yaml_content = """
constraints:
- A test constraint
- Another test constraint
- A third test constraint
resources:
- A test resource
- Another test resource
- A third test resource
performance_evaluations:
- A test performance evaluation
- Another test performance evaluation
- A third test performance evaluation
"""
    config_file = tmp_path / "test_prompt_settings.yaml"
    config_file.write_text(yaml_content)

    prompt_config = PromptConfig(config_file)

    assert len(prompt_config.constraints) == 3
    assert prompt_config.constraints[0] == "A test constraint"
    assert prompt_config.constraints[1] == "Another test constraint"
    assert prompt_config.constraints[2] == "A third test constraint"
    assert len(prompt_config.resources) == 3
    assert prompt_config.resources[0] == "A test resource"
    assert prompt_config.resources[1] == "Another test resource"
    assert prompt_config.resources[2] == "A third test resource"
    assert len(prompt_config.performance_evaluations) == 3
    assert prompt_config.performance_evaluations[0] == "A test performance evaluation"
    assert (
        prompt_config.performance_evaluations[1]
        == "Another test performance evaluation"
    )
    assert (
        prompt_config.performance_evaluations[2]
        == "A third test performance evaluation"
    )
115 tests/unit/test_prompt_generator.py Normal file
@@ -0,0 +1,115 @@
|
||||
from unittest import TestCase
|
||||
|
||||
from autogpt.prompts.generator import PromptGenerator
|
||||
|
||||
|
||||
class TestPromptGenerator(TestCase):
|
||||
"""
|
||||
Test cases for the PromptGenerator class, which is responsible for generating
|
||||
prompts for the AI with constraints, commands, resources, and performance evaluations.
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
"""
|
||||
Set up the initial state for each test method by creating an instance of PromptGenerator.
|
||||
"""
|
||||
cls.generator = PromptGenerator()
|
||||
|
||||
# Test whether the add_constraint() method adds a constraint to the generator's constraints list
|
||||
def test_add_constraint(self):
|
||||
"""
|
||||
Test if the add_constraint() method adds a constraint to the generator's constraints list.
|
||||
"""
|
||||
constraint = "Constraint1"
|
||||
self.generator.add_constraint(constraint)
|
||||
self.assertIn(constraint, self.generator.constraints)
|
||||
|
||||
# Test whether the add_command() method adds a command to the generator's commands list
|
||||
def test_add_command(self):
|
||||
"""
|
||||
Test if the add_command() method adds a command to the generator's commands list.
|
||||
"""
|
||||
command_label = "Command Label"
|
||||
command_name = "command_name"
|
||||
args = {"arg1": "value1", "arg2": "value2"}
|
||||
self.generator.add_command(command_label, command_name, args)
|
||||
command = {
|
||||
"label": command_label,
|
||||
"name": command_name,
|
||||
"args": args,
|
||||
"function": None,
|
||||
}
|
||||
self.assertIn(command, self.generator.commands)
|
||||
|
||||
def test_add_resource(self):
|
||||
"""
|
||||
Test if the add_resource() method adds a resource to the generator's resources list.
|
||||
"""
|
||||
resource = "Resource1"
|
||||
self.generator.add_resource(resource)
|
||||
self.assertIn(resource, self.generator.resources)
|
||||
|
||||
def test_add_performance_evaluation(self):
|
||||
"""
|
||||
Test if the add_performance_evaluation() method adds an evaluation to the generator's
|
||||
performance_evaluation list.
|
||||
"""
|
||||
evaluation = "Evaluation1"
|
||||
self.generator.add_performance_evaluation(evaluation)
|
||||
self.assertIn(evaluation, self.generator.performance_evaluation)
|
||||
|
||||
def test_generate_prompt_string(self):
|
||||
"""
|
||||
Test if the generate_prompt_string() method generates a prompt string with all the added
|
||||
constraints, commands, resources, and evaluations.
|
||||
"""
|
||||
# Define the test data
|
||||
constraints = ["Constraint1", "Constraint2"]
|
||||
commands = [
|
||||
{
|
||||
"label": "Command1",
|
||||
"name": "command_name1",
|
||||
"args": {"arg1": "value1"},
|
||||
},
|
||||
{
|
||||
"label": "Command2",
|
||||
"name": "command_name2",
|
||||
"args": {},
|
||||
},
|
||||
]
|
||||
resources = ["Resource1", "Resource2"]
|
||||
evaluations = ["Evaluation1", "Evaluation2"]
|
||||
|
||||
# Add test data to the generator
|
||||
for constraint in constraints:
|
||||
self.generator.add_constraint(constraint)
|
||||
for command in commands:
|
||||
self.generator.add_command(
|
||||
command["label"], command["name"], command["args"]
|
||||
)
|
||||
for resource in resources:
|
||||
self.generator.add_resource(resource)
|
||||
for evaluation in evaluations:
|
||||
self.generator.add_performance_evaluation(evaluation)
|
||||
|
||||
# Generate the prompt string and verify its correctness
|
||||
prompt_string = self.generator.generate_prompt_string()
|
||||
self.assertIsNotNone(prompt_string)
|
||||
|
||||
# Check if all constraints, commands, resources, and evaluations are present in the prompt string
|
||||
for constraint in constraints:
|
||||
self.assertIn(constraint, prompt_string)
|
||||
for command in commands:
|
||||
self.assertIn(command["name"], prompt_string)
|
||||
for key, value in command["args"].items():
|
||||
self.assertIn(f'"{key}": "{value}"', prompt_string)
|
||||
for resource in resources:
|
||||
self.assertIn(resource, prompt_string)
|
||||
for evaluation in evaluations:
|
||||
self.assertIn(evaluation, prompt_string)
|
||||
|
||||
self.assertIn("constraints", prompt_string.lower())
|
||||
self.assertIn("commands", prompt_string.lower())
|
||||
self.assertIn("resources", prompt_string.lower())
|
||||
self.assertIn("performance evaluation", prompt_string.lower())
|
||||
150 tests/unit/test_text_file_parsers.py Normal file
@@ -0,0 +1,150 @@
|
||||
import json
|
||||
import tempfile
|
||||
from unittest import TestCase
|
||||
from xml.etree import ElementTree
|
||||
|
||||
import docx
|
||||
import yaml
|
||||
from bs4 import BeautifulSoup
|
||||
|
||||
from autogpt.commands.file_operations_utils import is_file_binary_fn, read_textual_file
|
||||
from autogpt.logs import logger
|
||||
|
||||
plain_text_str = "Hello, world!"
|
||||
|
||||
|
||||
def mock_text_file():
|
||||
with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".txt") as f:
|
||||
f.write(plain_text_str)
|
||||
return f.name
|
||||
|
||||
|
||||
def mock_csv_file():
|
||||
with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".csv") as f:
|
||||
f.write(plain_text_str)
|
||||
return f.name
|
||||
|
||||
|
||||
def mock_pdf_file():
|
||||
with tempfile.NamedTemporaryFile(mode="wb", delete=False, suffix=".pdf") as f:
|
||||
# Create a new PDF and add a page with the text plain_text_str
|
||||
# Write the PDF header
|
||||
f.write(b"%PDF-1.7\n")
|
||||
# Write the document catalog
|
||||
f.write(b"1 0 obj\n")
|
||||
f.write(b"<< /Type /Catalog /Pages 2 0 R >>\n")
|
||||
f.write(b"endobj\n")
|
||||
# Write the page object
|
||||
f.write(b"2 0 obj\n")
|
||||
f.write(
|
||||
b"<< /Type /Page /Parent 1 0 R /Resources << /Font << /F1 3 0 R >> >> /MediaBox [0 0 612 792] /Contents 4 0 R >>\n"
|
||||
)
|
||||
f.write(b"endobj\n")
|
||||
# Write the font object
|
||||
f.write(b"3 0 obj\n")
|
||||
f.write(
|
||||
b"<< /Type /Font /Subtype /Type1 /Name /F1 /BaseFont /Helvetica-Bold >>\n"
|
||||
)
|
||||
f.write(b"endobj\n")
|
||||
# Write the page contents object
|
||||
f.write(b"4 0 obj\n")
|
||||
f.write(b"<< /Length 25 >>\n")
|
||||
f.write(b"stream\n")
|
||||
f.write(b"BT\n/F1 12 Tf\n72 720 Td\n(Hello, world!) Tj\nET\n")
|
||||
f.write(b"endstream\n")
|
||||
f.write(b"endobj\n")
|
||||
# Write the cross-reference table
|
||||
f.write(b"xref\n")
|
||||
f.write(b"0 5\n")
|
||||
f.write(b"0000000000 65535 f \n")
|
||||
f.write(b"0000000017 00000 n \n")
|
||||
f.write(b"0000000073 00000 n \n")
|
||||
f.write(b"0000000123 00000 n \n")
|
||||
f.write(b"0000000271 00000 n \n")
|
||||
f.write(b"trailer\n")
|
||||
f.write(b"<< /Size 5 /Root 1 0 R >>\n")
|
||||
f.write(b"startxref\n")
|
||||
f.write(b"380\n")
|
||||
f.write(b"%%EOF\n")
|
||||
f.write(b"\x00")
|
||||
return f.name
|
||||
|
||||
|
||||
def mock_docx_file():
|
||||
with tempfile.NamedTemporaryFile(mode="wb", delete=False, suffix=".docx") as f:
|
||||
document = docx.Document()
|
||||
document.add_paragraph(plain_text_str)
|
||||
document.save(f.name)
|
||||
return f.name
|
||||
|
||||
|
||||
def mock_json_file():
|
||||
with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".json") as f:
|
||||
json.dump({"text": plain_text_str}, f)
|
||||
return f.name
|
||||
|
||||
|
||||
def mock_xml_file():
|
||||
root = ElementTree.Element("text")
|
||||
root.text = plain_text_str
|
||||
tree = ElementTree.ElementTree(root)
|
||||
with tempfile.NamedTemporaryFile(mode="wb", delete=False, suffix=".xml") as f:
|
||||
tree.write(f)
|
||||
return f.name
|
||||
|
||||
|
||||
def mock_yaml_file():
|
||||
with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".yaml") as f:
|
||||
yaml.dump({"text": plain_text_str}, f)
|
||||
return f.name
|
||||
|
||||
|
||||
def mock_html_file():
|
||||
html = BeautifulSoup(
|
||||
f"<html><head><title>This is a test</title></head><body><p>{plain_text_str}</p></body></html>",
|
||||
"html.parser",
|
||||
)
|
||||
with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".html") as f:
|
||||
f.write(str(html))
|
||||
return f.name
|
||||
|
||||
|
||||
def mock_md_file():
|
||||
with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".md") as f:
|
||||
f.write(f"# {plain_text_str}!\n")
|
||||
return f.name
|
||||
|
||||
|
||||
def mock_latex_file():
|
||||
with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".tex") as f:
|
||||
latex_str = rf"\documentclass{{article}}\begin{{document}}{plain_text_str}\end{{document}}"
|
||||
f.write(latex_str)
|
||||
return f.name
|
||||
|
||||
|
||||
respective_file_creation_functions = {
|
||||
".txt": mock_text_file,
|
||||
".csv": mock_csv_file,
|
||||
".pdf": mock_pdf_file,
|
||||
".docx": mock_docx_file,
|
||||
".json": mock_json_file,
|
||||
".xml": mock_xml_file,
|
||||
".yaml": mock_yaml_file,
|
||||
".html": mock_html_file,
|
||||
".md": mock_md_file,
|
||||
".tex": mock_latex_file,
|
||||
}
|
||||
|
||||
|
||||
class TestConfig(TestCase):
|
||||
def test_parsers(self):
|
||||
binary_files_extensions = [".pdf", ".docx"]
|
||||
for (
|
||||
file_extension,
|
||||
c_file_creator,
|
||||
) in respective_file_creation_functions.items():
|
||||
created_filepath = c_file_creator()
|
||||
loaded_text = read_textual_file(created_filepath, logger)
|
||||
self.assertIn(plain_text_str, loaded_text)
|
||||
should_be_binary = file_extension in binary_files_extensions
|
||||
self.assertEqual(should_be_binary, is_file_binary_fn(created_filepath))
|
||||
tests/unit/test_url_validation.py
@@ -49,25 +49,17 @@ def test_url_validation_succeeds(url):
     assert dummy_method(url) == url
 
 
-bad_protocol_data = (
-    ("htt://example.com"),
-    ("httppp://example.com"),
-    (" https://example.com"),
+@pytest.mark.parametrize(
+    "url,expected_error",
+    [
+        ("htt://example.com", "Invalid URL format"),
+        ("httppp://example.com", "Invalid URL format"),
+        (" https://example.com", "Invalid URL format"),
+        ("http://?query=q", "Missing Scheme or Network location"),
+    ],
 )
-
-
-@pytest.mark.parametrize("url", bad_protocol_data)
-def test_url_validation_fails_bad_protocol(url):
-    with raises(ValueError, match="Invalid URL format"):
-        dummy_method(url)
-
-
-missing_loc = (("http://?query=q"),)
-
-
-@pytest.mark.parametrize("url", missing_loc)
-def test_url_validation_fails_bad_protocol(url):
-    with raises(ValueError, match="Missing Scheme or Network location"):
+def test_url_validation_fails_invalid_url(url, expected_error):
+    with raises(ValueError, match=expected_error):
         dummy_method(url)
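The two removed tests in the hunk above shared the same function name, so the second definition silently shadowed the first; the replacement merges them into a single parametrized test. The self-contained snippet below illustrates that parametrization pattern with a stand-in validator; `check_url` and its error messages are invented for this example and are not the Auto-GPT implementation.

# Self-contained illustration of the parametrization pattern used above.
# `check_url` is a stand-in validator invented for this example; it is not
# the Auto-GPT implementation.
import pytest


def check_url(url: str) -> str:
    if not url.startswith(("http://", "https://")):
        raise ValueError("Invalid URL format")
    if url.startswith(("http://?", "https://?")):
        raise ValueError("Missing Scheme or Network location")
    return url


@pytest.mark.parametrize(
    "url,expected_error",
    [
        ("htt://example.com", "Invalid URL format"),
        ("http://?query=q", "Missing Scheme or Network location"),
    ],
)
def test_check_url_rejects_bad_urls(url, expected_error):
    with pytest.raises(ValueError, match=expected_error):
        check_url(url)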
152 tests/unit/test_utils.py Normal file
@@ -0,0 +1,152 @@
|
||||
import os
|
||||
from unittest.mock import patch
|
||||
|
||||
import requests
|
||||
|
||||
from autogpt.utils import (
|
||||
get_bulletin_from_web,
|
||||
get_current_git_branch,
|
||||
get_latest_bulletin,
|
||||
readable_file_size,
|
||||
validate_yaml_file,
|
||||
)
|
||||
from tests.utils import skip_in_ci
|
||||
|
||||
|
||||
def test_validate_yaml_file_valid():
|
||||
with open("valid_test_file.yaml", "w") as f:
|
||||
f.write("setting: value")
|
||||
result, message = validate_yaml_file("valid_test_file.yaml")
|
||||
os.remove("valid_test_file.yaml")
|
||||
|
||||
assert result == True
|
||||
assert "Successfully validated" in message
|
||||
|
||||
|
||||
def test_validate_yaml_file_not_found():
|
||||
result, message = validate_yaml_file("non_existent_file.yaml")
|
||||
|
||||
assert result == False
|
||||
assert "wasn't found" in message
|
||||
|
||||
|
||||
def test_validate_yaml_file_invalid():
|
||||
with open("invalid_test_file.yaml", "w") as f:
|
||||
f.write(
|
||||
"settings:\n first_setting: value\n second_setting: value\n nested_setting: value\n third_setting: value\nunindented_setting: value"
|
||||
)
|
||||
result, message = validate_yaml_file("invalid_test_file.yaml")
|
||||
os.remove("invalid_test_file.yaml")
|
||||
print(result)
|
||||
print(message)
|
||||
assert result == False
|
||||
assert "There was an issue while trying to read" in message
|
||||
|
||||
|
||||
def test_readable_file_size():
|
||||
size_in_bytes = 1024 * 1024 * 3.5 # 3.5 MB
|
||||
readable_size = readable_file_size(size_in_bytes)
|
||||
|
||||
assert readable_size == "3.50 MB"
|
||||
|
||||
|
||||
@patch("requests.get")
|
||||
def test_get_bulletin_from_web_success(mock_get):
|
||||
expected_content = "Test bulletin from web"
|
||||
|
||||
mock_get.return_value.status_code = 200
|
||||
mock_get.return_value.text = expected_content
|
||||
bulletin = get_bulletin_from_web()
|
||||
|
||||
assert expected_content in bulletin
|
||||
mock_get.assert_called_with(
|
||||
"https://raw.githubusercontent.com/Significant-Gravitas/Auto-GPT/master/BULLETIN.md"
|
||||
)
|
||||
|
||||
|
||||
@patch("requests.get")
|
||||
def test_get_bulletin_from_web_failure(mock_get):
|
||||
mock_get.return_value.status_code = 404
|
||||
bulletin = get_bulletin_from_web()
|
||||
|
||||
assert bulletin == ""
|
||||
|
||||
|
||||
@patch("requests.get")
|
||||
def test_get_bulletin_from_web_exception(mock_get):
|
||||
mock_get.side_effect = requests.exceptions.RequestException()
|
||||
bulletin = get_bulletin_from_web()
|
||||
|
||||
assert bulletin == ""
|
||||
|
||||
|
||||
def test_get_latest_bulletin_no_file():
|
||||
if os.path.exists("data/CURRENT_BULLETIN.md"):
|
||||
os.remove("data/CURRENT_BULLETIN.md")
|
||||
|
||||
bulletin, is_new = get_latest_bulletin()
|
||||
assert is_new
|
||||
|
||||
|
||||
def test_get_latest_bulletin_with_file():
|
||||
expected_content = "Test bulletin"
|
||||
with open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8") as f:
|
||||
f.write(expected_content)
|
||||
|
||||
with patch("autogpt.utils.get_bulletin_from_web", return_value=""):
|
||||
bulletin, is_new = get_latest_bulletin()
|
||||
assert expected_content in bulletin
|
||||
assert is_new == False
|
||||
|
||||
os.remove("data/CURRENT_BULLETIN.md")
|
||||
|
||||
|
||||
def test_get_latest_bulletin_with_new_bulletin():
|
||||
with open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8") as f:
|
||||
f.write("Old bulletin")
|
||||
|
||||
expected_content = "New bulletin from web"
|
||||
with patch("autogpt.utils.get_bulletin_from_web", return_value=expected_content):
|
||||
bulletin, is_new = get_latest_bulletin()
|
||||
assert "::NEW BULLETIN::" in bulletin
|
||||
assert expected_content in bulletin
|
||||
assert is_new
|
||||
|
||||
os.remove("data/CURRENT_BULLETIN.md")
|
||||
|
||||
|
||||
def test_get_latest_bulletin_new_bulletin_same_as_old_bulletin():
|
||||
expected_content = "Current bulletin"
|
||||
with open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8") as f:
|
||||
f.write(expected_content)
|
||||
|
||||
with patch("autogpt.utils.get_bulletin_from_web", return_value=expected_content):
|
||||
bulletin, is_new = get_latest_bulletin()
|
||||
assert expected_content in bulletin
|
||||
assert is_new == False
|
||||
|
||||
os.remove("data/CURRENT_BULLETIN.md")
|
||||
|
||||
|
||||
@skip_in_ci
|
||||
def test_get_current_git_branch():
|
||||
branch_name = get_current_git_branch()
|
||||
|
||||
# Assuming that the branch name will be non-empty if the function is working correctly.
|
||||
assert branch_name != ""
|
||||
|
||||
|
||||
@patch("autogpt.utils.Repo")
|
||||
def test_get_current_git_branch_success(mock_repo):
|
||||
mock_repo.return_value.active_branch.name = "test-branch"
|
||||
branch_name = get_current_git_branch()
|
||||
|
||||
assert branch_name == "test-branch"
|
||||
|
||||
|
||||
@patch("autogpt.utils.Repo")
|
||||
def test_get_current_git_branch_failure(mock_repo):
|
||||
mock_repo.side_effect = Exception()
|
||||
branch_name = get_current_git_branch()
|
||||
|
||||
assert branch_name == ""
|
||||
99 tests/unit/test_workspace.py Normal file
@@ -0,0 +1,99 @@
|
||||
import itertools
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
from autogpt.workspace import Workspace
|
||||
|
||||
_WORKSPACE_ROOT = Path("home/users/monty/auto_gpt_workspace")
|
||||
|
||||
_ACCESSIBLE_PATHS = [
|
||||
Path("."),
|
||||
Path("test_file.txt"),
|
||||
Path("test_folder"),
|
||||
Path("test_folder/test_file.txt"),
|
||||
Path("test_folder/.."),
|
||||
Path("test_folder/../test_file.txt"),
|
||||
Path("test_folder/../test_folder"),
|
||||
Path("test_folder/../test_folder/test_file.txt"),
|
||||
]
|
||||
|
||||
_INACCESSIBLE_PATHS = (
|
||||
[
|
||||
# Takes us out of the workspace
|
||||
Path(".."),
|
||||
Path("../test_file.txt"),
|
||||
Path("../not_auto_gpt_workspace"),
|
||||
Path("../not_auto_gpt_workspace/test_file.txt"),
|
||||
Path("test_folder/../.."),
|
||||
Path("test_folder/../../test_file.txt"),
|
||||
Path("test_folder/../../not_auto_gpt_workspace"),
|
||||
Path("test_folder/../../not_auto_gpt_workspace/test_file.txt"),
|
||||
]
|
||||
+ [
|
||||
# Contains null bytes
|
||||
Path(template.format(null_byte=null_byte))
|
||||
for template, null_byte in itertools.product(
|
||||
[
|
||||
"{null_byte}",
|
||||
"{null_byte}test_file.txt",
|
||||
"test_folder/{null_byte}",
|
||||
"test_folder/{null_byte}test_file.txt",
|
||||
],
|
||||
Workspace.NULL_BYTES,
|
||||
)
|
||||
]
|
||||
+ [
|
||||
# Absolute paths
|
||||
Path("/"),
|
||||
Path("/test_file.txt"),
|
||||
Path("/home"),
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def workspace_root(tmp_path):
|
||||
return tmp_path / _WORKSPACE_ROOT
|
||||
|
||||
|
||||
@pytest.fixture(params=_ACCESSIBLE_PATHS)
|
||||
def accessible_path(request):
|
||||
return request.param
|
||||
|
||||
|
||||
@pytest.fixture(params=_INACCESSIBLE_PATHS)
|
||||
def inaccessible_path(request):
|
||||
return request.param
|
||||
|
||||
|
||||
def test_sanitize_path_accessible(accessible_path, workspace_root):
|
||||
full_path = Workspace._sanitize_path(
|
||||
accessible_path,
|
||||
root=workspace_root,
|
||||
restrict_to_root=True,
|
||||
)
|
||||
assert full_path.is_absolute()
|
||||
assert full_path.is_relative_to(workspace_root)
|
||||
|
||||
|
||||
def test_sanitize_path_inaccessible(inaccessible_path, workspace_root):
|
||||
with pytest.raises(ValueError):
|
||||
Workspace._sanitize_path(
|
||||
inaccessible_path,
|
||||
root=workspace_root,
|
||||
restrict_to_root=True,
|
||||
)
|
||||
|
||||
|
||||
def test_get_path_accessible(accessible_path, workspace_root):
|
||||
workspace = Workspace(workspace_root, True)
|
||||
full_path = workspace.get_path(accessible_path)
|
||||
assert full_path.is_absolute()
|
||||
assert full_path.is_relative_to(workspace_root)
|
||||
|
||||
|
||||
def test_get_path_inaccessible(inaccessible_path, workspace_root):
|
||||
workspace = Workspace(workspace_root, True)
|
||||
with pytest.raises(ValueError):
|
||||
workspace.get_path(inaccessible_path)
|
||||