Files
Auto-GPT/tests/unit/test_utils.py
Reinier van der Leer db95d4cb84 Agent loop v2: Planning & Task Management (part 1: refactoring) (#4799)
* Move rename module `agent` -> `agents`

* WIP: abstract agent structure into base class and port Agent

* Move command arg path sanitization to decorator

* Add fallback token limit in llm.utils.create_chat_completion

* Rebase `MessageHistory` class on `ChatSequence` class

* Fix linting

* Consolidate logging modules

* Wham Bam Boom

* Fix tests & linting complaints

* Update Agent class docstring

* Fix Agent import in autogpt.llm.providers.openai

* Fix agent kwarg in test_execute_code.py

* Fix benchmarks.py

* Clean up lingering Agent(ai_name=...) initializations

* Fix agent kwarg

* Make sanitize_path_arg decorator more robust

* Fix linting

* Fix command enabling lambdas

* Use relative paths in file ops logger

* Fix test_execute_python_file_not_found

* Fix Config model validation breaking on .plugins

* Define validator for Config.plugins

* Fix Config model issues

* Fix agent iteration budget in testing

* Fix declaration of context_while_think

* Fix Agent.parse_and_process_response signature

* Fix Agent cycle_budget usages

* Fix budget checking in BaseAgent.__next__

* Fix cycle budget initialization

* Fix function calling in BaseAgent.think()

* Include functions in token length calculation

* Fix Config errors

* Add debug thing to patched_api_requestor to investigate HTTP 400 errors

* If this works I'm gonna be sad

* Fix BaseAgent cycle budget logic and document attributes

* Document attributes on `Agent`

* Fix import issues between Agent and MessageHistory

* Improve typing

* Extract application code from the agent (#4982)

* Extract application code from the agent

* Wrap interaction loop in a function and call in benchmarks

* Forgot the important function call

* Add docstrings and inline comments to run loop

* Update typing and docstrings in agent

* Docstring formatting

* Separate prompt construction from on_before_think

* Use `self.default_cycle_instruction` in `Agent.think()`

* Fix formatting

* hot fix the SIGINT handler (#4997)

The signal handler in the autogpt/main.py doesn't work properly because
of the clean_input(...) func. This commit remedies this issue. The issue
is mentioned in
3966cdfd69 (r1264278776)

* Update the sigint handler to be smart enough to actually work (#4999)

* Update the sigint handler to be smart enough to actually work

* Update autogpt/main.py

Co-authored-by: Reinier van der Leer <github@pwuts.nl>

* Can still use context manager

* Merge in upstream

---------

Co-authored-by: Reinier van der Leer <github@pwuts.nl>

* Fix CI

* Fix initial prompt construction

* off by one error

* allow exit/EXIT to shut down app

* Remove dead code

---------

Co-authored-by: collijk <collijk@uw.edu>
Co-authored-by: Cyrus <39694513+cyrus-hawk@users.noreply.github.com>
2023-07-20 17:34:49 +02:00

213 lines
6.7 KiB
Python

import os
from unittest.mock import patch
import pytest
import requests
from autogpt.config import Config
from autogpt.json_utils.utilities import extract_dict_from_response, validate_dict
from autogpt.utils import (
get_bulletin_from_web,
get_current_git_branch,
get_latest_bulletin,
readable_file_size,
validate_yaml_file,
)
from tests.utils import skip_in_ci
@pytest.fixture
def valid_json_response() -> dict:
    """A complete, schema-valid LLM response: populated thoughts plus a
    `task_complete` command with its `reason` argument."""
    return {
        "thoughts": {
            "text": "My task is complete. I will use the 'task_complete' command to shut down.",
            "reasoning": "I will use the 'task_complete' command because it allows me to shut down and signal that my task is complete.",
            "plan": "I will use the 'task_complete' command with the reason 'Task complete: retrieved Tesla's revenue in 2022.' to shut down.",
            "criticism": "I need to ensure that I have completed all necessary tasks before shutting down.",
            "speak": "",
        },
        "command": {
            "name": "task_complete",
            "args": {"reason": "Task complete: retrieved Tesla's revenue in 2022."},
        },
    }
@pytest.fixture
def invalid_json_response() -> dict:
    """A response whose `command` has an empty name and no args — expected to
    fail schema validation."""
    return {
        "thoughts": {
            "text": "My task is complete. I will use the 'task_complete' command to shut down.",
            "reasoning": "I will use the 'task_complete' command because it allows me to shut down and signal that my task is complete.",
            "plan": "I will use the 'task_complete' command with the reason 'Task complete: retrieved Tesla's revenue in 2022.' to shut down.",
            "criticism": "I need to ensure that I have completed all necessary tasks before shutting down.",
            "speak": "",
        },
        "command": {"name": "", "args": {}},
    }
def test_validate_yaml_file_valid():
    """validate_yaml_file accepts a well-formed YAML file."""
    try:
        with open("valid_test_file.yaml", "w") as f:
            f.write("setting: value")
        result, message = validate_yaml_file("valid_test_file.yaml")
    finally:
        # Remove the fixture file even if validation raises, so a failing run
        # doesn't leave state behind for subsequent tests.
        os.remove("valid_test_file.yaml")
    assert result is True
    assert "Successfully validated" in message
def test_validate_yaml_file_not_found():
    """A nonexistent path yields a failure result and a helpful message."""
    ok, msg = validate_yaml_file("non_existent_file.yaml")
    assert not ok
    assert "wasn't found" in msg
def test_validate_yaml_file_invalid():
    """validate_yaml_file reports a read error for malformed YAML."""
    try:
        with open("invalid_test_file.yaml", "w") as f:
            # Inconsistent indentation makes this invalid YAML.
            f.write(
                "settings:\n first_setting: value\n second_setting: value\n nested_setting: value\n third_setting: value\nunindented_setting: value"
            )
        result, message = validate_yaml_file("invalid_test_file.yaml")
    finally:
        # Clean up even on failure so reruns and sibling tests start fresh.
        os.remove("invalid_test_file.yaml")
    # Debug print() calls removed — pytest reports the values on failure.
    assert result is False
    assert "There was an issue while trying to read" in message
def test_readable_file_size():
    """3.5 MiB is rendered with two decimals and an 'MB' suffix."""
    three_and_a_half_megabytes = 3.5 * 1024 * 1024
    assert readable_file_size(three_and_a_half_megabytes) == "3.50 MB"
@patch("requests.get")
def test_get_bulletin_from_web_success(mock_get):
expected_content = "Test bulletin from web"
mock_get.return_value.status_code = 200
mock_get.return_value.text = expected_content
bulletin = get_bulletin_from_web()
assert expected_content in bulletin
mock_get.assert_called_with(
"https://raw.githubusercontent.com/Significant-Gravitas/Auto-GPT/master/BULLETIN.md"
)
@patch("requests.get")
def test_get_bulletin_from_web_failure(mock_get):
mock_get.return_value.status_code = 404
bulletin = get_bulletin_from_web()
assert bulletin == ""
@patch("requests.get")
def test_get_bulletin_from_web_exception(mock_get):
mock_get.side_effect = requests.exceptions.RequestException()
bulletin = get_bulletin_from_web()
assert bulletin == ""
def test_get_latest_bulletin_no_file():
    """With no cached bulletin file, whatever is fetched counts as new."""
    bulletin_path = "data/CURRENT_BULLETIN.md"
    if os.path.exists(bulletin_path):
        os.remove(bulletin_path)
    _, is_new = get_latest_bulletin()
    assert is_new
def test_get_latest_bulletin_with_file():
    """An existing cached bulletin is returned and not flagged as new when
    the web fetch yields nothing."""
    expected_content = "Test bulletin"
    with open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8") as f:
        f.write(expected_content)
    try:
        with patch("autogpt.utils.get_bulletin_from_web", return_value=""):
            bulletin, is_new = get_latest_bulletin()
            assert expected_content in bulletin
            assert not is_new
    finally:
        # Clean up even when an assertion fails; otherwise the leftover file
        # makes the other bulletin tests order-dependent.
        os.remove("data/CURRENT_BULLETIN.md")
def test_get_latest_bulletin_with_new_bulletin():
    """A web bulletin differing from the cached one is marked ::NEW BULLETIN::."""
    with open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8") as f:
        f.write("Old bulletin")
    try:
        expected_content = "New bulletin from web"
        with patch(
            "autogpt.utils.get_bulletin_from_web", return_value=expected_content
        ):
            bulletin, is_new = get_latest_bulletin()
            assert "::NEW BULLETIN::" in bulletin
            assert expected_content in bulletin
            assert is_new
    finally:
        # Clean up even when an assertion fails; otherwise the leftover file
        # makes the other bulletin tests order-dependent.
        os.remove("data/CURRENT_BULLETIN.md")
def test_get_latest_bulletin_new_bulletin_same_as_old_bulletin():
    """A web bulletin identical to the cached one is not flagged as new."""
    expected_content = "Current bulletin"
    with open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8") as f:
        f.write(expected_content)
    try:
        with patch(
            "autogpt.utils.get_bulletin_from_web", return_value=expected_content
        ):
            bulletin, is_new = get_latest_bulletin()
            assert expected_content in bulletin
            assert not is_new
    finally:
        # Clean up even when an assertion fails; otherwise the leftover file
        # makes the other bulletin tests order-dependent.
        os.remove("data/CURRENT_BULLETIN.md")
@skip_in_ci
def test_get_current_git_branch():
    """Outside CI, a working checkout should report a non-empty branch name."""
    assert get_current_git_branch() != ""
@patch("autogpt.utils.Repo")
def test_get_current_git_branch_success(mock_repo):
mock_repo.return_value.active_branch.name = "test-branch"
branch_name = get_current_git_branch()
assert branch_name == "test-branch"
@patch("autogpt.utils.Repo")
def test_get_current_git_branch_failure(mock_repo):
mock_repo.side_effect = Exception()
branch_name = get_current_git_branch()
assert branch_name == ""
def test_validate_json_valid(valid_json_response, config: Config):
    """A schema-conforming response validates with no errors."""
    is_valid, validation_errors = validate_dict(valid_json_response, config)
    assert is_valid
    assert validation_errors is None
def test_validate_json_invalid(invalid_json_response, config: Config):
    """A response with an empty command name must fail validation.

    Bug fix: this previously passed `valid_json_response` — which at this
    point resolves to the module-level fixture *function*, not the injected
    dict — so the invalid payload was never actually validated.
    """
    valid, errors = validate_dict(invalid_json_response, config)
    assert not valid
    assert errors is not None
def test_extract_json_from_response(valid_json_response: dict):
    """A plain dict repr round-trips through extract_dict_from_response."""
    raw_response = str(valid_json_response)
    assert extract_dict_from_response(raw_response) == valid_json_response
def test_extract_json_from_response_wrapped_in_code_block(valid_json_response: dict):
    """A code-fenced dict repr also round-trips correctly."""
    fenced_response = f"```{valid_json_response}```"
    assert extract_dict_from_response(fenced_response) == valid_json_response