Auto-GPT/tests/challenges/utils.py
Reinier van der Leer · db95d4cb84 · Agent loop v2: Planning & Task Management (part 1: refactoring) (#4799)
* Move/rename module `agent` -> `agents`

* WIP: abstract agent structure into base class and port Agent

* Move command arg path sanitization to decorator

* Add fallback token limit in llm.utils.create_chat_completion

* Rebase `MessageHistory` class on `ChatSequence` class

* Fix linting

* Consolidate logging modules

* Wham Bam Boom

* Fix tests & linting complaints

* Update Agent class docstring

* Fix Agent import in autogpt.llm.providers.openai

* Fix agent kwarg in test_execute_code.py

* Fix benchmarks.py

* Clean up lingering Agent(ai_name=...) initializations

* Fix agent kwarg

* Make sanitize_path_arg decorator more robust

* Fix linting

* Fix command-enabling lambdas

* Use relative paths in file ops logger

* Fix test_execute_python_file_not_found

* Fix Config model validation breaking on .plugins

* Define validator for Config.plugins

* Fix Config model issues

* Fix agent iteration budget in testing

* Fix declaration of context_while_think

* Fix Agent.parse_and_process_response signature

* Fix Agent cycle_budget usages

* Fix budget checking in BaseAgent.__next__

* Fix cycle budget initialization

* Fix function calling in BaseAgent.think()

* Include functions in token length calculation

* Fix Config errors

* Add debug thing to patched_api_requestor to investigate HTTP 400 errors

* If this works I'm gonna be sad

* Fix BaseAgent cycle budget logic and document attributes

* Document attributes on `Agent`

* Fix import issues between Agent and MessageHistory

* Improve typing

* Extract application code from the agent (#4982)

* Extract application code from the agent

* Wrap interaction loop in a function and call in benchmarks

* Forgot the important function call

* Add docstrings and inline comments to run loop

* Update typing and docstrings in agent

* Docstring formatting

* Separate prompt construction from on_before_think

* Use `self.default_cycle_instruction` in `Agent.think()`

* Fix formatting

* hot fix the SIGINT handler (#4997)

The signal handler in autogpt/main.py doesn't work properly because of the
clean_input(...) function. This commit remedies the issue, which is mentioned in
3966cdfd69 (r1264278776)
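
For context, a minimal sketch of the two-stage SIGINT pattern such a fix typically involves; the function name and behaviour below are illustrative assumptions, not the actual code in autogpt/main.py.

import signal
import sys


def install_sigint_handler() -> None:
    """Illustrative only: first Ctrl+C requests a stop, second Ctrl+C exits."""

    def handler(signum, frame):
        # Second interrupt: exit immediately.
        if getattr(handler, "interrupted", False):
            sys.exit(1)
        # First interrupt: remember it and let the interaction loop wind down.
        handler.interrupted = True
        print("Interrupt received; press Ctrl+C again to force quit.")

    signal.signal(signal.SIGINT, handler)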

* Update the sigint handler to be smart enough to actually work (#4999)

* Update the sigint handler to be smart enough to actually work

* Update autogpt/main.py

Co-authored-by: Reinier van der Leer <github@pwuts.nl>

* Can still use context manager

* Merge in upstream

---------

Co-authored-by: Reinier van der Leer <github@pwuts.nl>

* Fix CI

* Fix initial prompt construction

* Fix off-by-one error

* Allow exit/EXIT to shut down the app

* Remove dead code

---------

Co-authored-by: collijk <collijk@uw.edu>
Co-authored-by: Cyrus <39694513+cyrus-hawk@users.noreply.github.com>
2023-07-20 17:34:49 +02:00

79 lines · 2.2 KiB · Python

import contextlib
import random
import shutil
from pathlib import Path
from typing import Any, Generator

import pytest

from autogpt.logs import LogCycleHandler
from autogpt.workspace import Workspace
from benchmarks import run_task
from tests.challenges.schema import Task


def generate_noise(noise_size: int) -> str:
    """Generate a deterministic pseudo-random alphanumeric string of the given length."""
    random.seed(42)
    return "".join(
        random.choices(
            "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
            k=noise_size,
        )
    )


def setup_mock_input(monkeypatch: pytest.MonkeyPatch, cycle_count: int) -> None:
    """
    Sets up the mock input for testing.

    :param monkeypatch: pytest's monkeypatch utility for modifying builtins.
    :param cycle_count: The number of cycles to mock.
    """
    # Authorize `cycle_count` cycles, then send "EXIT" to shut the app down.
    input_sequence = ["y"] * cycle_count + ["EXIT"]

    def input_generator() -> Generator[str, None, None]:
        """
        Creates a generator that yields input strings from the given sequence.
        """
        yield from input_sequence

    gen = input_generator()
    monkeypatch.setattr("autogpt.utils.session.prompt", lambda _, **kwargs: next(gen))


def setup_mock_log_cycle_agent_name(
    monkeypatch: pytest.MonkeyPatch, challenge_name: str, level_to_run: int
) -> None:
    """Patch LogCycleHandler so log cycle output is named after the challenge and level."""

    def mock_get_agent_short_name(*args: Any, **kwargs: Any) -> str:
        return f"{challenge_name}_level_{level_to_run}"

    monkeypatch.setattr(
        LogCycleHandler, "get_agent_short_name", mock_get_agent_short_name
    )


def get_workspace_path(workspace: Workspace, file_name: str) -> str:
    """Return the path of `file_name` inside the agent workspace as a string."""
    return str(workspace.get_path(file_name))


def copy_file_into_workspace(
    workspace: Workspace, directory_path: Path, file_path: str
) -> None:
    """Copy `file_path` from `directory_path` into the agent workspace."""
    workspace_code_file_path = get_workspace_path(workspace, file_path)
    code_file_path = directory_path / file_path
    shutil.copy(code_file_path, workspace_code_file_path)


def run_challenge(
    challenge_name: str,
    level_to_run: int,
    monkeypatch: pytest.MonkeyPatch,
    user_input: str,
    cycle_count: int,
) -> None:
    """Run a challenge task with mocked user input and challenge-specific log naming."""
    setup_mock_input(monkeypatch, cycle_count)
    setup_mock_log_cycle_agent_name(monkeypatch, challenge_name, level_to_run)

    task = Task(user_input=user_input)

    # run_task ends the interaction loop with SystemExit; suppress it so the
    # calling test can continue with its assertions.
    with contextlib.suppress(SystemExit):
        run_task(task)
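
To show how these helpers fit together, here is a rough sketch of a hypothetical challenge test. The challenge name, level, prompt text, and the `workspace` fixture are assumptions made for the example, not part of this module.

from pathlib import Path

import pytest

from autogpt.workspace import Workspace
from tests.challenges.utils import get_workspace_path, run_challenge


def test_write_file_challenge(
    monkeypatch: pytest.MonkeyPatch, workspace: Workspace
) -> None:
    # Hypothetical test: give the agent up to 3 cycles to complete a made-up
    # task, then check the file it should have written into the workspace.
    run_challenge(
        challenge_name="write_file",
        level_to_run=1,
        monkeypatch=monkeypatch,
        user_input="Write the word 'Washington' to a file named output.txt.",
        cycle_count=3,
    )

    output_path = get_workspace_path(workspace, "output.txt")
    assert "Washington" in Path(output_path).read_text()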