Auto-GPT/tests/vcr/__init__.py
Reinier van der Leer · db95d4cb84 · Agent loop v2: Planning & Task Management (part 1: refactoring) (#4799)
* Move & rename module `agent` -> `agents`

* WIP: abstract agent structure into base class and port Agent

* Move command arg path sanitization to decorator

* Add fallback token limit in llm.utils.create_chat_completion

* Rebase `MessageHistory` class on `ChatSequence` class

* Fix linting

* Consolidate logging modules

* Wham Bam Boom

* Fix tests & linting complaints

* Update Agent class docstring

* Fix Agent import in autogpt.llm.providers.openai

* Fix agent kwarg in test_execute_code.py

* Fix benchmarks.py

* Clean up lingering Agent(ai_name=...) initializations

* Fix agent kwarg

* Make sanitize_path_arg decorator more robust

* Fix linting

* Fix command-enabling lambdas

* Use relative paths in file ops logger

* Fix test_execute_python_file_not_found

* Fix Config model validation breaking on .plugins

* Define validator for Config.plugins

* Fix Config model issues

* Fix agent iteration budget in testing

* Fix declaration of context_while_think

* Fix Agent.parse_and_process_response signature

* Fix Agent cycle_budget usages

* Fix budget checking in BaseAgent.__next__

* Fix cycle budget initialization

* Fix function calling in BaseAgent.think()

* Include functions in token length calculation

* Fix Config errors

* Add debug thing to patched_api_requestor to investigate HTTP 400 errors

* If this works I'm gonna be sad

* Fix BaseAgent cycle budget logic and document attributes

* Document attributes on `Agent`

* Fix import issues between Agent and MessageHistory

* Improve typing

* Extract application code from the agent (#4982)

* Extract application code from the agent

* Wrap interaction loop in a function and call in benchmarks

* Forgot the important function call

* Add docstrings and inline comments to run loop

* Update typing and docstrings in agent

* Docstring formatting

* Separate prompt construction from on_before_think

* Use `self.default_cycle_instruction` in `Agent.think()`

* Fix formatting

* hot fix the SIGINT handler (#4997)

The signal handler in autogpt/main.py doesn't work properly because of the
clean_input(...) function. This commit remedies the issue, which is mentioned in
3966cdfd69 (r1264278776)
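
(Illustration only, not the actual Auto-GPT code: a generic sketch of the double-interrupt pattern the fix moves toward, where a first Ctrl-C requests a graceful stop and a second one exits immediately; all names below are hypothetical.)

import signal
import sys


def install_sigint_handler():
    interrupt_count = 0

    def handler(signum, frame):
        nonlocal interrupt_count
        interrupt_count += 1
        if interrupt_count == 1:
            print("Interrupt received: finishing the current cycle before exiting...")
        else:
            print("Second interrupt received: exiting immediately.")
            sys.exit(1)

    signal.signal(signal.SIGINT, handler)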

* Update the sigint handler to be smart enough to actually work (#4999)

* Update the sigint handler to be smart enough to actually work

* Update autogpt/main.py

Co-authored-by: Reinier van der Leer <github@pwuts.nl>

* Can still use context manager

* Merge in upstream

---------

Co-authored-by: Reinier van der Leer <github@pwuts.nl>

* Fix CI

* Fix initial prompt construction

* Fix off-by-one error

* Allow exit/EXIT to shut down the app

* Remove dead code

---------

Co-authored-by: collijk <collijk@uw.edu>
Co-authored-by: Cyrus <39694513+cyrus-hawk@users.noreply.github.com>
2023-07-20 17:34:49 +02:00

97 lines · 2.6 KiB · Python

import os
from hashlib import sha256

import openai.api_requestor
import pytest
from pytest_mock import MockerFixture

from .vcr_filter import (
    PROXY,
    before_record_request,
    before_record_response,
    freeze_request_body,
)

DEFAULT_RECORD_MODE = "new_episodes"
BASE_VCR_CONFIG = {
    # Scrub requests/responses and drop auth & client-identifying headers
    # before anything is written to a cassette.
    "before_record_request": before_record_request,
    "before_record_response": before_record_response,
    "filter_headers": [
        "Authorization",
        "AGENT-MODE",
        "AGENT-TYPE",
        "OpenAI-Organization",
        "X-OpenAI-Client-User-Agent",
        "User-Agent",
    ],
    "match_on": ["method", "headers"],
}


@pytest.fixture(scope="session")
def vcr_config(get_base_vcr_config):
    return get_base_vcr_config


@pytest.fixture(scope="session")
def get_base_vcr_config(request):
    record_mode = request.config.getoption("--record-mode", default="new_episodes")
    config = BASE_VCR_CONFIG

    if record_mode is None:
        config["record_mode"] = DEFAULT_RECORD_MODE

    return config


@pytest.fixture()
def vcr_cassette_dir(request):
    # One cassette directory per test, under tests/Auto-GPT-test-cassettes
    test_name = os.path.splitext(request.node.name)[0]
    return os.path.join("tests/Auto-GPT-test-cassettes", test_name)


def patch_api_base(requestor: openai.api_requestor.APIRequestor):
    new_api_base = f"{PROXY}/v1"
    requestor.api_base = new_api_base
    return requestor


@pytest.fixture
def patched_api_requestor(mocker: MockerFixture):
    # Keep references to the unpatched methods so the patched versions can delegate to them
    init_requestor = openai.api_requestor.APIRequestor.__init__
    prepare_request = openai.api_requestor.APIRequestor._prepare_request_raw

    def patched_init_requestor(requestor, *args, **kwargs):
        init_requestor(requestor, *args, **kwargs)
        patch_api_base(requestor)

    def patched_prepare_request(self, *args, **kwargs):
        url, headers, data = prepare_request(self, *args, **kwargs)

        if PROXY:
            # Tag proxied requests with agent metadata from the environment
            headers["AGENT-MODE"] = os.environ.get("AGENT_MODE")
            headers["AGENT-TYPE"] = os.environ.get("AGENT_TYPE")

        print(
            f"[DEBUG] Outgoing API request: {headers}\n{data.decode() if data else None}"
        )

        # Add hash header for cheap & fast matching on cassette playback
        headers["X-Content-Hash"] = sha256(
            freeze_request_body(data), usedforsecurity=False
        ).hexdigest()

        return url, headers, data

    if PROXY:
        mocker.patch.object(
            openai.api_requestor.APIRequestor,
            "__init__",
            new=patched_init_requestor,
        )
    mocker.patch.object(
        openai.api_requestor.APIRequestor,
        "_prepare_request_raw",
        new=patched_prepare_request,
    )
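
For context, a minimal sketch (not part of this module) of how a test might consume these fixtures. It assumes the pytest-recording plugin, which supplies the --record-mode option and picks up the vcr_config and vcr_cassette_dir fixtures defined above, plus the openai-python 0.x client that this module patches; the test itself is hypothetical.

# Illustrative usage only -- not part of tests/vcr/__init__.py.
import openai
import pytest


@pytest.mark.vcr  # pytest-recording: record/replay HTTP traffic per BASE_VCR_CONFIG
def test_chat_roundtrip(patched_api_requestor):  # hypothetical test
    # With PROXY set, the request is sent to f"{PROXY}/v1" and tagged with
    # AGENT-* headers plus an X-Content-Hash for cassette matching.
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Say hello"}],
    )
    assert response.choices[0].message.content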