Auto-GPT/autogpt/json_utils/utilities.py
Reinier van der Leer db95d4cb84 Agent loop v2: Planning & Task Management (part 1: refactoring) (#4799)
* Rename module `agent` -> `agents`

* WIP: abstract agent structure into base class and port Agent

* Move command arg path sanitization to decorator

* Add fallback token limit in llm.utils.create_chat_completion

* Rebase `MessageHistory` class on `ChatSequence` class

* Fix linting

* Consolidate logging modules

* Wham Bam Boom

* Fix tests & linting complaints

* Update Agent class docstring

* Fix Agent import in autogpt.llm.providers.openai

* Fix agent kwarg in test_execute_code.py

* Fix benchmarks.py

* Clean up lingering Agent(ai_name=...) initializations

* Fix agent kwarg

* Make sanitize_path_arg decorator more robust

* Fix linting

* Fix command-enabling lambdas

* Use relative paths in file ops logger

* Fix test_execute_python_file_not_found

* Fix Config model validation breaking on .plugins

* Define validator for Config.plugins

* Fix Config model issues

* Fix agent iteration budget in testing

* Fix declaration of context_while_think

* Fix Agent.parse_and_process_response signature

* Fix Agent cycle_budget usages

* Fix budget checking in BaseAgent.__next__

* Fix cycle budget initialization

* Fix function calling in BaseAgent.think()

* Include functions in token length calculation

* Fix Config errors

* Add debug thing to patched_api_requestor to investigate HTTP 400 errors

* If this works I'm gonna be sad

* Fix BaseAgent cycle budget logic and document attributes

* Document attributes on `Agent`

* Fix import issues between Agent and MessageHistory

* Improve typing

* Extract application code from the agent (#4982)

* Extract application code from the agent

* Wrap interaction loop in a function and call in benchmarks

* Forgot the important function call

* Add docstrings and inline comments to run loop

* Update typing and docstrings in agent

* Docstring formatting

* Separate prompt construction from on_before_think

* Use `self.default_cycle_instruction` in `Agent.think()`

* Fix formatting

* hot fix the SIGINT handler (#4997)

The signal handler in autogpt/main.py doesn't work properly because of the
clean_input(...) function. This commit remedies the issue, which is
mentioned in
3966cdfd69 (r1264278776)
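
For context, a generic two-stage Ctrl-C pattern (a minimal, hedged sketch of the idea behind this fix, not the PR's actual code) looks like:

    import signal
    import sys
    import time

    interrupted = False

    def sigint_handler(signum, frame):
        global interrupted
        if interrupted:
            sys.exit(1)  # second Ctrl-C: force-quit immediately
        interrupted = True
        print("Interrupt received; stopping after this cycle (Ctrl-C again to force quit)")

    signal.signal(signal.SIGINT, sigint_handler)

    while not interrupted:
        time.sleep(0.1)  # stand-in for one agent cycle
    print("Shutting down gracefully.")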

* Update the sigint handler to be smart enough to actually work (#4999)

* Update the sigint handler to be smart enough to actually work

* Update autogpt/main.py

Co-authored-by: Reinier van der Leer <github@pwuts.nl>

* Can still use context manager

* Merge in upstream

---------

Co-authored-by: Reinier van der Leer <github@pwuts.nl>

* Fix CI

* Fix initial prompt construction

* Fix off-by-one error

* Allow exit/EXIT to shut down the app

* Remove dead code

---------

Co-authored-by: collijk <collijk@uw.edu>
Co-authored-by: Cyrus <39694513+cyrus-hawk@users.noreply.github.com>
2023-07-20 17:34:49 +02:00

76 lines
2.6 KiB
Python

"""Utilities for the json_fixes package."""
import ast
import json
import os.path
from typing import Any, Literal
from jsonschema import Draft7Validator
from autogpt.config import Config
from autogpt.logs import logger
LLM_DEFAULT_RESPONSE_FORMAT = "llm_response_format_1"
def extract_dict_from_response(response_content: str) -> dict[str, Any]:
# Sometimes the response includes the JSON in a code block with ```
if response_content.startswith("```") and response_content.endswith("```"):
# Discard the first and last ```, then re-join in case the response naturally included ```
response_content = "```".join(response_content.split("```")[1:-1])
# response content comes from OpenAI as a Python `str(content_dict)`, literal_eval reverses this
try:
return ast.literal_eval(response_content)
except BaseException as e:
logger.info(f"Error parsing JSON response with literal_eval {e}")
logger.debug(f"Invalid JSON received in response: {response_content}")
# TODO: How to raise an error here without causing the program to exit?
return {}
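
# Usage sketch (illustration, not part of the original module; the sample reply
# below is invented): a model reply often arrives fenced in ``` and formatted
# as `str(dict)` rather than strict JSON, and extract_dict_from_response()
# unwraps both layers.
#
#   >>> reply = "```\n{'thoughts': {'text': 'done'}, 'command': {'name': 'task_complete', 'args': {}}}\n```"
#   >>> extract_dict_from_response(reply)["command"]["name"]
#   'task_complete'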


def llm_response_schema(
    config: Config, schema_name: str = LLM_DEFAULT_RESPONSE_FORMAT
) -> dict[str, Any]:
    filename = os.path.join(os.path.dirname(__file__), f"{schema_name}.json")
    with open(filename, "r") as f:
        try:
            json_schema = json.load(f)
        except Exception as e:
            raise RuntimeError(f"Failed to load JSON schema: {e}")

    if config.openai_functions:
        # With OpenAI function calling enabled, the command is conveyed through
        # the function call mechanism, so it is dropped from the response schema.
        del json_schema["properties"]["command"]
        json_schema["required"].remove("command")

    return json_schema
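
# Usage sketch (illustration only; assumes an initialized Config instance and
# that llm_response_format_1.json defines top-level "properties" and "required"
# entries that include "command"):
#
#   >>> schema = llm_response_schema(config)
#   >>> "command" in schema["properties"]  # False when config.openai_functions is set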


def validate_dict(
    object: object, config: Config, schema_name: str = LLM_DEFAULT_RESPONSE_FORMAT
) -> tuple[Literal[True], None] | tuple[Literal[False], list]:
    """Validate an object against the named LLM response schema.

    Args:
        object: The parsed LLM response to validate.
        config: The application config (selects the schema variant and debug output).
        schema_name: The name of the JSON schema file to validate against.

    Returns:
        bool: Whether the object is valid.
        list | None: The validation errors, or None if the object is valid.
    """
    schema = llm_response_schema(config, schema_name)
    validator = Draft7Validator(schema)

    if errors := sorted(validator.iter_errors(object), key=lambda e: e.path):
        for error in errors:
            logger.debug(f"JSON Validation Error: {error}")

        if config.debug_mode:
            logger.error(json.dumps(object, indent=4))
            logger.error("The following issues were found:")

            for error in errors:
                logger.error(f"Error: {error.message}")
        return False, errors

    logger.debug("The JSON object is valid.")
    return True, None
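

# End-to-end sketch (illustrative; `config` is assumed to be an initialized
# autogpt.config.Config and `raw_reply` a string returned by the LLM):
#
#   assistant_reply = extract_dict_from_response(raw_reply)
#   valid, errors = validate_dict(assistant_reply, config)
#   if not valid:
#       logger.error(f"LLM response failed schema validation: {errors}")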