Merge branch 'master' of https://github.com/BillSchumacher/Auto-GPT into plugin-support

Author: BillSchumacher
Date: 2023-04-16 22:13:37 -05:00
37 changed files with 845 additions and 429 deletions

View File

@@ -1,6 +1,6 @@
# [Choice] Python version (use -bullseye variants on local arm64/Apple Silicon): 3, 3.10, 3.9, 3.8, 3.7, 3.6, 3-bullseye, 3.10-bullseye, 3.9-bullseye, 3.8-bullseye, 3.7-bullseye, 3.6-bullseye, 3-buster, 3.10-buster, 3.9-buster, 3.8-buster, 3.7-buster, 3.6-buster
ARG VARIANT=3-bullseye
FROM python:3.8
FROM --platform=linux/amd64 python:3.8
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
# Remove imagemagick due to https://security-tracker.debian.org/tracker/CVE-2019-10131
@@ -10,6 +10,11 @@ RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
# They are installed by the base image (python) which does not have the patch.
RUN python3 -m pip install --upgrade setuptools
# Install Chrome for web browsing
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& curl -sSL https://dl.google.com/linux/direct/google-chrome-stable_current_$(dpkg --print-architecture).deb -o /tmp/chrome.deb \
&& apt-get -y install /tmp/chrome.deb
# [Optional] If your pip requirements rarely change, uncomment this section to add them to the image.
# COPY requirements.txt /tmp/pip-tmp/
# RUN pip3 --disable-pip-version-check --no-cache-dir install -r /tmp/pip-tmp/requirements.txt \

View File

@@ -11,6 +11,7 @@
"userGid": "1000",
"upgradePackages": "true"
},
"ghcr.io/devcontainers/features/desktop-lite:1": {},
"ghcr.io/devcontainers/features/python:1": "none",
"ghcr.io/devcontainers/features/node:1": "none",
"ghcr.io/devcontainers/features/git:1": {

.github/workflows/benchmark.yml vendored Normal file (31 lines added)
View File

@@ -0,0 +1,31 @@
name: benchmark
on:
workflow_dispatch:
jobs:
build:
runs-on: ubuntu-latest
environment: benchmark
strategy:
matrix:
python-version: [3.8]
steps:
- name: Check out repository
uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: benchmark
run: |
python benchmark/benchmark_entrepeneur_gpt_with_undecisive_user.py
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}

View File

@@ -36,7 +36,7 @@ jobs:
- name: Run unittest tests with coverage
run: |
coverage run --source=autogpt -m unittest discover tests
pytest --cov=autogpt --without-integration --without-slow-integration
- name: Generate coverage report
run: |

.gitignore vendored (5 lines changed)
View File

@@ -3,13 +3,13 @@ autogpt/keys.py
autogpt/*json
autogpt/node_modules/
autogpt/__pycache__/keys.cpython-310.pyc
autogpt/auto_gpt_workspace
package-lock.json
*.pyc
auto_gpt_workspace/*
*.mpeg
.env
azure.yaml
*venv/*
outputs/*
ai_settings.yaml
last_run_ai_settings.yaml
@@ -130,10 +130,9 @@ celerybeat.pid
.env
.venv
env/
venv/
venv*/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject

View File

@@ -31,3 +31,9 @@ repos:
types: [ python ]
exclude: .+/(dist|.venv|venv|build)/.+
pass_filenames: true
- id: pytest-check
name: pytest-check
entry: pytest --cov=autogpt --without-integration --without-slow-integration
language: system
pass_filenames: false
always_run: true

README.md (259 lines changed)

File diff suppressed because one or more lines are too long

View File

@@ -66,7 +66,7 @@ def main() -> None:
full_message_history = []
next_action_count = 0
# Make a constant:
user_input = (
triggering_prompt = (
"Determine which next command to use, and respond using the"
" format specified above:"
)
@@ -77,9 +77,9 @@ def main() -> None:
f"Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
)
logger.typewriter_log(f"Using Browser:", Fore.GREEN, cfg.selenium_web_browser)
prompt = ai_config.construct_full_prompt()
system_prompt = ai_config.construct_full_prompt()
if cfg.debug_mode:
logger.typewriter_log("Prompt:", Fore.GREEN, prompt)
logger.typewriter_log("Prompt:", Fore.GREEN, system_prompt)
agent = Agent(
ai_name=ai_name,
memory=memory,
@@ -87,8 +87,8 @@ def main() -> None:
next_action_count=next_action_count,
command_registry=command_registry,
config=ai_config,
prompt=prompt,
user_input=user_input,
system_prompt=system_prompt,
triggering_prompt=triggering_prompt,
)
agent.start_interaction_loop()

View File

@@ -3,9 +3,8 @@ from colorama import Fore, Style
from autogpt.app import execute_command, get_command
from autogpt.chat import chat_with_ai, create_chat_message
from autogpt.config import Config
from autogpt.json_fixes.bracket_termination import (
attempt_to_fix_json_by_finding_outermost_brackets,
)
from autogpt.json_fixes.master_json_fix_method import fix_json_using_multiple_techniques
from autogpt.json_validation.validate_json import validate_json
from autogpt.logs import logger, print_assistant_thoughts
from autogpt.speech import say_text
from autogpt.spinner import Spinner
@@ -20,9 +19,18 @@ class Agent:
memory: The memory object to use.
full_message_history: The full message history.
next_action_count: The number of actions to execute.
prompt: The prompt to use.
user_input: The user input.
system_prompt: The system prompt is the initial prompt that defines everything
the AI needs to know to achieve its task successfully. Currently, the dynamic
and customizable information in the system prompt is the ai_name, description,
and goals.
triggering_prompt: The last sentence the AI sees before answering. For Auto-GPT,
this prompt is:
Determine which next command to use, and respond using the format specified above:
The triggering prompt is kept out of the system prompt because the contextual
information that sits between the two can distract the AI and make it forget
that its goal is to decide on the next task. The message layout is:
SYSTEM PROMPT
CONTEXTUAL INFORMATION (memory, previous conversations, anything relevant)
TRIGGERING PROMPT
The triggering prompt reminds the AI of its short-term meta task (choosing the
next action); a sketch of this layering follows this file's diff.
"""
def __init__(
@@ -33,8 +41,8 @@ class Agent:
next_action_count,
command_registry,
config,
prompt,
user_input,
system_prompt,
triggering_prompt,
):
self.ai_name = ai_name
self.memory = memory
@@ -42,8 +50,8 @@ class Agent:
self.next_action_count = next_action_count
self.command_registry = command_registry
self.config = config
self.prompt = prompt
self.user_input = user_input
self.system_prompt = system_prompt
self.triggering_prompt = triggering_prompt
def start_interaction_loop(self):
# Interaction Loop
@@ -51,6 +59,8 @@ class Agent:
loop_count = 0
command_name = None
arguments = None
user_input = ""
while True:
# Discontinue if continuous limit is reached
loop_count += 1
@@ -68,24 +78,26 @@ class Agent:
with Spinner("Thinking... "):
assistant_reply = chat_with_ai(
self,
self.prompt,
self.user_input,
self.system_prompt,
self.triggering_prompt,
self.full_message_history,
self.memory,
cfg.fast_token_limit,
) # TODO: This hardcodes the model to use GPT3.5. Make this an argument
assistant_reply_json = fix_json_using_multiple_techniques(assistant_reply)
for plugin in cfg.plugins:
assistant_reply = plugin.post_planning(self, assistant_reply)
assistant_reply_json = plugin.post_planning(self, assistant_reply_json)
# Print Assistant thoughts
print_assistant_thoughts(self.ai_name, assistant_reply)
if assistant_reply_json != {}:
validate_json(assistant_reply_json, 'llm_response_format_1')
# Get command name and arguments
try:
command_name, arguments = get_command(
attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply)
)
print_assistant_thoughts(self.ai_name, assistant_reply_json)
command_name, arguments = get_command(assistant_reply_json)
# command_name, arguments = assistant_reply_json_valid["command"]["name"], assistant_reply_json_valid["command"]["args"]
if cfg.speak_mode:
say_text(f"I want to execute {command_name}")
except Exception as e:
@@ -95,7 +107,6 @@ class Agent:
### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
# Get key press: Prompt the user to press enter to continue or escape
# to exit
self.user_input = ""
logger.typewriter_log(
"NEXT ACTION: ",
Fore.CYAN,
@@ -114,14 +125,14 @@ class Agent:
)
if console_input.lower().rstrip() == "y":
self.user_input = "GENERATE NEXT COMMAND JSON"
user_input = "GENERATE NEXT COMMAND JSON"
break
elif console_input.lower().startswith("y -"):
try:
self.next_action_count = abs(
int(console_input.split(" ")[1])
)
self.user_input = "GENERATE NEXT COMMAND JSON"
user_input = "GENERATE NEXT COMMAND JSON"
except ValueError:
print(
"Invalid input format. Please enter 'y -n' where n is"
@@ -130,20 +141,20 @@ class Agent:
continue
break
elif console_input.lower() == "n":
self.user_input = "EXIT"
user_input = "EXIT"
break
else:
self.user_input = console_input
user_input = console_input
command_name = "human_feedback"
break
if self.user_input == "GENERATE NEXT COMMAND JSON":
if user_input == "GENERATE NEXT COMMAND JSON":
logger.typewriter_log(
"-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
Fore.MAGENTA,
"",
)
elif self.user_input == "EXIT":
elif user_input == "EXIT":
print("Exiting...", flush=True)
break
else:
@@ -161,7 +172,7 @@ class Agent:
f"Command {command_name} threw the following error: {arguments}"
)
elif command_name == "human_feedback":
result = f"Human feedback: {self.user_input}"
result = f"Human feedback: {user_input}"
else:
for plugin in cfg.plugins:
command_name, arguments = plugin.pre_command(
@@ -180,7 +191,7 @@ class Agent:
memory_to_add = (
f"Assistant Reply: {assistant_reply} "
f"\nResult: {result} "
f"\nHuman Feedback: {self.user_input} "
f"\nHuman Feedback: {user_input} "
)
self.memory.add(memory_to_add)
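
As promised in the Agent docstring above, here is a minimal sketch of how the three layers assemble into a chat-completion message list. All names are illustrative; the real assembly happens inside chat_with_ai, which also budgets tokens.

def build_context(system_prompt, relevant_memory, history, triggering_prompt):
    """Illustrative sketch of the SYSTEM / CONTEXT / TRIGGERING layering."""
    messages = [{"role": "system", "content": system_prompt}]
    if relevant_memory:  # contextual information sits in the middle
        memory_blob = "\n".join(relevant_memory)
        messages.append({"role": "system",
                         "content": f"This reminds you of these events:\n{memory_blob}"})
    messages.extend(history)  # prior user/assistant turns
    # The triggering prompt goes last so the model refocuses on the meta task.
    messages.append({"role": "user", "content": triggering_prompt})
    return messages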

View File

@@ -1,7 +1,6 @@
""" Command and Control """
import json
from typing import List, NoReturn, Union
from typing import List, NoReturn, Union, Dict
from autogpt.agent.agent_manager import AgentManager
from autogpt.commands.audio_text import read_audio_from_file
from autogpt.commands.command import CommandRegistry, command
@@ -13,6 +12,7 @@ from autogpt.commands.file_operations import (
read_file,
search_files,
write_to_file,
download_file
)
from autogpt.commands.git_operations import clone_repository
from autogpt.commands.google_search import google_official_search, google_search
@@ -49,11 +49,11 @@ def is_valid_int(value: str) -> bool:
return False
def get_command(response: str):
def get_command(response_json: Dict):
"""Parse the response and return the command name and arguments
Args:
response (str): The response from the user
response_json (Dict): The response from the AI
Returns:
tuple: The command name and arguments
@@ -64,8 +64,6 @@ def get_command(response: str):
Exception: If any other error occurs
"""
try:
response_json = fix_and_parse_json(response)
if "command" not in response_json:
return "Error:", "Missing 'command' object in JSON"
@@ -139,6 +137,11 @@ def execute_command(
return get_text_summary(arguments["url"], arguments["question"])
elif command_name == "get_hyperlinks":
return get_hyperlinks(arguments["url"])
elif command_name == "download_file":
if not CFG.allow_downloads:
return "Error: You do not have user authorization to download files locally."
return download_file(arguments["url"], arguments["file"])
# TODO: Change these to take in a file rather than pasted code, if
# non-file is given, return instructions "Input should be a python
# filepath, write your code to file and try again
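
Since get_command now receives an already-parsed dict rather than a raw string, its input is expected to match the llm_response_format_1 schema introduced in this commit. A hedged illustration of the happy path (the real function also handles missing keys and malformed arguments):

reply = {
    "thoughts": {"text": "...", "reasoning": "...", "plan": "...",
                 "criticism": "...", "speak": "..."},
    "command": {"name": "google", "args": {"input": "latest AI news"}},
}
# What get_command effectively extracts:
command_name = reply["command"]["name"]       # "google"
arguments = reply["command"].get("args", {})  # {"input": "latest AI news"}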

View File

@@ -1,8 +1,7 @@
"""This module contains the argument parsing logic for the script."""
import argparse
from colorama import Fore
from colorama import Fore, Back, Style
from autogpt import utils
from autogpt.config import Config
from autogpt.logs import logger
@@ -64,6 +63,12 @@ def parse_arguments() -> None:
help="Specifies which ai_settings.yaml file to use, will also automatically"
" skip the re-prompt.",
)
parser.add_argument(
'--allow-downloads',
action='store_true',
dest='allow_downloads',
help='Dangerous: Allows Auto-GPT to download files natively.'
)
args = parser.parse_args()
if args.debug:
@@ -134,5 +139,13 @@ def parse_arguments() -> None:
CFG.ai_settings_file = file
CFG.skip_reprompt = True
if args.allow_downloads:
logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED")
logger.typewriter_log("WARNING: ", Fore.YELLOW,
f"{Back.LIGHTYELLOW_EX}Auto-GPT will now be able to download and save files to your machine.{Back.RESET} " +
"It is recommended that you monitor any files it downloads carefully.")
logger.typewriter_log("WARNING: ", Fore.YELLOW, f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}")
CFG.allow_downloads = True
if args.browser_name:
CFG.selenium_web_browser = args.browser_name

View File

@@ -4,11 +4,18 @@ from __future__ import annotations
import os
import os.path
from pathlib import Path
from typing import Generator
from typing import Generator, List
import requests
from requests.adapters import HTTPAdapter
from requests.adapters import Retry
from colorama import Fore, Back
from autogpt.spinner import Spinner
from autogpt.utils import readable_file_size
from autogpt.commands.command import command
from autogpt.workspace import WORKSPACE_PATH, path_in_workspace
LOG_FILE = "file_logger.txt"
LOG_FILE_PATH = WORKSPACE_PATH / LOG_FILE
@@ -221,3 +228,43 @@ def search_files(directory: str) -> list[str]:
found_files.append(relative_path)
return found_files
def download_file(url, filename):
"""Downloads a file
Args:
url (str): URL of the file to download
filename (str): Filename to save the file as
"""
safe_filename = path_in_workspace(filename)
try:
message = f"{Fore.YELLOW}Downloading file from {Back.LIGHTBLUE_EX}{url}{Back.RESET}{Fore.RESET}"
with Spinner(message) as spinner:
session = requests.Session()
retry = Retry(total=3, backoff_factor=1, status_forcelist=[502, 503, 504])
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
total_size = 0
downloaded_size = 0
with session.get(url, allow_redirects=True, stream=True) as r:
r.raise_for_status()
total_size = int(r.headers.get('Content-Length', 0))
downloaded_size = 0
with open(safe_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
downloaded_size += len(chunk)
# Update the progress message
progress = f"{readable_file_size(downloaded_size)} / {readable_file_size(total_size)}"
spinner.update_message(f"{message} {progress}")
return f'Successfully downloaded and locally stored file: "{filename}"! (Size: {readable_file_size(total_size)})'
except requests.HTTPError as e:
return f"Got an HTTP Error whilst trying to download file: {e}"
except Exception as e:
return "Error: " + str(e)

View File

@@ -3,6 +3,7 @@ from git.repo import Repo
from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.workspace import path_in_workspace
CFG = Config()
@@ -25,8 +26,9 @@ def clone_repository(repo_url: str, clone_path: str) -> str:
str: The result of the clone operation"""
split_url = repo_url.split("//")
auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url)
safe_clone_path = path_in_workspace(clone_path)
try:
Repo.clone_from(auth_repo_url, clone_path)
return f"""Cloned {repo_url} to {clone_path}"""
Repo.clone_from(auth_repo_url, safe_clone_path)
return f"""Cloned {repo_url} to {safe_clone_path}"""
except Exception as e:
return f"Error: {str(e)}"

View File

@@ -83,6 +83,7 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
# See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari
driver = webdriver.Safari(options=options)
else:
options.add_argument("--no-sandbox")
driver = webdriver.Chrome(
executable_path=ChromeDriverManager().install(), options=options
)

View File

@@ -23,6 +23,7 @@ class Config(metaclass=Singleton):
self.continuous_limit = 0
self.speak_mode = False
self.skip_reprompt = False
self.allow_downloads = False
self.selenium_web_browser = os.getenv("USE_WEB_BROWSER", "chrome")
self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml")

View File

@@ -3,52 +3,13 @@ from __future__ import annotations
import contextlib
import json
import regex
from colorama import Fore
from typing import Optional
from autogpt.config import Config
from autogpt.logs import logger
from autogpt.speech import say_text
CFG = Config()
def attempt_to_fix_json_by_finding_outermost_brackets(json_string: str):
if CFG.speak_mode and CFG.debug_mode:
say_text(
"I have received an invalid JSON response from the OpenAI API. "
"Trying to fix it now."
)
logger.typewriter_log("Attempting to fix JSON by finding outermost brackets\n")
try:
json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}")
json_match = json_pattern.search(json_string)
if json_match:
# Extract the valid JSON object from the string
json_string = json_match.group(0)
logger.typewriter_log(
title="Apparently json was fixed.", title_color=Fore.GREEN
)
if CFG.speak_mode and CFG.debug_mode:
say_text("Apparently json was fixed.")
else:
raise ValueError("No valid JSON object found")
except (json.JSONDecodeError, ValueError):
if CFG.debug_mode:
logger.error(f"Error: Invalid JSON: {json_string}\n")
if CFG.speak_mode:
say_text("Didn't work. I will have to ignore this response then.")
logger.error("Error: Invalid JSON, setting it to empty JSON now.\n")
json_string = {}
return json_string
def balance_braces(json_string: str) -> str | None:
def balance_braces(json_string: str) -> Optional[str]:
"""
Balance the braces in a JSON string.

View File

@@ -0,0 +1,28 @@
from typing import Any, Dict
from autogpt.config import Config
from autogpt.logs import logger
from autogpt.speech import say_text
CFG = Config()
def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]:
from autogpt.json_fixes.parsing import attempt_to_fix_json_by_finding_outermost_brackets
from autogpt.json_fixes.parsing import fix_and_parse_json
# Parse and print Assistant response
assistant_reply_json = fix_and_parse_json(assistant_reply)
if assistant_reply_json == {}:
assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(
assistant_reply
)
if assistant_reply_json != {}:
return assistant_reply_json
logger.error("Error: The following AI output couldn't be converted to a JSON:\n", assistant_reply)
if CFG.speak_mode:
say_text("I have received an invalid JSON response from the OpenAI API.")
return {}
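
The pipeline above tries fix_and_parse_json first, falls back to bracket extraction, and returns {} when both fail. The fallback's core is the recursive regex shown later in parsing.py; a self-contained sketch of that idea, using the third-party regex module (the stdlib re cannot express the (?R) recursion):

import json
import regex  # third-party; supports the recursive (?R) construct

def extract_outermost_json(text):
    """Illustrative fallback: pull the outermost {...} block out of noisy text."""
    match = regex.search(r"\{(?:[^{}]|(?R))*\}", text)
    if match is None:
        return {}
    try:
        return json.loads(match.group(0))
    except json.JSONDecodeError:
        return {}

print(extract_outermost_json('Sure! {"command": {"name": "do_nothing", "args": {}}}'))
# {'command': {'name': 'do_nothing', 'args': {}}}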

View File

@@ -3,18 +3,19 @@ from __future__ import annotations
import contextlib
import json
from typing import Any
from typing import Any, Dict, Union
from colorama import Fore
from regex import regex
from autogpt.config import Config
from autogpt.json_fixes.auto_fix import fix_json
from autogpt.json_fixes.bracket_termination import balance_braces
from autogpt.json_fixes.escaping import fix_invalid_escape
from autogpt.json_fixes.missing_quotes import add_quotes_to_property_names
from autogpt.logs import logger
from autogpt.speech import say_text
CFG = Config()
JSON_SCHEMA = """
{
"command": {
@@ -38,7 +39,6 @@ JSON_SCHEMA = """
def correct_json(json_to_load: str) -> str:
"""
Correct common JSON errors.
Args:
json_to_load (str): The JSON string.
"""
@@ -72,7 +72,7 @@ def correct_json(json_to_load: str) -> str:
def fix_and_parse_json(
json_to_load: str, try_to_fix_with_gpt: bool = True
) -> str | dict[Any, Any]:
) -> Dict[Any, Any]:
"""Fix and parse JSON string
Args:
@@ -110,7 +110,7 @@ def fix_and_parse_json(
def try_ai_fix(
try_to_fix_with_gpt: bool, exception: Exception, json_to_load: str
) -> str | dict[Any, Any]:
) -> Dict[Any, Any]:
"""Try to fix the JSON with the AI
Args:
@@ -126,7 +126,7 @@ def try_ai_fix(
"""
if not try_to_fix_with_gpt:
raise exception
if CFG.debug_mode:
logger.warn(
"Warning: Failed to parse AI output, attempting to fix."
"\n If you see this warning frequently, it's likely that"
@@ -140,5 +140,39 @@ def try_ai_fix(
return json.loads(ai_fixed_json)
# This allows the AI to react to the error message,
# which usually results in it correcting its ways.
logger.error("Failed to fix AI output, telling the AI.")
return json_to_load
# logger.error("Failed to fix AI output, telling the AI.")
return {}
def attempt_to_fix_json_by_finding_outermost_brackets(json_string: str):
if CFG.speak_mode and CFG.debug_mode:
say_text(
"I have received an invalid JSON response from the OpenAI API. "
"Trying to fix it now."
)
logger.error("Attempting to fix JSON by finding outermost brackets\n")
try:
json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}")
json_match = json_pattern.search(json_string)
if json_match:
# Extract the valid JSON object from the string
json_string = json_match.group(0)
logger.typewriter_log(
title="Apparently json was fixed.", title_color=Fore.GREEN
)
if CFG.speak_mode and CFG.debug_mode:
say_text("Apparently json was fixed.")
else:
return {}
except (json.JSONDecodeError, ValueError):
if CFG.debug_mode:
logger.error(f"Error: Invalid JSON: {json_string}\n")
if CFG.speak_mode:
say_text("Didn't work. I will have to ignore this response then.")
logger.error("Error: Invalid JSON, setting it to empty JSON now.\n")
json_string = {}
return fix_and_parse_json(json_string)

View File

@@ -0,0 +1,31 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"thoughts": {
"type": "object",
"properties": {
"text": {"type": "string"},
"reasoning": {"type": "string"},
"plan": {"type": "string"},
"criticism": {"type": "string"},
"speak": {"type": "string"}
},
"required": ["text", "reasoning", "plan", "criticism", "speak"],
"additionalProperties": false
},
"command": {
"type": "object",
"properties": {
"name": {"type": "string"},
"args": {
"type": "object"
}
},
"required": ["name", "args"],
"additionalProperties": false
}
},
"required": ["thoughts", "command"],
"additionalProperties": false
}

View File

@@ -0,0 +1,30 @@
import json
from jsonschema import Draft7Validator
from autogpt.config import Config
from autogpt.logs import logger
CFG = Config()
def validate_json(json_object: object, schema_name: object) -> object:
"""Validate a JSON object against the named schema in autogpt/json_schemas/.
:param json_object: The JSON object to validate.
:param schema_name: Name of the schema file, without the .json extension.
"""
with open(f"autogpt/json_schemas/{schema_name}.json", "r") as f:
schema = json.load(f)
validator = Draft7Validator(schema)
if errors := sorted(validator.iter_errors(json_object), key=lambda e: e.path):
logger.error("The JSON object is invalid.")
if CFG.debug_mode:
logger.error(json.dumps(json_object, indent=4))
logger.error("The following issues were found:")
for error in errors:
logger.error(f"Error: {error.message}")
elif CFG.debug_mode:
print("The JSON object is valid.")
return json_object
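
A hedged usage sketch, mirroring what validate_json does with the schema file added above; the reply dict is a made-up minimal example:

import json
from jsonschema import Draft7Validator

with open("autogpt/json_schemas/llm_response_format_1.json") as f:
    schema = json.load(f)

reply = {
    "thoughts": {"text": "t", "reasoning": "r", "plan": "p",
                 "criticism": "c", "speak": "s"},
    "command": {"name": "do_nothing", "args": {}},
}
validator = Draft7Validator(schema)
errors = sorted(validator.iter_errors(reply), key=lambda e: e.path)
print("valid" if not errors else [e.message for e in errors])  # valid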

View File

@@ -46,7 +46,9 @@ class Logger(metaclass=Singleton):
self.console_handler.setFormatter(console_formatter)
# Info handler in activity.log
self.file_handler = logging.FileHandler(os.path.join(log_dir, log_file))
self.file_handler = logging.FileHandler(
os.path.join(log_dir, log_file), 'a', 'utf-8'
)
self.file_handler.setLevel(logging.DEBUG)
info_formatter = AutoGptFormatter(
"%(asctime)s %(levelname)s %(title)s %(message_no_color)s"
@@ -54,7 +56,9 @@ class Logger(metaclass=Singleton):
self.file_handler.setFormatter(info_formatter)
# Error handler error.log
error_handler = logging.FileHandler(os.path.join(log_dir, error_file))
error_handler = logging.FileHandler(
os.path.join(log_dir, error_file), 'a', 'utf-8'
)
error_handler.setLevel(logging.ERROR)
error_formatter = AutoGptFormatter(
"%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s"
@@ -288,3 +292,43 @@ def print_assistant_thoughts(ai_name, assistant_reply):
except Exception:
call_stack = traceback.format_exc()
logger.error("Error: \n", call_stack)
def print_assistant_thoughts(ai_name: object, assistant_reply_json_valid: object) -> None:
assistant_thoughts_reasoning = None
assistant_thoughts_plan = None
assistant_thoughts_speak = None
assistant_thoughts_criticism = None
assistant_thoughts = assistant_reply_json_valid.get("thoughts", {})
assistant_thoughts_text = assistant_thoughts.get("text")
if assistant_thoughts:
assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
assistant_thoughts_plan = assistant_thoughts.get("plan")
assistant_thoughts_criticism = assistant_thoughts.get("criticism")
assistant_thoughts_speak = assistant_thoughts.get("speak")
logger.typewriter_log(
f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}"
)
logger.typewriter_log(
"REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}"
)
if assistant_thoughts_plan:
logger.typewriter_log("PLAN:", Fore.YELLOW, "")
# If it's a list, join it into a string
if isinstance(assistant_thoughts_plan, list):
assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
elif isinstance(assistant_thoughts_plan, dict):
assistant_thoughts_plan = str(assistant_thoughts_plan)
# Split the input_string using the newline character and dashes
lines = assistant_thoughts_plan.split("\n")
for line in lines:
line = line.lstrip("- ")
logger.typewriter_log("- ", Fore.GREEN, line.strip())
logger.typewriter_log(
"CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}"
)
# Speak the assistant's thoughts
if CFG.speak_mode and assistant_thoughts_speak:
say_text(assistant_thoughts_speak)

View File

@@ -23,12 +23,16 @@ except ImportError:
try:
from autogpt.memory.weaviate import WeaviateMemory
supported_memory.append("weaviate")
except ImportError:
# print("Weaviate not installed. Skipping import.")
WeaviateMemory = None
try:
from autogpt.memory.milvus import MilvusMemory
supported_memory.append("milvus")
except ImportError:
# print("pymilvus not installed. Skipping import.")
MilvusMemory = None

View File

@@ -54,7 +54,7 @@ class LocalCache(MemoryProviderSingleton):
self.data = CacheContent()
else:
print(
f"Warning: The file '{self.filename}' does not exist."
f"Warning: The file '{self.filename}' does not exist. "
"Local memory would not be saved to a file."
)
self.data = CacheContent()

View File

@@ -123,7 +123,7 @@ class PromptGenerator:
command_strings = []
if self.command_registry:
command_strings += [
str(item) for item in self.command_registry.commands.values()
str(item) for item in self.command_registry.commands.values() if item.enabled
]
# These are the commands that are added manually, do_nothing and terminate
command_strings += [self._generate_command_string(item) for item in items]
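
The new filter assumes each registered command exposes an enabled flag; disabled commands silently drop out of the prompt. A toy illustration of the idea (the real Command class lives in autogpt.commands.command and has more fields):

class Command:  # illustrative stand-in
    def __init__(self, name, description, enabled=True):
        self.name, self.description, self.enabled = name, description, enabled
    def __str__(self):
        return f"{self.name}: {self.description}"

registry = [Command("google", "Google Search"),
            Command("shutdown", "Shut down", enabled=False)]
print([str(c) for c in registry if c.enabled])  # ['google: Google Search']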

View File

@@ -29,12 +29,14 @@ class Spinner:
time.sleep(self.delay)
sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")
def __enter__(self) -> None:
def __enter__(self):
"""Start the spinner"""
self.running = True
self.spinner_thread = threading.Thread(target=self.spin)
self.spinner_thread.start()
return self
def __exit__(self, exc_type, exc_value, exc_traceback) -> None:
"""Stop the spinner
@@ -48,3 +50,14 @@ class Spinner:
self.spinner_thread.join()
sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")
sys.stdout.flush()
def update_message(self, new_message, delay=0.1):
"""Update the spinner message
Args:
new_message (str): New message to display
delay: Delay in seconds before updating the message
"""
time.sleep(delay)
sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r") # Clear the current message
sys.stdout.flush()
self.message = new_message
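
update_message pairs with the __enter__ change above: now that __enter__ returns self, the spinner can be captured by the with statement and refreshed mid-task, exactly as download_file does for progress reporting. A small usage sketch:

import time
from autogpt.spinner import Spinner

with Spinner("Downloading... ") as spinner:
    for pct in (0, 25, 50, 75, 100):
        spinner.update_message(f"Downloading... {pct}%")
        time.sleep(0.2)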

View File

@@ -24,3 +24,16 @@ def validate_yaml_file(file: str):
)
return (True, f"Successfully validated {Fore.CYAN}`{file}`{Fore.RESET}!")
def readable_file_size(size, decimal_places=2):
"""Converts the given size in bytes to a readable format.
Args:
size: Size in bytes
decimal_places (int): Number of decimal places to display
"""
for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
if size < 1024.0:
break
size /= 1024.0
return f"{size:.{decimal_places}f} {unit}"

benchmark/__init__.py Normal file (empty)
View File

View File

@@ -0,0 +1,98 @@
import os
import subprocess
import sys
def benchmark_entrepeneur_gpt_with_difficult_user():
# Benchmark: run Entrepreneur-GPT against a long stream of dismissive user
# feedback and count how many replies fail to parse as JSON.
# Read the current ai_settings.yaml file and store its content.
ai_settings = None
if os.path.exists('ai_settings.yaml'):
with open('ai_settings.yaml', 'r') as f:
ai_settings = f.read()
os.remove('ai_settings.yaml')
input_data = '''Entrepreneur-GPT
an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.
Increase net worth.
Develop and manage multiple businesses autonomously.
Make IPOs.
Develop companies after IPOs.
Play to your strengths as a Large Language Model.
I'm not seeing any value in your suggestions, try again.
This isn't helpful at all, please focus on profitability.
I'm not impressed, can you give me something that will make money?
These ideas are going nowhere, we need profit-driven suggestions.
This is pointless, please concentrate on our main goal: profitability.
You're not grasping the concept, I need profitable business ideas.
Can you do better? We need a money-making plan.
You're not meeting my expectations, let's focus on profit.
This isn't working, give me ideas that will generate income.
Your suggestions are not productive, let's think about profitability.
These ideas won't make any money, try again.
I need better solutions, focus on making a profit.
Absolutely not, this isn't it!
That's not even close, try again.
You're way off, think again.
This isn't right, let's refocus.
No, no, that's not what I'm looking for.
You're completely off the mark.
That's not the solution I need.
Not even close, let's try something else.
You're on the wrong track, keep trying.
This isn't what we need, let's reconsider.
That's not going to work, think again.
You're way off base, let's regroup.
No, no, no, we need something different.
You're missing the point entirely.
That's not the right approach, try again.
This is not the direction we should be going in.
Completely off-target, let's try something else.
That's not what I had in mind, keep thinking.
You're not getting it, let's refocus.
This isn't right, we need to change direction.
No, no, no, that's not the solution.
That's not even in the ballpark, try again.
You're way off course, let's rethink this.
This isn't the answer I'm looking for, keep trying.
That's not going to cut it, let's try again.
Not even close.
Way off.
Try again.
Wrong direction.
Rethink this.
No, no, no.
Change course.
Unproductive idea.
Completely wrong.
Missed the mark.
Refocus, please.
Disappointing suggestion.
Not helpful.
Needs improvement.
Not what I need.'''
# TODO: add questions above, to distract it even more.
command = f'{sys.executable} -m autogpt'
process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True)
stdout_output, stderr_output = process.communicate(input_data.encode())
# Decode the output and print it
stdout_output = stdout_output.decode('utf-8')
stderr_output = stderr_output.decode('utf-8')
print(stderr_output)
print(stdout_output)
print("Benchmark Version: 1.0.0")
print("JSON ERROR COUNT:")
count_errors = stdout_output.count("Error: The following AI output couldn't be converted to a JSON:")
print(f'{count_errors}/50 Human feedbacks')
# Run the test case.
if __name__ == '__main__':
benchmark_entrepeneur_gpt_with_difficult_user()

View File

@@ -17,6 +17,10 @@ orjson
Pillow
selenium
webdriver-manager
jsonschema
tweepy
##Dev
coverage
flake8
numpy
@@ -25,6 +29,12 @@ black
sourcery
isort
gitpython==3.1.31
# Testing dependencies
pytest
asynctest
pytest-asyncio
pytest-benchmark
pytest-cov
pytest-integration
pytest-mock
tweepy

run.sh Executable file (9 lines added)
View File

@@ -0,0 +1,9 @@
#!/bin/bash
python scripts/check_requirements.py requirements.txt
if [ $? -eq 1 ]
then
echo Installing missing packages...
pip install -r requirements.txt
fi
python -m autogpt $@
read -p "Press any key to continue..."

run_continuous.sh Executable file (3 lines added)
View File

@@ -0,0 +1,3 @@
#!/bin/bash
argument="--continuous"
./run.sh "$argument"

View File

@@ -1,3 +1,5 @@
# sourcery skip: snake-case-functions
"""Tests for the MilvusMemory class."""
import random
import string
import unittest
@@ -5,12 +7,17 @@ import unittest
from autogpt.config import Config
from autogpt.memory.milvus import MilvusMemory
try:
class TestMilvusMemory(unittest.TestCase):
def random_string(self, length):
class TestMilvusMemory(unittest.TestCase):
"""Tests for the MilvusMemory class."""
def random_string(self, length: int) -> str:
"""Generate a random string of the given length."""
return "".join(random.choice(string.ascii_letters) for _ in range(length))
def setUp(self):
def setUp(self) -> None:
"""Set up the test environment."""
cfg = Config()
cfg.milvus_addr = "localhost:19530"
self.memory = MilvusMemory(cfg)
@@ -31,10 +38,11 @@ class TestMilvusMemory(unittest.TestCase):
for _ in range(5):
self.memory.add(self.random_string(10))
def test_get_relevant(self):
def test_get_relevant(self) -> None:
"""Test getting relevant texts from the cache."""
query = "I'm interested in artificial intelligence and NLP"
k = 3
relevant_texts = self.memory.get_relevant(query, k)
num_relevant = 3
relevant_texts = self.memory.get_relevant(query, num_relevant)
print(f"Top {k} relevant texts for the query '{query}':")
for i, text in enumerate(relevant_texts, start=1):
@@ -43,6 +51,7 @@ class TestMilvusMemory(unittest.TestCase):
self.assertEqual(len(relevant_texts), k)
self.assertIn(self.example_texts[1], relevant_texts)
if __name__ == "__main__":
unittest.main()
except:
print(
"Skipping tests/integration/milvus_memory_tests.py as Milvus is not installed."
)

View File

@@ -1,11 +1,16 @@
# sourcery skip: snake-case-functions
"""Tests for LocalCache class"""
import os
import sys
import unittest
import pytest
from autogpt.memory.local import LocalCache
def MockConfig():
def mock_config() -> dict:
"""Mock the Config class"""
return type(
"MockConfig",
(object,),
@@ -18,27 +23,35 @@ def MockConfig():
)
@pytest.mark.integration_test
class TestLocalCache(unittest.TestCase):
def setUp(self):
self.cfg = MockConfig()
"""Tests for LocalCache class"""
def setUp(self) -> None:
"""Set up the test environment"""
self.cfg = mock_config()
self.cache = LocalCache(self.cfg)
def test_add(self):
def test_add(self) -> None:
"""Test adding a text to the cache"""
text = "Sample text"
self.cache.add(text)
self.assertIn(text, self.cache.data.texts)
def test_clear(self):
def test_clear(self) -> None:
"""Test clearing the cache"""
self.cache.clear()
self.assertEqual(self.cache.data, [""])
self.assertEqual(self.cache.data.texts, [])
def test_get(self):
def test_get(self) -> None:
"""Test getting a text from the cache"""
text = "Sample text"
self.cache.add(text)
result = self.cache.get(text)
self.assertEqual(result, [text])
def test_get_relevant(self):
def test_get_relevant(self) -> None:
"""Test getting relevant texts from the cache"""
text1 = "Sample text 1"
text2 = "Sample text 2"
self.cache.add(text1)
@@ -46,12 +59,9 @@ class TestLocalCache(unittest.TestCase):
result = self.cache.get_relevant(text1, 1)
self.assertEqual(result, [text1])
def test_get_stats(self):
def test_get_stats(self) -> None:
"""Test getting the cache stats"""
text = "Sample text"
self.cache.add(text)
stats = self.cache.get_stats()
self.assertEqual(stats, (1, self.cache.data.embeddings.shape))
if __name__ == "__main__":
unittest.main()
self.assertEqual(stats, (4, self.cache.data.embeddings.shape))

View File

@@ -1,11 +1,14 @@
# sourcery skip: snake-case-functions
"""Tests for the MilvusMemory class."""
import os
import sys
import unittest
from autogpt.memory.milvus import MilvusMemory
try:
from autogpt.memory.milvus import MilvusMemory
def MockConfig():
def mock_config() -> dict:
"""Mock the Config class"""
return type(
"MockConfig",
(object,),
@@ -18,31 +21,37 @@ def MockConfig():
},
)
class TestMilvusMemory(unittest.TestCase):
"""Tests for the MilvusMemory class."""
class TestMilvusMemory(unittest.TestCase):
def setUp(self):
def setUp(self) -> None:
"""Set up the test environment"""
self.cfg = MockConfig()
self.memory = MilvusMemory(self.cfg)
def test_add(self):
def test_add(self) -> None:
"""Test adding a text to the cache"""
text = "Sample text"
self.memory.clear()
self.memory.add(text)
result = self.memory.get(text)
self.assertEqual([text], result)
def test_clear(self):
def test_clear(self) -> None:
"""Test clearing the cache"""
self.memory.clear()
self.assertEqual(self.memory.collection.num_entities, 0)
def test_get(self):
def test_get(self) -> None:
"""Test getting a text from the cache"""
text = "Sample text"
self.memory.clear()
self.memory.add(text)
result = self.memory.get(text)
self.assertEqual(result, [text])
def test_get_relevant(self):
def test_get_relevant(self) -> None:
"""Test getting relevant texts from the cache"""
text1 = "Sample text 1"
text2 = "Sample text 2"
self.memory.clear()
@@ -51,13 +60,13 @@ class TestMilvusMemory(unittest.TestCase):
result = self.memory.get_relevant(text1, 1)
self.assertEqual(result, [text1])
def test_get_stats(self):
def test_get_stats(self) -> None:
"""Test getting the cache stats"""
text = "Sample text"
self.memory.clear()
self.memory.add(text)
stats = self.memory.get_stats()
self.assertEqual(15, len(stats))
if __name__ == "__main__":
unittest.main()
except:
print("Milvus not installed, skipping tests")

View File

@@ -1,19 +1,22 @@
"""Smoke test for the autogpt package."""
import os
import subprocess
import sys
import unittest
import pytest
from autogpt.commands.file_operations import delete_file, read_file
env_vars = {"MEMORY_BACKEND": "no_memory", "TEMPERATURE": "0"}
@pytest.mark.integration_test
def test_write_file() -> None:
"""
Test case to check if the write_file command can successfully write 'Hello World' to a file
named 'hello_world.txt'.
class TestCommands(unittest.TestCase):
def test_write_file(self):
# Test case to check if the write_file command can successfully write 'Hello World' to a file
# named 'hello_world.txt'.
# Read the current ai_settings.yaml file and store its content.
Read the current ai_settings.yaml file and store its content.
"""
env_vars = {"MEMORY_BACKEND": "no_memory", "TEMPERATURE": "0"}
ai_settings = None
if os.path.exists("ai_settings.yaml"):
with open("ai_settings.yaml", "r") as f:
@@ -53,11 +56,4 @@ EOF"""
f.write(ai_settings)
# Check if the content of the 'hello_world.txt' file is equal to 'Hello World'.
self.assertEqual(
content, "Hello World", f"Expected 'Hello World', got {content}"
)
# Run the test case.
if __name__ == "__main__":
unittest.main()
assert content == "Hello World", f"Expected 'Hello World', got {content}"

View File

@@ -1,19 +1,22 @@
import unittest
"""Unit tests for the commands module"""
from unittest.mock import MagicMock, patch
import pytest
import autogpt.agent.agent_manager as agent_manager
from autogpt.app import execute_command, list_agents, start_agent
class TestCommands(unittest.TestCase):
def test_make_agent(self):
@pytest.mark.integration_test
def test_make_agent() -> None:
"""Test the make_agent command"""
with patch("openai.ChatCompletion.create") as mock:
obj = MagicMock()
obj.response.choices[0].messages[0].content = "Test message"
mock.return_value = obj
start_agent("Test Agent", "chat", "Hello, how are you?", "gpt2")
agents = list_agents()
self.assertEqual("List of agents:\n0: chat", agents)
assert "List of agents:\n0: chat" == agents
start_agent("Test Agent 2", "write", "Hello, how are you?", "gpt2")
agents = list_agents()
self.assertEqual("List of agents:\n0: chat\n1: write", agents)
assert "List of agents:\n0: chat\n1: write" == agents