Finish integrating command registry

Author: BillSchumacher
Date: 2023-04-16 21:51:36 -05:00
parent 167628c696
commit c110f3489d
50 changed files with 238 additions and 234 deletions

View File

@@ -2,17 +2,17 @@
import logging
import os
from pathlib import Path
from colorama import Fore
from autogpt.agent.agent import Agent
from autogpt.args import parse_arguments
from autogpt.commands.command import CommandRegistry
from autogpt.config import Config, check_openai_api_key
from autogpt.logs import logger
from autogpt.memory import get_memory
-from autogpt.prompts.prompt import construct_main_ai_config
from autogpt.plugins import load_plugins
+from autogpt.prompts.prompt import construct_main_ai_config
# Load environment variables from .env file
@@ -47,13 +47,20 @@ def main() -> None:
    cfg.set_plugins(loaded_plugins)
    # Create a CommandRegistry instance and scan default folder
    command_registry = CommandRegistry()
-    command_registry.import_commands("scripts.ai_functions")
-    command_registry.import_commands("scripts.commands")
-    command_registry.import_commands("scripts.execute_code")
-    command_registry.import_commands("scripts.agent_manager")
-    command_registry.import_commands("scripts.file_operations")
+    command_registry.import_commands("autogpt.commands.audio_text")
+    command_registry.import_commands("autogpt.commands.evaluate_code")
+    command_registry.import_commands("autogpt.commands.execute_code")
+    command_registry.import_commands("autogpt.commands.file_operations")
+    command_registry.import_commands("autogpt.commands.git_operations")
+    command_registry.import_commands("autogpt.commands.google_search")
+    command_registry.import_commands("autogpt.commands.image_gen")
+    command_registry.import_commands("autogpt.commands.twitter")
+    command_registry.import_commands("autogpt.commands.web_selenium")
+    command_registry.import_commands("autogpt.commands.write_tests")
+    command_registry.import_commands("autogpt.app")
    ai_name = ""
    ai_config = construct_main_ai_config()
+    ai_config.command_registry = command_registry
    # print(prompt)
    # Initialize variables
    full_message_history = []
@@ -70,6 +77,9 @@ def main() -> None:
        f"Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
    )
    logger.typewriter_log(f"Using Browser:", Fore.GREEN, cfg.selenium_web_browser)
+    prompt = ai_config.construct_full_prompt()
+    if cfg.debug_mode:
+        logger.typewriter_log("Prompt:", Fore.GREEN, prompt)
    agent = Agent(
        ai_name=ai_name,
        memory=memory,
@@ -77,7 +87,7 @@ def main() -> None:
        next_action_count=next_action_count,
        command_registry=command_registry,
        config=ai_config,
-        prompt=ai_config.construct_full_prompt(),
+        prompt=prompt,
        user_input=user_input,
    )
    agent.start_interaction_loop()
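
(Editor's note: the wiring above relies on the @command decorator and CommandRegistry.import_commands used throughout this diff. The sketch below is a simplified, hypothetical illustration of that mechanism; the real classes in autogpt/commands/command.py wrap functions in Command objects carrying a signature string and an enabled flag, so treat this only as a pattern sketch, not the project's actual implementation.)

import importlib
from typing import Any, Callable

AUTO_GPT_COMMAND_IDENTIFIER = "auto_gpt_command"

def command(name: str, description: str, signature: str = "", enabled: bool = True):
    """Tag a function as an auto-gpt command (simplified sketch)."""
    def decorator(func: Callable[..., Any]) -> Callable[..., Any]:
        func.command_name = name
        func.command_description = description
        func.command_signature = signature
        func.command_enabled = enabled
        setattr(func, AUTO_GPT_COMMAND_IDENTIFIER, True)
        return func
    return decorator

class CommandRegistry:
    """Collects decorated commands so the prompt and the executor can look them up."""
    def __init__(self) -> None:
        self.commands: dict[str, Callable[..., Any]] = {}

    def import_commands(self, module_name: str) -> None:
        # Import the module, then register every attribute tagged by @command.
        module = importlib.import_module(module_name)
        for attr_name in dir(module):
            attr = getattr(module, attr_name)
            if getattr(attr, AUTO_GPT_COMMAND_IDENTIFIER, False):
                self.commands[attr.command_name] = attr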

View File

@@ -1,6 +1,6 @@
from colorama import Fore, Style
from autogpt.app import execute_command, get_command
from autogpt.chat import chat_with_ai, create_chat_message
from autogpt.config import Config
from autogpt.json_fixes.bracket_termination import (

View File

@@ -1,8 +1,8 @@
"""Agent manager for managing GPT agents"""
from __future__ import annotations
+from autogpt.config.config import Config, Singleton
from autogpt.llm_utils import create_chat_completion
-from autogpt.config.config import Singleton, Config
class AgentManager(metaclass=Singleton):

View File

@@ -1,16 +1,11 @@
""" Command and Control """
import json
from typing import List, NoReturn, Union
from autogpt.agent.agent_manager import AgentManager
-from autogpt.commands.command import command, CommandRegistry
-from autogpt.commands.evaluate_code import evaluate_code
-from autogpt.commands.google_search import google_official_search, google_search
-from autogpt.commands.improve_code import improve_code
-from autogpt.commands.write_tests import write_tests
-from autogpt.config import Config
-from autogpt.commands.image_gen import generate_image
from autogpt.commands.audio_text import read_audio_from_file
-from autogpt.commands.web_requests import scrape_links, scrape_text
+from autogpt.commands.command import CommandRegistry, command
+from autogpt.commands.evaluate_code import evaluate_code
from autogpt.commands.execute_code import execute_python_file, execute_shell
from autogpt.commands.file_operations import (
    append_to_file,
@@ -19,15 +14,20 @@ from autogpt.commands.file_operations import (
    search_files,
    write_to_file,
)
+from autogpt.commands.git_operations import clone_repository
+from autogpt.commands.google_search import google_official_search, google_search
+from autogpt.commands.image_gen import generate_image
+from autogpt.commands.improve_code import improve_code
+from autogpt.commands.twitter import send_tweet
+from autogpt.commands.web_requests import scrape_links, scrape_text
+from autogpt.commands.web_selenium import browse_website
+from autogpt.commands.write_tests import write_tests
+from autogpt.config import Config
from autogpt.json_fixes.parsing import fix_and_parse_json
from autogpt.memory import get_memory
from autogpt.processing.text import summarize_text
from autogpt.prompts.generator import PromptGenerator
from autogpt.speech import say_text
-from autogpt.commands.web_selenium import browse_website
-from autogpt.commands.git_operations import clone_repository
-from autogpt.commands.twitter import send_tweet
CFG = Config()
AGENT_MANAGER = AgentManager()
@@ -132,76 +132,16 @@ def execute_command(
    # TODO: Remove commands below after they are moved to the command registry.
    command_name = map_command_synonyms(command_name)
-    if command_name == "google":
-        # Check if the Google API key is set and use the official search method
-        # If the API key is not set or has only whitespaces, use the unofficial
-        # search method
-        key = CFG.google_api_key
-        if key and key.strip() and key != "your-google-api-key":
-            google_result = google_official_search(arguments["input"])
-            return google_result
-        else:
-            google_result = google_search(arguments["input"])
-        # google_result can be a list or a string depending on the search results
-        if isinstance(google_result, list):
-            safe_message = [
-                google_result_single.encode("utf-8", "ignore")
-                for google_result_single in google_result
-            ]
-        else:
-            safe_message = google_result.encode("utf-8", "ignore")
-        return str(safe_message)
-    elif command_name == "memory_add":
+    if command_name == "memory_add":
        return memory.add(arguments["string"])
-    elif command_name == "start_agent":
-        return start_agent(
-            arguments["name"], arguments["task"], arguments["prompt"]
-        )
-    elif command_name == "message_agent":
-        return message_agent(arguments["key"], arguments["message"])
-    elif command_name == "list_agents":
-        return list_agents()
-    elif command_name == "delete_agent":
-        return delete_agent(arguments["key"])
    elif command_name == "get_text_summary":
        return get_text_summary(arguments["url"], arguments["question"])
    elif command_name == "get_hyperlinks":
        return get_hyperlinks(arguments["url"])
-    elif command_name == "clone_repository":
-        return clone_repository(
-            arguments["repository_url"], arguments["clone_path"]
-        )
-    elif command_name == "read_file":
-        return read_file(arguments["file"])
-    elif command_name == "write_to_file":
-        return write_to_file(arguments["file"], arguments["text"])
-    elif command_name == "append_to_file":
-        return append_to_file(arguments["file"], arguments["text"])
-    elif command_name == "delete_file":
-        return delete_file(arguments["file"])
-    elif command_name == "search_files":
-        return search_files(arguments["directory"])
-    elif command_name == "browse_website":
-        return browse_website(arguments["url"], arguments["question"])
    # TODO: Change these to take in a file rather than pasted code, if
    # non-file is given, return instructions "Input should be a python
    # filepath, write your code to file and try again"
-    elif command_name == "evaluate_code":
-        return evaluate_code(arguments["code"])
-    elif command_name == "improve_code":
-        return improve_code(arguments["suggestions"], arguments["code"])
-    elif command_name == "write_tests":
-        return write_tests(arguments["code"], arguments.get("focus"))
-    elif command_name == "execute_python_file":  # Add this command
-        return execute_python_file(arguments["file"])
-    elif command_name == "read_audio_from_file":
-        return read_audio_from_file(arguments["file"])
-    elif command_name == "generate_image":
-        return generate_image(arguments["prompt"])
-    elif command_name == "send_tweet":
-        return send_tweet(arguments["text"])
    elif command_name == "do_nothing":
        return "No action performed."
    elif command_name == "task_complete":
@@ -305,7 +245,7 @@ def message_agent(key: str, message: str) -> str:
@command("list_agents", "List GPT Agents", "")
-def list_agents():
+def list_agents() -> str:
    """List all agents
    Returns:
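
(Editor's note: after this change, most commands are resolved through the registry rather than the shrinking elif chain above. A hedged sketch of the dispatch order follows, with a simplified signature; the real execute_command in autogpt/app.py also handles synonyms, argument validation, and error reporting.)

def execute_command(command_registry, command_name: str, arguments: dict) -> str:
    """Illustrative dispatch: try registered commands first, then legacy branches."""
    cmd = command_registry.commands.get(command_name)
    if cmd is not None:
        return cmd(**arguments)
    # Commands not yet migrated still fall through to the remaining elif chain.
    if command_name == "do_nothing":
        return "No action performed."
    return f"Unknown command '{command_name}'."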

View File

@@ -2,6 +2,7 @@
import argparse
from colorama import Fore
from autogpt import utils
from autogpt.config import Config
from autogpt.logs import logger

View File

@@ -1,23 +1,51 @@
-import requests
+"""Commands for converting audio to text."""
import json
+import requests
+from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.workspace import path_in_workspace
-cfg = Config()
+CFG = Config()
-def read_audio_from_file(audio_path):
+@command(
+    "read_audio_from_file",
+    "Convert Audio to text",
+    '"file": "<file>"',
+    CFG.huggingface_audio_to_text_model,
+    "Configure huggingface_audio_to_text_model.",
+)
+def read_audio_from_file(audio_path: str) -> str:
+    """
+    Convert audio to text.
+    Args:
+        audio_path (str): The path to the audio file
+    Returns:
+        str: The text from the audio
+    """
    audio_path = path_in_workspace(audio_path)
    with open(audio_path, "rb") as audio_file:
        audio = audio_file.read()
    return read_audio(audio)
-def read_audio(audio):
-    model = cfg.huggingface_audio_to_text_model
+def read_audio(audio: bytes) -> str:
+    """
+    Convert audio to text.
+    Args:
+        audio (bytes): The audio to convert
+    Returns:
+        str: The text from the audio
+    """
+    model = CFG.huggingface_audio_to_text_model
    api_url = f"https://api-inference.huggingface.co/models/{model}"
-    api_token = cfg.huggingface_api_token
+    api_token = CFG.huggingface_api_token
    headers = {"Authorization": f"Bearer {api_token}"}
    if api_token is None:
@@ -32,4 +60,4 @@ def read_audio(audio):
    )
    text = json.loads(response.content.decode("utf-8"))["text"]
-    return "The audio says: " + text
+    return f"The audio says: {text}"

View File

@@ -1,8 +1,8 @@
-import os
-import sys
import importlib
import inspect
-from typing import Callable, Any, List, Optional
+import os
+import sys
+from typing import Any, Callable, List, Optional
# Unique identifier for auto-gpt commands
AUTO_GPT_COMMAND_IDENTIFIER = "auto_gpt_command"

View File

@@ -4,9 +4,10 @@ import subprocess
import docker
from docker.errors import ImageNotFound
-from autogpt.config import Config
from autogpt.commands.command import command
-from autogpt.workspace import path_in_workspace, WORKSPACE_PATH
+from autogpt.config import Config
+from autogpt.workspace import WORKSPACE_PATH, path_in_workspace
CFG = Config()

View File

@@ -5,8 +5,9 @@ import os
import os.path
from pathlib import Path
from typing import Generator
from autogpt.commands.command import command
-from autogpt.workspace import path_in_workspace, WORKSPACE_PATH
+from autogpt.workspace import WORKSPACE_PATH, path_in_workspace
LOG_FILE = "file_logger.txt"
LOG_FILE_PATH = WORKSPACE_PATH / LOG_FILE

View File

@@ -1,10 +1,19 @@
"""Git operations for autogpt"""
-import git
+from git.repo import Repo
+from autogpt.commands.command import command
from autogpt.config import Config
CFG = Config()
+@command(
+    "clone_repository",
+    "Clone Repository",
+    '"repository_url": "<url>", "clone_path": "<directory>"',
+    CFG.github_username and CFG.github_api_key,
+    "Configure github_username and github_api_key.",
+)
def clone_repository(repo_url: str, clone_path: str) -> str:
    """Clone a github repository locally
@@ -17,7 +26,7 @@ def clone_repository(repo_url: str, clone_path: str) -> str:
    split_url = repo_url.split("//")
    auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url)
    try:
-        git.Repo.clone_from(auth_repo_url, clone_path)
+        Repo.clone_from(auth_repo_url, clone_path)
        return f"""Cloned {repo_url} to {clone_path}"""
    except Exception as e:
        return f"Error: {str(e)}"

View File

@@ -5,11 +5,13 @@ import json
from duckduckgo_search import ddg
+from autogpt.commands.command import command
from autogpt.config import Config
CFG = Config()
+@command("google", "Google Search", '"input": "<search>"', not CFG.google_api_key)
def google_search(query: str, num_results: int = 8) -> str:
    """Return the results of a google search
@@ -31,9 +33,17 @@ def google_search(query: str, num_results: int = 8) -> str:
    for j in results:
        search_results.append(j)
-    return json.dumps(search_results, ensure_ascii=False, indent=4)
+    results = json.dumps(search_results, ensure_ascii=False, indent=4)
+    return safe_google_results(results)
+@command(
+    "google",
+    "Google Search",
+    '"input": "<search>"',
+    bool(CFG.google_api_key),
+    "Configure google_api_key.",
+)
def google_official_search(query: str, num_results: int = 8) -> str | list[str]:
    """Return the results of a google search using the official Google API
@@ -82,6 +92,26 @@ def google_official_search(query: str, num_results: int = 8) -> str | list[str]:
            return "Error: The provided Google API key is invalid or missing."
        else:
            return f"Error: {e}"
+    # google_result can be a list or a string depending on the search results
    # Return the list of search result URLs
-    return search_results_links
+    return safe_google_results(search_results_links)
+def safe_google_results(results: str | list) -> str:
+    """
+    Return the results of a google search in a safe format.
+    Args:
+        results (str | list): The search results.
+    Returns:
+        str: The results of the search.
+    """
+    if isinstance(results, list):
+        safe_message = json.dumps(
+            [result.encode("utf-8", "ignore").decode("utf-8") for result in results]
+        )
+    else:
+        safe_message = results.encode("utf-8", "ignore").decode("utf-8")
+    return safe_message

View File

@@ -1,12 +1,12 @@
""" Image Generation Module for AutoGPT."""
import io
import os.path
import uuid
from base64 import b64decode
import openai
import requests
from PIL import Image
from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.workspace import path_in_workspace

View File

@@ -2,7 +2,7 @@ from __future__ import annotations
import json
-from autogpt.commands import command
+from autogpt.commands.command import command
from autogpt.llm_utils import call_ai_function

View File

@@ -1,11 +1,30 @@
-import tweepy
+"""A module that contains a command to send a tweet."""
import os
+import tweepy
from dotenv import load_dotenv
+from autogpt.commands.command import command
load_dotenv()
-def send_tweet(tweet_text):
+@command(
+    "send_tweet",
+    "Send Tweet",
+    '"text": "<text>"',
+)
+def send_tweet(tweet_text: str) -> str:
+    """
+    Send a tweet with the given text via the Twitter API.
+    Args:
+        tweet_text (str): Text to be tweeted.
+    Returns:
+        A result from sending the tweet.
+    """
    consumer_key = os.environ.get("TW_CONSUMER_KEY")
    consumer_secret = os.environ.get("TW_CONSUMER_SECRET")
    access_token = os.environ.get("TW_ACCESS_TOKEN")
@@ -20,6 +39,6 @@ def send_tweet(tweet_text):
    # Send tweet
    try:
        api.update_status(tweet_text)
-        print("Tweet sent successfully!")
+        return "Tweet sent successfully!"
    except tweepy.TweepyException as e:
-        print("Error sending tweet: {}".format(e.reason))
+        return f"Error sending tweet: {e.reason}"

View File

@@ -8,6 +8,7 @@ except ImportError:
        "Playwright not installed. Please install it with 'pip install playwright' to use."
    )
from bs4 import BeautifulSoup
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks

View File

@@ -4,9 +4,9 @@ from __future__ import annotations
from urllib.parse import urljoin, urlparse
import requests
-from requests.compat import urljoin
-from requests import Response
from bs4 import BeautifulSoup
+from requests import Response
+from requests.compat import urljoin
from autogpt.config import Config
from autogpt.memory import get_memory

View File

@@ -1,22 +1,25 @@
"""Selenium web scraping module."""
from __future__ import annotations
-from selenium import webdriver
-from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
-import autogpt.processing.text as summary
-from bs4 import BeautifulSoup
-from selenium.webdriver.remote.webdriver import WebDriver
-from selenium.webdriver.common.by import By
-from selenium.webdriver.support.wait import WebDriverWait
-from selenium.webdriver.support import expected_conditions as EC
-from webdriver_manager.chrome import ChromeDriverManager
-from webdriver_manager.firefox import GeckoDriverManager
-from selenium.webdriver.chrome.options import Options as ChromeOptions
-from selenium.webdriver.firefox.options import Options as FirefoxOptions
-from selenium.webdriver.safari.options import Options as SafariOptions
import logging
from pathlib import Path
+from bs4 import BeautifulSoup
+from selenium import webdriver
+from selenium.webdriver.chrome.options import Options as ChromeOptions
+from selenium.webdriver.common.by import By
+from selenium.webdriver.firefox.options import Options as FirefoxOptions
+from selenium.webdriver.remote.webdriver import WebDriver
+from selenium.webdriver.safari.options import Options as SafariOptions
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.wait import WebDriverWait
+from webdriver_manager.chrome import ChromeDriverManager
+from webdriver_manager.firefox import GeckoDriverManager
+from autogpt.commands.command import command
+import autogpt.processing.text as summary
from autogpt.config import Config
+from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
FILE_DIR = Path(__file__).parent.parent
CFG = Config()

View File

@@ -2,7 +2,8 @@
from __future__ import annotations
import json
-from autogpt.commands import command
+from autogpt.commands.command import command
from autogpt.llm_utils import call_ai_function

View File

@@ -2,7 +2,7 @@
This module contains the configuration classes for AutoGPT.
"""
from autogpt.config.ai_config import AIConfig
-from autogpt.config.config import check_openai_api_key, Config
+from autogpt.config.config import Config, check_openai_api_key
from autogpt.config.singleton import AbstractSingleton, Singleton
__all__ = [

View File

@@ -6,7 +6,8 @@ from __future__ import annotations
import os
from pathlib import Path
-from typing import Type
+from typing import Optional, Type
import yaml
from autogpt.prompts.generator import PromptGenerator
@@ -41,6 +42,7 @@ class AIConfig:
        self.ai_role = ai_role
        self.ai_goals = ai_goals
        self.prompt_generator = None
+        self.command_registry = None
    # Soon this will go in a folder where it remembers more stuff about the run(s)
    SAVE_FILE = Path(os.getcwd()) / "ai_settings.yaml"
@@ -113,8 +115,8 @@ class AIConfig:
            ""
        )
-        from autogpt.prompts.prompt import build_default_prompt_generator
        from autogpt.config import Config
+        from autogpt.prompts.prompt import build_default_prompt_generator
        cfg = Config()
        if prompt_generator is None:
@@ -122,6 +124,7 @@ class AIConfig:
        prompt_generator.goals = self.ai_goals
        prompt_generator.name = self.ai_name
        prompt_generator.role = self.ai_role
+        prompt_generator.command_registry = self.command_registry
        for plugin in cfg.plugins:
            prompt_generator = plugin.post_prompt(prompt_generator)

View File

@@ -1,14 +1,13 @@
"""Configuration class to store the state of bools for different scripts access."""
import os
-from colorama import Fore
-from autogpt.config.singleton import Singleton
import openai
import yaml
+from colorama import Fore
from dotenv import load_dotenv
+from autogpt.config.singleton import Singleton
load_dotenv(verbose=True)

View File

@@ -1,8 +1,8 @@
import argparse
import logging
-from autogpt.config import Config
from autogpt.commands.file_operations import ingest_file, search_files
+from autogpt.config import Config
from autogpt.memory import get_memory
cfg = Config()

View File

@@ -1,9 +1,9 @@
"""This module contains the function to fix JSON strings using GPT-3."""
import json
+from autogpt.config import Config
from autogpt.llm_utils import call_ai_function
from autogpt.logs import logger
-from autogpt.config import Config
CFG = Config()

View File

@@ -3,11 +3,12 @@ from __future__ import annotations
import contextlib
import json
import regex
from colorama import Fore
-from autogpt.logs import logger
from autogpt.config import Config
+from autogpt.logs import logger
from autogpt.speech import say_text
CFG = Config()

View File

@@ -1,11 +1,11 @@
from __future__ import annotations
-from ast import List
import time
+from ast import List
import openai
-from openai.error import APIError, RateLimitError
from colorama import Fore
+from openai.error import APIError, RateLimitError
from autogpt.config import Config

View File

@@ -5,13 +5,13 @@ import os
import random
import re
import time
-from logging import LogRecord
import traceback
+from logging import LogRecord
from colorama import Fore, Style
-from autogpt.speech import say_text
from autogpt.config import Config, Singleton
+from autogpt.speech import say_text
CFG = Config()

View File

@@ -7,8 +7,8 @@ from typing import Any
import numpy as np
import orjson
-from autogpt.memory.base import MemoryProviderSingleton
from autogpt.llm_utils import create_embedding_with_ada
+from autogpt.memory.base import MemoryProviderSingleton
EMBED_DIM = 1536
SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS

View File

@@ -1,11 +1,5 @@
""" Milvus memory storage provider."""
-from pymilvus import (
-    connections,
-    FieldSchema,
-    CollectionSchema,
-    DataType,
-    Collection,
-)
+from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, connections
from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding

View File

@@ -1,9 +1,9 @@
import pinecone
from colorama import Fore, Style
+from autogpt.llm_utils import create_embedding_with_ada
from autogpt.logs import logger
from autogpt.memory.base import MemoryProviderSingleton
-from autogpt.llm_utils import create_embedding_with_ada
class PineconeMemory(MemoryProviderSingleton):

View File

@@ -10,9 +10,9 @@ from redis.commands.search.field import TextField, VectorField
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
from redis.commands.search.query import Query
+from autogpt.llm_utils import create_embedding_with_ada
from autogpt.logs import logger
from autogpt.memory.base import MemoryProviderSingleton
-from autogpt.llm_utils import create_embedding_with_ada
SCHEMA = [
    TextField("data"),

View File

@@ -1,11 +1,13 @@
-from autogpt.config import Config
-from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
import uuid
import weaviate
from weaviate import Client
from weaviate.embedded import EmbeddedOptions
from weaviate.util import generate_uuid5
+from autogpt.config import Config
+from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
def default_schema(weaviate_index):
    return {

View File

@@ -1,10 +1,10 @@
"""Handles loading of plugins."""
-from ast import Module
import zipfile
+from ast import Module
from pathlib import Path
-from zipimport import zipimporter
from typing import List, Optional, Tuple
+from zipimport import zipimporter
def inspect_zip_for_module(zip_path: str, debug: bool = False) -> Optional[str]:

View File

@@ -1,8 +1,8 @@
"""HTML processing functions"""
from __future__ import annotations
-from requests.compat import urljoin
from bs4 import BeautifulSoup
+from requests.compat import urljoin
def extract_hyperlinks(soup: BeautifulSoup, base_url: str) -> list[tuple[str, str]]:

View File

@@ -1,9 +1,11 @@
"""Text processing functions"""
-from typing import Generator, Optional, Dict
+from typing import Dict, Generator, Optional
from selenium.webdriver.remote.webdriver import WebDriver
-from autogpt.memory import get_memory
from autogpt.config import Config
from autogpt.llm_utils import create_chat_completion
+from autogpt.memory import get_memory
CFG = Config()
MEMORY = get_memory(CFG)

View File

@@ -19,6 +19,7 @@ class PromptGenerator:
        self.resources = []
        self.performance_evaluation = []
        self.goals = []
+        self.command_registry = None
        self.name = "Bob"
        self.role = "AI"
        self.response_format = {
@@ -119,10 +120,14 @@ class PromptGenerator:
            str: The formatted numbered list.
        """
        if item_type == "command":
-            return "\n".join(
-                f"{i+1}. {self._generate_command_string(item)}"
-                for i, item in enumerate(items)
-            )
+            command_strings = []
+            if self.command_registry:
+                command_strings += [
+                    str(item) for item in self.command_registry.commands.values()
+                ]
+            # These are the commands that are added manually, do_nothing and terminate
+            command_strings += [self._generate_command_string(item) for item in items]
+            return "\n".join(f"{i+1}. {item}" for i, item in enumerate(command_strings))
        else:
            return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items))
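
(Editor's note: with command_registry attached to the generator, the numbered command list now interleaves registry entries, rendered via each command's __str__, with the manually supplied do_nothing/task_complete items. The snippet below is a small, self-contained approximation using a stand-in command class; the exact string format of the project's Command class is assumed, not verified.)

class FakeCommand:
    """Stand-in for autogpt.commands.command.Command (format approximated)."""
    def __init__(self, name: str, description: str, signature: str) -> None:
        self.name, self.description, self.signature = name, description, signature

    def __str__(self) -> str:
        return f"{self.name}: {self.description}, args: {self.signature}"

def numbered_command_list(registry_commands, manual_items) -> str:
    """Mimic _generate_numbered_list for item_type == 'command'."""
    command_strings = [str(cmd) for cmd in registry_commands]
    command_strings += manual_items  # e.g. the do_nothing and task_complete entries
    return "\n".join(f"{i+1}. {item}" for i, item in enumerate(command_strings))

print(numbered_command_list(
    [FakeCommand("google", "Google Search", '"input": "<search>"')],
    ['do_nothing: Do Nothing, args: ""'],
))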

View File

@@ -1,4 +1,5 @@
from colorama import Fore
from autogpt.config.ai_config import AIConfig
from autogpt.config.config import Config
from autogpt.logs import logger
@@ -37,63 +38,9 @@ def build_default_prompt_generator() -> PromptGenerator:
    # Define the command list
    commands = [
-        ("Google Search", "google", {"input": "<search>"}),
-        (
-            "Browse Website",
-            "browse_website",
-            {"url": "<url>", "question": "<what_you_want_to_find_on_website>"},
-        ),
-        (
-            "Start GPT Agent",
-            "start_agent",
-            {"name": "<name>", "task": "<short_task_desc>", "prompt": "<prompt>"},
-        ),
-        (
-            "Message GPT Agent",
-            "message_agent",
-            {"key": "<key>", "message": "<message>"},
-        ),
-        ("List GPT Agents", "list_agents", {}),
-        ("Delete GPT Agent", "delete_agent", {"key": "<key>"}),
-        (
-            "Clone Repository",
-            "clone_repository",
-            {"repository_url": "<url>", "clone_path": "<directory>"},
-        ),
-        ("Write to file", "write_to_file", {"file": "<file>", "text": "<text>"}),
-        ("Read file", "read_file", {"file": "<file>"}),
-        ("Append to file", "append_to_file", {"file": "<file>", "text": "<text>"}),
-        ("Delete file", "delete_file", {"file": "<file>"}),
-        ("Search Files", "search_files", {"directory": "<directory>"}),
-        ("Evaluate Code", "evaluate_code", {"code": "<full_code_string>"}),
-        (
-            "Get Improved Code",
-            "improve_code",
-            {"suggestions": "<list_of_suggestions>", "code": "<full_code_string>"},
-        ),
-        (
-            "Write Tests",
-            "write_tests",
-            {"code": "<full_code_string>", "focus": "<list_of_focus_areas>"},
-        ),
-        ("Execute Python File", "execute_python_file", {"file": "<file>"}),
-        ("Generate Image", "generate_image", {"prompt": "<prompt>"}),
-        ("Send Tweet", "send_tweet", {"text": "<text>"}),
-    ]
-    # Only add the audio to text command if the model is specified
-    if cfg.huggingface_audio_to_text_model:
-        commands.append(
-            ("Convert Audio to text", "read_audio_from_file", {"file": "<file>"}),
-        )
-    # Add these command last.
-    commands.append(
        ("Do Nothing", "do_nothing", {}),
-    )
-    commands.append(
        ("Task Complete (Shutdown)", "task_complete", {"reason": "<reason>"}),
-    )
+    ]
    # Add commands to the PromptGenerator object
    for command_label, command_name, args in commands:

View File

@@ -1,5 +1,6 @@
"""Setup the AI and its goals"""
from colorama import Fore, Style
from autogpt import utils
from autogpt.config.ai_config import AIConfig
from autogpt.logs import logger

View File

@@ -1,5 +1,6 @@
""" Brian speech module for autogpt """
import os
import requests
from playsound import playsound

View File

@@ -1,8 +1,8 @@
"""ElevenLabs speech module"""
import os
-from playsound import playsound
import requests
+from playsound import playsound
from autogpt.config import Config
from autogpt.speech.base import VoiceBase

View File

@@ -1,7 +1,8 @@
""" GTTS Voice. """
import os
-from playsound import playsound
import gtts
+from playsound import playsound
from autogpt.speech.base import VoiceBase

View File

@@ -1,13 +1,12 @@
""" Text to speech module """
-from autogpt.config import Config
import threading
from threading import Semaphore
-from autogpt.speech.brian import BrianSpeech
-from autogpt.speech.macos_tts import MacOSTTS
-from autogpt.speech.gtts import GTTSVoice
-from autogpt.speech.eleven_labs import ElevenLabsSpeech
+from autogpt.config import Config
+from autogpt.speech.brian import BrianSpeech
+from autogpt.speech.eleven_labs import ElevenLabsSpeech
+from autogpt.speech.gtts import GTTSVoice
+from autogpt.speech.macos_tts import MacOSTTS
CFG = Config()
DEFAULT_VOICE_ENGINE = GTTSVoice()

View File

@@ -1,6 +1,7 @@
-import pkg_resources
import sys
+import pkg_resources
def main():
    requirements_file = sys.argv[1]

View File

@@ -1,4 +1,5 @@
import unittest
import coverage
if __name__ == "__main__":

View File

@@ -1,6 +1,6 @@
-import unittest
import os
import sys
+import unittest
from bs4 import BeautifulSoup

View File

@@ -1,15 +1,15 @@
+import os
+import sys
import unittest
from unittest import mock
-import sys
-import os
+from uuid import uuid4
from weaviate import Client
from weaviate.util import get_valid_uuid
-from uuid import uuid4
from autogpt.config import Config
-from autogpt.memory.weaviate import WeaviateMemory
from autogpt.memory.base import get_ada_embedding
+from autogpt.memory.weaviate import WeaviateMemory
@mock.patch.dict(

View File

@@ -3,6 +3,7 @@ import sys
from pathlib import Path
import pytest
from autogpt.commands.command import Command, CommandRegistry

View File

@@ -1,4 +1,5 @@
import unittest
import tests.context
from autogpt.token_counter import count_message_tokens, count_string_tokens

View File

@@ -1,6 +1,6 @@
# Generated by CodiumAI
-import unittest
import time
+import unittest
from unittest.mock import patch
from autogpt.chat import create_chat_message, generate_context

View File

@@ -1,7 +1,8 @@
-import autogpt.agent.agent_manager as agent_manager
-from autogpt.app import start_agent, list_agents, execute_command
import unittest
-from unittest.mock import patch, MagicMock
+from unittest.mock import MagicMock, patch
+import autogpt.agent.agent_manager as agent_manager
+from autogpt.app import execute_command, list_agents, start_agent
class TestCommands(unittest.TestCase):