Finish integrating command registry

BillSchumacher
2023-04-16 21:51:36 -05:00
parent 167628c696
commit c110f3489d
50 changed files with 238 additions and 234 deletions

View File

@@ -2,17 +2,17 @@
import logging
import os
from pathlib import Path
from colorama import Fore
from autogpt.agent.agent import Agent
from autogpt.args import parse_arguments
from autogpt.commands.command import CommandRegistry
from autogpt.config import Config, check_openai_api_key
from autogpt.logs import logger
from autogpt.memory import get_memory
from autogpt.prompts.prompt import construct_main_ai_config
from autogpt.plugins import load_plugins
from autogpt.prompts.prompt import construct_main_ai_config
# Load environment variables from .env file
@@ -47,13 +47,20 @@ def main() -> None:
cfg.set_plugins(loaded_plugins)
# Create a CommandRegistry instance and scan default folder
command_registry = CommandRegistry()
command_registry.import_commands("scripts.ai_functions")
command_registry.import_commands("scripts.commands")
command_registry.import_commands("scripts.execute_code")
command_registry.import_commands("scripts.agent_manager")
command_registry.import_commands("scripts.file_operations")
command_registry.import_commands("autogpt.commands.audio_text")
command_registry.import_commands("autogpt.commands.evaluate_code")
command_registry.import_commands("autogpt.commands.execute_code")
command_registry.import_commands("autogpt.commands.file_operations")
command_registry.import_commands("autogpt.commands.git_operations")
command_registry.import_commands("autogpt.commands.google_search")
command_registry.import_commands("autogpt.commands.image_gen")
command_registry.import_commands("autogpt.commands.twitter")
command_registry.import_commands("autogpt.commands.web_selenium")
command_registry.import_commands("autogpt.commands.write_tests")
command_registry.import_commands("autogpt.app")
ai_name = ""
ai_config = construct_main_ai_config()
ai_config.command_registry = command_registry
# print(prompt)
# Initialize variables
full_message_history = []
@@ -70,6 +77,9 @@ def main() -> None:
f"Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
)
logger.typewriter_log(f"Using Browser:", Fore.GREEN, cfg.selenium_web_browser)
prompt = ai_config.construct_full_prompt()
if cfg.debug_mode:
logger.typewriter_log("Prompt:", Fore.GREEN, prompt)
agent = Agent(
ai_name=ai_name,
memory=memory,
@@ -77,7 +87,7 @@ def main() -> None:
next_action_count=next_action_count,
command_registry=command_registry,
config=ai_config,
prompt=ai_config.construct_full_prompt(),
prompt=prompt,
user_input=user_input,
)
agent.start_interaction_loop()
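
For reference, a minimal sketch of how a registry scan like the import_commands calls above can work. Only the AUTO_GPT_COMMAND_IDENTIFIER marker and the commands dict are confirmed elsewhere in this diff; the Command object stored on each function and the exact attribute wiring are assumptions for illustration.

import importlib
import inspect

AUTO_GPT_COMMAND_IDENTIFIER = "auto_gpt_command"  # marker attached by the @command decorator

class CommandRegistry:
    def __init__(self) -> None:
        self.commands = {}  # maps command name -> command object

    def register(self, cmd) -> None:
        self.commands[cmd.name] = cmd

    def import_commands(self, module_name: str) -> None:
        # Import the module and register every function tagged by the decorator.
        module = importlib.import_module(module_name)
        for _, attr in inspect.getmembers(module):
            if getattr(attr, AUTO_GPT_COMMAND_IDENTIFIER, False):
                self.register(attr.command)  # assumed: the decorator stores a Command on the function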

View File

@@ -1,6 +1,6 @@
from colorama import Fore, Style
from autogpt.app import execute_command, get_command
from autogpt.app import execute_command, get_command
from autogpt.chat import chat_with_ai, create_chat_message
from autogpt.config import Config
from autogpt.json_fixes.bracket_termination import (

View File

@@ -1,8 +1,8 @@
"""Agent manager for managing GPT agents"""
from __future__ import annotations
from autogpt.config.config import Config, Singleton
from autogpt.llm_utils import create_chat_completion
from autogpt.config.config import Singleton, Config
class AgentManager(metaclass=Singleton):

View File

@@ -1,16 +1,11 @@
""" Command and Control """
import json
from typing import List, NoReturn, Union
from autogpt.agent.agent_manager import AgentManager
from autogpt.commands.command import command, CommandRegistry
from autogpt.commands.evaluate_code import evaluate_code
from autogpt.commands.google_search import google_official_search, google_search
from autogpt.commands.improve_code import improve_code
from autogpt.commands.write_tests import write_tests
from autogpt.config import Config
from autogpt.commands.image_gen import generate_image
from autogpt.commands.audio_text import read_audio_from_file
from autogpt.commands.web_requests import scrape_links, scrape_text
from autogpt.commands.command import CommandRegistry, command
from autogpt.commands.evaluate_code import evaluate_code
from autogpt.commands.execute_code import execute_python_file, execute_shell
from autogpt.commands.file_operations import (
append_to_file,
@@ -19,15 +14,20 @@ from autogpt.commands.file_operations import (
search_files,
write_to_file,
)
from autogpt.commands.git_operations import clone_repository
from autogpt.commands.google_search import google_official_search, google_search
from autogpt.commands.image_gen import generate_image
from autogpt.commands.improve_code import improve_code
from autogpt.commands.twitter import send_tweet
from autogpt.commands.web_requests import scrape_links, scrape_text
from autogpt.commands.web_selenium import browse_website
from autogpt.commands.write_tests import write_tests
from autogpt.config import Config
from autogpt.json_fixes.parsing import fix_and_parse_json
from autogpt.memory import get_memory
from autogpt.processing.text import summarize_text
from autogpt.prompts.generator import PromptGenerator
from autogpt.speech import say_text
from autogpt.commands.web_selenium import browse_website
from autogpt.commands.git_operations import clone_repository
from autogpt.commands.twitter import send_tweet
CFG = Config()
AGENT_MANAGER = AgentManager()
@@ -132,76 +132,16 @@ def execute_command(
# TODO: Remove commands below after they are moved to the command registry.
command_name = map_command_synonyms(command_name)
if command_name == "google":
# Check if the Google API key is set and use the official search method
# If the API key is not set or has only whitespaces, use the unofficial
# search method
key = CFG.google_api_key
if key and key.strip() and key != "your-google-api-key":
google_result = google_official_search(arguments["input"])
return google_result
else:
google_result = google_search(arguments["input"])
# google_result can be a list or a string depending on the search results
if isinstance(google_result, list):
safe_message = [
google_result_single.encode("utf-8", "ignore")
for google_result_single in google_result
]
else:
safe_message = google_result.encode("utf-8", "ignore")
return str(safe_message)
elif command_name == "memory_add":
if command_name == "memory_add":
return memory.add(arguments["string"])
elif command_name == "start_agent":
return start_agent(
arguments["name"], arguments["task"], arguments["prompt"]
)
elif command_name == "message_agent":
return message_agent(arguments["key"], arguments["message"])
elif command_name == "list_agents":
return list_agents()
elif command_name == "delete_agent":
return delete_agent(arguments["key"])
elif command_name == "get_text_summary":
return get_text_summary(arguments["url"], arguments["question"])
elif command_name == "get_hyperlinks":
return get_hyperlinks(arguments["url"])
elif command_name == "clone_repository":
return clone_repository(
arguments["repository_url"], arguments["clone_path"]
)
elif command_name == "read_file":
return read_file(arguments["file"])
elif command_name == "write_to_file":
return write_to_file(arguments["file"], arguments["text"])
elif command_name == "append_to_file":
return append_to_file(arguments["file"], arguments["text"])
elif command_name == "delete_file":
return delete_file(arguments["file"])
elif command_name == "search_files":
return search_files(arguments["directory"])
elif command_name == "browse_website":
return browse_website(arguments["url"], arguments["question"])
# TODO: Change these to take in a file rather than pasted code, if
# non-file is given, return instructions "Input should be a python
# filepath, write your code to file and try again"
elif command_name == "evaluate_code":
return evaluate_code(arguments["code"])
elif command_name == "improve_code":
return improve_code(arguments["suggestions"], arguments["code"])
elif command_name == "write_tests":
return write_tests(arguments["code"], arguments.get("focus"))
elif command_name == "execute_python_file": # Add this command
return execute_python_file(arguments["file"])
elif command_name == "read_audio_from_file":
return read_audio_from_file(arguments["file"])
elif command_name == "generate_image":
return generate_image(arguments["prompt"])
elif command_name == "send_tweet":
return send_tweet(arguments["text"])
# filepath, write your code to file and try again
elif command_name == "do_nothing":
return "No action performed."
elif command_name == "task_complete":
@@ -305,7 +245,7 @@ def message_agent(key: str, message: str) -> str:
@command("list_agents", "List GPT Agents", "")
def list_agents():
def list_agents() -> str:
"""List all agents
Returns:
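
The TODO in this hunk says the remaining if/elif branches disappear as commands migrate into the registry. A hedged sketch of the dispatch order that implies; the signature and registry lookup below are illustrative assumptions, not code from this commit.

def execute_command(command_registry, command_name: str, arguments: dict) -> str:
    # Prefer a registered command; fall back to the legacy chain for anything not yet migrated.
    cmd = command_registry.commands.get(command_name)  # assumed lookup shape
    if cmd is not None:
        return cmd(**arguments)
    if command_name == "do_nothing":
        return "No action performed."
    # ...remaining legacy elif branches...
    return f"Unknown command '{command_name}'."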

View File

@@ -2,6 +2,7 @@
import argparse
from colorama import Fore
from autogpt import utils
from autogpt.config import Config
from autogpt.logs import logger

View File

@@ -1,23 +1,51 @@
import requests
"""Commands for converting audio to text."""
import json
import requests
from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.workspace import path_in_workspace
cfg = Config()
CFG = Config()
def read_audio_from_file(audio_path):
@command(
"read_audio_from_file",
"Convert Audio to text",
'"file": "<file>"',
CFG.huggingface_audio_to_text_model,
"Configure huggingface_audio_to_text_model.",
)
def read_audio_from_file(audio_path: str) -> str:
"""
Convert audio to text.
Args:
audio_path (str): The path to the audio file
Returns:
str: The text from the audio
"""
audio_path = path_in_workspace(audio_path)
with open(audio_path, "rb") as audio_file:
audio = audio_file.read()
return read_audio(audio)
def read_audio(audio):
model = cfg.huggingface_audio_to_text_model
def read_audio(audio: bytes) -> str:
"""
Convert audio to text.
Args:
audio (bytes): The audio to convert
Returns:
str: The text from the audio
"""
model = CFG.huggingface_audio_to_text_model
api_url = f"https://api-inference.huggingface.co/models/{model}"
api_token = cfg.huggingface_api_token
api_token = CFG.huggingface_api_token
headers = {"Authorization": f"Bearer {api_token}"}
if api_token is None:
@@ -32,4 +60,4 @@ def read_audio(audio):
)
text = json.loads(response.content.decode("utf-8"))["text"]
return "The audio says: " + text
return f"The audio says: {text}"

View File

@@ -1,8 +1,8 @@
import os
import sys
import importlib
import inspect
from typing import Callable, Any, List, Optional
import os
import sys
from typing import Any, Callable, List, Optional
# Unique identifier for auto-gpt commands
AUTO_GPT_COMMAND_IDENTIFIER = "auto_gpt_command"
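
A minimal sketch of how a decorator can tag functions with that identifier so import_commands can discover them. The Command fields and defaults here are assumptions inferred from the call sites elsewhere in this diff, not the module's actual definitions.

from dataclasses import dataclass
from typing import Any, Callable, Optional

AUTO_GPT_COMMAND_IDENTIFIER = "auto_gpt_command"

@dataclass
class Command:
    name: str
    description: str
    method: Callable[..., Any]
    signature: str = ""
    enabled: bool = True
    disabled_reason: Optional[str] = None

    def __call__(self, *args, **kwargs) -> Any:
        if not self.enabled:
            return f"Command '{self.name}' is disabled: {self.disabled_reason}"
        return self.method(*args, **kwargs)

def command(name, description, signature="", enabled=True, disabled_reason=None):
    def decorator(func):
        # Attach the command metadata and the identifier the registry scans for.
        func.command = Command(name, description, func, signature, enabled, disabled_reason)
        setattr(func, AUTO_GPT_COMMAND_IDENTIFIER, True)
        return func
    return decorator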

View File

@@ -4,9 +4,10 @@ import subprocess
import docker
from docker.errors import ImageNotFound
from autogpt.config import Config
from autogpt.commands.command import command
from autogpt.workspace import path_in_workspace, WORKSPACE_PATH
from autogpt.config import Config
from autogpt.workspace import WORKSPACE_PATH, path_in_workspace
CFG = Config()

View File

@@ -5,8 +5,9 @@ import os
import os.path
from pathlib import Path
from typing import Generator
from autogpt.commands.command import command
from autogpt.workspace import path_in_workspace, WORKSPACE_PATH
from autogpt.workspace import WORKSPACE_PATH, path_in_workspace
LOG_FILE = "file_logger.txt"
LOG_FILE_PATH = WORKSPACE_PATH / LOG_FILE

View File

@@ -1,10 +1,19 @@
"""Git operations for autogpt"""
import git
from git.repo import Repo
from autogpt.commands.command import command
from autogpt.config import Config
CFG = Config()
@command(
"clone_repository",
"Clone Repositoryy",
'"repository_url": "<url>", "clone_path": "<directory>"',
CFG.github_username and CFG.github_api_key,
"Configure github_username and github_api_key.",
)
def clone_repository(repo_url: str, clone_path: str) -> str:
"""Clone a github repository locally
@@ -17,7 +26,7 @@ def clone_repository(repo_url: str, clone_path: str) -> str:
split_url = repo_url.split("//")
auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url)
try:
git.Repo.clone_from(auth_repo_url, clone_path)
Repo.clone_from(auth_repo_url, clone_path)
return f"""Cloned {repo_url} to {clone_path}"""
except Exception as e:
return f"Error: {str(e)}"

View File

@@ -5,11 +5,13 @@ import json
from duckduckgo_search import ddg
from autogpt.commands.command import command
from autogpt.config import Config
CFG = Config()
@command("google", "Google Search", '"input": "<search>"', not CFG.google_api_key)
def google_search(query: str, num_results: int = 8) -> str:
"""Return the results of a google search
@@ -31,9 +33,17 @@ def google_search(query: str, num_results: int = 8) -> str:
for j in results:
search_results.append(j)
return json.dumps(search_results, ensure_ascii=False, indent=4)
results = json.dumps(search_results, ensure_ascii=False, indent=4)
return safe_google_results(results)
@command(
"google",
"Google Search",
'"input": "<search>"',
bool(CFG.google_api_key),
"Configure google_api_key.",
)
def google_official_search(query: str, num_results: int = 8) -> str | list[str]:
"""Return the results of a google search using the official Google API
@@ -82,6 +92,26 @@ def google_official_search(query: str, num_results: int = 8) -> str | list[str]:
return "Error: The provided Google API key is invalid or missing."
else:
return f"Error: {e}"
# google_result can be a list or a string depending on the search results
# Return the list of search result URLs
return search_results_links
return safe_google_results(search_results_links)
def safe_google_results(results: str | list) -> str:
"""
Return the results of a google search in a safe format.
Args:
results (str | list): The search results.
Returns:
str: The results of the search.
"""
if isinstance(results, list):
safe_message = json.dumps(
[result.encode("utf-8", "ignore").decode("utf-8") for result in results]
)
else:
safe_message = results.encode("utf-8", "ignore").decode("utf-8")
return safe_message
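
A quick usage illustration of the two branches (inputs made up):

safe_google_results(["https://example.com", "https://example.org"])
# -> '["https://example.com", "https://example.org"]'
safe_google_results("No good search results found")
# -> 'No good search results found'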

View File

@@ -1,12 +1,12 @@
""" Image Generation Module for AutoGPT."""
import io
import os.path
import uuid
from base64 import b64decode
import openai
import requests
from PIL import Image
from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.workspace import path_in_workspace

View File

@@ -2,7 +2,7 @@ from __future__ import annotations
import json
from autogpt.commands import command
from autogpt.commands.command import command
from autogpt.llm_utils import call_ai_function

View File

@@ -1,11 +1,30 @@
import tweepy
"""A module that contains a command to send a tweet."""
import os
import tweepy
from dotenv import load_dotenv
from autogpt.commands.command import command
load_dotenv()
def send_tweet(tweet_text):
@command(
"send_tweet",
"Send Tweet",
'"text": "<text>"',
)
def send_tweet(tweet_text: str) -> str:
"""
Post the given text to Twitter as a tweet.
Args:
tweet_text (str): Text to be tweeted.
Returns:
A result from sending the tweet.
"""
consumer_key = os.environ.get("TW_CONSUMER_KEY")
consumer_secret = os.environ.get("TW_CONSUMER_SECRET")
access_token = os.environ.get("TW_ACCESS_TOKEN")
@@ -20,6 +39,6 @@ def send_tweet(tweet_text):
# Send tweet
try:
api.update_status(tweet_text)
print("Tweet sent successfully!")
return "Tweet sent successfully!"
except tweepy.TweepyException as e:
print("Error sending tweet: {}".format(e.reason))
return f"Error sending tweet: {e.reason}"

View File

@@ -8,6 +8,7 @@ except ImportError:
"Playwright not installed. Please install it with 'pip install playwright' to use."
)
from bs4 import BeautifulSoup
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks

View File

@@ -4,9 +4,9 @@ from __future__ import annotations
from urllib.parse import urljoin, urlparse
import requests
from requests.compat import urljoin
from requests import Response
from bs4 import BeautifulSoup
from requests import Response
from requests.compat import urljoin
from autogpt.config import Config
from autogpt.memory import get_memory

View File

@@ -1,22 +1,25 @@
"""Selenium web scraping module."""
from __future__ import annotations
from selenium import webdriver
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
import autogpt.processing.text as summary
from bs4 import BeautifulSoup
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.firefox import GeckoDriverManager
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.safari.options import Options as SafariOptions
import logging
from pathlib import Path
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.safari.options import Options as SafariOptions
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.firefox import GeckoDriverManager
from autogpt.commands.command import command
import autogpt.processing.text as summary
from autogpt.config import Config
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
FILE_DIR = Path(__file__).parent.parent
CFG = Config()

View File

@@ -2,7 +2,8 @@
from __future__ import annotations
import json
from autogpt.commands import command
from autogpt.commands.command import command
from autogpt.llm_utils import call_ai_function

View File

@@ -2,7 +2,7 @@
This module contains the configuration classes for AutoGPT.
"""
from autogpt.config.ai_config import AIConfig
from autogpt.config.config import check_openai_api_key, Config
from autogpt.config.config import Config, check_openai_api_key
from autogpt.config.singleton import AbstractSingleton, Singleton
__all__ = [

View File

@@ -6,7 +6,8 @@ from __future__ import annotations
import os
from pathlib import Path
from typing import Type
from typing import Optional, Type
import yaml
from autogpt.prompts.generator import PromptGenerator
@@ -41,6 +42,7 @@ class AIConfig:
self.ai_role = ai_role
self.ai_goals = ai_goals
self.prompt_generator = None
self.command_registry = None
# Soon this will go in a folder where it remembers more stuff about the run(s)
SAVE_FILE = Path(os.getcwd()) / "ai_settings.yaml"
@@ -113,8 +115,8 @@ class AIConfig:
""
)
from autogpt.prompts.prompt import build_default_prompt_generator
from autogpt.config import Config
from autogpt.prompts.prompt import build_default_prompt_generator
cfg = Config()
if prompt_generator is None:
@@ -122,6 +124,7 @@ class AIConfig:
prompt_generator.goals = self.ai_goals
prompt_generator.name = self.ai_name
prompt_generator.role = self.ai_role
prompt_generator.command_registry = self.command_registry
for plugin in cfg.plugins:
prompt_generator = plugin.post_prompt(prompt_generator)

View File

@@ -1,14 +1,13 @@
"""Configuration class to store the state of bools for different scripts access."""
import os
from colorama import Fore
from autogpt.config.singleton import Singleton
import openai
import yaml
from colorama import Fore
from dotenv import load_dotenv
from autogpt.config.singleton import Singleton
load_dotenv(verbose=True)

View File

@@ -1,8 +1,8 @@
import argparse
import logging
from autogpt.config import Config
from autogpt.commands.file_operations import ingest_file, search_files
from autogpt.config import Config
from autogpt.memory import get_memory
cfg = Config()

View File

@@ -1,9 +1,9 @@
"""This module contains the function to fix JSON strings using GPT-3."""
import json
from autogpt.config import Config
from autogpt.llm_utils import call_ai_function
from autogpt.logs import logger
from autogpt.config import Config
CFG = Config()

View File

@@ -3,11 +3,12 @@ from __future__ import annotations
import contextlib
import json
import regex
from colorama import Fore
from autogpt.logs import logger
from autogpt.config import Config
from autogpt.logs import logger
from autogpt.speech import say_text
CFG = Config()

View File

@@ -1,11 +1,11 @@
from __future__ import annotations
from ast import List
import time
from ast import List
import openai
from openai.error import APIError, RateLimitError
from colorama import Fore
from openai.error import APIError, RateLimitError
from autogpt.config import Config

View File

@@ -5,13 +5,13 @@ import os
import random
import re
import time
from logging import LogRecord
import traceback
from logging import LogRecord
from colorama import Fore, Style
from autogpt.speech import say_text
from autogpt.config import Config, Singleton
from autogpt.speech import say_text
CFG = Config()

View File

@@ -7,8 +7,8 @@ from typing import Any
import numpy as np
import orjson
from autogpt.memory.base import MemoryProviderSingleton
from autogpt.llm_utils import create_embedding_with_ada
from autogpt.memory.base import MemoryProviderSingleton
EMBED_DIM = 1536
SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS

View File

@@ -1,11 +1,5 @@
""" Milvus memory storage provider."""
from pymilvus import (
connections,
FieldSchema,
CollectionSchema,
DataType,
Collection,
)
from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, connections
from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding

View File

@@ -1,9 +1,9 @@
import pinecone
from colorama import Fore, Style
from autogpt.llm_utils import create_embedding_with_ada
from autogpt.logs import logger
from autogpt.memory.base import MemoryProviderSingleton
from autogpt.llm_utils import create_embedding_with_ada
class PineconeMemory(MemoryProviderSingleton):

View File

@@ -10,9 +10,9 @@ from redis.commands.search.field import TextField, VectorField
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
from redis.commands.search.query import Query
from autogpt.llm_utils import create_embedding_with_ada
from autogpt.logs import logger
from autogpt.memory.base import MemoryProviderSingleton
from autogpt.llm_utils import create_embedding_with_ada
SCHEMA = [
TextField("data"),

View File

@@ -1,11 +1,13 @@
from autogpt.config import Config
from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
import uuid
import weaviate
from weaviate import Client
from weaviate.embedded import EmbeddedOptions
from weaviate.util import generate_uuid5
from autogpt.config import Config
from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
def default_schema(weaviate_index):
return {

View File

@@ -1,10 +1,10 @@
"""Handles loading of plugins."""
from ast import Module
import zipfile
from ast import Module
from pathlib import Path
from zipimport import zipimporter
from typing import List, Optional, Tuple
from zipimport import zipimporter
def inspect_zip_for_module(zip_path: str, debug: bool = False) -> Optional[str]:

View File

@@ -1,8 +1,8 @@
"""HTML processing functions"""
from __future__ import annotations
from requests.compat import urljoin
from bs4 import BeautifulSoup
from requests.compat import urljoin
def extract_hyperlinks(soup: BeautifulSoup, base_url: str) -> list[tuple[str, str]]:

View File

@@ -1,9 +1,11 @@
"""Text processing functions"""
from typing import Generator, Optional, Dict
from typing import Dict, Generator, Optional
from selenium.webdriver.remote.webdriver import WebDriver
from autogpt.memory import get_memory
from autogpt.config import Config
from autogpt.llm_utils import create_chat_completion
from autogpt.memory import get_memory
CFG = Config()
MEMORY = get_memory(CFG)

View File

@@ -19,6 +19,7 @@ class PromptGenerator:
self.resources = []
self.performance_evaluation = []
self.goals = []
self.command_registry = None
self.name = "Bob"
self.role = "AI"
self.response_format = {
@@ -119,10 +120,14 @@ class PromptGenerator:
str: The formatted numbered list.
"""
if item_type == "command":
return "\n".join(
f"{i+1}. {self._generate_command_string(item)}"
for i, item in enumerate(items)
)
command_strings = []
if self.command_registry:
command_strings += [
str(item) for item in self.command_registry.commands.values()
]
# These are the commands that are added manually, do_nothing and terminate
command_strings += [self._generate_command_string(item) for item in items]
return "\n".join(f"{i+1}. {item}" for i, item in enumerate(command_strings))
else:
return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items))

View File

@@ -1,4 +1,5 @@
from colorama import Fore
from autogpt.config.ai_config import AIConfig
from autogpt.config.config import Config
from autogpt.logs import logger
@@ -37,63 +38,9 @@ def build_default_prompt_generator() -> PromptGenerator:
# Define the command list
commands = [
("Google Search", "google", {"input": "<search>"}),
(
"Browse Website",
"browse_website",
{"url": "<url>", "question": "<what_you_want_to_find_on_website>"},
),
(
"Start GPT Agent",
"start_agent",
{"name": "<name>", "task": "<short_task_desc>", "prompt": "<prompt>"},
),
(
"Message GPT Agent",
"message_agent",
{"key": "<key>", "message": "<message>"},
),
("List GPT Agents", "list_agents", {}),
("Delete GPT Agent", "delete_agent", {"key": "<key>"}),
(
"Clone Repository",
"clone_repository",
{"repository_url": "<url>", "clone_path": "<directory>"},
),
("Write to file", "write_to_file", {"file": "<file>", "text": "<text>"}),
("Read file", "read_file", {"file": "<file>"}),
("Append to file", "append_to_file", {"file": "<file>", "text": "<text>"}),
("Delete file", "delete_file", {"file": "<file>"}),
("Search Files", "search_files", {"directory": "<directory>"}),
("Evaluate Code", "evaluate_code", {"code": "<full_code_string>"}),
(
"Get Improved Code",
"improve_code",
{"suggestions": "<list_of_suggestions>", "code": "<full_code_string>"},
),
(
"Write Tests",
"write_tests",
{"code": "<full_code_string>", "focus": "<list_of_focus_areas>"},
),
("Execute Python File", "execute_python_file", {"file": "<file>"}),
("Generate Image", "generate_image", {"prompt": "<prompt>"}),
("Send Tweet", "send_tweet", {"text": "<text>"}),
]
# Only add the audio to text command if the model is specified
if cfg.huggingface_audio_to_text_model:
commands.append(
("Convert Audio to text", "read_audio_from_file", {"file": "<file>"}),
)
# Add these commands last.
commands.append(
("Do Nothing", "do_nothing", {}),
)
commands.append(
("Task Complete (Shutdown)", "task_complete", {"reason": "<reason>"}),
)
]
# Add commands to the PromptGenerator object
for command_label, command_name, args in commands:

View File

@@ -1,5 +1,6 @@
"""Setup the AI and its goals"""
from colorama import Fore, Style
from autogpt import utils
from autogpt.config.ai_config import AIConfig
from autogpt.logs import logger

View File

@@ -1,5 +1,6 @@
""" Brian speech module for autogpt """
import os
import requests
from playsound import playsound

View File

@@ -1,8 +1,8 @@
"""ElevenLabs speech module"""
import os
from playsound import playsound
import requests
from playsound import playsound
from autogpt.config import Config
from autogpt.speech.base import VoiceBase

View File

@@ -1,7 +1,8 @@
""" GTTS Voice. """
import os
from playsound import playsound
import gtts
from playsound import playsound
from autogpt.speech.base import VoiceBase

View File

@@ -1,13 +1,12 @@
""" Text to speech module """
from autogpt.config import Config
import threading
from threading import Semaphore
from autogpt.speech.brian import BrianSpeech
from autogpt.speech.macos_tts import MacOSTTS
from autogpt.speech.gtts import GTTSVoice
from autogpt.speech.eleven_labs import ElevenLabsSpeech
from autogpt.config import Config
from autogpt.speech.brian import BrianSpeech
from autogpt.speech.eleven_labs import ElevenLabsSpeech
from autogpt.speech.gtts import GTTSVoice
from autogpt.speech.macos_tts import MacOSTTS
CFG = Config()
DEFAULT_VOICE_ENGINE = GTTSVoice()

View File

@@ -1,6 +1,7 @@
import pkg_resources
import sys
import pkg_resources
def main():
requirements_file = sys.argv[1]

View File

@@ -1,4 +1,5 @@
import unittest
import coverage
if __name__ == "__main__":

View File

@@ -1,6 +1,6 @@
import unittest
import os
import sys
import unittest
from bs4 import BeautifulSoup

View File

@@ -1,15 +1,15 @@
import os
import sys
import unittest
from unittest import mock
import sys
import os
from uuid import uuid4
from weaviate import Client
from weaviate.util import get_valid_uuid
from uuid import uuid4
from autogpt.config import Config
from autogpt.memory.weaviate import WeaviateMemory
from autogpt.memory.base import get_ada_embedding
from autogpt.memory.weaviate import WeaviateMemory
@mock.patch.dict(

View File

@@ -3,6 +3,7 @@ import sys
from pathlib import Path
import pytest
from autogpt.commands.command import Command, CommandRegistry

View File

@@ -1,4 +1,5 @@
import unittest
import tests.context
from autogpt.token_counter import count_message_tokens, count_string_tokens

View File

@@ -1,6 +1,6 @@
# Generated by CodiumAI
import unittest
import time
import unittest
from unittest.mock import patch
from autogpt.chat import create_chat_message, generate_context

View File

@@ -1,7 +1,8 @@
import autogpt.agent.agent_manager as agent_manager
from autogpt.app import start_agent, list_agents, execute_command
import unittest
from unittest.mock import patch, MagicMock
from unittest.mock import MagicMock, patch
import autogpt.agent.agent_manager as agent_manager
from autogpt.app import execute_command, list_agents, start_agent
class TestCommands(unittest.TestCase):