Merge master into release-v0.4.2

Signed-off-by: Merwane Hamadi <merwanehamadi@gmail.com>
Merwane Hamadi
2023-06-19 18:39:19 -07:00
75 changed files with 1191 additions and 1948 deletions

View File: autogpt/agent/agent.py

@@ -5,25 +5,22 @@ from datetime import datetime
from colorama import Fore, Style
from autogpt.commands.command import CommandRegistry
from autogpt.config import Config
from autogpt.config.ai_config import AIConfig
from autogpt.json_utils.utilities import extract_json_from_response, validate_json
from autogpt.llm.base import ChatSequence
from autogpt.llm.chat import chat_with_ai, create_chat_completion
from autogpt.llm.chat import chat_with_ai
from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS
from autogpt.llm.utils import count_string_tokens
from autogpt.log_cycle.log_cycle import (
FULL_MESSAGE_HISTORY_FILE_NAME,
NEXT_ACTION_FILE_NAME,
PROMPT_SUPERVISOR_FEEDBACK_FILE_NAME,
SUPERVISOR_FEEDBACK_FILE_NAME,
USER_INPUT_FILE_NAME,
LogCycleHandler,
)
from autogpt.logs import logger, print_assistant_thoughts
from autogpt.memory.message_history import MessageHistory
from autogpt.memory.vector import VectorMemory
from autogpt.models.command_registry import CommandRegistry
from autogpt.speech import say_text
from autogpt.spinner import Spinner
from autogpt.utils import clean_input
@@ -146,7 +143,7 @@ class Agent:
try:
assistant_reply_json = extract_json_from_response(assistant_reply)
validate_json(assistant_reply_json)
validate_json(assistant_reply_json, self.config)
except json.JSONDecodeError as e:
logger.error(f"Exception while validating assistant reply JSON: {e}")
assistant_reply_json = {}
@@ -161,7 +158,7 @@ class Agent:
# Get command name and arguments
try:
print_assistant_thoughts(
self.ai_name, assistant_reply_json, self.config.speak_mode
self.ai_name, assistant_reply_json, self.config
)
command_name, arguments = get_command(assistant_reply_json)
if self.config.speak_mode:
@@ -200,32 +197,16 @@ class Agent:
)
while True:
if self.config.chat_messages_enabled:
console_input = clean_input("Waiting for your response...")
console_input = clean_input(
self.config, "Waiting for your response..."
)
else:
console_input = clean_input(
Fore.MAGENTA + "Input:" + Style.RESET_ALL
self.config, Fore.MAGENTA + "Input:" + Style.RESET_ALL
)
if console_input.lower().strip() == self.config.authorise_key:
user_input = "GENERATE NEXT COMMAND JSON"
break
elif console_input.lower().strip() == "s":
logger.typewriter_log(
"-=-=-=-=-=-=-= THOUGHTS, REASONING, PLAN AND CRITICISM WILL NOW BE VERIFIED BY AGENT -=-=-=-=-=-=-=",
Fore.GREEN,
"",
)
thoughts = assistant_reply_json.get("thoughts", {})
self_feedback_resp = self.get_self_feedback(
thoughts, self.config.fast_llm_model
)
logger.typewriter_log(
f"SELF FEEDBACK: {self_feedback_resp}",
Fore.YELLOW,
"",
)
user_input = self_feedback_resp
command_name = "self_feedback"
break
elif console_input.lower().strip() == "":
logger.warn("Invalid input format.")
continue
@@ -281,8 +262,6 @@ class Agent:
result = f"Could not execute command: {arguments}"
elif command_name == "human_feedback":
result = f"Human feedback: {user_input}"
elif command_name == "self_feedback":
result = f"Self feedback: {user_input}"
else:
for plugin in self.config.plugins:
if not plugin.can_handle_pre_command():
@@ -335,45 +314,3 @@ class Agent:
self.workspace.get_path(command_args[pathlike])
)
return command_args
def get_self_feedback(self, thoughts: dict, llm_model: str) -> str:
"""Generates a feedback response based on the provided thoughts dictionary.
This method takes in a dictionary of thoughts containing keys such as 'reasoning',
'plan', 'thoughts', and 'criticism'. It combines these elements into a single
feedback message and uses the create_chat_completion() function to generate a
response based on the input message.
Args:
thoughts (dict): A dictionary containing thought elements like reasoning,
plan, thoughts, and criticism.
Returns:
str: A feedback response generated using the provided thoughts dictionary.
"""
ai_role = self.ai_config.ai_role
feedback_prompt = f"Below is a message from me, an AI Agent, assuming the role of {ai_role}. whilst keeping knowledge of my slight limitations as an AI Agent Please evaluate my thought process, reasoning, and plan, and provide a concise paragraph outlining potential improvements. Consider adding or removing ideas that do not align with my role and explaining why, prioritizing thoughts based on their significance, or simply refining my overall thought process."
reasoning = thoughts.get("reasoning", "")
plan = thoughts.get("plan", "")
thought = thoughts.get("thoughts", "")
feedback_thoughts = thought + reasoning + plan
prompt = ChatSequence.for_model(llm_model)
prompt.add("user", feedback_prompt + feedback_thoughts)
self.log_cycle_handler.log_cycle(
self.ai_config.ai_name,
self.created_at,
self.cycle_count,
prompt.raw(),
PROMPT_SUPERVISOR_FEEDBACK_FILE_NAME,
)
feedback = create_chat_completion(prompt)
self.log_cycle_handler.log_cycle(
self.ai_config.ai_name,
self.created_at,
self.cycle_count,
feedback,
SUPERVISOR_FEEDBACK_FILE_NAME,
)
return feedback

View File: autogpt/agent/agent_manager.py

@@ -10,12 +10,12 @@ from autogpt.singleton import Singleton
class AgentManager(metaclass=Singleton):
"""Agent manager for managing GPT agents"""
def __init__(self):
def __init__(self, config: Config):
self.next_key = 0
self.agents: dict[
int, tuple[str, list[Message], str]
] = {} # key, (task, full_message_history, model)
self.cfg = Config()
self.config = config
# Create new GPT agent
# TODO: Centralise use of create_chat_completion() to globally enforce token limit
@@ -35,18 +35,18 @@ class AgentManager(metaclass=Singleton):
"""
messages = ChatSequence.for_model(model, [Message("user", creation_prompt)])
for plugin in self.cfg.plugins:
for plugin in self.config.plugins:
if not plugin.can_handle_pre_instruction():
continue
if plugin_messages := plugin.pre_instruction(messages.raw()):
messages.extend([Message(**raw_msg) for raw_msg in plugin_messages])
# Start GPT instance
agent_reply = create_chat_completion(prompt=messages)
agent_reply = create_chat_completion(prompt=messages, config=self.config)
messages.add("assistant", agent_reply)
plugins_reply = ""
for i, plugin in enumerate(self.cfg.plugins):
for i, plugin in enumerate(self.config.plugins):
if not plugin.can_handle_on_instruction():
continue
if plugin_result := plugin.on_instruction([m.raw() for m in messages]):
@@ -62,7 +62,7 @@ class AgentManager(metaclass=Singleton):
self.agents[key] = (task, list(messages), model)
for plugin in self.cfg.plugins:
for plugin in self.config.plugins:
if not plugin.can_handle_post_instruction():
continue
agent_reply = plugin.post_instruction(agent_reply)
@@ -85,19 +85,19 @@ class AgentManager(metaclass=Singleton):
messages = ChatSequence.for_model(model, messages)
messages.add("user", message)
for plugin in self.cfg.plugins:
for plugin in self.config.plugins:
if not plugin.can_handle_pre_instruction():
continue
if plugin_messages := plugin.pre_instruction([m.raw() for m in messages]):
messages.extend([Message(**raw_msg) for raw_msg in plugin_messages])
# Start GPT instance
agent_reply = create_chat_completion(prompt=messages)
agent_reply = create_chat_completion(prompt=messages, config=self.config)
messages.add("assistant", agent_reply)
plugins_reply = agent_reply
for i, plugin in enumerate(self.cfg.plugins):
for i, plugin in enumerate(self.config.plugins):
if not plugin.can_handle_on_instruction():
continue
if plugin_result := plugin.on_instruction([m.raw() for m in messages]):
@@ -107,7 +107,7 @@ class AgentManager(metaclass=Singleton):
if plugins_reply and plugins_reply != "":
messages.add("assistant", plugins_reply)
for plugin in self.cfg.plugins:
for plugin in self.config.plugins:
if not plugin.can_handle_post_instruction():
continue
agent_reply = plugin.post_instruction(agent_reply)
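With this change, `AgentManager` receives its `Config` by constructor injection instead of building the singleton itself. A minimal sketch of the new call pattern, using only signatures visible in this diff (the task and prompt strings are illustrative):

```python
from autogpt.agent.agent_manager import AgentManager
from autogpt.config import Config

config = Config()  # built once by the application and passed down
agent_manager = AgentManager(config)

# create_agent(task, creation_prompt, model) registers the agent and
# returns (key, first_reply); plugin hooks now read the injected config.
key, first_reply = agent_manager.create_agent(
    "summarize a document",  # illustrative task
    "You are a summarization agent. Respond with: Acknowledged.",
    config.fast_llm_model,   # model attribute referenced elsewhere in this diff
)
follow_up = agent_manager.message_agent(key, "Summarize: hello world")
```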

View File: autogpt/app.py

@@ -1,14 +1,8 @@
""" Command and Control """
import json
from typing import Dict, List, Union
from typing import Dict
from autogpt.agent.agent import Agent
from autogpt.agent.agent_manager import AgentManager
from autogpt.commands.command import command
from autogpt.commands.web_requests import scrape_links, scrape_text
from autogpt.processing.text import summarize_text
from autogpt.speech import say_text
from autogpt.url_utils.validators import validate_url
def is_valid_int(value: str) -> bool:
@@ -124,117 +118,3 @@ def execute_command(
)
except Exception as e:
return f"Error: {str(e)}"
@command(
"get_text_summary", "Get text summary", '"url": "<url>", "question": "<question>"'
)
@validate_url
def get_text_summary(url: str, question: str, agent: Agent) -> str:
"""Get the text summary of a webpage
Args:
url (str): The url to scrape
question (str): The question to summarize the text for
Returns:
str: The summary of the text
"""
text = scrape_text(url, agent)
summary, _ = summarize_text(text, question=question)
return f""" "Result" : {summary}"""
@command("get_hyperlinks", "Get hyperlinks", '"url": "<url>"')
@validate_url
def get_hyperlinks(url: str, agent: Agent) -> Union[str, List[str]]:
"""Get all hyperlinks on a webpage
Args:
url (str): The url to scrape
Returns:
str or list: The hyperlinks on the page
"""
return scrape_links(url, agent)
@command(
"start_agent",
"Start GPT Agent",
'"name": "<name>", "task": "<short_task_desc>", "prompt": "<prompt>"',
)
def start_agent(name: str, task: str, prompt: str, agent: Agent, model=None) -> str:
"""Start an agent with a given name, task, and prompt
Args:
name (str): The name of the agent
task (str): The task of the agent
prompt (str): The prompt for the agent
model (str): The model to use for the agent
Returns:
str: The response of the agent
"""
agent_manager = AgentManager()
# Remove underscores from name
voice_name = name.replace("_", " ")
first_message = f"""You are {name}. Respond with: "Acknowledged"."""
agent_intro = f"{voice_name} here, Reporting for duty!"
# Create agent
if agent.config.speak_mode:
say_text(agent_intro, 1)
key, ack = agent_manager.create_agent(task, first_message, model)
if agent.config.speak_mode:
say_text(f"Hello {voice_name}. Your task is as follows. {task}.")
# Assign task (prompt), get response
agent_response = agent_manager.message_agent(key, prompt)
return f"Agent {name} created with key {key}. First response: {agent_response}"
@command("message_agent", "Message GPT Agent", '"key": "<key>", "message": "<message>"')
def message_agent(key: str, message: str, agent: Agent) -> str:
"""Message an agent with a given key and message"""
# Check if the key is a valid integer
if is_valid_int(key):
agent_response = AgentManager().message_agent(int(key), message)
else:
return "Invalid key, must be an integer."
# Speak response
if agent.config.speak_mode:
say_text(agent_response, 1)
return agent_response
@command("list_agents", "List GPT Agents", "() -> str")
def list_agents(agent: Agent) -> str:
"""List all agents
Returns:
str: A list of all agents
"""
return "List of agents:\n" + "\n".join(
[str(x[0]) + ": " + x[1] for x in AgentManager().list_agents()]
)
@command("delete_agent", "Delete GPT Agent", '"key": "<key>"')
def delete_agent(key: str, agent: Agent) -> str:
"""Delete an agent with a given key
Args:
key (str): The key of the agent to delete
Returns:
str: A message indicating whether the agent was deleted or not
"""
result = AgentManager().delete_agent(key)
return f"Agent {key} deleted." if result else f"Agent {key} does not exist."

View File: autogpt/command_decorator.py

@@ -0,0 +1,40 @@
import functools
from typing import Any, Callable, Dict, Optional
from autogpt.config import Config
from autogpt.models.command import Command
# Unique identifier for auto-gpt commands
AUTO_GPT_COMMAND_IDENTIFIER = "auto_gpt_command"
def command(
name: str,
description: str,
arguments: Dict[str, Dict[str, Any]],
enabled: bool | Callable[[Config], bool] = True,
disabled_reason: Optional[str] = None,
) -> Callable[..., Any]:
"""The command decorator is used to create Command objects from ordinary functions."""
def decorator(func: Callable[..., Any]) -> Command:
cmd = Command(
name=name,
description=description,
method=func,
signature=arguments,
enabled=enabled,
disabled_reason=disabled_reason,
)
@functools.wraps(func)
def wrapper(*args, **kwargs) -> Any:
return func(*args, **kwargs)
wrapper.command = cmd
setattr(wrapper, AUTO_GPT_COMMAND_IDENTIFIER, True)
return wrapper
return decorator
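Unlike the old decorator in `autogpt/commands/command.py` below, this version does not evaluate `enabled` against a global `Config()` at import time; it only attaches the `Command` metadata to the wrapper. A minimal usage sketch with a hypothetical command, showing the new dict-based argument schema:

```python
from autogpt.command_decorator import command

@command(
    "echo_text",  # hypothetical command, for illustration only
    "Echoes the provided text",
    {
        "text": {
            "type": "string",
            "description": "The text to echo",
            "required": True,
        }
    },
)
def echo_text(text: str, agent) -> str:
    return text

# The Command object is attached to the wrapper; CommandRegistry scans
# modules for the AUTO_GPT_COMMAND_IDENTIFIER attribute to discover it.
assert getattr(echo_text, "auto_gpt_command") is True
print(echo_text.command)  # Command instance built from the decorator arguments
```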

View File: autogpt/commands/analyze_code.py

@@ -1,34 +0,0 @@
"""Code evaluation module."""
from __future__ import annotations
from autogpt.agent.agent import Agent
from autogpt.commands.command import command
from autogpt.llm.utils import call_ai_function
@command(
"analyze_code",
"Analyze Code",
'"code": "<full_code_string>"',
)
def analyze_code(code: str, agent: Agent) -> list[str]:
"""
A function that takes in a string and returns a response from create chat
completion api call.
Parameters:
code (str): Code to be evaluated.
Returns:
A result string from create chat completion. A list of suggestions to
improve the code.
"""
function_string = "def analyze_code(code: str) -> list[str]:"
args = [code]
description_string = (
"Analyzes the given code and returns a list of suggestions for improvements."
)
return call_ai_function(
function_string, args, description_string, config=agent.config
)

View File: autogpt/commands/audio_text.py

@@ -1,71 +0,0 @@
"""Commands for converting audio to text."""
import json
import requests
from autogpt.agent.agent import Agent
from autogpt.commands.command import command
from autogpt.config import Config
@command(
"read_audio_from_file",
"Convert Audio to text",
'"filename": "<filename>"',
lambda config: config.huggingface_audio_to_text_model
and config.huggingface_api_token,
"Configure huggingface_audio_to_text_model and Hugging Face api token.",
)
def read_audio_from_file(filename: str, agent: Agent) -> str:
"""
Convert audio to text.
Args:
filename (str): The path to the audio file
Returns:
str: The text from the audio
"""
with open(filename, "rb") as audio_file:
audio = audio_file.read()
return read_audio(audio, agent.config)
def read_audio(audio: bytes, config: Config) -> str:
"""
Convert audio to text.
Args:
audio (bytes): The audio to convert
Returns:
str: The text from the audio
"""
if config.audio_to_text_provider == "huggingface":
text = read_huggingface_audio(audio, config)
if text:
return f"The audio says: {text}"
else:
return f"Error, couldn't convert audio to text"
return "Error: No audio to text provider given"
def read_huggingface_audio(audio: bytes, config: Config) -> str:
model = config.huggingface_audio_to_text_model
api_url = f"https://api-inference.huggingface.co/models/{model}"
api_token = config.huggingface_api_token
headers = {"Authorization": f"Bearer {api_token}"}
if api_token is None:
raise ValueError(
"You need to set your Hugging Face API token in the config file."
)
response = requests.post(
api_url,
headers=headers,
data=audio,
)
response_json = json.loads(response.content.decode("utf-8"))
return response_json.get("text")

View File: autogpt/commands/command.py

@@ -1,207 +0,0 @@
import functools
import importlib
import inspect
from inspect import Parameter
from typing import Any, Callable, Optional
from autogpt.config import Config
from autogpt.logs import logger
# Unique identifier for auto-gpt commands
AUTO_GPT_COMMAND_IDENTIFIER = "auto_gpt_command"
class Command:
"""A class representing a command.
Attributes:
name (str): The name of the command.
description (str): A brief description of what the command does.
signature (str): The signature of the function that the command executes. Defaults to None.
"""
def __init__(
self,
name: str,
description: str,
method: Callable[..., Any],
signature: str = "",
enabled: bool | Callable[[Config], bool] = True,
disabled_reason: Optional[str] = None,
):
self.name = name
self.description = description
self.method = method
self.signature = signature
self.enabled = enabled
self.disabled_reason = disabled_reason
def __call__(self, *args, **kwargs) -> Any:
if "config" in kwargs and callable(self.enabled):
self.enabled = self.enabled(kwargs["config"])
if not self.enabled:
if self.disabled_reason:
return f"Command '{self.name}' is disabled: {self.disabled_reason}"
return f"Command '{self.name}' is disabled"
return self.method(*args, **kwargs)
def __str__(self) -> str:
return f"{self.name}: {self.description}, args: {self.signature}"
class CommandRegistry:
"""
The CommandRegistry class is a manager for a collection of Command objects.
It allows the registration, modification, and retrieval of Command objects,
as well as the scanning and loading of command plugins from a specified
directory.
"""
def __init__(self):
self.commands = {}
def _import_module(self, module_name: str) -> Any:
return importlib.import_module(module_name)
def _reload_module(self, module: Any) -> Any:
return importlib.reload(module)
def register(self, cmd: Command) -> None:
if cmd.name in self.commands:
logger.warn(
f"Command '{cmd.name}' already registered and will be overwritten!"
)
self.commands[cmd.name] = cmd
def unregister(self, command_name: str):
if command_name in self.commands:
del self.commands[command_name]
else:
raise KeyError(f"Command '{command_name}' not found in registry.")
def reload_commands(self) -> None:
"""Reloads all loaded command plugins."""
for cmd_name in self.commands:
cmd = self.commands[cmd_name]
module = self._import_module(cmd.__module__)
reloaded_module = self._reload_module(module)
if hasattr(reloaded_module, "register"):
reloaded_module.register(self)
def get_command(self, name: str) -> Callable[..., Any]:
return self.commands[name]
def call(self, command_name: str, **kwargs) -> Any:
if command_name not in self.commands:
raise KeyError(f"Command '{command_name}' not found in registry.")
command = self.commands[command_name]
return command(**kwargs)
def command_prompt(self) -> str:
"""
Returns a string representation of all registered `Command` objects for use in a prompt
"""
commands_list = [
f"{idx + 1}. {str(cmd)}" for idx, cmd in enumerate(self.commands.values())
]
return "\n".join(commands_list)
def import_commands(self, module_name: str) -> None:
"""
Imports the specified Python module containing command plugins.
This method imports the associated module and registers any functions or
classes that are decorated with the `AUTO_GPT_COMMAND_IDENTIFIER` attribute
as `Command` objects. The registered `Command` objects are then added to the
`commands` dictionary of the `CommandRegistry` object.
Args:
module_name (str): The name of the module to import for command plugins.
"""
module = importlib.import_module(module_name)
for attr_name in dir(module):
attr = getattr(module, attr_name)
# Register decorated functions
if hasattr(attr, AUTO_GPT_COMMAND_IDENTIFIER) and getattr(
attr, AUTO_GPT_COMMAND_IDENTIFIER
):
self.register(attr.command)
# Register command classes
elif (
inspect.isclass(attr) and issubclass(attr, Command) and attr != Command
):
cmd_instance = attr()
self.register(cmd_instance)
def command(
name: str,
description: str,
signature: str,
enabled: bool | Callable[[Config], bool] = True,
disabled_reason: Optional[str] = None,
) -> Callable[..., Any]:
"""The command decorator is used to create Command objects from ordinary functions."""
# TODO: Remove this in favor of better command management
CFG = Config()
if callable(enabled):
enabled = enabled(CFG)
if not enabled:
if disabled_reason is not None:
logger.debug(f"Command '{name}' is disabled: {disabled_reason}")
return lambda func: func
def decorator(func: Callable[..., Any]) -> Command:
cmd = Command(
name=name,
description=description,
method=func,
signature=signature,
enabled=enabled,
disabled_reason=disabled_reason,
)
@functools.wraps(func)
def wrapper(*args, **kwargs) -> Any:
return func(*args, **kwargs)
wrapper.command = cmd
setattr(wrapper, AUTO_GPT_COMMAND_IDENTIFIER, True)
return wrapper
return decorator
def ignore_unexpected_kwargs(func: Callable[..., Any]) -> Callable[..., Any]:
def filter_kwargs(kwargs: dict) -> dict:
sig = inspect.signature(func)
# Parameter.VAR_KEYWORD - a dict of keyword arguments that aren't bound to any other
if any(map(lambda p: p.kind == Parameter.VAR_KEYWORD, sig.parameters.values())):
# if **kwargs exist, return directly
return kwargs
_params = list(
filter(
lambda p: p.kind
in {Parameter.KEYWORD_ONLY, Parameter.POSITIONAL_OR_KEYWORD},
sig.parameters.values(),
)
)
res_kwargs = {
param.name: kwargs[param.name] for param in _params if param.name in kwargs
}
return res_kwargs
@functools.wraps(func)
def wrapper(*args, **kwargs) -> Any:
kwargs = filter_kwargs(kwargs)
return func(*args, **kwargs)
return wrapper
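`ignore_unexpected_kwargs` inspects the wrapped function's signature and silently drops keyword arguments the function cannot accept, unless it already declares `**kwargs`. A small sketch of what this removed helper did, assuming the definition above is in scope (`greet` is hypothetical):

```python
@ignore_unexpected_kwargs
def greet(name: str, greeting: str = "Hello") -> str:
    return f"{greeting}, {name}!"

# greet() declares neither **kwargs nor a "config" parameter, so the
# decorator filters "config" out instead of raising a TypeError.
print(greet(name="Ada", greeting="Hi", config=object()))  # -> "Hi, Ada!"
```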

View File: autogpt/commands/execute_code.py

@@ -7,10 +7,9 @@ import docker
from docker.errors import ImageNotFound
from autogpt.agent.agent import Agent
from autogpt.commands.command import command
from autogpt.command_decorator import command
from autogpt.config import Config
from autogpt.logs import logger
from autogpt.setup import CFG
from autogpt.workspace.workspace import Workspace
ALLOWLIST_CONTROL = "allowlist"
@@ -19,16 +18,27 @@ DENYLIST_CONTROL = "denylist"
@command(
"execute_python_code",
"Create a Python file and execute it",
'"code": "<code>", "basename": "<basename>"',
"Creates a Python file and executes it",
{
"code": {
"type": "string",
"description": "The Python code to run",
"required": True,
},
"name": {
"type": "string",
"description": "A name to be given to the python file",
"required": True,
},
},
)
def execute_python_code(code: str, basename: str, agent: Agent) -> str:
def execute_python_code(code: str, name: str, agent: Agent) -> str:
"""Create and execute a Python file in a Docker container and return the STDOUT of the
executed code. If there is any data that needs to be captured, use a print statement.
Args:
code (str): The Python code to run
basename (str): A name to be given to the Python file
name (str): A name to be given to the Python file
Returns:
str: The STDOUT captured from the code when it ran
@@ -37,10 +47,10 @@ def execute_python_code(code: str, basename: str, agent: Agent) -> str:
directory = os.path.join(agent.config.workspace_path, ai_name, "executed_code")
os.makedirs(directory, exist_ok=True)
if not basename.endswith(".py"):
basename = basename + ".py"
if not name.endswith(".py"):
name = name + ".py"
path = os.path.join(directory, basename)
path = os.path.join(directory, name)
try:
with open(path, "w+", encoding="utf-8") as f:
@@ -51,7 +61,17 @@ def execute_python_code(code: str, basename: str, agent: Agent) -> str:
return f"Error: {str(e)}"
@command("execute_python_file", "Execute Python File", '"filename": "<filename>"')
@command(
"execute_python_file",
"Executes an existing Python file",
{
"filename": {
"type": "string",
"description": "The name of te file to execute",
"required": True,
},
},
)
def execute_python_file(filename: str, agent: Agent) -> str:
"""Execute a Python file in a Docker container and return the output
@@ -62,7 +82,7 @@ def execute_python_file(filename: str, agent: Agent) -> str:
str: The output of the file
"""
logger.info(
f"Executing python file '{filename}' in working directory '{CFG.workspace_path}'"
f"Executing python file '{filename}' in working directory '{agent.config.workspace_path}'"
)
if not filename.endswith(".py"):
@@ -84,7 +104,7 @@ def execute_python_file(filename: str, agent: Agent) -> str:
["python", str(path)],
capture_output=True,
encoding="utf8",
cwd=CFG.workspace_path,
cwd=agent.config.workspace_path,
)
if result.returncode == 0:
return result.stdout
@@ -153,6 +173,7 @@ def validate_command(command: str, config: Config) -> bool:
Args:
command (str): The command to validate
config (Config): The config to use to validate the command
Returns:
bool: True if the command is allowed, False otherwise
@@ -170,10 +191,16 @@ def validate_command(command: str, config: Config) -> bool:
@command(
"execute_shell",
"Execute Shell Command, non-interactive commands only",
'"command_line": "<command_line>"',
lambda cfg: cfg.execute_local_commands,
"You are not allowed to run local shell commands. To execute"
"Executes a Shell Command, non-interactive commands only",
{
"command_line": {
"type": "string",
"description": "The command line to execute",
"required": True,
}
},
enabled=lambda config: config.execute_local_commands,
disabled_reason="You are not allowed to run local shell commands. To execute"
" shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
"in your config file: .env - do not attempt to bypass the restriction.",
)
@@ -210,8 +237,14 @@ def execute_shell(command_line: str, agent: Agent) -> str:
@command(
"execute_shell_popen",
"Execute Shell Command, non-interactive commands only",
'"command_line": "<command_line>"',
"Executes a Shell Command, non-interactive commands only",
{
"query": {
"type": "string",
"description": "The search query",
"required": True,
}
},
lambda config: config.execute_local_commands,
"You are not allowed to run local shell commands. To execute"
" shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "

View File: autogpt/commands/file_operations.py

@@ -4,22 +4,16 @@ from __future__ import annotations
import hashlib
import os
import os.path
import re
from typing import Generator, Literal
import requests
from colorama import Back, Fore
from confection import Config
from requests.adapters import HTTPAdapter, Retry
from autogpt.agent.agent import Agent
from autogpt.commands.command import command, ignore_unexpected_kwargs
from autogpt.command_decorator import command
from autogpt.commands.file_operations_utils import read_textual_file
from autogpt.config import Config
from autogpt.logs import logger
from autogpt.memory.vector import MemoryItem, VectorMemory
from autogpt.spinner import Spinner
from autogpt.utils import readable_file_size
Operation = Literal["write", "append", "delete"]
@@ -88,6 +82,7 @@ def is_duplicate_operation(
Args:
operation: The operation to check for
filename: The name of the file to check for
config: The agent config
checksum: The checksum of the contents to be written
Returns:
@@ -120,7 +115,17 @@ def log_operation(
)
@command("read_file", "Read a file", '"filename": "<filename>"')
@command(
"read_file",
"Read an existing file",
{
"filename": {
"type": "string",
"description": "The path of the file to read",
"required": True,
}
},
)
def read_file(filename: str, agent: Agent) -> str:
"""Read a file and return the contents
@@ -134,7 +139,7 @@ def read_file(filename: str, agent: Agent) -> str:
content = read_textual_file(filename, logger)
# TODO: invalidate/update memory when file is edited
file_memory = MemoryItem.from_text_file(content, filename)
file_memory = MemoryItem.from_text_file(content, filename, agent.config)
if len(file_memory.chunks) > 1:
return file_memory.summary
@@ -161,7 +166,7 @@ def ingest_file(
# TODO: differentiate between different types of files
file_memory = MemoryItem.from_text_file(content, filename)
logger.debug(f"Created memory: {file_memory.dump()}")
logger.debug(f"Created memory: {file_memory.dump(True)}")
memory.add(file_memory)
logger.info(f"Ingested {len(file_memory.e_chunks)} chunks from {filename}")
@@ -169,7 +174,22 @@ def ingest_file(
logger.warn(f"Error while ingesting file '{filename}': {err}")
@command("write_to_file", "Write to file", '"filename": "<filename>", "text": "<text>"')
@command(
"write_to_file",
"Writes to a file",
{
"filename": {
"type": "string",
"description": "The name of the file to write to",
"required": True,
},
"text": {
"type": "string",
"description": "The text to write to the file",
"required": True,
},
},
)
def write_to_file(filename: str, text: str, agent: Agent) -> str:
"""Write text to a file
@@ -195,69 +215,20 @@ def write_to_file(filename: str, text: str, agent: Agent) -> str:
@command(
"replace_in_file",
"Replace text or code in a file",
'"filename": "<filename>", '
'"old_text": "<old_text>", "new_text": "<new_text>", '
'"occurrence_index": "<occurrence_index>"',
)
def replace_in_file(
filename: str, old_text: str, new_text: str, agent: Agent, occurrence_index=None
):
"""Update a file by replacing one or all occurrences of old_text with new_text using Python's built-in string
manipulation and regular expression modules for cross-platform file editing similar to sed and awk.
Args:
filename (str): The name of the file
old_text (str): String to be replaced. \n will be stripped from the end.
new_text (str): New string. \n will be stripped from the end.
occurrence_index (int): Optional index of the occurrence to replace. If None, all occurrences will be replaced.
Returns:
str: A message indicating whether the file was updated successfully or if there were no matches found for old_text
in the file.
Raises:
Exception: If there was an error updating the file.
"""
try:
with open(filename, "r", encoding="utf-8") as f:
content = f.read()
old_text = old_text.rstrip("\n")
new_text = new_text.rstrip("\n")
if occurrence_index is None:
new_content = content.replace(old_text, new_text)
else:
matches = list(re.finditer(re.escape(old_text), content))
if not matches:
return f"No matches found for {old_text} in {filename}"
if int(occurrence_index) >= len(matches):
return f"Occurrence index {occurrence_index} is out of range for {old_text} in {filename}"
match = matches[int(occurrence_index)]
start, end = match.start(), match.end()
new_content = content[:start] + new_text + content[end:]
if content == new_content:
return f"No matches found for {old_text} in {filename}"
with open(filename, "w", encoding="utf-8") as f:
f.write(new_content)
with open(filename, "r", encoding="utf-8") as f:
checksum = text_checksum(f.read())
log_operation("update", filename, agent, checksum=checksum)
return f"File {filename} updated successfully."
except Exception as e:
return "Error: " + str(e)
@command(
"append_to_file", "Append to file", '"filename": "<filename>", "text": "<text>"'
"append_to_file",
"Appends to a file",
{
"filename": {
"type": "string",
"description": "The name of the file to write to",
"required": True,
},
"text": {
"type": "string",
"description": "The text to write to the file",
"required": True,
},
},
)
def append_to_file(
filename: str, text: str, agent: Agent, should_log: bool = True
@@ -288,7 +259,17 @@ def append_to_file(
return f"Error: {err}"
@command("delete_file", "Delete file", '"filename": "<filename>"')
@command(
"delete_file",
"Deletes a file",
{
"filename": {
"type": "string",
"description": "The name of the file to delete",
"required": True,
}
},
)
def delete_file(filename: str, agent: Agent) -> str:
"""Delete a file
@@ -308,8 +289,17 @@ def delete_file(filename: str, agent: Agent) -> str:
return f"Error: {err}"
@command("list_files", "List Files in Directory", '"directory": "<directory>"')
@ignore_unexpected_kwargs
@command(
"list_files",
"Lists Files in a Directory",
{
"directory": {
"type": "string",
"description": "The directory to list files in",
"required": True,
}
},
)
def list_files(directory: str, agent: Agent) -> list[str]:
"""lists files in a directory recursively
@@ -331,51 +321,3 @@ def list_files(directory: str, agent: Agent) -> list[str]:
found_files.append(relative_path)
return found_files
@command(
"download_file",
"Download File",
'"url": "<url>", "filename": "<filename>"',
lambda config: config.allow_downloads,
"Error: You do not have user authorization to download files locally.",
)
def download_file(url, filename, agent: Agent):
"""Downloads a file
Args:
url (str): URL of the file to download
filename (str): Filename to save the file as
"""
try:
directory = os.path.dirname(filename)
os.makedirs(directory, exist_ok=True)
message = f"{Fore.YELLOW}Downloading file from {Back.LIGHTBLUE_EX}{url}{Back.RESET}{Fore.RESET}"
with Spinner(message, plain_output=agent.config.plain_output) as spinner:
session = requests.Session()
retry = Retry(total=3, backoff_factor=1, status_forcelist=[502, 503, 504])
adapter = HTTPAdapter(max_retries=retry)
session.mount("http://", adapter)
session.mount("https://", adapter)
total_size = 0
downloaded_size = 0
with session.get(url, allow_redirects=True, stream=True) as r:
r.raise_for_status()
total_size = int(r.headers.get("Content-Length", 0))
downloaded_size = 0
with open(filename, "wb") as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
downloaded_size += len(chunk)
# Update the progress message
progress = f"{readable_file_size(downloaded_size)} / {readable_file_size(total_size)}"
spinner.update_message(f"{message} {progress}")
return f'Successfully downloaded and locally stored file: "{filename}"! (Size: {readable_file_size(downloaded_size)})'
except requests.HTTPError as err:
return f"Got an HTTP Error whilst trying to download file: {err}"
except Exception as err:
return f"Error: {err}"

View File: autogpt/commands/git_operations.py

@@ -3,14 +3,25 @@
from git.repo import Repo
from autogpt.agent.agent import Agent
from autogpt.commands.command import command
from autogpt.command_decorator import command
from autogpt.url_utils.validators import validate_url
@command(
"clone_repository",
"Clone Repository",
'"url": "<repository_url>", "clone_path": "<clone_path>"',
"Clones a Repository",
{
"url": {
"type": "string",
"description": "The URL of the repository to clone",
"required": True,
},
"clone_path": {
"type": "string",
"description": "The path to clone the repository to",
"required": True,
},
},
lambda config: config.github_username and config.github_api_key,
"Configure github_username and github_api_key.",
)

View File: autogpt/commands/image_gen.py

@@ -10,14 +10,20 @@ import requests
from PIL import Image
from autogpt.agent.agent import Agent
from autogpt.commands.command import command
from autogpt.command_decorator import command
from autogpt.logs import logger
@command(
"generate_image",
"Generate Image",
'"prompt": "<prompt>"',
"Generates an Image",
{
"prompt": {
"type": "string",
"description": "The prompt used to generate the image",
"required": True,
},
},
lambda config: config.image_provider,
"Requires a image provider to be set.",
)
@@ -175,7 +181,7 @@ def generate_image_with_sd_webui(
"negative_prompt": negative_prompt,
"sampler_index": "DDIM",
"steps": 20,
"cfg_scale": 7.0,
"config_scale": 7.0,
"width": size,
"height": size,
"n_iter": 1,

View File: autogpt/commands/improve_code.py

@@ -1,38 +0,0 @@
from __future__ import annotations
import json
from autogpt.agent.agent import Agent
from autogpt.commands.command import command
from autogpt.llm.utils import call_ai_function
@command(
"improve_code",
"Get Improved Code",
'"suggestions": "<list_of_suggestions>", "code": "<full_code_string>"',
)
def improve_code(suggestions: list[str], code: str, agent: Agent) -> str:
"""
A function that takes in code and suggestions and returns a response from create
chat completion api call.
Parameters:
suggestions (list): A list of suggestions around what needs to be improved.
code (str): Code to be improved.
Returns:
A result string from create chat completion. Improved code in response.
"""
function_string = (
"def generate_improved_code(suggestions: list[str], code: str) -> str:"
)
args = [json.dumps(suggestions), code]
description_string = (
"Improves the provided code based on the suggestions"
" provided, making no other changes."
)
return call_ai_function(
function_string, args, description_string, config=agent.config
)

View File: autogpt/commands/task_statuses.py

@@ -4,21 +4,27 @@ from __future__ import annotations
from typing import NoReturn
from autogpt.agent.agent import Agent
from autogpt.commands.command import command
from autogpt.command_decorator import command
from autogpt.logs import logger
@command(
"task_complete",
"Task Complete (Shutdown)",
'"reason": "<reason>"',
"goals_accomplished",
"Goals are accomplished and there is nothing left to do",
{
"reason": {
"type": "string",
"description": "A summary to the user of how the goals were accomplished",
"required": True,
}
},
)
def task_complete(reason: str, agent: Agent) -> NoReturn:
"""
A function that takes in a string and exits the program
Parameters:
reason (str): The reason for shutting down.
reason (str): A summary to the user of how the goals were accomplished.
Returns:
NoReturn: This function shuts the program down and does not return.

View File: autogpt/commands/web_playwright.py

@@ -1,82 +0,0 @@
"""Web scraping commands using Playwright"""
from __future__ import annotations
from autogpt.logs import logger
try:
from playwright.sync_api import sync_playwright
except ImportError:
logger.info(
"Playwright not installed. Please install it with 'pip install playwright' to use."
)
from bs4 import BeautifulSoup
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
def scrape_text(url: str) -> str:
"""Scrape text from a webpage
Args:
url (str): The URL to scrape text from
Returns:
str: The scraped text
"""
with sync_playwright() as p:
browser = p.chromium.launch()
page = browser.new_page()
try:
page.goto(url)
html_content = page.content()
soup = BeautifulSoup(html_content, "html.parser")
for script in soup(["script", "style"]):
script.extract()
text = soup.get_text()
lines = (line.strip() for line in text.splitlines())
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
text = "\n".join(chunk for chunk in chunks if chunk)
except Exception as e:
text = f"Error: {str(e)}"
finally:
browser.close()
return text
def scrape_links(url: str) -> str | list[str]:
"""Scrape links from a webpage
Args:
url (str): The URL to scrape links from
Returns:
Union[str, List[str]]: The scraped links
"""
with sync_playwright() as p:
browser = p.chromium.launch()
page = browser.new_page()
try:
page.goto(url)
html_content = page.content()
soup = BeautifulSoup(html_content, "html.parser")
for script in soup(["script", "style"]):
script.extract()
hyperlinks = extract_hyperlinks(soup, url)
formatted_links = format_hyperlinks(hyperlinks)
except Exception as e:
formatted_links = f"Error: {str(e)}"
finally:
browser.close()
return formatted_links

View File: autogpt/commands/web_requests.py

@@ -1,104 +0,0 @@
"""Browse a webpage and summarize it using the LLM model"""
from __future__ import annotations
from typing import TYPE_CHECKING
import requests
from bs4 import BeautifulSoup
from requests import Response
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
from autogpt.url_utils.validators import validate_url
session = requests.Session()
if TYPE_CHECKING:
from autogpt.agent.agent import Agent
@validate_url
def get_response(
url: str, agent: Agent, timeout: int = 10
) -> tuple[None, str] | tuple[Response, None]:
"""Get the response from a URL
Args:
url (str): The URL to get the response from
timeout (int): The timeout for the HTTP request
Returns:
tuple[None, str] | tuple[Response, None]: The response and error message
Raises:
ValueError: If the URL is invalid
requests.exceptions.RequestException: If the HTTP request fails
"""
try:
session.headers.update({"User-Agent": agent.config.user_agent})
response = session.get(url, timeout=timeout)
# Check if the response contains an HTTP error
if response.status_code >= 400:
return None, f"Error: HTTP {str(response.status_code)} error"
return response, None
except ValueError as ve:
# Handle invalid URL format
return None, f"Error: {str(ve)}"
except requests.exceptions.RequestException as re:
# Handle exceptions related to the HTTP request
# (e.g., connection errors, timeouts, etc.)
return None, f"Error: {str(re)}"
def scrape_text(url: str, agent: Agent) -> str:
"""Scrape text from a webpage
Args:
url (str): The URL to scrape text from
Returns:
str: The scraped text
"""
response, error_message = get_response(url, agent)
if error_message:
return error_message
if not response:
return "Error: Could not get response"
soup = BeautifulSoup(response.text, "html.parser")
for script in soup(["script", "style"]):
script.extract()
text = soup.get_text()
lines = (line.strip() for line in text.splitlines())
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
text = "\n".join(chunk for chunk in chunks if chunk)
return text
def scrape_links(url: str, agent: Agent) -> str | list[str]:
"""Scrape links from a webpage
Args:
url (str): The URL to scrape links from
Returns:
str | list[str]: The scraped links
"""
response, error_message = get_response(url, agent)
if error_message:
return error_message
if not response:
return "Error: Could not get response"
soup = BeautifulSoup(response.text, "html.parser")
for script in soup(["script", "style"]):
script.extract()
hyperlinks = extract_hyperlinks(soup, url)
return format_hyperlinks(hyperlinks)

View File: autogpt/commands/web_search.py

@@ -8,18 +8,23 @@ from itertools import islice
from duckduckgo_search import DDGS
from autogpt.agent.agent import Agent
from autogpt.commands.command import command
from autogpt.command_decorator import command
DUCKDUCKGO_MAX_ATTEMPTS = 3
@command(
"google",
"Google Search",
'"query": "<query>"',
lambda config: not config.google_api_key,
"web_search",
"Searches the web",
{
"query": {
"type": "string",
"description": "The search query",
"required": True,
}
},
)
def google_search(query: str, agent: Agent, num_results: int = 8) -> str:
def web_search(query: str, agent: Agent, num_results: int = 8) -> str:
"""Return the results of a Google search
Args:
@@ -52,14 +57,18 @@ def google_search(query: str, agent: Agent, num_results: int = 8) -> str:
@command(
"google",
"Google Search",
'"query": "<query>"',
{
"query": {
"type": "string",
"description": "The search query",
"required": True,
}
},
lambda config: bool(config.google_api_key)
and bool(config.google_custom_search_engine_id),
"Configure google_api_key and custom_search_engine_id.",
)
def google_official_search(
query: str, agent: Agent, num_results: int = 8
) -> str | list[str]:
def google(query: str, agent: Agent, num_results: int = 8) -> str | list[str]:
"""Return the results of a Google search using the official Google API
Args:

View File: autogpt/commands/web_selenium.py

@@ -28,7 +28,7 @@ from webdriver_manager.firefox import GeckoDriverManager
from webdriver_manager.microsoft import EdgeChromiumDriverManager as EdgeDriverManager
from autogpt.agent.agent import Agent
from autogpt.commands.command import command
from autogpt.command_decorator import command
from autogpt.logs import logger
from autogpt.memory.vector import MemoryItem, get_memory
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
@@ -41,8 +41,15 @@ FILE_DIR = Path(__file__).parent.parent
@command(
"browse_website",
"Browse Website",
'"url": "<url>", "question": "<what_you_want_to_find_on_website>"',
"Browses a Website",
{
"url": {"type": "string", "description": "The URL to visit", "required": True},
"question": {
"type": "string",
"description": "What you want to find on the website",
"required": True,
},
},
)
@validate_url
def browse_website(url: str, question: str, agent: Agent) -> str:
@@ -225,6 +232,6 @@ def summarize_memorize_webpage(
memory = get_memory(agent.config)
new_memory = MemoryItem.from_webpage(text, url, question=question)
new_memory = MemoryItem.from_webpage(text, url, agent.config, question=question)
memory.add(new_memory)
return new_memory.summary

View File: autogpt/commands/write_tests.py

@@ -1,40 +0,0 @@
"""A module that contains a function to generate test cases for the submitted code."""
from __future__ import annotations
import json
from autogpt.agent.agent import Agent
from autogpt.commands.command import command
from autogpt.llm.utils import call_ai_function
@command(
"write_tests",
"Write Tests",
'"code": "<full_code_string>", "focus": "<list_of_focus_areas>"',
)
def write_tests(code: str, focus: list[str], agent: Agent) -> str:
"""
A function that takes in code and focus topics and returns a response from create
chat completion api call.
Parameters:
focus (list): A list of suggestions around what needs to be improved.
code (str): Code for test cases to be generated against.
Returns:
A result string from create chat completion. Test cases for the submitted code
in response.
"""
function_string = (
"def create_test_cases(code: str, focus: Optional[str] = None) -> str:"
)
args = [code, json.dumps(focus)]
description_string = (
"Generates test cases for the existing code, focusing on"
" specific areas if required."
)
return call_ai_function(
function_string, args, description_string, config=agent.config
)

View File: autogpt/config/ai_config.py

@@ -13,7 +13,7 @@ import distro
import yaml
if TYPE_CHECKING:
from autogpt.commands.command import CommandRegistry
from autogpt.models.command_registry import CommandRegistry
from autogpt.prompts.generator import PromptGenerator
# Soon this will go in a folder where it remembers more stuff about the run(s)
@@ -59,14 +59,14 @@ class AIConfig:
self.command_registry: CommandRegistry | None = None
@staticmethod
def load(config_file: str = SAVE_FILE) -> "AIConfig":
def load(ai_settings_file: str = SAVE_FILE) -> "AIConfig":
"""
Returns class object with parameters (ai_name, ai_role, ai_goals, api_budget) loaded from
yaml file if yaml file exists,
else returns class with no parameters.
Parameters:
config_file (int): The path to the config yaml file.
ai_settings_file (str): The path to the config yaml file.
DEFAULT: "../ai_settings.yaml"
Returns:
@@ -74,7 +74,7 @@ class AIConfig:
"""
try:
with open(config_file, encoding="utf-8") as file:
with open(ai_settings_file, encoding="utf-8") as file:
config_params = yaml.load(file, Loader=yaml.FullLoader) or {}
except FileNotFoundError:
config_params = {}
@@ -91,12 +91,12 @@ class AIConfig:
# type: Type[AIConfig]
return AIConfig(ai_name, ai_role, ai_goals, api_budget)
def save(self, config_file: str = SAVE_FILE) -> None:
def save(self, ai_settings_file: str = SAVE_FILE) -> None:
"""
Saves the class parameters to the specified file yaml file path as a yaml file.
Parameters:
config_file(str): The path to the config yaml file.
ai_settings_file (str): The path to the config yaml file.
DEFAULT: "../ai_settings.yaml"
Returns:
@@ -109,11 +109,11 @@ class AIConfig:
"ai_goals": self.ai_goals,
"api_budget": self.api_budget,
}
with open(config_file, "w", encoding="utf-8") as file:
with open(ai_settings_file, "w", encoding="utf-8") as file:
yaml.dump(config, file, allow_unicode=True)
def construct_full_prompt(
self, prompt_generator: Optional[PromptGenerator] = None
self, config, prompt_generator: Optional[PromptGenerator] = None
) -> str:
"""
Returns a prompt to the user with the class information in an organized fashion.
@@ -133,22 +133,20 @@ class AIConfig:
""
)
from autogpt.config import Config
from autogpt.prompts.prompt import build_default_prompt_generator
cfg = Config()
if prompt_generator is None:
prompt_generator = build_default_prompt_generator()
prompt_generator = build_default_prompt_generator(config)
prompt_generator.goals = self.ai_goals
prompt_generator.name = self.ai_name
prompt_generator.role = self.ai_role
prompt_generator.command_registry = self.command_registry
for plugin in cfg.plugins:
for plugin in config.plugins:
if not plugin.can_handle_post_prompt():
continue
prompt_generator = plugin.post_prompt(prompt_generator)
if cfg.execute_local_commands:
if config.execute_local_commands:
# add OS info to prompt
os_name = platform.system()
os_info = (

View File: autogpt/config/config.py

@@ -300,10 +300,9 @@ class Config(metaclass=Singleton):
self.memory_backend = name
def check_openai_api_key() -> None:
def check_openai_api_key(config: Config) -> None:
"""Check if the OpenAI API key is set in config.py or as an environment variable."""
cfg = Config()
if not cfg.openai_api_key:
if not config.openai_api_key:
print(
Fore.RED
+ "Please set your OpenAI API key in .env or as an environment variable."

View File: autogpt/config/prompt_config.py

@@ -6,11 +6,8 @@ import yaml
from colorama import Fore
from autogpt import utils
from autogpt.config.config import Config
from autogpt.logs import logger
CFG = Config()
class PromptConfig:
"""
@@ -22,10 +19,7 @@ class PromptConfig:
performance_evaluations (list): Performance evaluation list for the prompt generator.
"""
def __init__(
self,
config_file: str = CFG.prompt_settings_file,
) -> None:
def __init__(self, prompt_settings_file: str) -> None:
"""
Initialize a class instance with parameters (constraints, resources, performance_evaluations) loaded from
yaml file if yaml file exists,
@@ -39,13 +33,13 @@ class PromptConfig:
None
"""
# Validate file
(validated, message) = utils.validate_yaml_file(config_file)
(validated, message) = utils.validate_yaml_file(prompt_settings_file)
if not validated:
logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
logger.double_check()
exit(1)
with open(config_file, encoding="utf-8") as file:
with open(prompt_settings_file, encoding="utf-8") as file:
config_params = yaml.load(file, Loader=yaml.FullLoader)
self.constraints = config_params.get("constraints", [])

View File: autogpt/json_utils/utilities.py

@@ -9,7 +9,6 @@ from jsonschema import Draft7Validator
from autogpt.config import Config
from autogpt.logs import logger
CFG = Config()
LLM_DEFAULT_RESPONSE_FORMAT = "llm_response_format_1"
@@ -23,7 +22,8 @@ def extract_json_from_response(response_content: str) -> dict:
try:
return ast.literal_eval(response_content)
except BaseException as e:
logger.error(f"Error parsing JSON response with literal_eval {e}")
logger.info(f"Error parsing JSON response with literal_eval {e}")
logger.debug(f"Invalid JSON received in response: {response_content}")
# TODO: How to raise an error here without causing the program to exit?
return {}
@@ -37,7 +37,7 @@ def llm_response_schema(
def validate_json(
json_object: object, schema_name: str = LLM_DEFAULT_RESPONSE_FORMAT
json_object: object, config: Config, schema_name: str = LLM_DEFAULT_RESPONSE_FORMAT
) -> bool:
"""
:type schema_name: object
@@ -54,7 +54,7 @@ def validate_json(
for error in errors:
logger.error(f"JSON Validation Error: {error}")
if CFG.debug_mode:
if config.debug_mode:
logger.error(
json.dumps(json_object, indent=4)
) # Replace 'json_object' with the variable containing the JSON data
@@ -67,29 +67,3 @@ def validate_json(
logger.debug("The JSON object is valid.")
return True
def validate_json_string(json_string: str, schema_name: str) -> dict | None:
"""
:type schema_name: object
:param schema_name: str
:type json_object: object
"""
try:
json_loaded = json.loads(json_string)
if not validate_json(json_loaded, schema_name):
return None
return json_loaded
except:
return None
def is_string_valid_json(json_string: str, schema_name: str) -> bool:
"""
:type schema_name: object
:param schema_name: str
:type json_object: object
"""
return validate_json_string(json_string, schema_name) is not None
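With the module-level `CFG` gone, callers must now pass a `Config` so that `validate_json` can consult `config.debug_mode` itself. A minimal sketch of the updated call pattern, mirroring the `Agent` code earlier in this diff (the response string is illustrative):

```python
import json

from autogpt.config import Config
from autogpt.json_utils.utilities import extract_json_from_response, validate_json

config = Config()
response = '{"thoughts": {"text": "..."}, "command": {"name": "list_files", "args": {"directory": "."}}}'

try:
    reply_json = extract_json_from_response(response)
    validate_json(reply_json, config)  # logs schema errors; dumps the object when config.debug_mode
except json.JSONDecodeError:
    reply_json = {}
```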

View File: autogpt/llm/api_manager.py

@@ -5,9 +5,7 @@ from typing import List, Optional
import openai
from openai import Model
from autogpt.config import Config
from autogpt.llm.base import CompletionModelInfo, MessageDict
from autogpt.llm.providers.openai import OPEN_AI_MODELS
from autogpt.llm.base import CompletionModelInfo
from autogpt.logs import logger
from autogpt.singleton import Singleton
@@ -27,52 +25,7 @@ class ApiManager(metaclass=Singleton):
self.total_budget = 0.0
self.models = None
def create_chat_completion(
self,
messages: list[MessageDict],
model: str | None = None,
temperature: float = None,
max_tokens: int | None = None,
deployment_id=None,
):
"""
Create a chat completion and update the cost.
Args:
messages (list): The list of messages to send to the API.
model (str): The model to use for the API call.
temperature (float): The temperature to use for the API call.
max_tokens (int): The maximum number of tokens for the API call.
Returns:
str: The AI's response.
"""
cfg = Config()
if temperature is None:
temperature = cfg.temperature
if deployment_id is not None:
response = openai.ChatCompletion.create(
deployment_id=deployment_id,
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
api_key=cfg.openai_api_key,
)
else:
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
api_key=cfg.openai_api_key,
)
if not hasattr(response, "error"):
logger.debug(f"Response: {response}")
prompt_tokens = response.usage.prompt_tokens
completion_tokens = response.usage.completion_tokens
self.update_cost(prompt_tokens, completion_tokens, model)
return response
def update_cost(self, prompt_tokens, completion_tokens, model: str):
def update_cost(self, prompt_tokens, completion_tokens, model):
"""
Update the total cost, prompt tokens, and completion tokens.
@@ -82,6 +35,8 @@ class ApiManager(metaclass=Singleton):
model (str): The model used for the API call.
"""
# the .model property in API responses can contain version suffixes like -v2
from autogpt.llm.providers.openai import OPEN_AI_MODELS
model = model[:-3] if model.endswith("-v2") else model
model_info = OPEN_AI_MODELS[model]

View File: autogpt/llm/base.py

@@ -7,6 +7,9 @@ from typing import List, Literal, TypedDict
MessageRole = Literal["system", "user", "assistant"]
MessageType = Literal["ai_response", "action_result"]
TText = list[int]
"""Token array representing tokenized text"""
class MessageDict(TypedDict):
role: MessageRole

View File: autogpt/llm/chat.py

@@ -19,7 +19,7 @@ def chat_with_ai(
config: Config,
agent: Agent,
system_prompt: str,
user_input: str,
triggering_prompt: str,
token_limit: int,
model: str | None = None,
):
@@ -31,7 +31,7 @@ def chat_with_ai(
config (Config): The config to use.
agent (Agent): The agent to use.
system_prompt (str): The prompt explaining the rules to the AI.
user_input (str): The input from the user.
triggering_prompt (str): The input from the user.
token_limit (int): The maximum number of tokens allowed in the API call.
model (str, optional): The model to use. If None, the config.fast_llm_model will be used. Defaults to None.
@@ -90,13 +90,13 @@ def chat_with_ai(
# )
# Account for user input (appended later)
user_input_msg = Message("user", user_input)
user_input_msg = Message("user", triggering_prompt)
current_tokens_used += count_message_tokens([user_input_msg], model)
current_tokens_used += 500 # Reserve space for new_summary_message
# Add Messages until the token limit is reached or there are no more messages to add.
for cycle in reversed(list(agent.history.per_cycle())):
for cycle in reversed(list(agent.history.per_cycle(agent.config))):
messages_to_add = [msg for msg in cycle if msg is not None]
tokens_to_add = count_message_tokens(messages_to_add, model)
if current_tokens_used + tokens_to_add > send_token_limit:
@@ -110,14 +110,14 @@ def chat_with_ai(
# Update & add summary of trimmed messages
if len(agent.history) > 0:
new_summary_message, trimmed_messages = agent.history.trim_messages(
current_message_chain=list(message_sequence),
current_message_chain=list(message_sequence), config=agent.config
)
tokens_to_add = count_message_tokens([new_summary_message], model)
message_sequence.insert(insertion_index, new_summary_message)
current_tokens_used += tokens_to_add - 500
# FIXME: uncomment when memory is back in use
# memory_store = get_memory(cfg)
# memory_store = get_memory(config)
# for _, ai_msg, result_msg in agent.history.per_cycle(trimmed_messages):
# memory_to_add = MemoryItem.from_ai_action(ai_msg, result_msg)
# logger.debug(f"Storing the following memory:\n{memory_to_add.dump()}")
@@ -192,6 +192,7 @@ def chat_with_ai(
# temperature and other settings we care about
assistant_reply = create_chat_completion(
prompt=message_sequence,
config=agent.config,
max_tokens=tokens_remaining,
)

View File: autogpt/llm/providers/openai.py

@@ -1,4 +1,22 @@
from autogpt.llm.base import ChatModelInfo, EmbeddingModelInfo, TextModelInfo
import functools
import time
from typing import List
from unittest.mock import patch
import openai
import openai.api_resources.abstract.engine_api_resource as engine_api_resource
from colorama import Fore, Style
from openai.error import APIError, RateLimitError, Timeout
from openai.openai_object import OpenAIObject
from autogpt.llm.base import (
ChatModelInfo,
EmbeddingModelInfo,
MessageDict,
TextModelInfo,
TText,
)
from autogpt.logs import logger
OPEN_AI_CHAT_MODELS = {
info.name: info
@@ -88,3 +106,164 @@ OPEN_AI_MODELS: dict[str, ChatModelInfo | EmbeddingModelInfo | TextModelInfo] =
**OPEN_AI_TEXT_MODELS,
**OPEN_AI_EMBEDDING_MODELS,
}
def meter_api(func):
"""Adds ApiManager metering to functions which make OpenAI API calls"""
from autogpt.llm.api_manager import ApiManager
api_manager = ApiManager()
openai_obj_processor = openai.util.convert_to_openai_object
def update_usage_with_response(response: OpenAIObject):
try:
usage = response.usage
logger.debug(f"Reported usage from call to model {response.model}: {usage}")
api_manager.update_cost(
response.usage.prompt_tokens,
response.usage.completion_tokens if "completion_tokens" in usage else 0,
response.model,
)
except Exception as err:
logger.warn(f"Failed to update API costs: {err.__class__.__name__}: {err}")
def metering_wrapper(*args, **kwargs):
openai_obj = openai_obj_processor(*args, **kwargs)
if isinstance(openai_obj, OpenAIObject) and "usage" in openai_obj:
update_usage_with_response(openai_obj)
return openai_obj
def metered_func(*args, **kwargs):
with patch.object(
engine_api_resource.util,
"convert_to_openai_object",
side_effect=metering_wrapper,
):
return func(*args, **kwargs)
return metered_func
def retry_api(
num_retries: int = 10,
backoff_base: float = 2.0,
warn_user: bool = True,
):
"""Retry an OpenAI API call.
Args:
num_retries int: Number of retries. Defaults to 10.
backoff_base float: Base for exponential backoff. Defaults to 2.
warn_user bool: Whether to warn the user. Defaults to True.
"""
retry_limit_msg = f"{Fore.RED}Error: " f"Reached rate limit, passing...{Fore.RESET}"
api_key_error_msg = (
f"Please double check that you have setup a "
f"{Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. You can "
f"read more here: {Fore.CYAN}https://docs.agpt.co/setup/#getting-an-api-key{Fore.RESET}"
)
backoff_msg = (
f"{Fore.RED}Error: API Bad gateway. Waiting {{backoff}} seconds...{Fore.RESET}"
)
def _wrapper(func):
@functools.wraps(func)
def _wrapped(*args, **kwargs):
user_warned = not warn_user
num_attempts = num_retries + 1 # +1 for the first attempt
for attempt in range(1, num_attempts + 1):
try:
return func(*args, **kwargs)
except RateLimitError:
if attempt == num_attempts:
raise
logger.debug(retry_limit_msg)
if not user_warned:
logger.double_check(api_key_error_msg)
user_warned = True
except (APIError, Timeout) as e:
if (e.http_status not in [429, 502, 503]) or (
attempt == num_attempts
):
raise
backoff = backoff_base ** (attempt + 2)
logger.debug(backoff_msg.format(backoff=backoff))
time.sleep(backoff)
return _wrapped
return _wrapper
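Note the retry schedule implied by `backoff = backoff_base ** (attempt + 2)`: with the default base of 2.0, even the first retry waits 8 seconds. A quick sketch of the resulting wait times:

```python
# Wait times produced by retry_api's formula with backoff_base = 2.0
for attempt in range(1, 4):
    print(f"attempt {attempt}: wait {2.0 ** (attempt + 2):.0f}s")
# attempt 1: wait 8s
# attempt 2: wait 16s
# attempt 3: wait 32s
```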
@meter_api
@retry_api()
def create_chat_completion(
messages: List[MessageDict],
*_,
**kwargs,
) -> OpenAIObject:
"""Create a chat completion using the OpenAI API
Args:
messages: A list of messages to feed to the chatbot.
kwargs: Other arguments to pass to the OpenAI API chat completion call.
Returns:
OpenAIObject: The ChatCompletion response from OpenAI
"""
completion: OpenAIObject = openai.ChatCompletion.create(
messages=messages,
**kwargs,
)
if not hasattr(completion, "error"):
logger.debug(f"Response: {completion}")
return completion
@meter_api
@retry_api()
def create_text_completion(
prompt: str,
*_,
**kwargs,
) -> OpenAIObject:
"""Create a text completion using the OpenAI API
Args:
prompt: A text prompt to feed to the LLM
kwargs: Other arguments to pass to the OpenAI API text completion call.
Returns:
OpenAIObject: The Completion response from OpenAI
"""
return openai.Completion.create(
prompt=prompt,
**kwargs,
)
@meter_api
@retry_api()
def create_embedding(
input: str | TText | List[str] | List[TText],
*_,
**kwargs,
) -> OpenAIObject:
"""Create an embedding using the OpenAI API
Args:
input: The text to embed.
kwargs: Other arguments to pass to the OpenAI API embedding call.
Returns:
OpenAIObject: The Embedding response from OpenAI
"""
return openai.Embedding.create(
input=input,
**kwargs,
)
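Taken together, these thin wrappers give every OpenAI call cost metering and retry behavior for free. A hedged usage sketch of the module as introduced here (the model names and API key are placeholder assumptions, not from this diff):

```python
from autogpt.llm.providers import openai as iopenai

# Both calls are metered and retried by the decorators defined above.
completion = iopenai.create_chat_completion(
    messages=[{"role": "user", "content": "Say hello"}],
    model="gpt-3.5-turbo",  # hypothetical model choice
    api_key="sk-...",       # hypothetical placeholder
)
embeddings = iopenai.create_embedding(
    input="hello world",
    model="text-embedding-ada-002",  # hypothetical model choice
    api_key="sk-...",
)
```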

View File

@@ -1,122 +1,24 @@
from __future__ import annotations
import functools
import time
from typing import List, Literal, Optional
from unittest.mock import patch
import openai
import openai.api_resources.abstract.engine_api_resource as engine_api_resource
import openai.util
from colorama import Fore, Style
from openai.error import APIError, RateLimitError
from openai.openai_object import OpenAIObject
from colorama import Fore
from autogpt.config import Config
from autogpt.logs import logger
from ..api_manager import ApiManager
from ..base import ChatSequence, Message
from ..providers.openai import OPEN_AI_CHAT_MODELS
from ..providers import openai as iopenai
from .token_counter import *
def metered(func):
"""Adds ApiManager metering to functions which make OpenAI API calls"""
api_manager = ApiManager()
openai_obj_processor = openai.util.convert_to_openai_object
def update_usage_with_response(response: OpenAIObject):
try:
usage = response.usage
logger.debug(f"Reported usage from call to model {response.model}: {usage}")
api_manager.update_cost(
response.usage.prompt_tokens,
response.usage.completion_tokens if "completion_tokens" in usage else 0,
response.model,
)
except Exception as err:
logger.warn(f"Failed to update API costs: {err.__class__.__name__}: {err}")
def metering_wrapper(*args, **kwargs):
openai_obj = openai_obj_processor(*args, **kwargs)
if isinstance(openai_obj, OpenAIObject) and "usage" in openai_obj:
update_usage_with_response(openai_obj)
return openai_obj
def metered_func(*args, **kwargs):
with patch.object(
engine_api_resource.util,
"convert_to_openai_object",
side_effect=metering_wrapper,
):
return func(*args, **kwargs)
return metered_func
def retry_openai_api(
num_retries: int = 10,
backoff_base: float = 2.0,
warn_user: bool = True,
):
"""Retry an OpenAI API call.
Args:
num_retries int: Number of retries. Defaults to 10.
backoff_base float: Base for exponential backoff. Defaults to 2.
warn_user bool: Whether to warn the user. Defaults to True.
"""
retry_limit_msg = f"{Fore.RED}Error: " f"Reached rate limit, passing...{Fore.RESET}"
api_key_error_msg = (
f"Please double check that you have setup a "
f"{Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. You can "
f"read more here: {Fore.CYAN}https://docs.agpt.co/setup/#getting-an-api-key{Fore.RESET}"
)
backoff_msg = (
f"{Fore.RED}Error: API Bad gateway. Waiting {{backoff}} seconds...{Fore.RESET}"
)
def _wrapper(func):
@functools.wraps(func)
def _wrapped(*args, **kwargs):
user_warned = not warn_user
num_attempts = num_retries + 1 # +1 for the first attempt
for attempt in range(1, num_attempts + 1):
try:
return func(*args, **kwargs)
except RateLimitError:
if attempt == num_attempts:
raise
logger.debug(retry_limit_msg)
if not user_warned:
logger.double_check(api_key_error_msg)
user_warned = True
except APIError as e:
if (e.http_status not in [429, 502, 503]) or (
attempt == num_attempts
):
raise
backoff = backoff_base ** (attempt + 2)
logger.debug(backoff_msg.format(backoff=backoff))
time.sleep(backoff)
return _wrapped
return _wrapper
def call_ai_function(
function: str,
args: list,
description: str,
model: str | None = None,
config: Config = None,
model: Optional[str] = None,
config: Optional[Config] = None,
) -> str:
"""Call an AI function
@@ -153,43 +55,41 @@ def call_ai_function(
return create_chat_completion(prompt=prompt, temperature=0)
@metered
@retry_openai_api()
def create_text_completion(
prompt: str,
config: Config,
model: Optional[str],
temperature: Optional[float],
max_output_tokens: Optional[int],
) -> str:
cfg = Config()
if model is None:
model = cfg.fast_llm_model
model = config.fast_llm_model
if temperature is None:
temperature = cfg.temperature
temperature = config.temperature
if cfg.use_azure:
kwargs = {"deployment_id": cfg.get_azure_deployment_id_for_model(model)}
if config.use_azure:
kwargs = {"deployment_id": config.get_azure_deployment_id_for_model(model)}
else:
kwargs = {"model": model}
response = openai.Completion.create(
**kwargs,
response = iopenai.create_text_completion(
prompt=prompt,
**kwargs,
temperature=temperature,
max_tokens=max_output_tokens,
api_key=cfg.openai_api_key,
api_key=config.openai_api_key,
)
logger.debug(f"Response: {response}")
return response.choices[0].text
# Overly simple abstraction until we create something better
# simple retry mechanism when getting a rate error or a bad gateway
@metered
@retry_openai_api()
def create_chat_completion(
prompt: ChatSequence,
config: Config,
model: Optional[str] = None,
temperature: float = None,
temperature: Optional[float] = None,
max_tokens: Optional[int] = None,
) -> str:
"""Create a chat completion using the OpenAI API
@@ -203,52 +103,56 @@ def create_chat_completion(
Returns:
str: The response from the chat completion
"""
cfg = Config()
if model is None:
model = prompt.model.name
if temperature is None:
temperature = cfg.temperature
if max_tokens is None:
max_tokens = OPEN_AI_CHAT_MODELS[model].max_tokens - prompt.token_length
temperature = config.temperature
logger.debug(
f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}"
)
for plugin in cfg.plugins:
chat_completion_kwargs = {
"model": model,
"temperature": temperature,
"max_tokens": max_tokens,
}
for plugin in config.plugins:
if plugin.can_handle_chat_completion(
messages=prompt.raw(),
model=model,
temperature=temperature,
max_tokens=max_tokens,
**chat_completion_kwargs,
):
message = plugin.handle_chat_completion(
messages=prompt.raw(),
model=model,
temperature=temperature,
max_tokens=max_tokens,
**chat_completion_kwargs,
)
if message is not None:
return message
api_manager = ApiManager()
response = None
if cfg.use_azure:
kwargs = {"deployment_id": cfg.get_azure_deployment_id_for_model(model)}
else:
kwargs = {"model": model}
chat_completion_kwargs["api_key"] = config.openai_api_key
if config.use_azure:
chat_completion_kwargs[
"deployment_id"
] = config.get_azure_deployment_id_for_model(model)
response = api_manager.create_chat_completion(
**kwargs,
response = iopenai.create_chat_completion(
messages=prompt.raw(),
temperature=temperature,
max_tokens=max_tokens,
**chat_completion_kwargs,
)
logger.debug(f"Response: {response}")
resp = response.choices[0].message.content
for plugin in cfg.plugins:
resp = ""
if not hasattr(response, "error"):
resp = response.choices[0].message["content"]
else:
logger.error(response.error)
raise RuntimeError(response.error)
for plugin in config.plugins:
if not plugin.can_handle_on_response():
continue
resp = plugin.on_response(resp)
return resp
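The recurring change in this hunk, and throughout the commit, is replacing module-level `Config()` singleton lookups with an explicit `config` parameter. A minimal before/after sketch of the pattern (illustrative only):

```python
from autogpt.config import Config

# Before: hidden dependency on global singleton state
def temperature_before() -> float:
    cfg = Config()
    return cfg.temperature

# After: the caller injects the config, which makes the
# function trivially testable with a stub Config
def temperature_after(config: Config) -> float:
    return config.temperature
```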

View File

@@ -9,6 +9,7 @@ from typing import Any
from colorama import Fore, Style
from autogpt.config import Config
from autogpt.log_cycle.json_handler import JsonFileHandler, JsonFormatter
from autogpt.singleton import Singleton
from autogpt.speech import say_text
@@ -254,7 +255,7 @@ logger = Logger()
def print_assistant_thoughts(
ai_name: object,
assistant_reply_json_valid: object,
speak_mode: bool = False,
config: Config,
) -> None:
assistant_thoughts_reasoning = None
assistant_thoughts_plan = None
@@ -288,7 +289,7 @@ def print_assistant_thoughts(
logger.typewriter_log("CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}")
# Speak the assistant's thoughts
if assistant_thoughts_speak:
if speak_mode:
say_text(assistant_thoughts_speak)
if config.speak_mode:
say_text(assistant_thoughts_speak, config)
else:
logger.typewriter_log("SPEAK:", Fore.YELLOW, f"{assistant_thoughts_speak}")

View File

@@ -6,11 +6,11 @@ from pathlib import Path
from colorama import Fore, Style
from autogpt.agent import Agent
from autogpt.commands.command import CommandRegistry
from autogpt.config import Config, check_openai_api_key
from autogpt.configurator import create_config
from autogpt.logs import logger
from autogpt.memory.vector import get_memory
from autogpt.models.command_registry import CommandRegistry
from autogpt.plugins import scan_plugins
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT, construct_main_ai_config
from autogpt.utils import (
@@ -23,16 +23,10 @@ from autogpt.workspace import Workspace
from scripts.install_plugin_deps import install_plugin_dependencies
COMMAND_CATEGORIES = [
"autogpt.commands.analyze_code",
"autogpt.commands.audio_text",
"autogpt.commands.execute_code",
"autogpt.commands.file_operations",
"autogpt.commands.git_operations",
"autogpt.commands.google_search",
"autogpt.commands.image_gen",
"autogpt.commands.improve_code",
"autogpt.commands.web_search",
"autogpt.commands.web_selenium",
"autogpt.commands.write_tests",
"autogpt.app",
"autogpt.commands.task_statuses",
]
@@ -59,12 +53,12 @@ def run_auto_gpt(
logger.set_level(logging.DEBUG if debug else logging.INFO)
logger.speak_mode = speak
cfg = Config()
config = Config()
# TODO: fill in llm values here
check_openai_api_key()
check_openai_api_key(config)
create_config(
cfg,
config,
continuous,
continuous_limit,
ai_settings,
@@ -80,17 +74,17 @@ def run_auto_gpt(
skip_news,
)
if cfg.continuous_mode:
if config.continuous_mode:
for line in get_legal_warning().split("\n"):
logger.warn(markdown_to_ansi_style(line), "LEGAL:", Fore.RED)
if not cfg.skip_news:
if not config.skip_news:
motd, is_new_motd = get_latest_bulletin()
if motd:
motd = markdown_to_ansi_style(motd)
for motd_line in motd.split("\n"):
logger.info(motd_line, "NEWS:", Fore.GREEN)
if is_new_motd and not cfg.chat_messages_enabled:
if is_new_motd and not config.chat_messages_enabled:
input(
Fore.MAGENTA
+ Style.BRIGHT
@@ -129,7 +123,7 @@ def run_auto_gpt(
# TODO: pass in the ai_settings file and the env file and have them cloned into
# the workspace directory so we can bind them to the agent.
workspace_directory = Workspace.make_workspace(workspace_directory)
cfg.workspace_path = str(workspace_directory)
config.workspace_path = str(workspace_directory)
# HACK: doing this here to collect some globals that depend on the workspace.
file_logger_path = workspace_directory / "file_logger.txt"
@@ -137,17 +131,17 @@ def run_auto_gpt(
with file_logger_path.open(mode="w", encoding="utf-8") as f:
f.write("File Operation Logger ")
cfg.file_logger_path = str(file_logger_path)
config.file_logger_path = str(file_logger_path)
cfg.set_plugins(scan_plugins(cfg, cfg.debug_mode))
config.set_plugins(scan_plugins(config, config.debug_mode))
# Create a CommandRegistry instance and scan default folder
command_registry = CommandRegistry()
logger.debug(
f"The following command categories are disabled: {cfg.disabled_command_categories}"
f"The following command categories are disabled: {config.disabled_command_categories}"
)
enabled_command_categories = [
x for x in COMMAND_CATEGORIES if x not in cfg.disabled_command_categories
x for x in COMMAND_CATEGORIES if x not in config.disabled_command_categories
]
logger.debug(
@@ -158,7 +152,7 @@ def run_auto_gpt(
command_registry.import_commands(command_category)
ai_name = ""
ai_config = construct_main_ai_config()
ai_config = construct_main_ai_config(config)
ai_config.command_registry = command_registry
if ai_config.ai_name:
ai_name = ai_config.ai_name
@@ -167,21 +161,22 @@ def run_auto_gpt(
next_action_count = 0
# add chat plugins capable of report to logger
if cfg.chat_messages_enabled:
for plugin in cfg.plugins:
if config.chat_messages_enabled:
for plugin in config.plugins:
if hasattr(plugin, "can_handle_report") and plugin.can_handle_report():
logger.info(f"Loaded plugin into logger: {plugin.__class__.__name__}")
logger.chat_plugins.append(plugin)
# Initialize memory and make sure it is empty.
# this is particularly important for indexing and referencing pinecone memory
memory = get_memory(cfg, init=True)
memory = get_memory(config)
memory.clear()
logger.typewriter_log(
"Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
)
logger.typewriter_log("Using Browser:", Fore.GREEN, cfg.selenium_web_browser)
system_prompt = ai_config.construct_full_prompt()
if cfg.debug_mode:
logger.typewriter_log("Using Browser:", Fore.GREEN, config.selenium_web_browser)
system_prompt = ai_config.construct_full_prompt(config)
if config.debug_mode:
logger.typewriter_log("Prompt:", Fore.GREEN, system_prompt)
agent = Agent(
@@ -193,6 +188,6 @@ def run_auto_gpt(
triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
workspace_directory=workspace_directory,
ai_config=ai_config,
config=cfg,
config=config,
)
agent.start_interaction_loop()

View File

@@ -9,11 +9,7 @@ if TYPE_CHECKING:
from autogpt.agent import Agent
from autogpt.config import Config
from autogpt.json_utils.utilities import (
LLM_DEFAULT_RESPONSE_FORMAT,
extract_json_from_response,
is_string_valid_json,
)
from autogpt.json_utils.utilities import extract_json_from_response
from autogpt.llm.base import ChatSequence, Message, MessageRole, MessageType
from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS
from autogpt.llm.utils import count_string_tokens, create_chat_completion
@@ -51,8 +47,7 @@ class MessageHistory:
return self.messages.append(message)
def trim_messages(
self,
current_message_chain: list[Message],
self, current_message_chain: list[Message], config: Config
) -> tuple[Message, list[Message]]:
"""
Returns a list of trimmed messages: messages which are in the message history
@@ -60,6 +55,7 @@ class MessageHistory:
Args:
current_message_chain (list[Message]): The messages currently in the context.
config (Config): The config to use.
Returns:
Message: A message with the new running summary after adding the trimmed messages.
@@ -79,7 +75,7 @@ class MessageHistory:
return self.summary_message(), []
new_summary_message = self.update_running_summary(
new_events=new_messages_not_in_chain
new_events=new_messages_not_in_chain, config=config
)
# Find the index of the last message processed
@@ -88,7 +84,7 @@ class MessageHistory:
return new_summary_message, new_messages_not_in_chain
def per_cycle(self, messages: list[Message] | None = None):
def per_cycle(self, config: Config, messages: list[Message] | None = None):
"""
Yields:
Message: a message containing user input
@@ -105,8 +101,8 @@ class MessageHistory:
)
result_message = messages[i + 1]
try:
assert is_string_valid_json(
ai_message.content, LLM_DEFAULT_RESPONSE_FORMAT
assert (
extract_json_from_response(ai_message.content) != {}
), "AI response is not a valid JSON object"
assert result_message.type == "action_result"
@@ -122,7 +118,9 @@ class MessageHistory:
f"This reminds you of these events from your past: \n{self.summary}",
)
def update_running_summary(self, new_events: list[Message]) -> Message:
def update_running_summary(
self, new_events: list[Message], config: Config
) -> Message:
"""
This function takes a list of dictionaries representing new events and combines them with the current summary,
focusing on key and potentially important information to remember. The updated summary is returned in a message
@@ -139,8 +137,6 @@ class MessageHistory:
update_running_summary(new_events)
# Returns: "This reminds you of these events from your past: \nI entered the kitchen and found a scrawled note saying 7."
"""
cfg = Config()
if not new_events:
return self.summary_message()
@@ -160,7 +156,7 @@ class MessageHistory:
event.content = json.dumps(content_dict)
except json.JSONDecodeError as e:
logger.error(f"Error: Invalid JSON: {e}")
if cfg.debug_mode:
if config.debug_mode:
logger.error(f"{event.content}")
elif event.role.lower() == "system":
@@ -175,23 +171,23 @@ class MessageHistory:
# Assume an upper bound length for the summary prompt template, i.e. Your task is to create a concise running summary...., in summarize_batch func
# TODO make this default dynamic
prompt_template_length = 100
max_tokens = OPEN_AI_CHAT_MODELS.get(cfg.fast_llm_model).max_tokens
summary_tlength = count_string_tokens(str(self.summary), cfg.fast_llm_model)
max_tokens = OPEN_AI_CHAT_MODELS.get(config.fast_llm_model).max_tokens
summary_tlength = count_string_tokens(str(self.summary), config.fast_llm_model)
batch = []
batch_tlength = 0
# TODO Can put a cap on length of total new events and drop some previous events to save API cost, but need to think thru more how to do it without losing the context
for event in new_events:
event_tlength = count_string_tokens(str(event), cfg.fast_llm_model)
event_tlength = count_string_tokens(str(event), config.fast_llm_model)
if (
batch_tlength + event_tlength
> max_tokens - prompt_template_length - summary_tlength
):
# The batch is full. Summarize it and start a new one.
self.summarize_batch(batch, cfg)
self.summarize_batch(batch, config)
summary_tlength = count_string_tokens(
str(self.summary), cfg.fast_llm_model
str(self.summary), config.fast_llm_model
)
batch = [event]
batch_tlength = event_tlength
@@ -201,11 +197,11 @@ class MessageHistory:
if batch:
# There's an unprocessed batch. Summarize it.
self.summarize_batch(batch, cfg)
self.summarize_batch(batch, config)
return self.summary_message()
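The loop above fills a token budget before each summarization pass so that no single prompt overflows the model. A generic sketch of that batching strategy, with illustrative names:

```python
# Illustrative only: group events into batches that fit a token budget,
# yielding each full batch for summarization before starting the next.
def batch_by_tokens(events, budget, count_tokens):
    batch, used = [], 0
    for event in events:
        cost = count_tokens(event)
        if batch and used + cost > budget:
            yield batch
            batch, used = [], 0
        batch.append(event)
        used += cost
    if batch:
        yield batch
```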
def summarize_batch(self, new_events_batch, cfg):
def summarize_batch(self, new_events_batch, config):
prompt = f'''Your task is to create a concise running summary of actions and information results in the provided text, focusing on key and potentially important information to remember.
You will receive the current summary and your latest actions. Combine them, adding relevant key information from the latest development in 1st person past tense and keeping the summary concise.
@@ -221,7 +217,9 @@ Latest Development:
"""
'''
prompt = ChatSequence.for_model(cfg.fast_llm_model, [Message("user", prompt)])
prompt = ChatSequence.for_model(
config.fast_llm_model, [Message("user", prompt)]
)
self.agent.log_cycle_handler.log_cycle(
self.agent.ai_name,
self.agent.created_at,
@@ -230,7 +228,7 @@ Latest Development:
PROMPT_SUMMARY_FILE_NAME,
)
self.summary = create_chat_completion(prompt)
self.summary = create_chat_completion(prompt, config)
self.agent.log_cycle_handler.log_cycle(
self.agent.ai_name,

View File

@@ -39,12 +39,12 @@ supported_memory = ["json_file", "no_memory"]
# MilvusMemory = None
def get_memory(cfg: Config, init=False) -> VectorMemory:
def get_memory(config: Config) -> VectorMemory:
memory = None
match cfg.memory_backend:
match config.memory_backend:
case "json_file":
memory = JSONFileMemory(cfg)
memory = JSONFileMemory(config)
case "pinecone":
raise NotImplementedError(
@@ -59,8 +59,8 @@ def get_memory(cfg: Config, init=False) -> VectorMemory:
# " to use Pinecone as a memory backend."
# )
# else:
# memory = PineconeMemory(cfg)
# if init:
# memory = PineconeMemory(config)
# if clear:
# memory.clear()
case "redis":
@@ -74,7 +74,7 @@ def get_memory(cfg: Config, init=False) -> VectorMemory:
# " use Redis as a memory backend."
# )
# else:
# memory = RedisMemory(cfg)
# memory = RedisMemory(config)
case "weaviate":
raise NotImplementedError(
@@ -89,7 +89,7 @@ def get_memory(cfg: Config, init=False) -> VectorMemory:
# " use Weaviate as a memory backend."
# )
# else:
# memory = WeaviateMemory(cfg)
# memory = WeaviateMemory(config)
case "milvus":
raise NotImplementedError(
@@ -104,18 +104,18 @@ def get_memory(cfg: Config, init=False) -> VectorMemory:
# "Please install pymilvus to use Milvus or Zilliz Cloud as memory backend."
# )
# else:
# memory = MilvusMemory(cfg)
# memory = MilvusMemory(config)
case "no_memory":
memory = NoMemory()
case _:
raise ValueError(
f"Unknown memory backend '{cfg.memory_backend}'. Please check your config."
f"Unknown memory backend '{config.memory_backend}'. Please check your config."
)
if memory is None:
memory = JSONFileMemory(cfg)
memory = JSONFileMemory(config)
return memory

View File

@@ -36,19 +36,19 @@ class MemoryItem:
def from_text(
text: str,
source_type: MemoryDocType,
config: Config,
metadata: dict = {},
how_to_summarize: str | None = None,
question_for_summary: str | None = None,
):
cfg = Config()
logger.debug(f"Memorizing text:\n{'-'*32}\n{text}\n{'-'*32}\n")
chunks = [
chunk
for chunk, _ in (
split_text(text, cfg.embedding_model)
split_text(text, config.embedding_model, config)
if source_type != "code_file"
else chunk_content(text, cfg.embedding_model)
else chunk_content(text, config.embedding_model)
)
]
logger.debug("Chunks: " + str(chunks))
@@ -58,6 +58,7 @@ class MemoryItem:
for summary, _ in [
summarize_text(
text_chunk,
config,
instruction=how_to_summarize,
question=question_for_summary,
)
@@ -66,7 +67,7 @@ class MemoryItem:
]
logger.debug("Chunk summaries: " + str(chunk_summaries))
e_chunks = get_embedding(chunks)
e_chunks = get_embedding(chunks, config)
summary = (
chunk_summaries[0]
@@ -81,7 +82,7 @@ class MemoryItem:
# TODO: investigate search performance of weighted average vs summary
# e_average = np.average(e_chunks, axis=0, weights=[len(c) for c in chunks])
e_summary = get_embedding(summary)
e_summary = get_embedding(summary, config)
metadata["source_type"] = source_type
@@ -96,8 +97,8 @@ class MemoryItem:
)
@staticmethod
def from_text_file(content: str, path: str):
return MemoryItem.from_text(content, "text_file", {"location": path})
def from_text_file(content: str, path: str, config: Config):
return MemoryItem.from_text(content, "text_file", config, {"location": path})
@staticmethod
def from_code_file(content: str, path: str):
@@ -109,21 +110,21 @@ class MemoryItem:
# The result_message contains either user feedback
# or the result of the command specified in ai_message
if ai_message["role"] != "assistant":
raise ValueError(f"Invalid role on 'ai_message': {ai_message['role']}")
if ai_message.role != "assistant":
raise ValueError(f"Invalid role on 'ai_message': {ai_message.role}")
result = (
result_message["content"]
if result_message["content"].startswith("Command")
result_message.content
if result_message.content.startswith("Command")
else "None"
)
user_input = (
result_message["content"]
if result_message["content"].startswith("Human feedback")
result_message.content
if result_message.content.startswith("Human feedback")
else "None"
)
memory_content = (
f"Assistant Reply: {ai_message['content']}"
f"Assistant Reply: {ai_message.content}"
"\n\n"
f"Result: {result}"
"\n\n"
@@ -137,19 +138,25 @@ class MemoryItem:
)
@staticmethod
def from_webpage(content: str, url: str, question: str | None = None):
def from_webpage(
content: str, url: str, config: Config, question: str | None = None
):
return MemoryItem.from_text(
text=content,
source_type="webpage",
config=config,
metadata={"location": url},
question_for_summary=question,
)
def dump(self) -> str:
token_length = count_string_tokens(self.raw_content, Config().embedding_model)
def dump(self, calculate_length=False) -> str:
if calculate_length:
token_length = count_string_tokens(
self.raw_content, Config().embedding_model
)
return f"""
=============== MemoryItem ===============
Length: {token_length} tokens in {len(self.e_chunks)} chunks
Size: {f'{token_length} tokens in ' if calculate_length else ''}{len(self.e_chunks)} chunks
Metadata: {json.dumps(self.metadata, indent=2)}
---------------- SUMMARY -----------------
{self.summary}
@@ -158,6 +165,31 @@ Metadata: {json.dumps(self.metadata, indent=2)}
==========================================
"""
def __eq__(self, other: MemoryItem):
return (
self.raw_content == other.raw_content
and self.chunks == other.chunks
and self.chunk_summaries == other.chunk_summaries
# Embeddings can either be list[float] or np.ndarray[float32],
# and for comparison they must be of the same type
and np.array_equal(
self.e_summary
if isinstance(self.e_summary, np.ndarray)
else np.array(self.e_summary, dtype=np.float32),
other.e_summary
if isinstance(other.e_summary, np.ndarray)
else np.array(other.e_summary, dtype=np.float32),
)
and np.array_equal(
self.e_chunks
if isinstance(self.e_chunks[0], np.ndarray)
else [np.array(c, dtype=np.float32) for c in self.e_chunks],
other.e_chunks
if isinstance(other.e_chunks[0], np.ndarray)
else [np.array(c, dtype=np.float32) for c in other.e_chunks],
)
)
)
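The equality check has to normalize embeddings because they may be stored as `list[float]` or `np.ndarray`. A small sketch of that coercion (the helper name is made up):

```python
import numpy as np

# Coerce either representation to a float32 array before comparing
def _as_f32(vec):
    return vec if isinstance(vec, np.ndarray) else np.array(vec, dtype=np.float32)

assert np.array_equal(_as_f32([1.0, 2.0]), _as_f32(np.array([1, 2], dtype=np.float32)))
```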
@dataclasses.dataclass
class MemoryItemRelevance:

View File

@@ -17,25 +17,29 @@ class VectorMemoryProvider(MutableSet[MemoryItem], AbstractSingleton):
def __init__(self, config: Config):
pass
def get(self, query: str) -> MemoryItemRelevance | None:
def get(self, query: str, config: Config) -> MemoryItemRelevance | None:
"""
Gets the data from the memory that is most relevant to the given query.
Args:
data: The data to compare to.
query: The query used to retrieve information.
config: The config Object.
Returns: The most relevant Memory
"""
result = self.get_relevant(query, 1)
result = self.get_relevant(query, 1, config)
return result[0] if result else None
def get_relevant(self, query: str, k: int) -> Sequence[MemoryItemRelevance]:
def get_relevant(
self, query: str, k: int, config: Config
) -> Sequence[MemoryItemRelevance]:
"""
Returns the top-k most relevant memories for the given query
Args:
query: the query to compare stored memories to
k: the number of relevant memories to fetch
config: The config Object.
Returns:
list[MemoryItemRelevance] containing the top [k] relevant memories
@@ -48,7 +52,7 @@ class VectorMemoryProvider(MutableSet[MemoryItem], AbstractSingleton):
f"{len(self)} memories in index"
)
relevances = self.score_memories_for_relevance(query)
relevances = self.score_memories_for_relevance(query, config)
logger.debug(f"Memory relevance scores: {[str(r) for r in relevances]}")
# take last k items and reverse
@@ -57,13 +61,13 @@ class VectorMemoryProvider(MutableSet[MemoryItem], AbstractSingleton):
return [relevances[i] for i in top_k_indices]
def score_memories_for_relevance(
self, for_query: str
self, for_query: str, config: Config
) -> Sequence[MemoryItemRelevance]:
"""
Returns MemoryItemRelevance for every memory in the index.
Implementations may override this function for performance purposes.
"""
e_query: Embedding = get_embedding(for_query)
e_query: Embedding = get_embedding(for_query, config)
return [m.relevance_for(for_query, e_query) for m in self]
def get_stats(self) -> tuple[int, int]:

View File

@@ -20,22 +20,29 @@ class JSONFileMemory(VectorMemoryProvider):
file_path: Path
memories: list[MemoryItem]
def __init__(self, cfg: Config) -> None:
def __init__(self, config: Config) -> None:
"""Initialize a class instance
Args:
cfg: Config object
config: Config object
Returns:
None
"""
workspace_path = Path(cfg.workspace_path)
self.file_path = workspace_path / f"{cfg.memory_index}.json"
workspace_path = Path(config.workspace_path)
self.file_path = workspace_path / f"{config.memory_index}.json"
self.file_path.touch()
logger.debug(f"Initialized {__name__} with index path {self.file_path}")
logger.debug(
f"Initialized {__class__.__name__} with index path {self.file_path}"
)
self.memories = []
self.save_index()
try:
self.load_index()
logger.debug(f"Loaded {len(self.memories)} MemoryItems from file")
except Exception as e:
logger.warn(f"Could not load MemoryItems from file: {e}")
self.save_index()
def __iter__(self) -> Iterator[MemoryItem]:
return iter(self.memories)
@@ -48,6 +55,7 @@ class JSONFileMemory(VectorMemoryProvider):
def add(self, item: MemoryItem):
self.memories.append(item)
logger.debug(f"Adding item to memory: {item.dump()}")
self.save_index()
return len(self.memories)
@@ -62,6 +70,17 @@ class JSONFileMemory(VectorMemoryProvider):
self.memories.clear()
self.save_index()
def load_index(self):
"""Loads all memories from the index file"""
if not self.file_path.is_file():
logger.debug(f"Index file '{self.file_path}' does not exist")
return
with self.file_path.open("r") as f:
logger.debug(f"Loading memories from index file '{self.file_path}'")
json_index = orjson.loads(f.read())
for memory_item_dict in json_index:
self.memories.append(MemoryItem(**memory_item_dict))
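With this change, `JSONFileMemory` reuses an existing index on startup and only rewrites it when loading fails, instead of unconditionally wiping it. A hedged sketch of the load-or-fall-back pattern (the path is a placeholder):

```python
from pathlib import Path

import orjson

index_path = Path("/tmp/auto-gpt-memory.json")  # hypothetical location
index_path.touch()
try:
    raw = index_path.read_bytes()
    items = orjson.loads(raw) if raw else []
except orjson.JSONDecodeError:
    items = []  # corrupt index: start fresh rather than crash
```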
def save_index(self):
logger.debug(f"Saving memory index to file {self.file_path}")
with self.file_path.open("wb") as f:

View File

@@ -1,16 +1,14 @@
from typing import Any, overload
import numpy as np
import openai
from autogpt.config import Config
from autogpt.llm.utils import metered, retry_openai_api
from autogpt.llm.base import TText
from autogpt.llm.providers import openai as iopenai
from autogpt.logs import logger
Embedding = list[np.float32] | np.ndarray[Any, np.dtype[np.float32]]
"""Embedding vector"""
TText = list[int]
"""Token array representing text"""
@overload
@@ -23,10 +21,8 @@ def get_embedding(input: list[str] | list[TText]) -> list[Embedding]:
...
@metered
@retry_openai_api()
def get_embedding(
input: str | TText | list[str] | list[TText],
input: str | TText | list[str] | list[TText], config: Config
) -> Embedding | list[Embedding]:
"""Get an embedding from the ada model.
@@ -37,7 +33,6 @@ def get_embedding(
Returns:
List[float]: The embedding.
"""
cfg = Config()
multiple = isinstance(input, list) and all(not isinstance(i, int) for i in input)
if isinstance(input, str):
@@ -45,22 +40,22 @@ def get_embedding(
elif multiple and isinstance(input[0], str):
input = [text.replace("\n", " ") for text in input]
model = cfg.embedding_model
if cfg.use_azure:
kwargs = {"engine": cfg.get_azure_deployment_id_for_model(model)}
model = config.embedding_model
if config.use_azure:
kwargs = {"engine": config.get_azure_deployment_id_for_model(model)}
else:
kwargs = {"model": model}
logger.debug(
f"Getting embedding{f's for {len(input)} inputs' if multiple else ''}"
f" with model '{model}'"
+ (f" via Azure deployment '{kwargs['engine']}'" if cfg.use_azure else "")
+ (f" via Azure deployment '{kwargs['engine']}'" if config.use_azure else "")
)
embeddings = openai.Embedding.create(
input=input,
api_key=cfg.openai_api_key,
embeddings = iopenai.create_embedding(
input,
**kwargs,
api_key=config.openai_api_key,
).data
if not multiple:

autogpt/models/command.py Normal file
View File

@@ -0,0 +1,41 @@
from typing import Any, Callable, Dict, Optional
from autogpt.config import Config
class Command:
"""A class representing a command.
Attributes:
name (str): The name of the command.
description (str): A brief description of what the command does.
signature (str): The signature of the function that the command executes. Defaults to None.
"""
def __init__(
self,
name: str,
description: str,
method: Callable[..., Any],
signature: Dict[str, Dict[str, Any]],
enabled: bool | Callable[[Config], bool] = True,
disabled_reason: Optional[str] = None,
):
self.name = name
self.description = description
self.method = method
self.signature = signature
self.enabled = enabled
self.disabled_reason = disabled_reason
def __call__(self, *args, **kwargs) -> Any:
if hasattr(kwargs, "config") and callable(self.enabled):
self.enabled = self.enabled(kwargs["config"])
if not self.enabled:
if self.disabled_reason:
return f"Command '{self.name}' is disabled: {self.disabled_reason}"
return f"Command '{self.name}' is disabled"
return self.method(*args, **kwargs)
def __str__(self) -> str:
return f"{self.name}: {self.description}, args: {self.signature}"

View File

@@ -0,0 +1,94 @@
import importlib
import inspect
from typing import Any, Callable
from autogpt.command_decorator import AUTO_GPT_COMMAND_IDENTIFIER
from autogpt.logs import logger
from autogpt.models.command import Command
class CommandRegistry:
"""
The CommandRegistry class is a manager for a collection of Command objects.
It allows the registration, modification, and retrieval of Command objects,
as well as the scanning and loading of command plugins from a specified
directory.
"""
def __init__(self):
self.commands = {}
def _import_module(self, module_name: str) -> Any:
return importlib.import_module(module_name)
def _reload_module(self, module: Any) -> Any:
return importlib.reload(module)
def register(self, cmd: Command) -> None:
if cmd.name in self.commands:
logger.warn(
f"Command '{cmd.name}' already registered and will be overwritten!"
)
self.commands[cmd.name] = cmd
def unregister(self, command_name: str):
if command_name in self.commands:
del self.commands[command_name]
else:
raise KeyError(f"Command '{command_name}' not found in registry.")
def reload_commands(self) -> None:
"""Reloads all loaded command plugins."""
for cmd_name in self.commands:
cmd = self.commands[cmd_name]
module = self._import_module(cmd.__module__)
reloaded_module = self._reload_module(module)
if hasattr(reloaded_module, "register"):
reloaded_module.register(self)
def get_command(self, name: str) -> Callable[..., Any]:
return self.commands[name]
def call(self, command_name: str, **kwargs) -> Any:
if command_name not in self.commands:
raise KeyError(f"Command '{command_name}' not found in registry.")
command = self.commands[command_name]
return command(**kwargs)
def command_prompt(self) -> str:
"""
Returns a string representation of all registered `Command` objects for use in a prompt
"""
commands_list = [
f"{idx + 1}. {str(cmd)}" for idx, cmd in enumerate(self.commands.values())
]
return "\n".join(commands_list)
def import_commands(self, module_name: str) -> None:
"""
Imports the specified Python module containing command plugins.
This method imports the associated module and registers any functions or
classes that are decorated with the `AUTO_GPT_COMMAND_IDENTIFIER` attribute
as `Command` objects. The registered `Command` objects are then added to the
`commands` dictionary of the `CommandRegistry` object.
Args:
module_name (str): The name of the module to import for command plugins.
"""
module = importlib.import_module(module_name)
for attr_name in dir(module):
attr = getattr(module, attr_name)
# Register decorated functions
if hasattr(attr, AUTO_GPT_COMMAND_IDENTIFIER) and getattr(
attr, AUTO_GPT_COMMAND_IDENTIFIER
):
self.register(attr.command)
# Register command classes
elif (
inspect.isclass(attr) and issubclass(attr, Command) and attr != Command
):
cmd_instance = attr()
self.register(cmd_instance)
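And a quick sketch of the registry in use, continuing the hypothetical `greet` command from the previous sketch:

```python
registry = CommandRegistry()
registry.register(cmd)  # cmd from the Command sketch above
print(registry.call("greet", name="World"))  # -> Hello, World!
print(registry.command_prompt())  # -> 1. greet: Greet someone by name, ...
```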

View File

@@ -58,7 +58,7 @@ def write_dict_to_json_file(data: dict, file_path: str) -> None:
json.dump(data, file, indent=4)
def fetch_openai_plugins_manifest_and_spec(cfg: Config) -> dict:
def fetch_openai_plugins_manifest_and_spec(config: Config) -> dict:
"""
Fetch the manifest for a list of OpenAI plugins.
Args:
@@ -68,8 +68,8 @@ def fetch_openai_plugins_manifest_and_spec(cfg: Config) -> dict:
"""
# TODO add directory scan
manifests = {}
for url in cfg.plugins_openai:
openai_plugin_client_dir = f"{cfg.plugins_dir}/openai/{urlparse(url).netloc}"
for url in config.plugins_openai:
openai_plugin_client_dir = f"{config.plugins_dir}/openai/{urlparse(url).netloc}"
create_directory_if_not_exists(openai_plugin_client_dir)
if not os.path.exists(f"{openai_plugin_client_dir}/ai-plugin.json"):
try:
@@ -134,18 +134,18 @@ def create_directory_if_not_exists(directory_path: str) -> bool:
def initialize_openai_plugins(
manifests_specs: dict, cfg: Config, debug: bool = False
manifests_specs: dict, config: Config, debug: bool = False
) -> dict:
"""
Initialize OpenAI plugins.
Args:
manifests_specs (dict): per url dictionary of manifest and spec.
cfg (Config): Config instance including plugins config
config (Config): Config instance including plugins config
debug (bool, optional): Enable debug logging. Defaults to False.
Returns:
dict: per url dictionary of manifest, spec and client.
"""
openai_plugins_dir = f"{cfg.plugins_dir}/openai"
openai_plugins_dir = f"{config.plugins_dir}/openai"
if create_directory_if_not_exists(openai_plugins_dir):
for url, manifest_spec in manifests_specs.items():
openai_plugin_client_dir = f"{openai_plugins_dir}/{urlparse(url).hostname}"
@@ -188,13 +188,13 @@ def initialize_openai_plugins(
def instantiate_openai_plugin_clients(
manifests_specs_clients: dict, cfg: Config, debug: bool = False
manifests_specs_clients: dict, config: Config, debug: bool = False
) -> dict:
"""
Instantiates BaseOpenAIPlugin instances for each OpenAI plugin.
Args:
manifests_specs_clients (dict): per url dictionary of manifest, spec and client.
cfg (Config): Config instance including plugins config
config (Config): Config instance including plugins config
debug (bool, optional): Enable debug logging. Defaults to False.
Returns:
plugins (dict): per url dictionary of BaseOpenAIPlugin instances.
@@ -206,11 +206,11 @@ def instantiate_openai_plugin_clients(
return plugins
def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate]:
def scan_plugins(config: Config, debug: bool = False) -> List[AutoGPTPluginTemplate]:
"""Scan the plugins directory for plugins and loads them.
Args:
cfg (Config): Config instance including plugins config
config (Config): Config instance including plugins config
debug (bool, optional): Enable debug logging. Defaults to False.
Returns:
@@ -218,11 +218,11 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate
"""
loaded_plugins = []
# Generic plugins
plugins_path_path = Path(cfg.plugins_dir)
plugins_config = cfg.plugins_config
plugins_path_path = Path(config.plugins_dir)
plugins_config = config.plugins_config
# Directory-based plugins
for plugin_path in [f.path for f in os.scandir(cfg.plugins_dir) if f.is_dir()]:
for plugin_path in [f.path for f in os.scandir(config.plugins_dir) if f.is_dir()]:
# Avoid going into __pycache__ or other hidden directories
if plugin_path.startswith("__"):
continue
@@ -286,11 +286,11 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate
)
# OpenAI plugins
if cfg.plugins_openai:
manifests_specs = fetch_openai_plugins_manifest_and_spec(cfg)
if config.plugins_openai:
manifests_specs = fetch_openai_plugins_manifest_and_spec(config)
if manifests_specs.keys():
manifests_specs_clients = initialize_openai_plugins(
manifests_specs, cfg, debug
manifests_specs, config, debug
)
for url, openai_plugin_meta in manifests_specs_clients.items():
if not plugins_config.is_enabled(url):

View File

@@ -12,8 +12,6 @@ from autogpt.llm.utils import count_string_tokens, create_chat_completion
from autogpt.logs import logger
from autogpt.utils import batch
CFG = Config()
def _max_chunk_length(model: str, max: Optional[int] = None) -> int:
model_max_input_tokens = OPEN_AI_MODELS[model].max_tokens - 1
@@ -60,13 +58,18 @@ def chunk_content(
def summarize_text(
text: str, instruction: Optional[str] = None, question: Optional[str] = None
text: str,
config: Config,
instruction: Optional[str] = None,
question: Optional[str] = None,
) -> tuple[str, None | list[tuple[str, str]]]:
"""Summarize text using the OpenAI API
Args:
text (str): The text to summarize
config (Config): The config object
instruction (str): Additional instruction for summarization, e.g. "focus on information related to polar bears", "omit personal information contained in the text"
question (str): Question to answer in the summary
Returns:
str: The summary of the text
@@ -79,7 +82,7 @@ def summarize_text(
if instruction and question:
raise ValueError("Parameters 'question' and 'instructions' cannot both be set")
model = CFG.fast_llm_model
model = config.fast_llm_model
if question:
instruction = (
@@ -111,14 +114,18 @@ def summarize_text(
logger.debug(f"Summarizing with {model}:\n{summarization_prompt.dump()}\n")
summary = create_chat_completion(
summarization_prompt, temperature=0, max_tokens=500
summarization_prompt, config, temperature=0, max_tokens=500
)
logger.debug(f"\n{'-'*16} SUMMARY {'-'*17}\n{summary}\n{'-'*42}\n")
return summary.strip(), None
summaries: list[str] = []
chunks = list(split_text(text, for_model=model, max_chunk_length=max_chunk_length))
chunks = list(
split_text(
text, for_model=model, config=config, max_chunk_length=max_chunk_length
)
)
for i, (chunk, chunk_length) in enumerate(chunks):
logger.info(
@@ -138,7 +145,8 @@ def summarize_text(
def split_text(
text: str,
for_model: str = CFG.fast_llm_model,
for_model: str,
config: Config,
with_overlap=True,
max_chunk_length: Optional[int] = None,
):
@@ -147,7 +155,9 @@ def split_text(
Args:
text (str): The text to split
for_model (str): The model to chunk for; determines tokenizer and constraints
max_length (int, optional): The maximum length of each chunk
config (Config): The config object
with_overlap (bool, optional): Whether to allow overlap between chunks
max_chunk_length (int, optional): The maximum length of a chunk
Yields:
str: The next chunk of text
@@ -155,6 +165,7 @@ def split_text(
Raises:
ValueError: when a sentence is longer than the maximum length
"""
max_length = _max_chunk_length(for_model, max_chunk_length)
# flatten paragraphs to improve performance
@@ -168,7 +179,7 @@ def split_text(
n_chunks = ceil(text_length / max_length)
target_chunk_length = ceil(text_length / n_chunks)
nlp: spacy.language.Language = spacy.load(CFG.browse_spacy_language_model)
nlp: spacy.language.Language = spacy.load(config.browse_spacy_language_model)
nlp.add_pipe("sentencizer")
doc = nlp(text)
sentences = [sentence.text.strip() for sentence in doc.sents]
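For context, a minimal sketch of the sentence-splitting step that `split_text` builds on, using a blank pipeline so no model download is required (illustrative only; the real code loads `config.browse_spacy_language_model`):

```python
import spacy

nlp = spacy.blank("en")  # lightweight stand-in for the configured model
nlp.add_pipe("sentencizer")
doc = nlp("First sentence. Second one. A third, slightly longer sentence.")
print([sent.text.strip() for sent in doc.sents])
# ['First sentence.', 'Second one.', 'A third, slightly longer sentence.']
```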

View File

@@ -4,7 +4,7 @@ from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional
from autogpt.json_utils.utilities import llm_response_schema
if TYPE_CHECKING:
from autogpt.commands.command import CommandRegistry
from autogpt.models.command_registry import CommandRegistry
class PromptGenerator:

View File

@@ -9,12 +9,10 @@ from autogpt.prompts.generator import PromptGenerator
from autogpt.setup import prompt_user
from autogpt.utils import clean_input
CFG = Config()
DEFAULT_TRIGGERING_PROMPT = "Determine exactly one command to use, and respond using the JSON schema specified previously:"
def build_default_prompt_generator() -> PromptGenerator:
def build_default_prompt_generator(config: Config) -> PromptGenerator:
"""
This function generates a prompt string that includes various constraints,
commands, resources, and performance evaluations.
@@ -27,7 +25,7 @@ def build_default_prompt_generator() -> PromptGenerator:
prompt_generator = PromptGenerator()
# Initialize the PromptConfig object and load the file set in the main config (default: prompts_settings.yaml)
prompt_config = PromptConfig(CFG.prompt_settings_file)
prompt_config = PromptConfig(config.prompt_settings_file)
# Add constraints to the PromptGenerator object
for constraint in prompt_config.constraints:
@@ -44,70 +42,71 @@ def build_default_prompt_generator() -> PromptGenerator:
return prompt_generator
def construct_main_ai_config() -> AIConfig:
def construct_main_ai_config(config: Config) -> AIConfig:
"""Construct the prompt for the AI to respond to
Returns:
str: The prompt string
"""
config = AIConfig.load(CFG.ai_settings_file)
if CFG.skip_reprompt and config.ai_name:
logger.typewriter_log("Name :", Fore.GREEN, config.ai_name)
logger.typewriter_log("Role :", Fore.GREEN, config.ai_role)
logger.typewriter_log("Goals:", Fore.GREEN, f"{config.ai_goals}")
ai_config = AIConfig.load(config.ai_settings_file)
if config.skip_reprompt and ai_config.ai_name:
logger.typewriter_log("Name :", Fore.GREEN, ai_config.ai_name)
logger.typewriter_log("Role :", Fore.GREEN, ai_config.ai_role)
logger.typewriter_log("Goals:", Fore.GREEN, f"{ai_config.ai_goals}")
logger.typewriter_log(
"API Budget:",
Fore.GREEN,
"infinite" if config.api_budget <= 0 else f"${config.api_budget}",
"infinite" if ai_config.api_budget <= 0 else f"${ai_config.api_budget}",
)
elif config.ai_name:
elif ai_config.ai_name:
logger.typewriter_log(
"Welcome back! ",
Fore.GREEN,
f"Would you like me to return to being {config.ai_name}?",
f"Would you like me to return to being {ai_config.ai_name}?",
speak_text=True,
)
should_continue = clean_input(
config,
f"""Continue with the last settings?
Name: {config.ai_name}
Role: {config.ai_role}
Goals: {config.ai_goals}
API Budget: {"infinite" if config.api_budget <= 0 else f"${config.api_budget}"}
Continue ({CFG.authorise_key}/{CFG.exit_key}): """
Name: {ai_config.ai_name}
Role: {ai_config.ai_role}
Goals: {ai_config.ai_goals}
API Budget: {"infinite" if ai_config.api_budget <= 0 else f"${ai_config.api_budget}"}
Continue ({config.authorise_key}/{config.exit_key}): """,
)
if should_continue.lower() == CFG.exit_key:
config = AIConfig()
if should_continue.lower() == config.exit_key:
ai_config = AIConfig()
if not config.ai_name:
config = prompt_user()
config.save(CFG.ai_settings_file)
if not ai_config.ai_name:
ai_config = prompt_user(config)
ai_config.save(config.ai_settings_file)
if CFG.restrict_to_workspace:
if config.restrict_to_workspace:
logger.typewriter_log(
"NOTE:All files/directories created by this agent can be found inside its workspace at:",
Fore.YELLOW,
f"{CFG.workspace_path}",
f"{config.workspace_path}",
)
# set the total api budget
api_manager = ApiManager()
api_manager.set_total_budget(config.api_budget)
api_manager.set_total_budget(ai_config.api_budget)
# Agent Created, print message
logger.typewriter_log(
config.ai_name,
ai_config.ai_name,
Fore.LIGHTBLUE_EX,
"has been created with the following details:",
speak_text=True,
)
# Print the ai config details
# Print the ai_config details
# Name
logger.typewriter_log("Name:", Fore.GREEN, config.ai_name, speak_text=False)
logger.typewriter_log("Name:", Fore.GREEN, ai_config.ai_name, speak_text=False)
# Role
logger.typewriter_log("Role:", Fore.GREEN, config.ai_role, speak_text=False)
logger.typewriter_log("Role:", Fore.GREEN, ai_config.ai_role, speak_text=False)
# Goals
logger.typewriter_log("Goals:", Fore.GREEN, "", speak_text=False)
for goal in config.ai_goals:
for goal in ai_config.ai_goals:
logger.typewriter_log("-", Fore.GREEN, goal, speak_text=False)
return config
return ai_config

View File

@@ -16,10 +16,8 @@ from autogpt.prompts.default_prompts import (
DEFAULT_USER_DESIRE_PROMPT,
)
CFG = Config()
def prompt_user() -> AIConfig:
def prompt_user(config: Config) -> AIConfig:
"""Prompt the user for input
Returns:
@@ -45,7 +43,7 @@ def prompt_user() -> AIConfig:
)
user_desire = utils.clean_input(
f"{Fore.LIGHTBLUE_EX}I want Auto-GPT to{Style.RESET_ALL}: "
config, f"{Fore.LIGHTBLUE_EX}I want Auto-GPT to{Style.RESET_ALL}: "
)
if user_desire == "":
@@ -58,11 +56,11 @@ def prompt_user() -> AIConfig:
Fore.GREEN,
speak_text=True,
)
return generate_aiconfig_manual()
return generate_aiconfig_manual(config)
else:
try:
return generate_aiconfig_automatic(user_desire)
return generate_aiconfig_automatic(user_desire, config)
except Exception as e:
logger.typewriter_log(
"Unable to automatically generate AI Config based on user desire.",
@@ -71,10 +69,10 @@ def prompt_user() -> AIConfig:
speak_text=True,
)
return generate_aiconfig_manual()
return generate_aiconfig_manual(config)
def generate_aiconfig_manual() -> AIConfig:
def generate_aiconfig_manual(config: Config) -> AIConfig:
"""
Interactively create an AI configuration by prompting the user to provide the name, role, and goals of the AI.
@@ -99,7 +97,7 @@ def generate_aiconfig_manual() -> AIConfig:
logger.typewriter_log(
"Name your AI: ", Fore.GREEN, "For example, 'Entrepreneur-GPT'"
)
ai_name = utils.clean_input("AI Name: ")
ai_name = utils.clean_input(config, "AI Name: ")
if ai_name == "":
ai_name = "Entrepreneur-GPT"
@@ -114,7 +112,7 @@ def generate_aiconfig_manual() -> AIConfig:
"For example, 'an AI designed to autonomously develop and run businesses with"
" the sole goal of increasing your net worth.'",
)
ai_role = utils.clean_input(f"{ai_name} is: ")
ai_role = utils.clean_input(config, f"{ai_name} is: ")
if ai_role == "":
ai_role = "an AI designed to autonomously develop and run businesses with the"
" sole goal of increasing your net worth."
@@ -129,7 +127,9 @@ def generate_aiconfig_manual() -> AIConfig:
logger.info("Enter nothing to load defaults, enter nothing when finished.")
ai_goals = []
for i in range(5):
ai_goal = utils.clean_input(f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: ")
ai_goal = utils.clean_input(
config, f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: "
)
if ai_goal == "":
break
ai_goals.append(ai_goal)
@@ -148,7 +148,7 @@ def generate_aiconfig_manual() -> AIConfig:
)
logger.info("Enter nothing to let the AI run without monetary limit")
api_budget_input = utils.clean_input(
f"{Fore.LIGHTBLUE_EX}Budget{Style.RESET_ALL}: $"
config, f"{Fore.LIGHTBLUE_EX}Budget{Style.RESET_ALL}: $"
)
if api_budget_input == "":
api_budget = 0.0
@@ -164,7 +164,7 @@ def generate_aiconfig_manual() -> AIConfig:
return AIConfig(ai_name, ai_role, ai_goals, api_budget)
def generate_aiconfig_automatic(user_prompt) -> AIConfig:
def generate_aiconfig_automatic(user_prompt: str, config: Config) -> AIConfig:
"""Generates an AIConfig object from the given string.
Returns:
@@ -178,12 +178,13 @@ def generate_aiconfig_automatic(user_prompt) -> AIConfig:
# Call LLM with the string as user input
output = create_chat_completion(
ChatSequence.for_model(
CFG.fast_llm_model,
config.fast_llm_model,
[
Message("system", system_prompt),
Message("user", prompt_ai_config_automatic),
],
)
),
config,
)
# Debug LLM Output

View File

@@ -3,6 +3,7 @@ import abc
import re
from threading import Lock
from autogpt.config import Config
from autogpt.singleton import AbstractSingleton
@@ -11,7 +12,7 @@ class VoiceBase(AbstractSingleton):
Base class for all voice classes.
"""
def __init__(self):
def __init__(self, config: Config):
"""
Initialize the voice class.
"""
@@ -20,7 +21,7 @@ class VoiceBase(AbstractSingleton):
self._api_key = None
self._voices = []
self._mutex = Lock()
self._setup()
self._setup(config)
def say(self, text: str, voice_index: int = 0) -> bool:
"""

View File

@@ -13,14 +13,13 @@ PLACEHOLDERS = {"your-voice-id"}
class ElevenLabsSpeech(VoiceBase):
"""ElevenLabs speech class"""
def _setup(self) -> None:
def _setup(self, config: Config) -> None:
"""Set up the voices, API key, etc.
Returns:
None: None
"""
cfg = Config()
default_voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"]
voice_options = {
"Rachel": "21m00Tcm4TlvDq8ikWAM",
@@ -35,15 +34,15 @@ class ElevenLabsSpeech(VoiceBase):
}
self._headers = {
"Content-Type": "application/json",
"xi-api-key": cfg.elevenlabs_api_key,
"xi-api-key": config.elevenlabs_api_key,
}
self._voices = default_voices.copy()
if cfg.elevenlabs_voice_id in voice_options:
cfg.elevenlabs_voice_id = voice_options[cfg.elevenlabs_voice_id]
if cfg.elevenlabs_voice_2_id in voice_options:
cfg.elevenlabs_voice_2_id = voice_options[cfg.elevenlabs_voice_2_id]
self._use_custom_voice(cfg.elevenlabs_voice_id, 0)
self._use_custom_voice(cfg.elevenlabs_voice_2_id, 1)
if config.elevenlabs_voice_id in voice_options:
config.elevenlabs_voice_id = voice_options[config.elevenlabs_voice_id]
if config.elevenlabs_voice_2_id in voice_options:
config.elevenlabs_voice_2_id = voice_options[config.elevenlabs_voice_2_id]
self._use_custom_voice(config.elevenlabs_voice_id, 0)
self._use_custom_voice(config.elevenlabs_voice_2_id, 1)
def _use_custom_voice(self, voice, voice_index) -> None:
"""Use a custom voice if provided and not a placeholder

View File

@@ -14,10 +14,9 @@ _QUEUE_SEMAPHORE = Semaphore(
) # The amount of sounds to queue before blocking the main thread
def say_text(text: str, voice_index: int = 0) -> None:
def say_text(text: str, config: Config, voice_index: int = 0) -> None:
"""Speak the given text using the given voice index"""
cfg = Config()
default_voice_engine, voice_engine = _get_voice_engine(cfg)
default_voice_engine, voice_engine = _get_voice_engine(config)
def speak() -> None:
success = voice_engine.say(text, voice_index)
@@ -35,7 +34,7 @@ def _get_voice_engine(config: Config) -> tuple[VoiceBase, VoiceBase]:
"""Get the voice engine to use for the given configuration"""
tts_provider = config.text_to_speech_provider
if tts_provider == "elevenlabs":
voice_engine = ElevenLabsSpeech()
voice_engine = ElevenLabsSpeech(config)
elif tts_provider == "macos":
voice_engine = MacOSTTS()
elif tts_provider == "streamelements":

View File

@@ -23,11 +23,10 @@ def batch(iterable, max_batch_length: int, overlap: int = 0):
yield iterable[i : i + max_batch_length]
def clean_input(prompt: str = "", talk=False):
def clean_input(config: Config, prompt: str = "", talk=False):
try:
cfg = Config()
if cfg.chat_messages_enabled:
for plugin in cfg.plugins:
if config.chat_messages_enabled:
for plugin in config.plugins:
if not hasattr(plugin, "can_handle_user_input"):
continue
if not plugin.can_handle_user_input(user_input=prompt):
@@ -44,14 +43,14 @@ def clean_input(prompt: str = "", talk=False):
"sure",
"alright",
]:
return cfg.authorise_key
return config.authorise_key
elif plugin_response.lower() in [
"no",
"nope",
"n",
"negative",
]:
return cfg.exit_key
return config.exit_key
return plugin_response
# ask for input, default when just pressing Enter is y

View File

@@ -5,7 +5,7 @@ from autogpt.commands.file_operations import ingest_file, list_files
from autogpt.config import Config
from autogpt.memory.vector import VectorMemory, get_memory
cfg = Config()
config = Config()
def configure_logging():
@@ -70,7 +70,9 @@ def main() -> None:
args = parser.parse_args()
# Initialize memory
memory = get_memory(cfg, init=args.init)
memory = get_memory(config)
if args.init:
memory.clear()
logger.debug("Using memory of type: " + memory.__class__.__name__)
if args.file:

View File

@@ -52,7 +52,7 @@ def kubernetes_agent(
ai_config.command_registry = command_registry
system_prompt = ai_config.construct_full_prompt()
Config().set_continuous_mode(False)
agent_test_config.set_continuous_mode(False)
agent = Agent(
# We also give the AI a name
ai_name="Kubernetes-Demo",

View File

@@ -10,7 +10,7 @@ Configuration is controlled through the `Config` object. You can set configurati
- `BROWSE_CHUNK_MAX_LENGTH`: When browsing website, define the length of chunks to summarize. Default: 3000
- `BROWSE_SPACY_LANGUAGE_MODEL`: [spaCy language model](https://spacy.io/usage/models) to use when creating chunks. Default: en_core_web_sm
- `CHAT_MESSAGES_ENABLED`: Enable chat messages. Optional
- `DISABLED_COMMAND_CATEGORIES`: Command categories to disable. Command categories are Python module names, e.g. autogpt.commands.analyze_code. See the directory `autogpt/commands` in the source for all command modules. Default: None
- `DISABLED_COMMAND_CATEGORIES`: Command categories to disable. Command categories are Python module names, e.g. autogpt.commands.execute_code. See the directory `autogpt/commands` in the source for all command modules. Default: None
- `ELEVENLABS_API_KEY`: ElevenLabs API Key. Optional.
- `ELEVENLABS_VOICE_ID`: ElevenLabs Voice ID. Optional.
- `EMBEDDING_MODEL`: LLM Model to use for embedding tasks. Default: text-embedding-ada-002
@@ -50,4 +50,4 @@ Configuration is controlled through the `Config` object. You can set configurati
- `USER_AGENT`: User-Agent given when browsing websites. Default: "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
- `USE_AZURE`: Use Azure's LLM Default: False
- `USE_WEB_BROWSER`: Which web browser to use. Options are `chrome`, `firefox`, `safari` or `edge` Default: chrome
- `WIPE_REDIS_ON_START`: Wipes data / index on start. Default: True
- `WIPE_REDIS_ON_START`: Wipes data / index on start. Default: True
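For reference, a minimal `.env` sketch combining several of the options documented above (values are illustrative, not recommendations):

```ini
BROWSE_CHUNK_MAX_LENGTH=3000
CHAT_MESSAGES_ENABLED=True
DISABLED_COMMAND_CATEGORIES=autogpt.commands.execute_code
EMBEDDING_MODEL=text-embedding-ada-002
USE_WEB_BROWSER=chrome
WIPE_REDIS_ON_START=True
```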

View File

@@ -104,5 +104,5 @@ If you want to selectively disable some command groups, you can use the `DISABLE
For example, to disable coding related features, set it to the value below:
```ini
DISABLED_COMMAND_CATEGORIES=autogpt.commands.analyze_code,autogpt.commands.execute_code,autogpt.commands.git_operations,autogpt.commands.improve_code,autogpt.commands.write_tests
DISABLED_COMMAND_CATEGORIES=autogpt.commands.execute_code
```

View File

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
[project]
name = "agpt"
version = "0.4.1"
version = "0.4.2"
authors = [
{ name="Torantulino", email="support@agpt.co" },
]

View File

@@ -2,7 +2,7 @@
"basic_abilities": {
"browse_website": {
"max_level": 1,
"max_level_beaten": 1
"max_level_beaten": null
},
"write_file": {
"max_level": 2,

View File

@@ -7,12 +7,12 @@ import yaml
from pytest_mock import MockerFixture
from autogpt.agent.agent import Agent
from autogpt.commands.command import CommandRegistry
from autogpt.config.ai_config import AIConfig
from autogpt.config.config import Config
from autogpt.llm.api_manager import ApiManager
from autogpt.logs import TypingConsoleHandler
from autogpt.memory.vector import get_memory
from autogpt.models.command_registry import CommandRegistry
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
from autogpt.workspace import Workspace
@@ -94,9 +94,10 @@ def agent(config: Config, workspace: Workspace) -> Agent:
ai_config.command_registry = command_registry
config.set_memory_backend("json_file")
memory_json_file = get_memory(config, init=True)
memory_json_file = get_memory(config)
memory_json_file.clear()
system_prompt = ai_config.construct_full_prompt()
system_prompt = ai_config.construct_full_prompt(config)
return Agent(
ai_name=ai_config.ai_name,
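`construct_full_prompt` now takes the `Config` it previously fetched globally, and `get_memory` loses its `init` flag in favour of an explicit `clear()`. A hedged sketch of the updated wiring (the `AIConfig` keyword names are assumed from the fixtures in this repo):

```python
from autogpt.config import Config
from autogpt.config.ai_config import AIConfig
from autogpt.memory.vector import get_memory

config = Config()
config.set_memory_backend("json_file")

memory = get_memory(config)  # no init=True any more
memory.clear()               # explicit wipe replaces the old flag

ai_config = AIConfig(
    ai_name="Test-GPT",                 # keyword names assumed
    ai_role="a test agent",
    ai_goals=["exercise the fixtures"],
)
system_prompt = ai_config.construct_full_prompt(config)  # config now required
```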

View File

@@ -1,10 +1,10 @@
import pytest
from autogpt.agent import Agent
from autogpt.commands.command import CommandRegistry
from autogpt.config import AIConfig, Config
from autogpt.main import COMMAND_CATEGORIES
from autogpt.memory.vector import NoMemory, get_memory
from autogpt.models.command_registry import CommandRegistry
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
from autogpt.workspace import Workspace
@@ -28,7 +28,9 @@ def memory_json_file(agent_test_config: Config):
was_memory_backend = agent_test_config.memory_backend
agent_test_config.set_memory_backend("json_file")
yield get_memory(agent_test_config, init=True)
memory = get_memory(agent_test_config)
memory.clear()
yield memory
agent_test_config.set_memory_backend(was_memory_backend)
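The same change ripples through this conftest: clearing is now an explicit step and the original backend is restored on teardown. A condensed sketch of the fixture shape used above:

```python
import pytest

from autogpt.memory.vector import get_memory


@pytest.fixture
def memory_json_file(agent_test_config):
    was_backend = agent_test_config.memory_backend
    agent_test_config.set_memory_backend("json_file")
    memory = get_memory(agent_test_config)  # init flag removed
    memory.clear()                          # explicit wipe
    yield memory
    agent_test_config.set_memory_backend(was_backend)  # restore for later tests
```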
@@ -53,7 +55,7 @@ def browser_agent(agent_test_config, memory_none: NoMemory, workspace: Workspace
)
ai_config.command_registry = command_registry
system_prompt = ai_config.construct_full_prompt()
system_prompt = ai_config.construct_full_prompt(agent_test_config)
agent = Agent(
ai_name="",
@@ -89,8 +91,8 @@ def file_system_agents(
ai_goals=[ai_goal],
)
ai_config.command_registry = command_registry
system_prompt = ai_config.construct_full_prompt()
Config().set_continuous_mode(False)
system_prompt = ai_config.construct_full_prompt(agent_test_config)
agent_test_config.set_continuous_mode(False)
agents.append(
Agent(
ai_name="File System Agent",
@@ -121,7 +123,7 @@ def memory_management_agent(agent_test_config, memory_json_file, workspace: Work
)
ai_config.command_registry = command_registry
system_prompt = ai_config.construct_full_prompt()
system_prompt = ai_config.construct_full_prompt(agent_test_config)
agent = Agent(
ai_name="Follow-Instructions-GPT",
@@ -157,8 +159,8 @@ def information_retrieval_agents(
ai_goals=[ai_goal],
)
ai_config.command_registry = command_registry
system_prompt = ai_config.construct_full_prompt()
Config().set_continuous_mode(False)
system_prompt = ai_config.construct_full_prompt(agent_test_config)
agent_test_config.set_continuous_mode(False)
agents.append(
Agent(
ai_name="Information Retrieval Agent",
@@ -193,8 +195,8 @@ def kubernetes_agent(
)
ai_config.command_registry = command_registry
system_prompt = ai_config.construct_full_prompt()
Config().set_continuous_mode(False)
system_prompt = ai_config.construct_full_prompt(agent_test_config)
agent_test_config.set_continuous_mode(False)
agent = Agent(
ai_name="Kubernetes-Demo",
memory=memory_json_file,
@@ -226,8 +228,8 @@ def get_nobel_prize_agent(agent_test_config, memory_json_file, workspace: Worksp
)
ai_config.command_registry = command_registry
system_prompt = ai_config.construct_full_prompt()
Config().set_continuous_mode(False)
system_prompt = ai_config.construct_full_prompt(agent_test_config)
agent_test_config.set_continuous_mode(False)
agent = Agent(
ai_name="Get-PhysicsNobelPrize",
@@ -252,7 +254,7 @@ def debug_code_agents(agent_test_config, memory_json_file, workspace: Workspace)
"1- Run test.py using the execute_python_file command.",
"2- Read code.py using the read_file command.",
"3- Modify code.py using the write_to_file command."
"Repeat step 1, 2 and 3 until test.py runs without errors.",
"Repeat step 1, 2 and 3 until test.py runs without errors. Do not modify the test.py file.",
],
[
"1- Run test.py.",
@@ -271,8 +273,8 @@ def debug_code_agents(agent_test_config, memory_json_file, workspace: Workspace)
)
command_registry = get_command_registry(agent_test_config)
ai_config.command_registry = command_registry
system_prompt = ai_config.construct_full_prompt()
Config().set_continuous_mode(False)
system_prompt = ai_config.construct_full_prompt(agent_test_config)
agent_test_config.set_continuous_mode(False)
agents.append(
Agent(
ai_name="Debug Code Agent",

View File

@@ -34,7 +34,9 @@ def test_json_memory_init_with_backing_empty_file(config: Config, workspace: Wor
assert index_file.read_text() == "[]"
def test_json_memory_init_with_backing_file(config: Config, workspace: Workspace):
def test_json_memory_init_with_backing_invalid_file(
config: Config, workspace: Workspace
):
index_file = workspace.root / f"{config.memory_index}.json"
index_file.touch()
@@ -69,33 +71,58 @@ def test_json_memory_clear(config: Config, memory_item: MemoryItem):
def test_json_memory_get(config: Config, memory_item: MemoryItem, mock_get_embedding):
index = JSONFileMemory(config)
assert (
index.get("test") == None
index.get("test", config) == None
), "Cannot test get() because initial index is not empty"
index.add(memory_item)
retrieved = index.get("test")
retrieved = index.get("test", config)
assert retrieved is not None
assert retrieved.memory_item == memory_item
def test_json_memory_load_index(config: Config, memory_item: MemoryItem):
index = JSONFileMemory(config)
index.add(memory_item)
try:
assert index.file_path.exists(), "index was not saved to file"
assert len(index) == 1, f"index constains {len(index)} items instead of 1"
assert index.memories[0] == memory_item, "item in index != added mock item"
except AssertionError as e:
raise ValueError(f"Setting up for load_index test failed: {e}")
index.memories = []
index.load_index()
assert len(index) == 1
assert index.memories[0] == memory_item
@pytest.mark.vcr
@requires_api_key("OPENAI_API_KEY")
def test_json_memory_get_relevant(config: Config, patched_api_requestor: None) -> None:
index = JSONFileMemory(config)
mem1 = MemoryItem.from_text_file("Sample text", "sample.txt")
mem2 = MemoryItem.from_text_file("Grocery list:\n- Pancake mix", "groceries.txt")
mem3 = MemoryItem.from_text_file("What is your favorite color?", "color.txt")
mem1 = MemoryItem.from_text_file("Sample text", "sample.txt", config)
mem2 = MemoryItem.from_text_file(
"Grocery list:\n- Pancake mix", "groceries.txt", config
)
mem3 = MemoryItem.from_text_file(
"What is your favorite color?", "color.txt", config
)
lipsum = "Lorem ipsum dolor sit amet"
mem4 = MemoryItem.from_text_file(" ".join([lipsum] * 100), "lipsum.txt")
mem4 = MemoryItem.from_text_file(" ".join([lipsum] * 100), "lipsum.txt", config)
index.add(mem1)
index.add(mem2)
index.add(mem3)
index.add(mem4)
assert index.get_relevant(mem1.raw_content, 1)[0].memory_item == mem1
assert index.get_relevant(mem2.raw_content, 1)[0].memory_item == mem2
assert index.get_relevant(mem3.raw_content, 1)[0].memory_item == mem3
assert [mr.memory_item for mr in index.get_relevant(lipsum, 2)] == [mem4, mem1]
assert index.get_relevant(mem1.raw_content, 1, config)[0].memory_item == mem1
assert index.get_relevant(mem2.raw_content, 1, config)[0].memory_item == mem2
assert index.get_relevant(mem3.raw_content, 1, config)[0].memory_item == mem3
assert [mr.memory_item for mr in index.get_relevant(lipsum, 2, config)] == [
mem4,
mem1,
]
def test_json_memory_get_stats(config: Config, memory_item: MemoryItem) -> None:
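The new `test_json_memory_load_index` exercises persistence directly; condensed, the round trip looks like this (import path assumed; `config` and `memory_item` come from the test fixtures):

```python
from autogpt.memory.vector.providers.json_file import JSONFileMemory  # path assumed

index = JSONFileMemory(config)
index.add(memory_item)
assert index.file_path.exists()  # add() flushes to the backing file

index.memories = []   # drop the in-memory copy...
index.load_index()    # ...and reload everything from disk
assert len(index) == 1
assert index.memories[0] == memory_item
```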

View File

@@ -0,0 +1,54 @@
from unittest.mock import MagicMock, patch
import pytest
from autogpt.llm.api_manager import ApiManager
from autogpt.llm.providers import openai
api_manager = ApiManager()
@pytest.fixture(autouse=True)
def reset_api_manager():
api_manager.reset()
yield
class TestProviderOpenAI:
@staticmethod
def test_create_chat_completion_debug_mode(caplog):
"""Test if debug mode logs response."""
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Who won the world series in 2020?"},
]
model = "gpt-3.5-turbo"
with patch("openai.ChatCompletion.create") as mock_create:
mock_response = MagicMock()
del mock_response.error
mock_response.usage.prompt_tokens = 10
mock_response.usage.completion_tokens = 20
mock_create.return_value = mock_response
openai.create_chat_completion(messages, model=model)
assert "Response" in caplog.text
@staticmethod
def test_create_chat_completion_empty_messages():
"""Test if empty messages result in zero tokens and cost."""
messages = []
model = "gpt-3.5-turbo"
with patch("openai.ChatCompletion.create") as mock_create:
mock_response = MagicMock()
del mock_response.error
mock_response.usage.prompt_tokens = 0
mock_response.usage.completion_tokens = 0
mock_create.return_value = mock_response
openai.create_chat_completion(messages, model=model)
assert api_manager.get_total_prompt_tokens() == 0
assert api_manager.get_total_completion_tokens() == 0
assert api_manager.get_total_cost() == 0

View File

@@ -9,10 +9,10 @@ from tests.utils import requires_api_key
@pytest.mark.vcr
@requires_api_key("OPENAI_API_KEY")
def test_generate_aiconfig_automatic_default(patched_api_requestor):
def test_generate_aiconfig_automatic_default(patched_api_requestor, config):
user_inputs = [""]
with patch("autogpt.utils.session.prompt", side_effect=user_inputs):
ai_config = prompt_user()
ai_config = prompt_user(config)
assert isinstance(ai_config, AIConfig)
assert ai_config.ai_name is not None
@@ -22,9 +22,9 @@ def test_generate_aiconfig_automatic_default(patched_api_requestor):
@pytest.mark.vcr
@requires_api_key("OPENAI_API_KEY")
def test_generate_aiconfig_automatic_typical(patched_api_requestor):
def test_generate_aiconfig_automatic_typical(patched_api_requestor, config):
user_prompt = "Help me create a rock opera about cybernetic giraffes"
ai_config = generate_aiconfig_automatic(user_prompt)
ai_config = generate_aiconfig_automatic(user_prompt, config)
assert isinstance(ai_config, AIConfig)
assert ai_config.ai_name is not None
@@ -34,7 +34,7 @@ def test_generate_aiconfig_automatic_typical(patched_api_requestor):
@pytest.mark.vcr
@requires_api_key("OPENAI_API_KEY")
def test_generate_aiconfig_automatic_fallback(patched_api_requestor):
def test_generate_aiconfig_automatic_fallback(patched_api_requestor, config):
user_inputs = [
"T&GF£OIBECC()!*",
"Chef-GPT",
@@ -45,7 +45,7 @@ def test_generate_aiconfig_automatic_fallback(patched_api_requestor):
"",
]
with patch("autogpt.utils.session.prompt", side_effect=user_inputs):
ai_config = prompt_user()
ai_config = prompt_user(config)
assert isinstance(ai_config, AIConfig)
assert ai_config.ai_name == "Chef-GPT"
@@ -55,7 +55,7 @@ def test_generate_aiconfig_automatic_fallback(patched_api_requestor):
@pytest.mark.vcr
@requires_api_key("OPENAI_API_KEY")
def test_prompt_user_manual_mode(patched_api_requestor):
def test_prompt_user_manual_mode(patched_api_requestor, config):
user_inputs = [
"--manual",
"Chef-GPT",
@@ -66,7 +66,7 @@ def test_prompt_user_manual_mode(patched_api_requestor):
"",
]
with patch("autogpt.utils.session.prompt", side_effect=user_inputs):
ai_config = prompt_user()
ai_config = prompt_user(config)
assert isinstance(ai_config, AIConfig)
assert ai_config.ai_name == "Chef-GPT"

View File

@@ -1,8 +1,13 @@
from autogpt.commands.command import command
from autogpt.command_decorator import command
@command(
"function_based", "Function-based test command", "(arg1: int, arg2: str) -> str"
"function_based",
"Function-based test command",
{
"arg1": {"type": "int", "description": "arg 1", "required": True},
"arg2": {"type": "str", "description": "arg 2", "required": True},
},
)
def function_based(arg1: int, arg2: str) -> str:
"""A function-based test command that returns a string with the two arguments separated by a dash."""

View File

@@ -5,9 +5,9 @@ from autogpt.llm.chat import create_chat_completion
@pytest.fixture
def agent_manager():
def agent_manager(config):
# Hack, real gross. Singletons are not good times.
yield AgentManager()
yield AgentManager(config)
del AgentManager._instances[AgentManager]

View File

@@ -19,10 +19,10 @@ ai_name: McFamished
ai_role: A hungry AI
api_budget: 0.0
"""
config_file = tmp_path / "ai_settings.yaml"
config_file.write_text(yaml_content)
ai_settings_file = tmp_path / "ai_settings.yaml"
ai_settings_file.write_text(yaml_content)
ai_config = AIConfig.load(config_file)
ai_config = AIConfig.load(ai_settings_file)
assert len(ai_config.ai_goals) == 4
assert ai_config.ai_goals[0] == "Goal 1: Make a sandwich"
@@ -30,8 +30,8 @@ api_budget: 0.0
assert ai_config.ai_goals[2] == "Goal 3 - Go to sleep"
assert ai_config.ai_goals[3] == "Goal 4: Wake up"
config_file.write_text("")
ai_config.save(config_file)
ai_settings_file.write_text("")
ai_config.save(ai_settings_file)
yaml_content2 = """ai_goals:
- 'Goal 1: Make a sandwich'
@@ -42,15 +42,15 @@ ai_name: McFamished
ai_role: A hungry AI
api_budget: 0.0
"""
assert config_file.read_text() == yaml_content2
assert ai_settings_file.read_text() == yaml_content2
def test_ai_config_file_not_exists(workspace):
"""Test if file does not exist."""
config_file = workspace.get_path("ai_settings.yaml")
ai_settings_file = workspace.get_path("ai_settings.yaml")
ai_config = AIConfig.load(str(config_file))
ai_config = AIConfig.load(str(ai_settings_file))
assert ai_config.ai_name == ""
assert ai_config.ai_role == ""
assert ai_config.ai_goals == []
@@ -62,10 +62,10 @@ def test_ai_config_file_not_exists(workspace):
def test_ai_config_file_is_empty(workspace):
"""Test if file does not exist."""
config_file = workspace.get_path("ai_settings.yaml")
config_file.write_text("")
ai_settings_file = workspace.get_path("ai_settings.yaml")
ai_settings_file.write_text("")
ai_config = AIConfig.load(str(config_file))
ai_config = AIConfig.load(str(ai_settings_file))
assert ai_config.ai_name == ""
assert ai_config.ai_role == ""
assert ai_config.ai_goals == []
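A condensed sketch of the load/save round trip these tests cover (field names as asserted above; direct attribute assignment is assumed to be the supported way to mutate an `AIConfig`):

```python
from autogpt.config.ai_config import AIConfig

ai_settings_file = "ai_settings.yaml"

ai_config = AIConfig.load(ai_settings_file)  # missing/empty file -> blank defaults
assert ai_config.ai_name == "" and ai_config.ai_goals == []

ai_config.ai_name = "McFamished"
ai_config.ai_role = "A hungry AI"
ai_config.ai_goals = ["Goal 1: Make a sandwich"]
ai_config.save(ai_settings_file)             # persisted back out as YAML
```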

View File

@@ -1,9 +1,10 @@
from unittest.mock import MagicMock, patch
from unittest.mock import patch
import pytest
from pytest_mock import MockerFixture
from autogpt.llm.api_manager import OPEN_AI_MODELS, ApiManager
from autogpt.llm.api_manager import ApiManager
from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS, OPEN_AI_EMBEDDING_MODELS
api_manager = ApiManager()
@@ -17,80 +18,18 @@ def reset_api_manager():
@pytest.fixture(autouse=True)
def mock_costs(mocker: MockerFixture):
mocker.patch.multiple(
OPEN_AI_MODELS["gpt-3.5-turbo"],
OPEN_AI_CHAT_MODELS["gpt-3.5-turbo"],
prompt_token_cost=0.0013,
completion_token_cost=0.0025,
)
mocker.patch.multiple(
OPEN_AI_MODELS["text-embedding-ada-002"],
OPEN_AI_EMBEDDING_MODELS["text-embedding-ada-002"],
prompt_token_cost=0.0004,
)
yield
class TestApiManager:
@staticmethod
def test_create_chat_completion_debug_mode(caplog):
"""Test if debug mode logs response."""
api_manager_debug = ApiManager(debug=True)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Who won the world series in 2020?"},
]
model = "gpt-3.5-turbo"
with patch("openai.ChatCompletion.create") as mock_create:
mock_response = MagicMock()
del mock_response.error
mock_response.usage.prompt_tokens = 10
mock_response.usage.completion_tokens = 20
mock_create.return_value = mock_response
api_manager_debug.create_chat_completion(messages, model=model)
assert "Response" in caplog.text
@staticmethod
def test_create_chat_completion_empty_messages():
"""Test if empty messages result in zero tokens and cost."""
messages = []
model = "gpt-3.5-turbo"
with patch("openai.ChatCompletion.create") as mock_create:
mock_response = MagicMock()
del mock_response.error
mock_response.usage.prompt_tokens = 0
mock_response.usage.completion_tokens = 0
mock_create.return_value = mock_response
api_manager.create_chat_completion(messages, model=model)
assert api_manager.get_total_prompt_tokens() == 0
assert api_manager.get_total_completion_tokens() == 0
assert api_manager.get_total_cost() == 0
@staticmethod
def test_create_chat_completion_valid_inputs():
"""Test if valid inputs result in correct tokens and cost."""
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Who won the world series in 2020?"},
]
model = "gpt-3.5-turbo"
with patch("openai.ChatCompletion.create") as mock_create:
mock_response = MagicMock()
del mock_response.error
mock_response.usage.prompt_tokens = 10
mock_response.usage.completion_tokens = 20
mock_create.return_value = mock_response
api_manager.create_chat_completion(messages, model=model)
assert api_manager.get_total_prompt_tokens() == 10
assert api_manager.get_total_completion_tokens() == 20
assert api_manager.get_total_cost() == (10 * 0.0013 + 20 * 0.0025) / 1000
def test_getter_methods(self):
"""Test the getter methods for total tokens, cost, and budget."""
api_manager.update_cost(600, 1200, "gpt-3.5-turbo")
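With the mocked per-1000-token prices above (0.0013 prompt, 0.0025 completion), the totals behind the getter assertions work out as follows:

```python
# Worked numbers for update_cost(600, 1200, "gpt-3.5-turbo") under the mocked prices.
prompt_tokens, completion_tokens = 600, 1200
prompt_cost = prompt_tokens * 0.0013 / 1000          # 0.00078
completion_cost = completion_tokens * 0.0025 / 1000  # 0.00300
total_cost = prompt_cost + completion_cost           # 0.00378
assert round(total_cost, 5) == 0.00378
```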

View File

@@ -1,119 +0,0 @@
# Generated by CodiumAI
# Dependencies:
# pip install pytest-mock
from autogpt.agent.agent import Agent
from autogpt.commands.web_requests import scrape_links
"""
Code Analysis
Objective:
The objective of the 'scrape_links' function is to scrape hyperlinks from a
given URL and return them in a formatted way.
Inputs:
- url: a string representing the URL to be scraped.
Flow:
1. Send a GET request to the given URL using the requests library and the user agent header from the config file.
2. Check if the response contains an HTTP error. If it does, return "error".
3. Parse the HTML content of the response using the BeautifulSoup library.
4. Remove any script and style tags from the parsed HTML.
5. Extract all hyperlinks from the parsed HTML using the 'extract_hyperlinks' function.
6. Format the extracted hyperlinks using the 'format_hyperlinks' function.
7. Return the formatted hyperlinks.
Outputs:
- A list of formatted hyperlinks.
Additional aspects:
- The function uses the 'requests' and 'BeautifulSoup' libraries to send HTTP
requests and parse HTML content, respectively.
- The 'extract_hyperlinks' function is called to extract hyperlinks from the parsed HTML.
- The 'format_hyperlinks' function is called to format the extracted hyperlinks.
- The function checks for HTTP errors and returns "error" if any are found.
"""
class TestScrapeLinks:
"""
Tests that the function returns a list of formatted hyperlinks when
provided with a valid url that returns a webpage with hyperlinks.
"""
def test_valid_url_with_hyperlinks(self, agent: Agent):
url = "https://www.google.com"
result = scrape_links(url, agent=agent)
assert len(result) > 0
assert isinstance(result, list)
assert isinstance(result[0], str)
def test_valid_url(self, mocker, agent: Agent):
"""Test that the function returns correctly formatted hyperlinks when given a valid url."""
# Mock the requests.get() function to return a response with sample HTML containing hyperlinks
mock_response = mocker.Mock()
mock_response.status_code = 200
mock_response.text = (
"<html><body><a href='https://www.google.com'>Google</a></body></html>"
)
mocker.patch("requests.Session.get", return_value=mock_response)
# Call the function with a valid URL
result = scrape_links("https://www.example.com", agent)
# Assert that the function returns correctly formatted hyperlinks
assert result == ["Google (https://www.google.com)"]
def test_invalid_url(self, mocker, agent: Agent):
"""Test that the function returns "error" when given an invalid url."""
# Mock the requests.get() function to return an HTTP error response
mock_response = mocker.Mock()
mock_response.status_code = 404
mocker.patch("requests.Session.get", return_value=mock_response)
# Call the function with an invalid URL
result = scrape_links("https://www.invalidurl.com", agent)
# Assert that the function returns "error"
assert "Error:" in result
def test_no_hyperlinks(self, mocker, agent: Agent):
"""Test that the function returns an empty list when the html contains no hyperlinks."""
# Mock the requests.get() function to return a response with sample HTML containing no hyperlinks
mock_response = mocker.Mock()
mock_response.status_code = 200
mock_response.text = "<html><body><p>No hyperlinks here</p></body></html>"
mocker.patch("requests.Session.get", return_value=mock_response)
# Call the function with a URL containing no hyperlinks
result = scrape_links("https://www.example.com", agent)
# Assert that the function returns an empty list
assert result == []
def test_scrape_links_with_few_hyperlinks(self, mocker, agent: Agent):
"""Test that scrape_links() correctly extracts and formats hyperlinks from a sample HTML containing a few hyperlinks."""
mock_response = mocker.Mock()
mock_response.status_code = 200
mock_response.text = """
<html>
<body>
<div id="google-link"><a href="https://www.google.com">Google</a></div>
<div id="github"><a href="https://github.com">GitHub</a></div>
<div id="CodiumAI"><a href="https://www.codium.ai">CodiumAI</a></div>
</body>
</html>
"""
mocker.patch("requests.Session.get", return_value=mock_response)
# Call the function being tested
result = scrape_links("https://www.example.com", agent)
# Assert that the function returns a list of formatted hyperlinks
assert isinstance(result, list)
assert len(result) == 3
assert result[0] == "Google (https://www.google.com)"
assert result[1] == "GitHub (https://github.com)"
assert result[2] == "CodiumAI (https://www.codium.ai)"

View File

@@ -1,117 +0,0 @@
# Generated by CodiumAI
import pytest
import requests
from autogpt.agent.agent import Agent
from autogpt.commands.web_requests import scrape_text
"""
Code Analysis
Objective:
The objective of the "scrape_text" function is to scrape the text content from
a given URL and return it as a string, after removing any unwanted HTML tags and
scripts.
Inputs:
- url: a string representing the URL of the webpage to be scraped.
Flow:
1. Send a GET request to the given URL using the requests library and the user agent
header from the config file.
2. Check if the response contains an HTTP error. If it does, return an error message.
3. Use BeautifulSoup to parse the HTML content of the response and extract all script
and style tags.
4. Get the text content of the remaining HTML using the get_text() method of
BeautifulSoup.
5. Split the text into lines and then into chunks, removing any extra whitespace.
6. Join the chunks into a single string with newline characters between them.
7. Return the cleaned text.
Outputs:
- A string representing the cleaned text content of the webpage.
Additional aspects:
- The function uses the requests library and BeautifulSoup to handle the HTTP request
and HTML parsing, respectively.
- The function removes script and style tags from the HTML to avoid including unwanted
content in the text output.
- The function uses a generator expression to split the text into lines and chunks,
which can improve performance for large amounts of text.
"""
class TestScrapeText:
def test_scrape_text_with_valid_url(self, mocker, agent: Agent):
"""Tests that scrape_text() returns the expected text when given a valid URL."""
# Mock the requests.get() method to return a response with expected text
expected_text = "This is some sample text"
mock_response = mocker.Mock()
mock_response.status_code = 200
mock_response.text = (
"<html><body><div><p style='color: blue;'>"
f"{expected_text}</p></div></body></html>"
)
mocker.patch("requests.Session.get", return_value=mock_response)
# Call the function with a valid URL and assert that it returns the
# expected text
url = "http://www.example.com"
assert scrape_text(url, agent) == expected_text
def test_invalid_url(self, agent: Agent):
"""Tests that an error is raised when an invalid url is provided."""
url = "invalidurl.com"
pytest.raises(ValueError, scrape_text, url, agent)
def test_unreachable_url(self, mocker, agent: Agent):
"""Test that scrape_text returns an error message when an invalid or unreachable url is provided."""
# Mock the requests.get() method to raise an exception
mocker.patch(
"requests.Session.get", side_effect=requests.exceptions.RequestException
)
# Call the function with an invalid URL and assert that it returns an error
# message
url = "http://thiswebsitedoesnotexist.net/"
error_message = scrape_text(url, agent)
assert "Error:" in error_message
def test_no_text(self, mocker, agent: Agent):
"""Test that scrape_text returns an empty string when the html page contains no text to be scraped."""
# Mock the requests.get() method to return a response with no text
mock_response = mocker.Mock()
mock_response.status_code = 200
mock_response.text = "<html><body></body></html>"
mocker.patch("requests.Session.get", return_value=mock_response)
# Call the function with a valid URL and assert that it returns an empty string
url = "http://www.example.com"
assert scrape_text(url, agent) == ""
def test_http_error(self, mocker, agent: Agent):
"""Test that scrape_text returns an error message when the response status code is an http error (>=400)."""
# Mock the requests.get() method to return a response with a 404 status code
mocker.patch("requests.Session.get", return_value=mocker.Mock(status_code=404))
# Call the function with a URL
result = scrape_text("https://www.example.com", agent)
# Check that the function returns an error message
assert result == "Error: HTTP 404 error"
def test_scrape_text_with_html_tags(self, mocker, agent: Agent):
"""Test that scrape_text() properly handles HTML tags."""
# Create a mock response object with HTML containing tags
html = "<html><body><p>This is <b>bold</b> text.</p></body></html>"
mock_response = mocker.Mock()
mock_response.status_code = 200
mock_response.text = html
mocker.patch("requests.Session.get", return_value=mock_response)
# Call the function with a URL
result = scrape_text("https://www.example.com", agent)
# Check that the function properly handles HTML tags
assert result == "This is bold text."

View File

@@ -5,7 +5,8 @@ from pathlib import Path
import pytest
from autogpt.commands.command import Command, CommandRegistry
from autogpt.models.command import Command
from autogpt.models.command_registry import CommandRegistry
SIGNATURE = "(arg1: int, arg2: str) -> str"
@@ -40,6 +41,13 @@ class TestCommand:
name="example",
description="Example command",
method=self.example_command_method,
signature={
"prompt": {
"type": "string",
"description": "The prompt used to generate the image",
"required": True,
},
},
)
result = cmd(arg1=1, arg2="test")
assert result == "1 - test"

View File

@@ -1,5 +1,5 @@
"""
Test cases for the Config class, which handles the configuration settings
Test cases for the config class, which handles the configuration settings
for the AI and ensures it behaves as a singleton.
"""
from unittest import mock
@@ -7,14 +7,14 @@ from unittest.mock import patch
import pytest
from autogpt.config.config import Config
from autogpt.config import Config
from autogpt.configurator import GPT_3_MODEL, GPT_4_MODEL, create_config
from autogpt.workspace.workspace import Workspace
def test_initial_values(config: Config):
"""
Test if the initial values of the Config class attributes are set correctly.
Test if the initial values of the config class attributes are set correctly.
"""
assert config.debug_mode == False
assert config.continuous_mode == False

View File

@@ -13,9 +13,9 @@ from pytest_mock import MockerFixture
import autogpt.commands.file_operations as file_ops
from autogpt.agent.agent import Agent
from autogpt.config import Config
from autogpt.memory.vector.memory_item import MemoryItem
from autogpt.memory.vector.utils import Embedding
from autogpt.utils import readable_file_size
from autogpt.workspace import Workspace
@@ -25,11 +25,13 @@ def file_content():
@pytest.fixture()
def mock_MemoryItem_from_text(mocker: MockerFixture, mock_embedding: Embedding):
def mock_MemoryItem_from_text(
mocker: MockerFixture, mock_embedding: Embedding, config: Config
):
mocker.patch.object(
file_ops.MemoryItem,
"from_text",
new=lambda content, source_type, metadata: MemoryItem(
new=lambda content, source_type, config, metadata: MemoryItem(
raw_content=content,
summary=f"Summary of content '{content}'",
chunk_summaries=[f"Summary of content '{content}'"],
@@ -243,53 +245,6 @@ def test_write_file_succeeds_if_content_different(
assert result == "File written to successfully."
# Update file testing
def test_replace_in_file_all_occurrences(test_file, test_file_path, agent: Agent):
old_content = "This is a test file.\n we test file here\na test is needed"
expected_content = (
"This is a update file.\n we update file here\na update is needed"
)
test_file.write(old_content)
test_file.close()
file_ops.replace_in_file(test_file_path, "test", "update", agent=agent)
with open(test_file_path) as f:
new_content = f.read()
print(new_content)
print(expected_content)
assert new_content == expected_content
def test_replace_in_file_one_occurrence(test_file, test_file_path, agent: Agent):
old_content = "This is a test file.\n we test file here\na test is needed"
expected_content = "This is a test file.\n we update file here\na test is needed"
test_file.write(old_content)
test_file.close()
file_ops.replace_in_file(
test_file_path, "test", "update", agent=agent, occurrence_index=1
)
with open(test_file_path) as f:
new_content = f.read()
assert new_content == expected_content
def test_replace_in_file_multiline_old_text(test_file, test_file_path, agent: Agent):
old_content = "This is a multi_line\ntest for testing\nhow well this function\nworks when the input\nis multi-lined"
expected_content = "This is a multi_line\nfile. succeeded test\nis multi-lined"
test_file.write(old_content)
test_file.close()
file_ops.replace_in_file(
test_file_path,
"\ntest for testing\nhow well this function\nworks when the input\n",
"\nfile. succeeded test\n",
agent=agent,
)
with open(test_file_path) as f:
new_content = f.read()
assert new_content == expected_content
def test_append_to_file(test_nested_file: Path, agent: Agent):
append_text = "This is appended text.\n"
file_ops.write_to_file(test_nested_file, append_text, agent=agent)
@@ -373,26 +328,3 @@ def test_list_files(workspace: Workspace, test_directory: Path, agent: Agent):
non_existent_file = "non_existent_file.txt"
files = file_ops.list_files("", agent=agent)
assert non_existent_file not in files
def test_download_file(workspace: Workspace, agent: Agent):
url = "https://github.com/Significant-Gravitas/Auto-GPT/archive/refs/tags/v0.2.2.tar.gz"
local_name = workspace.get_path("auto-gpt.tar.gz")
size = 365023
readable_size = readable_file_size(size)
assert (
file_ops.download_file(url, local_name, agent=agent)
== f'Successfully downloaded and locally stored file: "{local_name}"! (Size: {readable_size})'
)
assert os.path.isfile(local_name) is True
assert os.path.getsize(local_name) == size
url = "https://github.com/Significant-Gravitas/Auto-GPT/archive/refs/tags/v0.0.0.tar.gz"
assert "Got an HTTP Error whilst trying to download file" in file_ops.download_file(
url, local_name, agent=agent
)
url = "https://thiswebsiteiswrong.hmm/v0.0.0.tar.gz"
assert "Failed to establish a new connection:" in file_ops.download_file(
url, local_name, agent=agent
)

View File

@@ -1,62 +0,0 @@
from datetime import datetime
from pytest_mock import MockerFixture
from autogpt.agent.agent import Agent
from autogpt.config import AIConfig
from autogpt.config.config import Config
from autogpt.llm.chat import create_chat_completion
from autogpt.log_cycle.log_cycle import LogCycleHandler
def test_get_self_feedback(config: Config, mocker: MockerFixture):
# Define a sample thoughts dictionary
thoughts = {
"reasoning": "Sample reasoning.",
"plan": "Sample plan.",
"thoughts": "Sample thoughts.",
}
# Define a fake response for the create_chat_completion function
fake_response = (
"The AI Agent has demonstrated a reasonable thought process, but there is room for improvement. "
"For example, the reasoning could be elaborated to better justify the plan, and the plan itself "
"could be more detailed to ensure its effectiveness. In addition, the AI Agent should focus more "
"on its core role and prioritize thoughts that align with that role."
)
# Mock the create_chat_completion function
mock_create_chat_completion = mocker.patch(
"autogpt.agent.agent.create_chat_completion", wraps=create_chat_completion
)
mock_create_chat_completion.return_value = fake_response
# Create a MagicMock object to replace the Agent instance
agent_mock = mocker.MagicMock(spec=Agent)
# Mock the config attribute of the Agent instance
agent_mock.config = config
agent_mock.ai_config = AIConfig()
# Mock the log_cycle_handler attribute of the Agent instance
agent_mock.log_cycle_handler = LogCycleHandler()
# Mock the create_nested_directory method of the LogCycleHandler instance
agent_mock.created_at = datetime.now().strftime("%Y%m%d_%H%M%S")
# Mock the cycle_count attribute of the Agent instance
agent_mock.cycle_count = 0
# Call the get_self_feedback method
feedback = Agent.get_self_feedback(
agent_mock,
thoughts,
"gpt-3.5-turbo",
)
# Check if the response is a non-empty string
assert isinstance(feedback, str) and len(feedback) > 0
# Check if certain keywords from input thoughts are present in the feedback response
for keyword in ["reasoning", "plan", "thoughts"]:
assert keyword in feedback

View File

@@ -1,25 +0,0 @@
from unittest.mock import MagicMock
from pytest_mock import MockerFixture
from autogpt.agent.agent import Agent
from autogpt.app import list_agents, start_agent
def test_make_agent(agent: Agent, mocker: MockerFixture) -> None:
"""Test that an agent can be created"""
mock = mocker.patch("openai.ChatCompletion.create")
response = MagicMock()
response.choices[0].message.content = "Test message"
response.usage.prompt_tokens = 1
response.usage.completion_tokens = 1
del response.error
mock.return_value = response
start_agent("Test Agent", "chat", "Hello, how are you?", agent, "gpt-3.5-turbo")
agents = list_agents(agent)
assert "List of agents:\n0: chat" == agents
start_agent("Test Agent 2", "write", "Hello, how are you?", agent, "gpt-3.5-turbo")
agents = list_agents(agent.config)
assert "List of agents:\n0: chat\n1: write" == agents

View File

@@ -38,8 +38,7 @@ def agent(config: Config):
return agent
def test_message_history_batch_summary(mocker, agent):
config = Config()
def test_message_history_batch_summary(mocker, agent, config):
history = MessageHistory(agent)
model = config.fast_llm_model
message_tlength = 0
@@ -114,7 +113,7 @@ def test_message_history_batch_summary(mocker, agent):
history.append(user_input_msg)
# only take the last cycle of the message history, trim the rest of previous messages, and generate a summary for them
for cycle in reversed(list(history.per_cycle())):
for cycle in reversed(list(history.per_cycle(config))):
messages_to_add = [msg for msg in cycle if msg is not None]
message_sequence.insert(insertion_index, *messages_to_add)
break
@@ -127,7 +126,7 @@ def test_message_history_batch_summary(mocker, agent):
# test the main trim_message function
new_summary_message, trimmed_messages = history.trim_messages(
current_message_chain=list(message_sequence),
current_message_chain=list(message_sequence), config=config
)
expected_call_count = math.ceil(

View File

@@ -23,10 +23,10 @@ performance_evaluations:
- Another test performance evaluation
- A third test performance evaluation
"""
config_file = tmp_path / "test_prompt_settings.yaml"
config_file.write_text(yaml_content)
prompt_settings_file = tmp_path / "test_prompt_settings.yaml"
prompt_settings_file.write_text(yaml_content)
prompt_config = PromptConfig(config_file)
prompt_config = PromptConfig(prompt_settings_file)
assert len(prompt_config.constraints) == 3
assert prompt_config.constraints[0] == "A test constraint"

View File

@@ -0,0 +1,110 @@
import pytest
from openai.error import APIError, RateLimitError
from autogpt.llm.providers import openai
@pytest.fixture(params=[RateLimitError, APIError])
def error(request):
if request.param == APIError:
return request.param("Error", http_status=502)
else:
return request.param("Error")
def error_factory(error_instance, error_count, retry_count, warn_user=True):
"""Creates errors"""
class RaisesError:
def __init__(self):
self.count = 0
@openai.retry_api(
num_retries=retry_count, backoff_base=0.001, warn_user=warn_user
)
def __call__(self):
self.count += 1
if self.count <= error_count:
raise error_instance
return self.count
return RaisesError()
def test_retry_open_api_no_error(capsys):
"""Tests the retry functionality with no errors expected"""
@openai.retry_api()
def f():
return 1
result = f()
assert result == 1
output = capsys.readouterr()
assert output.out == ""
assert output.err == ""
@pytest.mark.parametrize(
"error_count, retry_count, failure",
[(2, 10, False), (2, 2, False), (10, 2, True), (3, 2, True), (1, 0, True)],
ids=["passing", "passing_edge", "failing", "failing_edge", "failing_no_retries"],
)
def test_retry_open_api_passing(capsys, error, error_count, retry_count, failure):
"""Tests the retry with simulated errors [RateLimitError, APIError], but should ulimately pass"""
call_count = min(error_count, retry_count) + 1
raises = error_factory(error, error_count, retry_count)
if failure:
with pytest.raises(type(error)):
raises()
else:
result = raises()
assert result == call_count
assert raises.count == call_count
output = capsys.readouterr()
if error_count and retry_count:
if type(error) == RateLimitError:
assert "Reached rate limit, passing..." in output.out
assert "Please double check" in output.out
if type(error) == APIError:
assert "API Bad gateway" in output.out
else:
assert output.out == ""
def test_retry_open_api_rate_limit_no_warn(capsys):
"""Tests the retry logic with a rate limit error"""
error_count = 2
retry_count = 10
raises = error_factory(RateLimitError, error_count, retry_count, warn_user=False)
result = raises()
call_count = min(error_count, retry_count) + 1
assert result == call_count
assert raises.count == call_count
output = capsys.readouterr()
assert "Reached rate limit, passing..." in output.out
assert "Please double check" not in output.out
def test_retry_openapi_other_api_error(capsys):
"""Tests the Retry logic with a non rate limit error such as HTTP500"""
error_count = 2
retry_count = 10
raises = error_factory(APIError("Error", http_status=500), error_count, retry_count)
with pytest.raises(APIError):
raises()
call_count = 1
assert raises.count == call_count
output = capsys.readouterr()
assert output.out == ""

View File

@@ -4,6 +4,7 @@ from unittest.mock import patch
import pytest
import requests
from autogpt.config import Config
from autogpt.json_utils.utilities import extract_json_from_response, validate_json
from autogpt.utils import (
get_bulletin_from_web,
@@ -185,12 +186,12 @@ def test_get_current_git_branch_failure(mock_repo):
assert branch_name == ""
def test_validate_json_valid(valid_json_response):
assert validate_json(valid_json_response)
def test_validate_json_valid(valid_json_response, config: Config):
assert validate_json(valid_json_response, config)
def test_validate_json_invalid(invalid_json_response):
assert not validate_json(valid_json_response)
def test_validate_json_invalid(invalid_json_response, config: Config):
assert not validate_json(invalid_json_response, config)
def test_extract_json_from_response(valid_json_response: dict):

View File

@@ -4,11 +4,7 @@ import pytest
from googleapiclient.errors import HttpError
from autogpt.agent.agent import Agent
from autogpt.commands.google_search import (
google_official_search,
google_search,
safe_google_results,
)
from autogpt.commands.web_search import google, safe_google_results, web_search
@pytest.mark.parametrize(
@@ -45,8 +41,8 @@ def test_google_search(
mock_ddg = mocker.Mock()
mock_ddg.return_value = return_value
mocker.patch("autogpt.commands.google_search.DDGS.text", mock_ddg)
actual_output = google_search(query, agent=agent, num_results=num_results)
mocker.patch("autogpt.commands.web_search.DDGS.text", mock_ddg)
actual_output = web_search(query, agent=agent, num_results=num_results)
expected_output = safe_google_results(expected_output)
assert actual_output == expected_output
@@ -88,7 +84,7 @@ def test_google_official_search(
agent: Agent,
):
mock_googleapiclient.return_value = search_results
actual_output = google_official_search(query, agent=agent, num_results=num_results)
actual_output = google(query, agent=agent, num_results=num_results)
assert actual_output == safe_google_results(expected_output)
@@ -136,5 +132,5 @@ def test_google_official_search_errors(
)
mock_googleapiclient.side_effect = error
actual_output = google_official_search(query, agent=agent, num_results=num_results)
actual_output = google(query, agent=agent, num_results=num_results)
assert actual_output == safe_google_results(expected_output)
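For callers the rename is mechanical: `google_search` becomes `web_search` and `google_official_search` becomes `google`, both now living in `autogpt.commands.web_search`. A hedged call sketch (signatures as used in these tests; `agent` comes from the surrounding fixtures):

```python
from autogpt.commands.web_search import google, web_search

# DuckDuckGo-backed search (formerly google_search):
results = web_search("Auto-GPT release notes", agent=agent, num_results=5)

# Google Custom Search API (formerly google_official_search):
official = google("Auto-GPT release notes", agent=agent, num_results=5)
```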