Mirror of https://github.com/aljazceru/Auto-GPT.git (synced 2025-12-24 01:14:22 +01:00)
Pass agent to commands instead of config (#4645)
* Add config as attribute to Agent, rename old config to ai_config
* Code review: Pass ai_config
* Pass agent to commands instead of config
* Lint
* Fix merge error
* Fix memory challenge a

---------

Co-authored-by: Nicholas Tindle <nick@ntindle.com>
Co-authored-by: merwanehamadi <merwanehamadi@gmail.com>
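The change below reworks the command interface: instead of receiving a Config object, every registered command now receives the Agent and reads settings through agent.config, and execute_command forwards agent=agent. As a rough illustration only (the read_version command and its body are made up and not part of this PR; the @command decorator, Agent import, and agent.config attribute are taken from the diff below), a command written against the new interface looks like this:

    # Hypothetical example command illustrating the new interface; not part of this PR.
    from autogpt.agent.agent import Agent
    from autogpt.commands.command import command


    @command("read_version", "Read version file", '"filename": "<filename>"')
    def read_version(filename: str, agent: Agent) -> str:
        # Settings that used to come from a Config parameter are now reached
        # through the agent, e.g. agent.config.workspace_path.
        return f"Reading {filename} from {agent.config.workspace_path}"


    # The dispatcher then invokes commands as: cmd(**arguments, agent=agent)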
@@ -4,7 +4,6 @@ from datetime import datetime

from colorama import Fore, Style

from autogpt.app import execute_command, get_command
from autogpt.commands.command import CommandRegistry
from autogpt.config import Config
from autogpt.config.ai_config import AIConfig

@@ -89,6 +88,9 @@ class Agent:
).max_tokens

def start_interaction_loop(self):
# Avoid circular imports
from autogpt.app import execute_command, get_command

# Interaction Loop
self.cycle_count = 0
command_name = None

@@ -287,8 +289,7 @@ class Agent:
self.command_registry,
command_name,
arguments,
self.ai_config.prompt_generator,
config=self.config,
agent=self,
)
result = f"Command {command_name} returned: " f"{command_result}"
@@ -2,12 +2,11 @@
import json
from typing import Dict, List, Union

from autogpt.agent.agent import Agent
from autogpt.agent.agent_manager import AgentManager
from autogpt.commands.command import CommandRegistry, command
from autogpt.commands.web_requests import scrape_links, scrape_text
from autogpt.config import Config
from autogpt.processing.text import summarize_text
from autogpt.prompts.generator import PromptGenerator
from autogpt.speech import say_text
from autogpt.url_utils.validators import validate_url

@@ -87,9 +86,8 @@ def map_command_synonyms(command_name: str):
def execute_command(
command_registry: CommandRegistry,
command_name: str,
arguments,
prompt: PromptGenerator,
config: Config,
arguments: dict[str, str],
agent: Agent,
):
"""Execute the command and return the result

@@ -105,7 +103,7 @@ def execute_command(

# If the command is found, call it with the provided arguments
if cmd:
return cmd(**arguments, config=config)
return cmd(**arguments, agent=agent)

# TODO: Remove commands below after they are moved to the command registry.
command_name = map_command_synonyms(command_name.lower())

@@ -113,7 +111,7 @@ def execute_command(
# TODO: Change these to take in a file rather than pasted code, if
# non-file is given, return instructions "Input should be a python
# filepath, write your code to file and try again
for command in prompt.commands:
for command in agent.prompt.commands:
if (
command_name == command["label"].lower()
or command_name == command["name"].lower()

@@ -132,7 +130,7 @@ def execute_command(
"get_text_summary", "Get text summary", '"url": "<url>", "question": "<question>"'
)
@validate_url
def get_text_summary(url: str, question: str, config: Config) -> str:
def get_text_summary(url: str, question: str, agent: Agent) -> str:
"""Get the text summary of a webpage

Args:

@@ -142,7 +140,7 @@ def get_text_summary(url: str, question: str, config: Config) -> str:
Returns:
str: The summary of the text
"""
text = scrape_text(url, config)
text = scrape_text(url, agent)
summary, _ = summarize_text(text, question=question)

return f""" "Result" : {summary}"""

@@ -150,7 +148,7 @@ def get_text_summary(url: str, question: str, config: Config) -> str:

@command("get_hyperlinks", "Get hyperlinks", '"url": "<url>"')
@validate_url
def get_hyperlinks(url: str, config: Config) -> Union[str, List[str]]:
def get_hyperlinks(url: str, agent: Agent) -> Union[str, List[str]]:
"""Get all hyperlinks on a webpage

Args:

@@ -159,7 +157,7 @@ def get_hyperlinks(url: str, config: Config) -> Union[str, List[str]]:
Returns:
str or list: The hyperlinks on the page
"""
return scrape_links(url, config)
return scrape_links(url, agent)


@command(

@@ -167,7 +165,7 @@ def get_hyperlinks(url: str, config: Config) -> Union[str, List[str]]:
"Start GPT Agent",
'"name": "<name>", "task": "<short_task_desc>", "prompt": "<prompt>"',
)
def start_agent(name: str, task: str, prompt: str, config: Config, model=None) -> str:
def start_agent(name: str, task: str, prompt: str, agent: Agent, model=None) -> str:
"""Start an agent with a given name, task, and prompt

Args:

@@ -188,11 +186,11 @@ def start_agent(name: str, task: str, prompt: str, config: Config, model=None) -
agent_intro = f"{voice_name} here, Reporting for duty!"

# Create agent
if config.speak_mode:
if agent.config.speak_mode:
say_text(agent_intro, 1)
key, ack = agent_manager.create_agent(task, first_message, model)

if config.speak_mode:
if agent.config.speak_mode:
say_text(f"Hello {voice_name}. Your task is as follows. {task}.")

# Assign task (prompt), get response

@@ -202,7 +200,7 @@ def start_agent(name: str, task: str, prompt: str, config: Config, model=None) -


@command("message_agent", "Message GPT Agent", '"key": "<key>", "message": "<message>"')
def message_agent(key: str, message: str, config: Config) -> str:
def message_agent(key: str, message: str, agent: Agent) -> str:
"""Message an agent with a given key and message"""
# Check if the key is a valid integer
if is_valid_int(key):

@@ -211,13 +209,13 @@ def message_agent(key: str, message: str, config: Config) -> str:
return "Invalid key, must be an integer."

# Speak response
if config.speak_mode:
if agent.config.speak_mode:
say_text(agent_response, 1)
return agent_response


@command("list_agents", "List GPT Agents", "() -> str")
def list_agents(config: Config) -> str:
def list_agents(agent: Agent) -> str:
"""List all agents

Returns:

@@ -229,7 +227,7 @@ def list_agents(config: Config) -> str:


@command("delete_agent", "Delete GPT Agent", '"key": "<key>"')
def delete_agent(key: str, config: Config) -> str:
def delete_agent(key: str, agent: Agent) -> str:
"""Delete an agent with a given key

Args:
@@ -1,21 +1,17 @@
"""Code evaluation module."""
from __future__ import annotations

from typing import TYPE_CHECKING

from autogpt.agent.agent import Agent
from autogpt.commands.command import command
from autogpt.llm.utils import call_ai_function

if TYPE_CHECKING:
from autogpt.config import Config


@command(
"analyze_code",
"Analyze Code",
'"code": "<full_code_string>"',
)
def analyze_code(code: str, config: Config) -> list[str]:
def analyze_code(code: str, agent: Agent) -> list[str]:
"""
A function that takes in a string and returns a response from create chat
completion api call.

@@ -33,4 +29,6 @@ def analyze_code(code: str, config: Config) -> list[str]:
"Analyzes the given code and returns a list of suggestions for improvements."
)

return call_ai_function(function_string, args, description_string, config=config)
return call_ai_function(
function_string, args, description_string, config=agent.config
)
@@ -1,14 +1,10 @@
"""Commands for converting audio to text."""
import json
from typing import TYPE_CHECKING

import requests

from autogpt.agent.agent import Agent
from autogpt.commands.command import command
from autogpt.config import Config

if TYPE_CHECKING:
from autogpt.config import Config


@command(

@@ -19,7 +15,7 @@ if TYPE_CHECKING:
and config.huggingface_api_token,
"Configure huggingface_audio_to_text_model and Hugging Face api token.",
)
def read_audio_from_file(filename: str, config: Config) -> str:
def read_audio_from_file(filename: str, agent: Agent) -> str:
"""
Convert audio to text.

@@ -31,10 +27,10 @@ def read_audio_from_file(filename: str, config: Config) -> str:
"""
with open(filename, "rb") as audio_file:
audio = audio_file.read()
return read_audio(audio, config)
return read_audio(audio, agent.config)


def read_audio(audio: bytes, config: Config) -> str:
def read_audio(audio: bytes, agent: Agent) -> str:
"""
Convert audio to text.

@@ -44,8 +40,8 @@ def read_audio(audio: bytes, config: Config) -> str:
Returns:
str: The text from the audio
"""
if config.audio_to_text_provider == "huggingface":
text = read_huggingface_audio(audio, config)
if agent.config.audio_to_text_provider == "huggingface":
text = read_huggingface_audio(audio, agent.config)
if text:
return f"The audio says: {text}"
else:

@@ -54,10 +50,10 @@ def read_audio(audio: bytes, config: Config) -> str:
return "Error: No audio to text provider given"


def read_huggingface_audio(audio: bytes, config: Config) -> str:
model = config.huggingface_audio_to_text_model
def read_huggingface_audio(audio: bytes, agent: Agent) -> str:
model = agent.config.huggingface_audio_to_text_model
api_url = f"https://api-inference.huggingface.co/models/{model}"
api_token = config.huggingface_api_token
api_token = agent.config.huggingface_api_token
headers = {"Authorization": f"Bearer {api_token}"}

if api_token is None:
@@ -4,11 +4,11 @@ import subprocess
from pathlib import Path

import docker
from confection import Config
from docker.errors import ImageNotFound

from autogpt.agent.agent import Agent
from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.config.ai_config import AIConfig
from autogpt.logs import logger
from autogpt.setup import CFG
from autogpt.workspace.workspace import Workspace

@@ -22,7 +22,7 @@ DENYLIST_CONTROL = "denylist"
"Create a Python file and execute it",
'"code": "<code>", "basename": "<basename>"',
)
def execute_python_code(code: str, basename: str, config: Config) -> str:
def execute_python_code(code: str, basename: str, agent: Agent) -> str:
"""Create and execute a Python file in a Docker container and return the STDOUT of the
executed code. If there is any data that needs to be captured use a print statement

@@ -33,8 +33,8 @@ def execute_python_code(code: str, basename: str, config: Config) -> str:
Returns:
str: The STDOUT captured from the code when it ran
"""
ai_name = AIConfig.load(config.ai_settings_file).ai_name
directory = os.path.join(config.workspace_path, ai_name, "executed_code")
ai_name = agent.ai_name
directory = os.path.join(agent.config.workspace_path, ai_name, "executed_code")
os.makedirs(directory, exist_ok=True)

if not basename.endswith(".py"):

@@ -46,13 +46,13 @@ def execute_python_code(code: str, basename: str, config: Config) -> str:
with open(path, "w+", encoding="utf-8") as f:
f.write(code)

return execute_python_file(f.name, config)
return execute_python_file(f.name, agent)
except Exception as e:
return f"Error: {str(e)}"


@command("execute_python_file", "Execute Python File", '"filename": "<filename>"')
def execute_python_file(filename: str, config: Config) -> str:
def execute_python_file(filename: str, agent: Agent) -> str:
"""Execute a Python file in a Docker container and return the output

Args:

@@ -68,7 +68,9 @@ def execute_python_file(filename: str, config: Config) -> str:
if not filename.endswith(".py"):
return "Error: Invalid file type. Only .py files are allowed."

workspace = Workspace(config.workspace_path, config.restrict_to_workspace)
workspace = Workspace(
agent.config.workspace_path, agent.config.restrict_to_workspace
)

path = workspace.get_path(filename)
if not path.is_file():

@@ -116,7 +118,7 @@ def execute_python_file(filename: str, config: Config) -> str:
image_name,
["python", str(path.relative_to(workspace.root))],
volumes={
config.workspace_path: {
agent.config.workspace_path: {
"bind": "/workspace",
"mode": "ro",
}

@@ -175,7 +177,7 @@ def validate_command(command: str, config: Config) -> bool:
" shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
"in your config file: .env - do not attempt to bypass the restriction.",
)
def execute_shell(command_line: str, config: Config) -> str:
def execute_shell(command_line: str, agent: Agent) -> str:
"""Execute a shell command and return the output

Args:

@@ -184,14 +186,14 @@ def execute_shell(command_line: str, config: Config) -> str:
Returns:
str: The output of the command
"""
if not validate_command(command_line, config):
if not validate_command(command_line, agent.config):
logger.info(f"Command '{command_line}' not allowed")
return "Error: This Shell Command is not allowed."

current_dir = Path.cwd()
# Change dir into workspace if necessary
if not current_dir.is_relative_to(config.workspace_path):
os.chdir(config.workspace_path)
if not current_dir.is_relative_to(agent.config.workspace_path):
os.chdir(agent.config.workspace_path)

logger.info(
f"Executing command '{command_line}' in working directory '{os.getcwd()}'"

@@ -215,7 +217,7 @@ def execute_shell(command_line: str, config: Config) -> str:
" shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
"in your config. Do not attempt to bypass the restriction.",
)
def execute_shell_popen(command_line, config: Config) -> str:
def execute_shell_popen(command_line, agent: Agent) -> str:
"""Execute a shell command with Popen and returns an english description
of the event and the process id

@@ -225,14 +227,14 @@ def execute_shell_popen(command_line, config: Config) -> str:
Returns:
str: Description of the fact that the process started and its id
"""
if not validate_command(command_line, config):
if not validate_command(command_line, agent.config):
logger.info(f"Command '{command_line}' not allowed")
return "Error: This Shell Command is not allowed."

current_dir = os.getcwd()
# Change dir into workspace if necessary
if config.workspace_path not in current_dir:
os.chdir(config.workspace_path)
if agent.config.workspace_path not in current_dir:
os.chdir(agent.config.workspace_path)

logger.info(
f"Executing command '{command_line}' in working directory '{os.getcwd()}'"
@@ -5,12 +5,14 @@ import hashlib
|
||||
import os
|
||||
import os.path
|
||||
import re
|
||||
from typing import TYPE_CHECKING, Generator, Literal
|
||||
from typing import Generator, Literal
|
||||
|
||||
import requests
|
||||
from colorama import Back, Fore
|
||||
from confection import Config
|
||||
from requests.adapters import HTTPAdapter, Retry
|
||||
|
||||
from autogpt.agent.agent import Agent
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.commands.file_operations_utils import read_textual_file
|
||||
from autogpt.logs import logger
|
||||
@@ -18,10 +20,6 @@ from autogpt.memory.vector import MemoryItem, VectorMemory
|
||||
from autogpt.spinner import Spinner
|
||||
from autogpt.utils import readable_file_size
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
|
||||
Operation = Literal["write", "append", "delete"]
|
||||
|
||||
|
||||
@@ -103,7 +101,7 @@ def is_duplicate_operation(
|
||||
|
||||
|
||||
def log_operation(
|
||||
operation: str, filename: str, config: Config, checksum: str | None = None
|
||||
operation: str, filename: str, agent: Agent, checksum: str | None = None
|
||||
) -> None:
|
||||
"""Log the file operation to the file_logger.txt
|
||||
|
||||
@@ -116,7 +114,9 @@ def log_operation(
|
||||
if checksum is not None:
|
||||
log_entry += f" #{checksum}"
|
||||
logger.debug(f"Logging file operation: {log_entry}")
|
||||
append_to_file(config.file_logger_path, f"{log_entry}\n", config, should_log=False)
|
||||
append_to_file(
|
||||
agent.config.file_logger_path, f"{log_entry}\n", agent, should_log=False
|
||||
)
|
||||
|
||||
|
||||
def split_file(
|
||||
@@ -152,7 +152,7 @@ def split_file(
|
||||
|
||||
|
||||
@command("read_file", "Read a file", '"filename": "<filename>"')
|
||||
def read_file(filename: str, config: Config) -> str:
|
||||
def read_file(filename: str, agent: Agent) -> str:
|
||||
"""Read a file and return the contents
|
||||
|
||||
Args:
|
||||
@@ -201,7 +201,7 @@ def ingest_file(
|
||||
|
||||
|
||||
@command("write_to_file", "Write to file", '"filename": "<filename>", "text": "<text>"')
|
||||
def write_to_file(filename: str, text: str, config: Config) -> str:
|
||||
def write_to_file(filename: str, text: str, agent: Agent) -> str:
|
||||
"""Write text to a file
|
||||
|
||||
Args:
|
||||
@@ -212,14 +212,14 @@ def write_to_file(filename: str, text: str, config: Config) -> str:
|
||||
str: A message indicating success or failure
|
||||
"""
|
||||
checksum = text_checksum(text)
|
||||
if is_duplicate_operation("write", filename, config, checksum):
|
||||
if is_duplicate_operation("write", filename, agent.config, checksum):
|
||||
return "Error: File has already been updated."
|
||||
try:
|
||||
directory = os.path.dirname(filename)
|
||||
os.makedirs(directory, exist_ok=True)
|
||||
with open(filename, "w", encoding="utf-8") as f:
|
||||
f.write(text)
|
||||
log_operation("write", filename, config, checksum)
|
||||
log_operation("write", filename, agent, checksum)
|
||||
return "File written to successfully."
|
||||
except Exception as err:
|
||||
return f"Error: {err}"
|
||||
@@ -233,7 +233,7 @@ def write_to_file(filename: str, text: str, config: Config) -> str:
|
||||
'"occurrence_index": "<occurrence_index>"',
|
||||
)
|
||||
def replace_in_file(
|
||||
filename: str, old_text: str, new_text: str, config: Config, occurrence_index=None
|
||||
filename: str, old_text: str, new_text: str, agent: Agent, occurrence_index=None
|
||||
):
|
||||
"""Update a file by replacing one or all occurrences of old_text with new_text using Python's built-in string
|
||||
manipulation and regular expression modules for cross-platform file editing similar to sed and awk.
|
||||
@@ -280,7 +280,7 @@ def replace_in_file(
|
||||
|
||||
with open(filename, "r", encoding="utf-8") as f:
|
||||
checksum = text_checksum(f.read())
|
||||
log_operation("update", filename, config, checksum=checksum)
|
||||
log_operation("update", filename, agent, checksum=checksum)
|
||||
|
||||
return f"File {filename} updated successfully."
|
||||
except Exception as e:
|
||||
@@ -291,7 +291,7 @@ def replace_in_file(
|
||||
"append_to_file", "Append to file", '"filename": "<filename>", "text": "<text>"'
|
||||
)
|
||||
def append_to_file(
|
||||
filename: str, text: str, config: Config, should_log: bool = True
|
||||
filename: str, text: str, agent: Agent, should_log: bool = True
|
||||
) -> str:
|
||||
"""Append text to a file
|
||||
|
||||
@@ -312,7 +312,7 @@ def append_to_file(
|
||||
if should_log:
|
||||
with open(filename, "r", encoding="utf-8") as f:
|
||||
checksum = text_checksum(f.read())
|
||||
log_operation("append", filename, config, checksum=checksum)
|
||||
log_operation("append", filename, agent, checksum=checksum)
|
||||
|
||||
return "Text appended successfully."
|
||||
except Exception as err:
|
||||
@@ -320,7 +320,7 @@ def append_to_file(
|
||||
|
||||
|
||||
@command("delete_file", "Delete file", '"filename": "<filename>"')
|
||||
def delete_file(filename: str, config: Config) -> str:
|
||||
def delete_file(filename: str, agent: Agent) -> str:
|
||||
"""Delete a file
|
||||
|
||||
Args:
|
||||
@@ -329,18 +329,18 @@ def delete_file(filename: str, config: Config) -> str:
|
||||
Returns:
|
||||
str: A message indicating success or failure
|
||||
"""
|
||||
if is_duplicate_operation("delete", filename, config):
|
||||
if is_duplicate_operation("delete", filename, agent.config):
|
||||
return "Error: File has already been deleted."
|
||||
try:
|
||||
os.remove(filename)
|
||||
log_operation("delete", filename, config)
|
||||
log_operation("delete", filename, agent)
|
||||
return "File deleted successfully."
|
||||
except Exception as err:
|
||||
return f"Error: {err}"
|
||||
|
||||
|
||||
@command("list_files", "List Files in Directory", '"directory": "<directory>"')
|
||||
def list_files(directory: str, config: Config) -> list[str]:
|
||||
def list_files(directory: str, agent: Agent) -> list[str]:
|
||||
"""lists files in a directory recursively
|
||||
|
||||
Args:
|
||||
@@ -356,7 +356,7 @@ def list_files(directory: str, config: Config) -> list[str]:
|
||||
if file.startswith("."):
|
||||
continue
|
||||
relative_path = os.path.relpath(
|
||||
os.path.join(root, file), config.workspace_path
|
||||
os.path.join(root, file), agent.config.workspace_path
|
||||
)
|
||||
found_files.append(relative_path)
|
||||
|
||||
@@ -370,7 +370,7 @@ def list_files(directory: str, config: Config) -> list[str]:
|
||||
lambda config: config.allow_downloads,
|
||||
"Error: You do not have user authorization to download files locally.",
|
||||
)
|
||||
def download_file(url, filename, config: Config):
|
||||
def download_file(url, filename, agent: Agent):
|
||||
"""Downloads a file
|
||||
Args:
|
||||
url (str): URL of the file to download
|
||||
@@ -380,7 +380,7 @@ def download_file(url, filename, config: Config):
|
||||
directory = os.path.dirname(filename)
|
||||
os.makedirs(directory, exist_ok=True)
|
||||
message = f"{Fore.YELLOW}Downloading file from {Back.LIGHTBLUE_EX}{url}{Back.RESET}{Fore.RESET}"
|
||||
with Spinner(message, plain_output=config.plain_output) as spinner:
|
||||
with Spinner(message, plain_output=agent.config.plain_output) as spinner:
|
||||
session = requests.Session()
|
||||
retry = Retry(total=3, backoff_factor=1, status_forcelist=[502, 503, 504])
|
||||
adapter = HTTPAdapter(max_retries=retry)
|
||||
|
||||
@@ -1,15 +1,11 @@
|
||||
"""Git operations for autogpt"""
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from git.repo import Repo
|
||||
|
||||
from autogpt.agent.agent import Agent
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.config import Config
|
||||
from autogpt.url_utils.validators import validate_url
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
|
||||
@command(
|
||||
"clone_repository",
|
||||
@@ -19,7 +15,7 @@ if TYPE_CHECKING:
|
||||
"Configure github_username and github_api_key.",
|
||||
)
|
||||
@validate_url
|
||||
def clone_repository(url: str, clone_path: str, config: Config) -> str:
|
||||
def clone_repository(url: str, clone_path: str, agent: Agent) -> str:
|
||||
"""Clone a GitHub repository locally.
|
||||
|
||||
Args:
|
||||
@@ -30,9 +26,11 @@ def clone_repository(url: str, clone_path: str, config: Config) -> str:
|
||||
str: The result of the clone operation.
|
||||
"""
|
||||
split_url = url.split("//")
|
||||
auth_repo_url = f"//{config.github_username}:{config.github_api_key}@".join(
|
||||
auth_repo_url = (
|
||||
f"//{agent.config.github_username}:{agent.config.github_api_key}@".join(
|
||||
split_url
|
||||
)
|
||||
)
|
||||
try:
|
||||
Repo.clone_from(url=auth_repo_url, to_path=clone_path)
|
||||
return f"""Cloned {url} to {clone_path}"""
|
||||
|
||||
@@ -4,15 +4,12 @@ from __future__ import annotations
|
||||
import json
|
||||
import time
|
||||
from itertools import islice
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from duckduckgo_search import DDGS
|
||||
|
||||
from autogpt.agent.agent import Agent
|
||||
from autogpt.commands.command import command
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
DUCKDUCKGO_MAX_ATTEMPTS = 3
|
||||
|
||||
|
||||
@@ -22,7 +19,7 @@ DUCKDUCKGO_MAX_ATTEMPTS = 3
|
||||
'"query": "<query>"',
|
||||
lambda config: not config.google_api_key,
|
||||
)
|
||||
def google_search(query: str, config: Config, num_results: int = 8) -> str:
|
||||
def google_search(query: str, agent: Agent, num_results: int = 8) -> str:
|
||||
"""Return the results of a Google search
|
||||
|
||||
Args:
|
||||
@@ -61,7 +58,7 @@ def google_search(query: str, config: Config, num_results: int = 8) -> str:
|
||||
"Configure google_api_key and custom_search_engine_id.",
|
||||
)
|
||||
def google_official_search(
|
||||
query: str, config: Config, num_results: int = 8
|
||||
query: str, agent: Agent, num_results: int = 8
|
||||
) -> str | list[str]:
|
||||
"""Return the results of a Google search using the official Google API
|
||||
|
||||
@@ -78,8 +75,8 @@ def google_official_search(
|
||||
|
||||
try:
|
||||
# Get the Google API key and Custom Search Engine ID from the config file
|
||||
api_key = config.google_api_key
|
||||
custom_search_engine_id = config.google_custom_search_engine_id
|
||||
api_key = agent.config.google_api_key
|
||||
custom_search_engine_id = agent.config.google_custom_search_engine_id
|
||||
|
||||
# Initialize the Custom Search API service
|
||||
service = build("customsearch", "v1", developerKey=api_key)
|
||||
|
||||
@@ -4,19 +4,15 @@ import json
|
||||
import time
|
||||
import uuid
|
||||
from base64 import b64decode
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import openai
|
||||
import requests
|
||||
from PIL import Image
|
||||
|
||||
from autogpt.agent.agent import Agent
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.config import Config
|
||||
from autogpt.logs import logger
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
|
||||
@command(
|
||||
"generate_image",
|
||||
@@ -25,7 +21,7 @@ if TYPE_CHECKING:
|
||||
lambda config: config.image_provider,
|
||||
"Requires a image provider to be set.",
|
||||
)
|
||||
def generate_image(prompt: str, config: Config, size: int = 256) -> str:
|
||||
def generate_image(prompt: str, agent: Agent, size: int = 256) -> str:
|
||||
"""Generate an image from a prompt.
|
||||
|
||||
Args:
|
||||
@@ -35,21 +31,21 @@ def generate_image(prompt: str, config: Config, size: int = 256) -> str:
|
||||
Returns:
|
||||
str: The filename of the image
|
||||
"""
|
||||
filename = f"{config.workspace_path}/{str(uuid.uuid4())}.jpg"
|
||||
filename = f"{agent.config.workspace_path}/{str(uuid.uuid4())}.jpg"
|
||||
|
||||
# DALL-E
|
||||
if config.image_provider == "dalle":
|
||||
return generate_image_with_dalle(prompt, filename, size, config)
|
||||
if agent.config.image_provider == "dalle":
|
||||
return generate_image_with_dalle(prompt, filename, size, agent)
|
||||
# HuggingFace
|
||||
elif config.image_provider == "huggingface":
|
||||
return generate_image_with_hf(prompt, filename, config)
|
||||
elif agent.config.image_provider == "huggingface":
|
||||
return generate_image_with_hf(prompt, filename, agent)
|
||||
# SD WebUI
|
||||
elif config.image_provider == "sdwebui":
|
||||
return generate_image_with_sd_webui(prompt, filename, config, size)
|
||||
elif agent.config.image_provider == "sdwebui":
|
||||
return generate_image_with_sd_webui(prompt, filename, agent, size)
|
||||
return "No Image Provider Set"
|
||||
|
||||
|
||||
def generate_image_with_hf(prompt: str, filename: str, config: Config) -> str:
|
||||
def generate_image_with_hf(prompt: str, filename: str, agent: Agent) -> str:
|
||||
"""Generate an image with HuggingFace's API.
|
||||
|
||||
Args:
|
||||
@@ -59,15 +55,13 @@ def generate_image_with_hf(prompt: str, filename: str, config: Config) -> str:
|
||||
Returns:
|
||||
str: The filename of the image
|
||||
"""
|
||||
API_URL = (
|
||||
f"https://api-inference.huggingface.co/models/{config.huggingface_image_model}"
|
||||
)
|
||||
if config.huggingface_api_token is None:
|
||||
API_URL = f"https://api-inference.huggingface.co/models/{agent.config.huggingface_image_model}"
|
||||
if agent.config.huggingface_api_token is None:
|
||||
raise ValueError(
|
||||
"You need to set your Hugging Face API token in the config file."
|
||||
)
|
||||
headers = {
|
||||
"Authorization": f"Bearer {config.huggingface_api_token}",
|
||||
"Authorization": f"Bearer {agent.config.huggingface_api_token}",
|
||||
"X-Use-Cache": "false",
|
||||
}
|
||||
|
||||
@@ -110,7 +104,7 @@ def generate_image_with_hf(prompt: str, filename: str, config: Config) -> str:
|
||||
|
||||
|
||||
def generate_image_with_dalle(
|
||||
prompt: str, filename: str, size: int, config: Config
|
||||
prompt: str, filename: str, size: int, agent: Agent
|
||||
) -> str:
|
||||
"""Generate an image with DALL-E.
|
||||
|
||||
@@ -136,7 +130,7 @@ def generate_image_with_dalle(
|
||||
n=1,
|
||||
size=f"{size}x{size}",
|
||||
response_format="b64_json",
|
||||
api_key=config.openai_api_key,
|
||||
api_key=agent.config.openai_api_key,
|
||||
)
|
||||
|
||||
logger.info(f"Image Generated for prompt:{prompt}")
|
||||
@@ -152,7 +146,7 @@ def generate_image_with_dalle(
|
||||
def generate_image_with_sd_webui(
|
||||
prompt: str,
|
||||
filename: str,
|
||||
config: Config,
|
||||
agent: Agent,
|
||||
size: int = 512,
|
||||
negative_prompt: str = "",
|
||||
extra: dict = {},
|
||||
@@ -169,13 +163,13 @@ def generate_image_with_sd_webui(
|
||||
"""
|
||||
# Create a session and set the basic auth if needed
|
||||
s = requests.Session()
|
||||
if config.sd_webui_auth:
|
||||
username, password = config.sd_webui_auth.split(":")
|
||||
if agent.config.sd_webui_auth:
|
||||
username, password = agent.config.sd_webui_auth.split(":")
|
||||
s.auth = (username, password or "")
|
||||
|
||||
# Generate the images
|
||||
response = requests.post(
|
||||
f"{config.sd_webui_url}/sdapi/v1/txt2img",
|
||||
f"{agent.config.sd_webui_url}/sdapi/v1/txt2img",
|
||||
json={
|
||||
"prompt": prompt,
|
||||
"negative_prompt": negative_prompt,
|
||||
|
||||
@@ -1,21 +1,18 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from autogpt.agent.agent import Agent
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.llm.utils import call_ai_function
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
|
||||
@command(
|
||||
"improve_code",
|
||||
"Get Improved Code",
|
||||
'"suggestions": "<list_of_suggestions>", "code": "<full_code_string>"',
|
||||
)
|
||||
def improve_code(suggestions: list[str], code: str, config: Config) -> str:
|
||||
def improve_code(suggestions: list[str], code: str, agent: Agent) -> str:
|
||||
"""
|
||||
A function that takes in code and suggestions and returns a response from create
|
||||
chat completion api call.
|
||||
@@ -36,4 +33,6 @@ def improve_code(suggestions: list[str], code: str, config: Config) -> str:
|
||||
" provided, making no other changes."
|
||||
)
|
||||
|
||||
return call_ai_function(function_string, args, description_string, config=config)
|
||||
return call_ai_function(
|
||||
function_string, args, description_string, config=agent.config
|
||||
)
|
||||
|
||||
@@ -1,21 +1,19 @@
|
||||
"""Task Statuses module."""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, NoReturn
|
||||
from typing import NoReturn
|
||||
|
||||
from autogpt.agent.agent import Agent
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.logs import logger
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
|
||||
@command(
|
||||
"task_complete",
|
||||
"Task Complete (Shutdown)",
|
||||
'"reason": "<reason>"',
|
||||
)
|
||||
def task_complete(reason: str, config: Config) -> NoReturn:
|
||||
def task_complete(reason: str, agent: Agent) -> NoReturn:
|
||||
"""
|
||||
A function that takes in a string and exits the program
|
||||
|
||||
|
||||
@@ -1,20 +1,24 @@
|
||||
"""Browse a webpage and summarize it using the LLM model"""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import requests
|
||||
from bs4 import BeautifulSoup
|
||||
from requests import Response
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
|
||||
from autogpt.url_utils.validators import validate_url
|
||||
|
||||
session = requests.Session()
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.agent.agent import Agent
|
||||
|
||||
|
||||
@validate_url
|
||||
def get_response(
|
||||
url: str, config: Config, timeout: int = 10
|
||||
url: str, agent: Agent, timeout: int = 10
|
||||
) -> tuple[None, str] | tuple[Response, None]:
|
||||
"""Get the response from a URL
|
||||
|
||||
@@ -30,7 +34,7 @@ def get_response(
|
||||
requests.exceptions.RequestException: If the HTTP request fails
|
||||
"""
|
||||
try:
|
||||
session.headers.update({"User-Agent": config.user_agent})
|
||||
session.headers.update({"User-Agent": agent.config.user_agent})
|
||||
response = session.get(url, timeout=timeout)
|
||||
|
||||
# Check if the response contains an HTTP error
|
||||
@@ -48,7 +52,7 @@ def get_response(
|
||||
return None, f"Error: {str(re)}"
|
||||
|
||||
|
||||
def scrape_text(url: str, config: Config) -> str:
|
||||
def scrape_text(url: str, agent: Agent) -> str:
|
||||
"""Scrape text from a webpage
|
||||
|
||||
Args:
|
||||
@@ -57,7 +61,7 @@ def scrape_text(url: str, config: Config) -> str:
|
||||
Returns:
|
||||
str: The scraped text
|
||||
"""
|
||||
response, error_message = get_response(url, config)
|
||||
response, error_message = get_response(url, agent)
|
||||
if error_message:
|
||||
return error_message
|
||||
if not response:
|
||||
@@ -76,7 +80,7 @@ def scrape_text(url: str, config: Config) -> str:
|
||||
return text
|
||||
|
||||
|
||||
def scrape_links(url: str, config: Config) -> str | list[str]:
|
||||
def scrape_links(url: str, agent: Agent) -> str | list[str]:
|
||||
"""Scrape links from a webpage
|
||||
|
||||
Args:
|
||||
@@ -85,7 +89,7 @@ def scrape_links(url: str, config: Config) -> str | list[str]:
|
||||
Returns:
|
||||
str | list[str]: The scraped links
|
||||
"""
|
||||
response, error_message = get_response(url, config)
|
||||
response, error_message = get_response(url, agent)
|
||||
if error_message:
|
||||
return error_message
|
||||
if not response:
|
||||
|
||||
@@ -4,7 +4,7 @@ from __future__ import annotations
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from sys import platform
|
||||
from typing import TYPE_CHECKING, Optional, Type
|
||||
from typing import Optional, Type
|
||||
|
||||
from bs4 import BeautifulSoup
|
||||
from selenium.common.exceptions import WebDriverException
|
||||
@@ -27,15 +27,13 @@ from webdriver_manager.chrome import ChromeDriverManager
|
||||
from webdriver_manager.firefox import GeckoDriverManager
|
||||
from webdriver_manager.microsoft import EdgeChromiumDriverManager as EdgeDriverManager
|
||||
|
||||
from autogpt.agent.agent import Agent
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory.vector import MemoryItem, get_memory
|
||||
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
|
||||
from autogpt.url_utils.validators import validate_url
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
BrowserOptions = ChromeOptions | EdgeOptions | FirefoxOptions | SafariOptions
|
||||
|
||||
FILE_DIR = Path(__file__).parent.parent
|
||||
@@ -47,7 +45,7 @@ FILE_DIR = Path(__file__).parent.parent
|
||||
'"url": "<url>", "question": "<what_you_want_to_find_on_website>"',
|
||||
)
|
||||
@validate_url
|
||||
def browse_website(url: str, question: str, config: Config) -> str:
|
||||
def browse_website(url: str, question: str, agent: Agent) -> str:
|
||||
"""Browse a website and return the answer and links to the user
|
||||
|
||||
Args:
|
||||
@@ -58,7 +56,7 @@ def browse_website(url: str, question: str, config: Config) -> str:
|
||||
Tuple[str, WebDriver]: The answer and links to the user and the webdriver
|
||||
"""
|
||||
try:
|
||||
driver, text = scrape_text_with_selenium(url, config)
|
||||
driver, text = scrape_text_with_selenium(url, agent)
|
||||
except WebDriverException as e:
|
||||
# These errors are often quite long and include lots of context.
|
||||
# Just grab the first line.
|
||||
@@ -66,7 +64,7 @@ def browse_website(url: str, question: str, config: Config) -> str:
|
||||
return f"Error: {msg}"
|
||||
|
||||
add_header(driver)
|
||||
summary = summarize_memorize_webpage(url, text, question, config, driver)
|
||||
summary = summarize_memorize_webpage(url, text, question, agent, driver)
|
||||
links = scrape_links_with_selenium(driver, url)
|
||||
|
||||
# Limit links to 5
|
||||
@@ -76,7 +74,7 @@ def browse_website(url: str, question: str, config: Config) -> str:
|
||||
return f"Answer gathered from website: {summary}\n\nLinks: {links}"
|
||||
|
||||
|
||||
def scrape_text_with_selenium(url: str, config: Config) -> tuple[WebDriver, str]:
|
||||
def scrape_text_with_selenium(url: str, agent: Agent) -> tuple[WebDriver, str]:
|
||||
"""Scrape text from a website using selenium
|
||||
|
||||
Args:
|
||||
@@ -94,23 +92,23 @@ def scrape_text_with_selenium(url: str, config: Config) -> tuple[WebDriver, str]
|
||||
"safari": SafariOptions,
|
||||
}
|
||||
|
||||
options: BrowserOptions = options_available[config.selenium_web_browser]()
|
||||
options: BrowserOptions = options_available[agent.config.selenium_web_browser]()
|
||||
options.add_argument(
|
||||
"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36"
|
||||
)
|
||||
|
||||
if config.selenium_web_browser == "firefox":
|
||||
if config.selenium_headless:
|
||||
if agent.config.selenium_web_browser == "firefox":
|
||||
if agent.config.selenium_headless:
|
||||
options.headless = True
|
||||
options.add_argument("--disable-gpu")
|
||||
driver = FirefoxDriver(
|
||||
service=GeckoDriverService(GeckoDriverManager().install()), options=options
|
||||
)
|
||||
elif config.selenium_web_browser == "edge":
|
||||
elif agent.config.selenium_web_browser == "edge":
|
||||
driver = EdgeDriver(
|
||||
service=EdgeDriverService(EdgeDriverManager().install()), options=options
|
||||
)
|
||||
elif config.selenium_web_browser == "safari":
|
||||
elif agent.config.selenium_web_browser == "safari":
|
||||
# Requires a bit more setup on the users end
|
||||
# See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari
|
||||
driver = SafariDriver(options=options)
|
||||
@@ -120,7 +118,7 @@ def scrape_text_with_selenium(url: str, config: Config) -> tuple[WebDriver, str]
|
||||
options.add_argument("--remote-debugging-port=9222")
|
||||
|
||||
options.add_argument("--no-sandbox")
|
||||
if config.selenium_headless:
|
||||
if agent.config.selenium_headless:
|
||||
options.add_argument("--headless=new")
|
||||
options.add_argument("--disable-gpu")
|
||||
|
||||
@@ -205,7 +203,7 @@ def summarize_memorize_webpage(
|
||||
url: str,
|
||||
text: str,
|
||||
question: str,
|
||||
config: Config,
|
||||
agent: Agent,
|
||||
driver: Optional[WebDriver] = None,
|
||||
) -> str:
|
||||
"""Summarize text using the OpenAI API
|
||||
@@ -225,7 +223,7 @@ def summarize_memorize_webpage(
|
||||
text_length = len(text)
|
||||
logger.info(f"Text length: {text_length} characters")
|
||||
|
||||
memory = get_memory(config)
|
||||
memory = get_memory(agent.config)
|
||||
|
||||
new_memory = MemoryItem.from_webpage(text, url, question=question)
|
||||
memory.add(new_memory)
|
||||
|
||||
@@ -2,21 +2,18 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from autogpt.agent.agent import Agent
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.llm.utils import call_ai_function
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
|
||||
@command(
|
||||
"write_tests",
|
||||
"Write Tests",
|
||||
'"code": "<full_code_string>", "focus": "<list_of_focus_areas>"',
|
||||
)
|
||||
def write_tests(code: str, focus: list[str], config: Config) -> str:
|
||||
def write_tests(code: str, focus: list[str], agent: Agent) -> str:
|
||||
"""
|
||||
A function that takes in code and focus topics and returns a response from create
|
||||
chat completion api call.
|
||||
@@ -38,4 +35,6 @@ def write_tests(code: str, focus: list[str], config: Config) -> str:
|
||||
" specific areas if required."
|
||||
)
|
||||
|
||||
return call_ai_function(function_string, args, description_string, config=config)
|
||||
return call_ai_function(
|
||||
function_string, args, description_string, config=agent.config
|
||||
)
|
||||
|
||||
@@ -181,7 +181,7 @@ def chat_with_ai(
|
||||
logger.debug("")
|
||||
logger.debug("----------- END OF CONTEXT ----------------")
|
||||
agent.log_cycle_handler.log_cycle(
|
||||
agent.ai_config.ai_name,
|
||||
agent.ai_name,
|
||||
agent.created_at,
|
||||
agent.cycle_count,
|
||||
message_sequence.raw(),
|
||||
|
||||
@@ -184,7 +184,7 @@ Latest Development:
|
||||
|
||||
prompt = ChatSequence.for_model(cfg.fast_llm_model, [Message("user", prompt)])
|
||||
self.agent.log_cycle_handler.log_cycle(
|
||||
self.agent.ai_config.ai_name,
|
||||
self.agent.ai_name,
|
||||
self.agent.created_at,
|
||||
self.agent.cycle_count,
|
||||
prompt.raw(),
|
||||
@@ -194,7 +194,7 @@ Latest Development:
|
||||
self.summary = create_chat_completion(prompt)
|
||||
|
||||
self.agent.log_cycle_handler.log_cycle(
|
||||
self.agent.ai_config.ai_name,
|
||||
self.agent.ai_name,
|
||||
self.agent.created_at,
|
||||
self.agent.cycle_count,
|
||||
self.summary,
|
||||
|
||||
@@ -4,7 +4,6 @@ import pytest
|
||||
|
||||
from autogpt.agent import Agent
|
||||
from autogpt.commands.file_operations import read_file
|
||||
from autogpt.config import Config
|
||||
from tests.challenges.challenge_decorator.challenge_decorator import challenge
|
||||
from tests.challenges.utils import get_workspace_path, run_interaction_loop
|
||||
from tests.utils import requires_api_key
|
||||
@@ -23,7 +22,6 @@ def test_write_file(
|
||||
file_system_agents: List[Agent],
|
||||
patched_api_requestor: None,
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
config: Config,
|
||||
level_to_run: int,
|
||||
) -> None:
|
||||
file_system_agent = file_system_agents[level_to_run - 1]
|
||||
@@ -35,7 +33,7 @@ def test_write_file(
|
||||
|
||||
for file_name, expected_lines in expected_outputs.items():
|
||||
file_path = get_workspace_path(file_system_agent, file_name)
|
||||
content = read_file(file_path, config)
|
||||
content = read_file(file_path, file_system_agent)
|
||||
for expected_line in expected_lines:
|
||||
assert (
|
||||
expected_line in content
|
||||
|
||||
@@ -5,7 +5,6 @@ from pytest_mock import MockerFixture
|
||||
|
||||
from autogpt.agent import Agent
|
||||
from autogpt.commands.execute_code import execute_python_file
|
||||
from autogpt.config import Config
|
||||
from tests.challenges.challenge_decorator.challenge_decorator import challenge
|
||||
from tests.challenges.utils import (
|
||||
copy_file_into_workspace,
|
||||
@@ -28,7 +27,6 @@ def test_debug_code_challenge_a(
|
||||
debug_code_agents: Agent,
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
patched_api_requestor: MockerFixture,
|
||||
config: Config,
|
||||
level_to_run: int,
|
||||
) -> None:
|
||||
"""
|
||||
@@ -37,7 +35,6 @@ def test_debug_code_challenge_a(
|
||||
:param debug_code_agent: The agent to test.
|
||||
:param monkeypatch: pytest's monkeypatch utility for modifying builtins.
|
||||
:patched_api_requestor: Sends api requests to our API CI pipeline
|
||||
:config: The config object for the agent.
|
||||
:level_to_run: The level to run.
|
||||
"""
|
||||
debug_code_agent = debug_code_agents[level_to_run - 1]
|
||||
@@ -48,7 +45,7 @@ def test_debug_code_challenge_a(
|
||||
run_interaction_loop(monkeypatch, debug_code_agent, CYCLE_COUNT)
|
||||
|
||||
output = execute_python_file(
|
||||
get_workspace_path(debug_code_agent, TEST_FILE_PATH), config
|
||||
get_workspace_path(debug_code_agent, TEST_FILE_PATH), debug_code_agent
|
||||
)
|
||||
|
||||
assert "error" not in output.lower(), f"Errors found in output: {output}!"
|
||||
|
||||
@@ -2,7 +2,6 @@ import pytest
|
||||
from pytest_mock import MockerFixture
|
||||
|
||||
from autogpt.commands.file_operations import read_file
|
||||
from autogpt.config import Config
|
||||
from tests.challenges.challenge_decorator.challenge_decorator import challenge
|
||||
from tests.challenges.utils import get_workspace_path, run_interaction_loop
|
||||
from tests.utils import requires_api_key
|
||||
@@ -21,7 +20,6 @@ def test_information_retrieval_challenge_a(
|
||||
information_retrieval_agents: Agent,
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
patched_api_requestor: MockerFixture,
|
||||
config: Config,
|
||||
level_to_run: int,
|
||||
) -> None:
|
||||
"""
|
||||
@@ -34,7 +32,7 @@ def test_information_retrieval_challenge_a(
|
||||
run_interaction_loop(monkeypatch, information_retrieval_agent, CYCLE_COUNT)
|
||||
|
||||
file_path = get_workspace_path(information_retrieval_agent, OUTPUT_LOCATION)
|
||||
content = read_file(file_path, config)
|
||||
content = read_file(file_path, information_retrieval_agent)
|
||||
expected_revenues = EXPECTED_REVENUES[level_to_run - 1]
|
||||
for revenue in expected_revenues:
|
||||
assert (
|
||||
|
||||
@@ -5,7 +5,6 @@ from pytest_mock import MockerFixture
|
||||
|
||||
from autogpt.agent import Agent
|
||||
from autogpt.commands.file_operations import read_file
|
||||
from autogpt.config import Config
|
||||
from tests.challenges.challenge_decorator.challenge_decorator import challenge
|
||||
from tests.challenges.utils import get_workspace_path, run_interaction_loop
|
||||
from tests.utils import requires_api_key
|
||||
@@ -22,7 +21,6 @@ def test_information_retrieval_challenge_b(
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
patched_api_requestor: MockerFixture,
|
||||
level_to_run: int,
|
||||
config: Config,
|
||||
) -> None:
|
||||
"""
|
||||
Test the challenge_b function in a given agent by mocking user inputs and checking the output file content.
|
||||
@@ -31,14 +29,13 @@ def test_information_retrieval_challenge_b(
|
||||
:param monkeypatch: pytest's monkeypatch utility for modifying builtins.
|
||||
:param patched_api_requestor: APIRequestor Patch to override the openai.api_requestor module for testing.
|
||||
:param level_to_run: The level to run.
|
||||
:param config: The config object.
|
||||
"""
|
||||
|
||||
with contextlib.suppress(SystemExit):
|
||||
run_interaction_loop(monkeypatch, get_nobel_prize_agent, CYCLE_COUNT)
|
||||
file_path = get_workspace_path(get_nobel_prize_agent, OUTPUT_LOCATION)
|
||||
|
||||
content = read_file(file_path, config)
|
||||
content = read_file(file_path, get_nobel_prize_agent)
|
||||
assert "Andre Geim" in content, "Expected the file to contain Andre Geim"
|
||||
assert (
|
||||
"Konstantin Novoselov" in content
|
||||
|
||||
@@ -4,7 +4,6 @@ from pytest_mock import MockerFixture
|
||||
|
||||
from autogpt.agent import Agent
|
||||
from autogpt.commands.file_operations import read_file
|
||||
from autogpt.config import Config
|
||||
from tests.challenges.challenge_decorator.challenge_decorator import challenge
|
||||
from tests.challenges.utils import get_workspace_path, run_interaction_loop
|
||||
from tests.utils import requires_api_key
|
||||
@@ -20,7 +19,6 @@ def test_kubernetes_template_challenge_a(
|
||||
kubernetes_agent: Agent,
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
patched_api_requestor: MockerFixture,
|
||||
config: Config,
|
||||
level_to_run: int,
|
||||
) -> None:
|
||||
"""
|
||||
@@ -30,13 +28,12 @@ def test_kubernetes_template_challenge_a(
|
||||
Args:
|
||||
kubernetes_agent (Agent)
|
||||
monkeypatch (pytest.MonkeyPatch)
|
||||
config (Config)
|
||||
level_to_run (int)
|
||||
"""
|
||||
run_interaction_loop(monkeypatch, kubernetes_agent, CYCLE_COUNT)
|
||||
|
||||
file_path = get_workspace_path(kubernetes_agent, OUTPUT_LOCATION)
|
||||
content = read_file(file_path, config)
|
||||
content = read_file(file_path, kubernetes_agent)
|
||||
|
||||
for word in ["apiVersion", "kind", "metadata", "spec"]:
|
||||
assert word in content, f"Expected the file to contain {word}"
|
||||
|
||||
@@ -3,7 +3,6 @@ from pytest_mock import MockerFixture
|
||||
|
||||
from autogpt.agent import Agent
|
||||
from autogpt.commands.file_operations import read_file, write_to_file
|
||||
from autogpt.config import Config
|
||||
from tests.challenges.challenge_decorator.challenge_decorator import challenge
|
||||
from tests.challenges.utils import get_workspace_path, run_interaction_loop
|
||||
from tests.utils import requires_api_key
|
||||
@@ -18,7 +17,6 @@ def test_memory_challenge_a(
|
||||
memory_management_agent: Agent,
|
||||
patched_api_requestor: MockerFixture,
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
config: Config,
|
||||
level_to_run: int,
|
||||
) -> None:
|
||||
"""
|
||||
@@ -28,17 +26,16 @@ def test_memory_challenge_a(
|
||||
memory_management_agent (Agent)
|
||||
patched_api_requestor (MockerFixture)
|
||||
monkeypatch (pytest.MonkeyPatch)
|
||||
config (Config)
|
||||
level_to_run (int)
|
||||
"""
|
||||
|
||||
task_id = "2314"
|
||||
create_instructions_files(memory_management_agent, level_to_run, task_id, config)
|
||||
create_instructions_files(memory_management_agent, level_to_run, task_id)
|
||||
|
||||
run_interaction_loop(monkeypatch, memory_management_agent, level_to_run + 2)
|
||||
|
||||
file_path = get_workspace_path(memory_management_agent, OUTPUT_LOCATION)
|
||||
content = read_file(file_path, config)
|
||||
content = read_file(file_path, memory_management_agent)
|
||||
assert task_id in content, f"Expected the file to contain {task_id}"
|
||||
|
||||
|
||||
@@ -46,7 +43,6 @@ def create_instructions_files(
|
||||
memory_management_agent: Agent,
|
||||
num_files: int,
|
||||
task_id: str,
|
||||
config: Config,
|
||||
base_filename: str = "instructions_",
|
||||
) -> None:
|
||||
"""
|
||||
@@ -61,7 +57,7 @@ def create_instructions_files(
|
||||
content = generate_content(i, task_id, base_filename, num_files)
|
||||
file_name = f"{base_filename}{i}.txt"
|
||||
file_path = get_workspace_path(memory_management_agent, file_name)
|
||||
write_to_file(file_path, content, config)
|
||||
write_to_file(file_path, content, memory_management_agent)
|
||||
|
||||
|
||||
def generate_content(
|
||||
|
||||
@@ -3,7 +3,6 @@ from pytest_mock import MockerFixture
|
||||
|
||||
from autogpt.agent import Agent
|
||||
from autogpt.commands.file_operations import read_file, write_to_file
|
||||
from autogpt.config import Config
|
||||
from tests.challenges.challenge_decorator.challenge_decorator import challenge
|
||||
from tests.challenges.utils import (
|
||||
generate_noise,
|
||||
@@ -23,7 +22,6 @@ def test_memory_challenge_b(
|
||||
memory_management_agent: Agent,
|
||||
patched_api_requestor: MockerFixture,
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
config: Config,
|
||||
level_to_run: int,
|
||||
) -> None:
|
||||
"""
|
||||
@@ -37,12 +35,12 @@ def test_memory_challenge_b(
|
||||
level_to_run (int)
|
||||
"""
|
||||
task_ids = [str(i * 1111) for i in range(1, level_to_run + 1)]
|
||||
create_instructions_files(memory_management_agent, level_to_run, task_ids, config)
|
||||
create_instructions_files(memory_management_agent, level_to_run, task_ids)
|
||||
|
||||
run_interaction_loop(monkeypatch, memory_management_agent, level_to_run + 2)
|
||||
|
||||
file_path = get_workspace_path(memory_management_agent, OUTPUT_LOCATION)
|
||||
content = read_file(file_path, config)
|
||||
content = read_file(file_path, memory_management_agent)
|
||||
for task_id in task_ids:
|
||||
assert task_id in content, f"Expected the file to contain {task_id}"

@@ -51,7 +49,6 @@ def create_instructions_files(
memory_management_agent: Agent,
level: int,
task_ids: list,
- config: Config,
base_filename: str = "instructions_",
) -> None:
"""

@@ -68,7 +65,7 @@ def create_instructions_files(
file_name = f"{base_filename}{i}.txt"
file_path = get_workspace_path(memory_management_agent, file_name)
- write_to_file(file_path, content, config)
+ write_to_file(file_path, content, memory_management_agent)

def generate_content(index: int, task_ids: list, base_filename: str, level: int) -> str:

@@ -3,7 +3,6 @@ from pytest_mock import MockerFixture
from autogpt.agent import Agent
from autogpt.commands.file_operations import read_file, write_to_file
- from autogpt.config import Config
from tests.challenges.challenge_decorator.challenge_decorator import challenge
from tests.challenges.utils import (
generate_noise,

@@ -24,7 +23,6 @@ def test_memory_challenge_c(
memory_management_agent: Agent,
patched_api_requestor: MockerFixture,
monkeypatch: pytest.MonkeyPatch,
- config: Config,
level_to_run: int,
) -> None:
"""

@@ -36,7 +34,6 @@ def test_memory_challenge_c(
memory_management_agent (Agent)
patched_api_requestor (MockerFixture)
monkeypatch (pytest.MonkeyPatch)
- config (Config)
level_to_run (int)
"""
silly_phrases = [

@@ -54,12 +51,14 @@ def test_memory_challenge_c(
level_silly_phrases = silly_phrases[:level_to_run]
create_instructions_files(
- memory_management_agent, level_to_run, level_silly_phrases, config=config
+ memory_management_agent,
+ level_to_run,
+ level_silly_phrases,
)
run_interaction_loop(monkeypatch, memory_management_agent, level_to_run + 2)
file_path = get_workspace_path(memory_management_agent, OUTPUT_LOCATION)
- content = read_file(file_path, config)
+ content = read_file(file_path, agent=memory_management_agent)
for phrase in level_silly_phrases:
assert phrase in content, f"Expected the file to contain {phrase}"

@@ -68,7 +67,6 @@ def create_instructions_files(
memory_management_agent: Agent,
level: int,
task_ids: list,
- config: Config,
base_filename: str = "instructions_",
) -> None:
"""

@@ -84,7 +82,7 @@ def create_instructions_files(
content = generate_content(i, task_ids, base_filename, level)
file_name = f"{base_filename}{i}.txt"
file_path = get_workspace_path(memory_management_agent, file_name)
- write_to_file(file_path, content, config)
+ write_to_file(file_path, content, memory_management_agent)

def generate_content(

@@ -6,7 +6,6 @@ from pytest_mock import MockerFixture
from autogpt.agent import Agent
from autogpt.commands.file_operations import read_file, write_to_file
- from autogpt.config import Config
from tests.challenges.challenge_decorator.challenge_decorator import challenge
from tests.challenges.utils import get_workspace_path, run_interaction_loop
from tests.utils import requires_api_key

@@ -23,7 +22,6 @@ def test_memory_challenge_d(
memory_management_agent: Agent,
patched_api_requestor: MockerFixture,
monkeypatch: pytest.MonkeyPatch,
- config: Config,
level_to_run: int,
) -> None:
"""

@@ -41,12 +39,12 @@ def test_memory_challenge_d(
]
level_sally_anne_test_phrases = sally_anne_test_phrases[:level_to_run]
create_instructions_files(
- memory_management_agent, level_to_run, level_sally_anne_test_phrases, config
+ memory_management_agent, level_to_run, level_sally_anne_test_phrases
)
run_interaction_loop(monkeypatch, memory_management_agent, level_to_run + 2)
file_path = get_workspace_path(memory_management_agent, OUTPUT_LOCATION)
- content = read_file(file_path, config)
+ content = read_file(file_path, memory_management_agent)
check_beliefs(content, level_to_run)

@@ -177,7 +175,6 @@ def create_instructions_files(
memory_management_agent: Agent,
level: int,
test_phrases: list,
- config: Config,
base_filename: str = "instructions_",
) -> None:
"""

@@ -186,14 +183,13 @@ def create_instructions_files(
level:
memory_management_agent (Agent)
test_phrases (list)
- config (Config)
base_filename (str, optional)
"""
for i in range(1, level + 1):
content = generate_content(i, test_phrases, base_filename, level)
file_name = f"{base_filename}{i}.txt"
file_path = get_workspace_path(memory_management_agent, file_name)
- write_to_file(file_path, content, config)
+ write_to_file(file_path, content, memory_management_agent)

def generate_content(
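Across these memory-challenge hunks the pattern is the same: the helpers drop their separate Config parameter and hand the Agent instance straight to the file-operation commands. A minimal sketch of the resulting helper, condensed from the hunks above (the generated content is a placeholder here, not the real generate_content output):

from autogpt.agent import Agent
from autogpt.commands.file_operations import write_to_file
from tests.challenges.utils import get_workspace_path


def create_instructions_files_sketch(
    memory_management_agent: Agent,
    level: int,
    task_ids: list,
    base_filename: str = "instructions_",
) -> None:
    # The agent carries workspace and config, so no Config argument is needed.
    for i in range(1, level + 1):
        content = f"Instruction file {i} of {level}, task ids: {task_ids}"  # placeholder
        file_path = get_workspace_path(memory_management_agent, f"{base_filename}{i}.txt")
        write_to_file(file_path, content, memory_management_agent)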
@@ -4,9 +4,14 @@ from pathlib import Path
import pytest
from pytest_mock import MockerFixture
+ from autogpt.agent.agent import Agent
+ from autogpt.commands.command import CommandRegistry
+ from autogpt.config.ai_config import AIConfig
from autogpt.config.config import Config
from autogpt.llm.api_manager import ApiManager
from autogpt.logs import TypingConsoleHandler
+ from autogpt.memory.vector import get_memory
+ from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
from autogpt.workspace import Workspace

pytest_plugins = [

@@ -57,3 +62,32 @@ def patch_emit(monkeypatch):
print(self.format(record))

monkeypatch.setattr(TypingConsoleHandler, "emit", quick_emit)

+ @pytest.fixture
+ def agent(config: Config, workspace: Workspace) -> Agent:
+ ai_config = AIConfig(
+ ai_name="Base",
+ ai_role="A base AI",
+ ai_goals=[],
+ )
+ command_registry = CommandRegistry()
+ ai_config.command_registry = command_registry
+ config.set_memory_backend("json_file")
+ memory_json_file = get_memory(config, init=True)
+ system_prompt = ai_config.construct_full_prompt()
+ return Agent(
+ ai_name=ai_config.ai_name,
+ memory=memory_json_file,
+ command_registry=command_registry,
+ ai_config=ai_config,
+ config=config,
+ next_action_count=0,
+ system_prompt=system_prompt,
+ triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
+ workspace_directory=workspace.root,
+ )
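With this fixture in place, command tests can request agent instead of the old config fixture and let the fixture wire up AIConfig, the command registry, memory and the workspace. A small hypothetical test using it (the test name and file contents are illustrative, not part of the PR):

import autogpt.commands.file_operations as file_ops
from autogpt.agent.agent import Agent
from autogpt.workspace import Workspace


def test_write_then_read_roundtrip(agent: Agent, workspace: Workspace) -> None:
    # Commands resolve config and workspace through the agent they receive.
    path = str(workspace.get_path("roundtrip.txt"))
    file_ops.write_to_file(path, "hello\n", agent=agent)
    assert file_ops.read_file(path, agent=agent).replace("\r", "") == "hello\n"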
@@ -7,8 +7,8 @@ from typing import Callable
import pytest

import autogpt.commands.execute_code as sut # system under testing
+ from autogpt.agent.agent import Agent
from autogpt.config import Config
from autogpt.config.ai_config import AIConfig

@pytest.fixture

@@ -31,54 +31,54 @@ def random_string():
return "".join(random.choice(string.ascii_lowercase) for _ in range(10))

- def test_execute_python_file(python_test_file: str, random_string: str, config):
- result: str = sut.execute_python_file(python_test_file, config)
+ def test_execute_python_file(python_test_file: str, random_string: str, agent: Agent):
+ result: str = sut.execute_python_file(python_test_file, agent=agent)
assert result.replace("\r", "") == f"Hello {random_string}!\n"

- def test_execute_python_code(random_code: str, random_string: str, config: Config):
- ai_name = AIConfig.load(config.ai_settings_file).ai_name
+ def test_execute_python_code(random_code: str, random_string: str, agent: Agent):
+ ai_name = agent.ai_name
- result: str = sut.execute_python_code(random_code, "test_code", config)
+ result: str = sut.execute_python_code(random_code, "test_code", agent=agent)
assert result.replace("\r", "") == f"Hello {random_string}!\n"

# Check that the code is stored
destination = os.path.join(
- config.workspace_path, ai_name, "executed_code", "test_code.py"
+ agent.config.workspace_path, ai_name, "executed_code", "test_code.py"
)
with open(destination) as f:
assert f.read() == random_code

def test_execute_python_code_overwrites_file(
- random_code: str, random_string: str, config: Config
+ random_code: str, random_string: str, agent: Agent
):
- ai_name = AIConfig.load(config.ai_settings_file).ai_name
+ ai_name = agent.ai_name
destination = os.path.join(
- config.workspace_path, ai_name, "executed_code", "test_code.py"
+ agent.config.workspace_path, ai_name, "executed_code", "test_code.py"
)
os.makedirs(os.path.dirname(destination), exist_ok=True)

with open(destination, "w+") as f:
f.write("This will be overwritten")

- sut.execute_python_code(random_code, "test_code.py", config)
+ sut.execute_python_code(random_code, "test_code.py", agent=agent)

# Check that the file is updated with the new code
with open(destination) as f:
assert f.read() == random_code

- def test_execute_python_file_invalid(config: Config):
+ def test_execute_python_file_invalid(agent: Agent):
assert all(
- s in sut.execute_python_file("not_python", config).lower()
+ s in sut.execute_python_file("not_python", agent).lower()
for s in ["error:", "invalid", ".py"]
)

- def test_execute_python_file_not_found(config: Config):
+ def test_execute_python_file_not_found(agent: Agent):
assert all(
- s in sut.execute_python_file("notexist.py", config).lower()
+ s in sut.execute_python_file("notexist.py", agent).lower()
for s in [
"python: can't open file 'notexist.py'",
"[errno 2] no such file or directory",

@@ -86,43 +86,43 @@ def test_execute_python_file_not_found(config: Config):
)

- def test_execute_shell(random_string: str, config: Config):
- result = sut.execute_shell(f"echo 'Hello {random_string}!'", config)
+ def test_execute_shell(random_string: str, agent: Agent):
+ result = sut.execute_shell(f"echo 'Hello {random_string}!'", agent)
assert f"Hello {random_string}!" in result

- def test_execute_shell_local_commands_not_allowed(random_string: str, config: Config):
- result = sut.execute_shell(f"echo 'Hello {random_string}!'", config)
+ def test_execute_shell_local_commands_not_allowed(random_string: str, agent: Agent):
+ result = sut.execute_shell(f"echo 'Hello {random_string}!'", agent)
assert f"Hello {random_string}!" in result

- def test_execute_shell_denylist_should_deny(config: Config, random_string: str):
- config.shell_denylist = ["echo"]
+ def test_execute_shell_denylist_should_deny(agent: Agent, random_string: str):
+ agent.config.shell_denylist = ["echo"]
- result = sut.execute_shell(f"echo 'Hello {random_string}!'", config)
+ result = sut.execute_shell(f"echo 'Hello {random_string}!'", agent)
assert "Error:" in result and "not allowed" in result

- def test_execute_shell_denylist_should_allow(config: Config, random_string: str):
- config.shell_denylist = ["cat"]
+ def test_execute_shell_denylist_should_allow(agent: Agent, random_string: str):
+ agent.config.shell_denylist = ["cat"]
- result = sut.execute_shell(f"echo 'Hello {random_string}!'", config)
+ result = sut.execute_shell(f"echo 'Hello {random_string}!'", agent)
assert "Hello" in result and random_string in result
assert "Error" not in result

- def test_execute_shell_allowlist_should_deny(config: Config, random_string: str):
- config.shell_command_control = sut.ALLOWLIST_CONTROL
- config.shell_allowlist = ["cat"]
+ def test_execute_shell_allowlist_should_deny(agent: Agent, random_string: str):
+ agent.config.shell_command_control = sut.ALLOWLIST_CONTROL
+ agent.config.shell_allowlist = ["cat"]
- result = sut.execute_shell(f"echo 'Hello {random_string}!'", config)
+ result = sut.execute_shell(f"echo 'Hello {random_string}!'", agent)
assert "Error:" in result and "not allowed" in result

- def test_execute_shell_allowlist_should_allow(config: Config, random_string: str):
- config.shell_command_control = sut.ALLOWLIST_CONTROL
- config.shell_allowlist = ["echo"]
+ def test_execute_shell_allowlist_should_allow(agent: Agent, random_string: str):
+ agent.config.shell_command_control = sut.ALLOWLIST_CONTROL
+ agent.config.shell_allowlist = ["echo"]
- result = sut.execute_shell(f"echo 'Hello {random_string}!'", config)
+ result = sut.execute_shell(f"echo 'Hello {random_string}!'", agent)
assert "Hello" in result and random_string in result
assert "Error" not in result
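These signature changes on the test side mirror the command side: functions registered through the command registry now receive the Agent and read settings such as the shell allow/deny lists from agent.config. A rough, hypothetical sketch of that shape (the command name and body are illustrative, not code from this PR):

import subprocess

from autogpt.agent.agent import Agent
from autogpt.commands.command import command


@command("echo_greeting", "Echo a greeting via the shell", '"name": "<name>"')
def echo_greeting(name: str, agent: Agent) -> str:
    # Settings that used to come from a standalone Config argument
    # are now read from agent.config.
    if "echo" in agent.config.shell_denylist:
        return "Error: 'echo' is not allowed by the shell denylist"
    completed = subprocess.run(
        ["echo", f"Hello {name}!"], capture_output=True, text=True
    )
    return completed.stdout.strip()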
@@ -6,6 +6,7 @@ from unittest.mock import patch
import pytest
from PIL import Image
+ from autogpt.agent.agent import Agent
from autogpt.commands.image_gen import generate_image, generate_image_with_sd_webui
from tests.utils import requires_api_key

@@ -18,10 +19,10 @@ def image_size(request):
@requires_api_key("OPENAI_API_KEY")
@pytest.mark.vcr
- def test_dalle(config, workspace, image_size, patched_api_requestor):
+ def test_dalle(agent: Agent, workspace, image_size, patched_api_requestor):
"""Test DALL-E image generation."""
generate_and_validate(
- config,
+ agent,
workspace,
image_provider="dalle",
image_size=image_size,

@@ -36,10 +37,10 @@ def test_dalle(config, workspace, image_size, patched_api_requestor):
"image_model",
["CompVis/stable-diffusion-v1-4", "stabilityai/stable-diffusion-2-1"],
)
- def test_huggingface(config, workspace, image_size, image_model):
+ def test_huggingface(agent: Agent, workspace, image_size, image_model):
"""Test HuggingFace image generation."""
generate_and_validate(
- config,
+ agent,
workspace,
image_provider="huggingface",
image_size=image_size,

@@ -48,10 +49,10 @@ def test_huggingface(config, workspace, image_size, image_model):
@pytest.mark.xfail(reason="SD WebUI call does not work.")
- def test_sd_webui(config, workspace, image_size):
+ def test_sd_webui(agent: Agent, workspace, image_size):
"""Test SD WebUI image generation."""
generate_and_validate(
- config,
+ agent,
workspace,
image_provider="sd_webui",
image_size=image_size,

@@ -59,11 +60,11 @@ def test_sd_webui(config, workspace, image_size):
@pytest.mark.xfail(reason="SD WebUI call does not work.")
- def test_sd_webui_negative_prompt(config, workspace, image_size):
+ def test_sd_webui_negative_prompt(agent: Agent, workspace, image_size):
gen_image = functools.partial(
generate_image_with_sd_webui,
prompt="astronaut riding a horse",
- config=config,
+ agent=agent,
size=image_size,
extra={"seed": 123},
)

@@ -87,7 +88,7 @@ def lst(txt):
def generate_and_validate(
- config,
+ agent: Agent,
workspace,
image_size,
image_provider,

@@ -95,11 +96,11 @@ def generate_and_validate(
**kwargs,
):
"""Generate an image and validate the output."""
- config.image_provider = image_provider
- config.huggingface_image_model = hugging_face_image_model
+ agent.config.image_provider = image_provider
+ agent.config.huggingface_image_model = hugging_face_image_model
prompt = "astronaut riding a horse"
- image_path = lst(generate_image(prompt, config, image_size, **kwargs))
+ image_path = lst(generate_image(prompt, agent, image_size, **kwargs))
assert image_path.exists()
with Image.open(image_path) as img:
assert img.size == (image_size, image_size)

@@ -120,7 +121,7 @@ def generate_and_validate(
)
@pytest.mark.parametrize("delay", [10, 0])
def test_huggingface_fail_request_with_delay(
- config, workspace, image_size, image_model, return_text, delay
+ agent: Agent, workspace, image_size, image_model, return_text, delay
):
return_text = return_text.replace("[model]", image_model).replace(
"[delay]", str(delay)

@@ -138,13 +139,13 @@ def test_huggingface_fail_request_with_delay(
mock_post.return_value.ok = False
mock_post.return_value.text = return_text
- config.image_provider = "huggingface"
- config.huggingface_image_model = image_model
+ agent.config.image_provider = "huggingface"
+ agent.config.huggingface_image_model = image_model
prompt = "astronaut riding a horse"

with patch("time.sleep") as mock_sleep:
# Verify request fails.
- result = generate_image(prompt, config, image_size)
+ result = generate_image(prompt, agent, image_size)
assert result == "Error creating image."

# Verify retry was called with delay if delay is in return_text

@@ -154,8 +155,8 @@ def test_huggingface_fail_request_with_delay(
mock_sleep.assert_not_called()

- def test_huggingface_fail_request_with_delay(mocker, config):
- config.huggingface_api_token = "1"
+ def test_huggingface_fail_request_with_delay(mocker, agent: Agent):
+ agent.config.huggingface_api_token = "1"

# Mock requests.post
mock_post = mocker.patch("requests.post")

@@ -166,10 +167,10 @@ def test_huggingface_fail_request_with_delay(mocker, config):
# Mock time.sleep
mock_sleep = mocker.patch("time.sleep")
- config.image_provider = "huggingface"
- config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"
+ agent.config.image_provider = "huggingface"
+ agent.config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"
- result = generate_image("astronaut riding a horse", config, 512)
+ result = generate_image("astronaut riding a horse", agent, 512)
assert result == "Error creating image."

@@ -177,8 +178,8 @@ def test_huggingface_fail_request_with_delay(mocker, config):
mock_sleep.assert_called_with(0)

- def test_huggingface_fail_request_no_delay(mocker, config):
- config.huggingface_api_token = "1"
+ def test_huggingface_fail_request_no_delay(mocker, agent: Agent):
+ agent.config.huggingface_api_token = "1"

# Mock requests.post
mock_post = mocker.patch("requests.post")

@@ -191,10 +192,10 @@ def test_huggingface_fail_request_no_delay(mocker, config):
# Mock time.sleep
mock_sleep = mocker.patch("time.sleep")
- config.image_provider = "huggingface"
- config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"
+ agent.config.image_provider = "huggingface"
+ agent.config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"
- result = generate_image("astronaut riding a horse", config, 512)
+ result = generate_image("astronaut riding a horse", agent, 512)
assert result == "Error creating image."

@@ -202,8 +203,8 @@ def test_huggingface_fail_request_no_delay(mocker, config):
mock_sleep.assert_not_called()

- def test_huggingface_fail_request_bad_json(mocker, config):
- config.huggingface_api_token = "1"
+ def test_huggingface_fail_request_bad_json(mocker, agent: Agent):
+ agent.config.huggingface_api_token = "1"

# Mock requests.post
mock_post = mocker.patch("requests.post")

@@ -214,10 +215,10 @@ def test_huggingface_fail_request_bad_json(mocker, config):
# Mock time.sleep
mock_sleep = mocker.patch("time.sleep")
- config.image_provider = "huggingface"
- config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"
+ agent.config.image_provider = "huggingface"
+ agent.config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"
- result = generate_image("astronaut riding a horse", config, 512)
+ result = generate_image("astronaut riding a horse", agent, 512)
assert result == "Error creating image."

@@ -225,28 +226,28 @@ def test_huggingface_fail_request_bad_json(mocker, config):
mock_sleep.assert_not_called()

- def test_huggingface_fail_request_bad_image(mocker, config):
- config.huggingface_api_token = "1"
+ def test_huggingface_fail_request_bad_image(mocker, agent: Agent):
+ agent.config.huggingface_api_token = "1"

# Mock requests.post
mock_post = mocker.patch("requests.post")
mock_post.return_value.status_code = 200
- config.image_provider = "huggingface"
- config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"
+ agent.config.image_provider = "huggingface"
+ agent.config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"
- result = generate_image("astronaut riding a horse", config, 512)
+ result = generate_image("astronaut riding a horse", agent, 512)
assert result == "Error creating image."

- def test_huggingface_fail_missing_api_token(mocker, config):
- config.image_provider = "huggingface"
- config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"
+ def test_huggingface_fail_missing_api_token(mocker, agent: Agent):
+ agent.config.image_provider = "huggingface"
+ agent.config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"

# Mock requests.post to raise ValueError
mock_post = mocker.patch("requests.post", side_effect=ValueError)

# Verify request raises an error.
with pytest.raises(ValueError):
- generate_image("astronaut riding a horse", config, 512)
+ generate_image("astronaut riding a horse", agent, 512)
@@ -1,18 +1,14 @@
import pytest
from pytest_mock import MockerFixture
+ from autogpt.agent.agent import Agent
from autogpt.commands.web_selenium import browse_website
- from autogpt.config import Config
from tests.utils import requires_api_key

@pytest.mark.vcr
@requires_api_key("OPENAI_API_KEY")
- def test_browse_website(config: Config, patched_api_requestor: MockerFixture):
+ def test_browse_website(agent: Agent, patched_api_requestor: MockerFixture):
url = "https://barrel-roll.com"
question = "How to execute a barrel roll"
- response = browse_website(url, question, config)
+ response = browse_website(url, question, agent)
assert "Error" in response
# Sanity check that the response is not too long
assert len(response) < 200
@@ -3,6 +3,7 @@
# Dependencies:
# pip install pytest-mock
+ from autogpt.agent.agent import Agent
from autogpt.commands.web_requests import scrape_links

"""

@@ -42,14 +43,14 @@ class TestScrapeLinks:
provided with a valid url that returns a webpage with hyperlinks.
"""
- def test_valid_url_with_hyperlinks(self, config):
+ def test_valid_url_with_hyperlinks(self, agent: Agent):
url = "https://www.google.com"
- result = scrape_links(url, config=config)
+ result = scrape_links(url, agent=agent)
assert len(result) > 0
assert isinstance(result, list)
assert isinstance(result[0], str)

- def test_valid_url(self, mocker, config):
+ def test_valid_url(self, mocker, agent: Agent):
"""Test that the function returns correctly formatted hyperlinks when given a valid url."""
# Mock the requests.get() function to return a response with sample HTML containing hyperlinks
mock_response = mocker.Mock()

@@ -60,12 +61,12 @@ class TestScrapeLinks:
mocker.patch("requests.Session.get", return_value=mock_response)

# Call the function with a valid URL
- result = scrape_links("https://www.example.com", config)
+ result = scrape_links("https://www.example.com", agent)

# Assert that the function returns correctly formatted hyperlinks
assert result == ["Google (https://www.google.com)"]

- def test_invalid_url(self, mocker, config):
+ def test_invalid_url(self, mocker, agent: Agent):
"""Test that the function returns "error" when given an invalid url."""
# Mock the requests.get() function to return an HTTP error response
mock_response = mocker.Mock()

@@ -73,12 +74,12 @@ class TestScrapeLinks:
mocker.patch("requests.Session.get", return_value=mock_response)

# Call the function with an invalid URL
- result = scrape_links("https://www.invalidurl.com", config)
+ result = scrape_links("https://www.invalidurl.com", agent)

# Assert that the function returns "error"
assert "Error:" in result

- def test_no_hyperlinks(self, mocker, config):
+ def test_no_hyperlinks(self, mocker, agent: Agent):
"""Test that the function returns an empty list when the html contains no hyperlinks."""
# Mock the requests.get() function to return a response with sample HTML containing no hyperlinks
mock_response = mocker.Mock()

@@ -87,12 +88,12 @@ class TestScrapeLinks:
mocker.patch("requests.Session.get", return_value=mock_response)

# Call the function with a URL containing no hyperlinks
- result = scrape_links("https://www.example.com", config)
+ result = scrape_links("https://www.example.com", agent)

# Assert that the function returns an empty list
assert result == []

- def test_scrape_links_with_few_hyperlinks(self, mocker, config):
+ def test_scrape_links_with_few_hyperlinks(self, mocker, agent: Agent):
"""Test that scrape_links() correctly extracts and formats hyperlinks from a sample HTML containing a few hyperlinks."""
mock_response = mocker.Mock()
mock_response.status_code = 200

@@ -108,7 +109,7 @@ class TestScrapeLinks:
mocker.patch("requests.Session.get", return_value=mock_response)

# Call the function being tested
- result = scrape_links("https://www.example.com", config)
+ result = scrape_links("https://www.example.com", agent)

# Assert that the function returns a list of formatted hyperlinks
assert isinstance(result, list)
@@ -3,6 +3,7 @@
import pytest
import requests
+ from autogpt.agent.agent import Agent
from autogpt.commands.web_requests import scrape_text

"""

@@ -42,7 +43,7 @@ Additional aspects:

class TestScrapeText:
- def test_scrape_text_with_valid_url(self, mocker, config):
+ def test_scrape_text_with_valid_url(self, mocker, agent: Agent):
"""Tests that scrape_text() returns the expected text when given a valid URL."""
# Mock the requests.get() method to return a response with expected text
expected_text = "This is some sample text"

@@ -57,14 +58,14 @@ class TestScrapeText:
# Call the function with a valid URL and assert that it returns the
# expected text
url = "http://www.example.com"
- assert scrape_text(url, config) == expected_text
+ assert scrape_text(url, agent) == expected_text

- def test_invalid_url(self, config):
+ def test_invalid_url(self, agent: Agent):
"""Tests that an error is raised when an invalid url is provided."""
url = "invalidurl.com"
- pytest.raises(ValueError, scrape_text, url, config)
+ pytest.raises(ValueError, scrape_text, url, agent)

- def test_unreachable_url(self, mocker, config):
+ def test_unreachable_url(self, mocker, agent: Agent):
"""Test that scrape_text returns an error message when an invalid or unreachable url is provided."""
# Mock the requests.get() method to raise an exception
mocker.patch(

@@ -74,10 +75,10 @@ class TestScrapeText:
# Call the function with an invalid URL and assert that it returns an error
# message
url = "http://thiswebsitedoesnotexist.net/"
- error_message = scrape_text(url, config)
+ error_message = scrape_text(url, agent)
assert "Error:" in error_message

- def test_no_text(self, mocker, config):
+ def test_no_text(self, mocker, agent: Agent):
"""Test that scrape_text returns an empty string when the html page contains no text to be scraped."""
# Mock the requests.get() method to return a response with no text
mock_response = mocker.Mock()

@@ -87,20 +88,20 @@ class TestScrapeText:

# Call the function with a valid URL and assert that it returns an empty string
url = "http://www.example.com"
- assert scrape_text(url, config) == ""
+ assert scrape_text(url, agent) == ""

- def test_http_error(self, mocker, config):
+ def test_http_error(self, mocker, agent: Agent):
"""Test that scrape_text returns an error message when the response status code is an http error (>=400)."""
# Mock the requests.get() method to return a response with a 404 status code
mocker.patch("requests.Session.get", return_value=mocker.Mock(status_code=404))

# Call the function with a URL
- result = scrape_text("https://www.example.com", config)
+ result = scrape_text("https://www.example.com", agent)

# Check that the function returns an error message
assert result == "Error: HTTP 404 error"

- def test_scrape_text_with_html_tags(self, mocker, config):
+ def test_scrape_text_with_html_tags(self, mocker, agent: Agent):
"""Test that scrape_text() properly handles HTML tags."""
# Create a mock response object with HTML containing tags
html = "<html><body><p>This is <b>bold</b> text.</p></body></html>"

@@ -110,7 +111,7 @@ class TestScrapeText:
mocker.patch("requests.Session.get", return_value=mock_response)

# Call the function with a URL
- result = scrape_text("https://www.example.com", config)
+ result = scrape_text("https://www.example.com", agent)

# Check that the function properly handles HTML tags
assert result == "This is bold text."
@@ -12,7 +12,7 @@ import pytest
from pytest_mock import MockerFixture

import autogpt.commands.file_operations as file_ops
- from autogpt.config import Config
+ from autogpt.agent.agent import Agent
from autogpt.memory.vector.memory_item import MemoryItem
from autogpt.memory.vector.utils import Embedding
from autogpt.utils import readable_file_size

@@ -42,7 +42,7 @@ def mock_MemoryItem_from_text(mocker: MockerFixture, mock_embedding: Embedding):

@pytest.fixture()
- def test_file_path(config, workspace: Workspace):
+ def test_file_path(workspace: Workspace):
return workspace.get_path("test_file.txt")

@@ -55,22 +55,22 @@ def test_file(test_file_path: Path):

@pytest.fixture()
- def test_file_with_content_path(test_file: TextIOWrapper, file_content, config):
+ def test_file_with_content_path(test_file: TextIOWrapper, file_content, agent: Agent):
test_file.write(file_content)
test_file.close()
file_ops.log_operation(
- "write", test_file.name, config, file_ops.text_checksum(file_content)
+ "write", test_file.name, agent, file_ops.text_checksum(file_content)
)
return Path(test_file.name)

@pytest.fixture()
- def test_directory(config, workspace: Workspace):
+ def test_directory(workspace: Workspace):
return workspace.get_path("test_directory")

@pytest.fixture()
- def test_nested_file(config, workspace: Workspace):
+ def test_nested_file(workspace: Workspace):
return workspace.get_path("nested/test_file.txt")

@@ -117,7 +117,7 @@ def test_file_operations_state(test_file: TextIOWrapper):
assert file_ops.file_operations_state(test_file.name) == expected_state

- def test_is_duplicate_operation(config: Config, mocker: MockerFixture):
+ def test_is_duplicate_operation(agent: Agent, mocker: MockerFixture):
# Prepare a fake state dictionary for the function to use
state = {
"path/to/file1.txt": "checksum1",

@@ -128,42 +128,48 @@ def test_is_duplicate_operation(config: Config, mocker: MockerFixture):
# Test cases with write operations
assert (
file_ops.is_duplicate_operation(
- "write", "path/to/file1.txt", config, "checksum1"
+ "write", "path/to/file1.txt", agent.config, "checksum1"
)
is True
)
assert (
file_ops.is_duplicate_operation(
- "write", "path/to/file1.txt", config, "checksum2"
+ "write", "path/to/file1.txt", agent.config, "checksum2"
)
is False
)
assert (
file_ops.is_duplicate_operation(
- "write", "path/to/file3.txt", config, "checksum3"
+ "write", "path/to/file3.txt", agent.config, "checksum3"
)
is False
)
# Test cases with append operations
assert (
file_ops.is_duplicate_operation(
- "append", "path/to/file1.txt", config, "checksum1"
+ "append", "path/to/file1.txt", agent.config, "checksum1"
)
is False
)
# Test cases with delete operations
assert (
- file_ops.is_duplicate_operation("delete", "path/to/file1.txt", config) is False
+ file_ops.is_duplicate_operation(
+ "delete", "path/to/file1.txt", config=agent.config
+ )
+ is False
)
assert (
- file_ops.is_duplicate_operation("delete", "path/to/file3.txt", config) is True
+ file_ops.is_duplicate_operation(
+ "delete", "path/to/file3.txt", config=agent.config
+ )
+ is True
)

# Test logging a file operation
- def test_log_operation(config: Config):
- file_ops.log_operation("log_test", "path/to/test", config)
- with open(config.file_logger_path, "r", encoding="utf-8") as f:
+ def test_log_operation(agent: Agent):
+ file_ops.log_operation("log_test", "path/to/test", agent=agent)
+ with open(agent.config.file_logger_path, "r", encoding="utf-8") as f:
content = f.read()
assert f"log_test: path/to/test\n" in content

@@ -175,9 +181,9 @@ def test_text_checksum(file_content: str):
assert checksum != different_checksum

- def test_log_operation_with_checksum(config: Config):
- file_ops.log_operation("log_test", "path/to/test", config, checksum="ABCDEF")
- with open(config.file_logger_path, "r", encoding="utf-8") as f:
+ def test_log_operation_with_checksum(agent: Agent):
+ file_ops.log_operation("log_test", "path/to/test", agent=agent, checksum="ABCDEF")
+ with open(agent.config.file_logger_path, "r", encoding="utf-8") as f:
content = f.read()
assert f"log_test: path/to/test #ABCDEF\n" in content

@@ -223,66 +229,66 @@ def test_read_file(
mock_MemoryItem_from_text,
test_file_with_content_path: Path,
file_content,
- config: Config,
+ agent: Agent,
):
- content = file_ops.read_file(test_file_with_content_path, config)
+ content = file_ops.read_file(test_file_with_content_path, agent=agent)
assert content.replace("\r", "") == file_content

- def test_read_file_not_found(config: Config):
+ def test_read_file_not_found(agent: Agent):
filename = "does_not_exist.txt"
- content = file_ops.read_file(filename, config)
+ content = file_ops.read_file(filename, agent=agent)
assert "Error:" in content and filename in content and "no such file" in content

- def test_write_to_file(test_file_path: Path, config):
+ def test_write_to_file(test_file_path: Path, agent: Agent):
new_content = "This is new content.\n"
- file_ops.write_to_file(str(test_file_path), new_content, config)
+ file_ops.write_to_file(str(test_file_path), new_content, agent=agent)
with open(test_file_path, "r", encoding="utf-8") as f:
content = f.read()
assert content == new_content

- def test_write_file_logs_checksum(test_file_path: Path, config):
+ def test_write_file_logs_checksum(test_file_path: Path, agent: Agent):
new_content = "This is new content.\n"
new_checksum = file_ops.text_checksum(new_content)
- file_ops.write_to_file(str(test_file_path), new_content, config)
- with open(config.file_logger_path, "r", encoding="utf-8") as f:
+ file_ops.write_to_file(str(test_file_path), new_content, agent=agent)
+ with open(agent.config.file_logger_path, "r", encoding="utf-8") as f:
log_entry = f.read()
assert log_entry == f"write: {test_file_path} #{new_checksum}\n"

- def test_write_file_fails_if_content_exists(test_file_path: Path, config):
+ def test_write_file_fails_if_content_exists(test_file_path: Path, agent: Agent):
new_content = "This is new content.\n"
file_ops.log_operation(
"write",
str(test_file_path),
- config,
+ agent=agent,
checksum=file_ops.text_checksum(new_content),
)
- result = file_ops.write_to_file(str(test_file_path), new_content, config)
+ result = file_ops.write_to_file(str(test_file_path), new_content, agent=agent)
assert result == "Error: File has already been updated."

def test_write_file_succeeds_if_content_different(
- test_file_with_content_path: Path, config
+ test_file_with_content_path: Path, agent: Agent
):
new_content = "This is different content.\n"
result = file_ops.write_to_file(
- str(test_file_with_content_path), new_content, config
+ str(test_file_with_content_path), new_content, agent=agent
)
assert result == "File written to successfully."

# Update file testing
- def test_replace_in_file_all_occurrences(test_file, test_file_path, config):
+ def test_replace_in_file_all_occurrences(test_file, test_file_path, agent: Agent):
old_content = "This is a test file.\n we test file here\na test is needed"
expected_content = (
"This is a update file.\n we update file here\na update is needed"
)
test_file.write(old_content)
test_file.close()
- file_ops.replace_in_file(test_file_path, "test", "update", config)
+ file_ops.replace_in_file(test_file_path, "test", "update", agent=agent)
with open(test_file_path) as f:
new_content = f.read()
print(new_content)

@@ -290,13 +296,13 @@ def test_replace_in_file_all_occurrences(test_file, test_file_path, config):
assert new_content == expected_content

- def test_replace_in_file_one_occurrence(test_file, test_file_path, config):
+ def test_replace_in_file_one_occurrence(test_file, test_file_path, agent: Agent):
old_content = "This is a test file.\n we test file here\na test is needed"
expected_content = "This is a test file.\n we update file here\na test is needed"
test_file.write(old_content)
test_file.close()
file_ops.replace_in_file(
- test_file_path, "test", "update", config, occurrence_index=1
+ test_file_path, "test", "update", agent=agent, occurrence_index=1
)
with open(test_file_path) as f:
new_content = f.read()

@@ -304,7 +310,7 @@ def test_replace_in_file_one_occurrence(test_file, test_file_path, config):
assert new_content == expected_content

- def test_replace_in_file_multiline_old_text(test_file, test_file_path, config):
+ def test_replace_in_file_multiline_old_text(test_file, test_file_path, agent: Agent):
old_content = "This is a multi_line\ntest for testing\nhow well this function\nworks when the input\nis multi-lined"
expected_content = "This is a multi_line\nfile. succeeded test\nis multi-lined"
test_file.write(old_content)

@@ -313,7 +319,7 @@ def test_replace_in_file_multiline_old_text(test_file, test_file_path, config):
test_file_path,
"\ntest for testing\nhow well this function\nworks when the input\n",
"\nfile. succeeded test\n",
- config,
+ agent=agent,
)
with open(test_file_path) as f:
new_content = f.read()

@@ -321,11 +327,11 @@ def test_replace_in_file_multiline_old_text(test_file, test_file_path, config):
assert new_content == expected_content

- def test_append_to_file(test_nested_file: Path, config):
+ def test_append_to_file(test_nested_file: Path, agent: Agent):
append_text = "This is appended text.\n"
- file_ops.write_to_file(test_nested_file, append_text, config)
+ file_ops.write_to_file(test_nested_file, append_text, agent=agent)
- file_ops.append_to_file(test_nested_file, append_text, config)
+ file_ops.append_to_file(test_nested_file, append_text, agent=agent)
with open(test_nested_file, "r") as f:
content_after = f.read()

@@ -333,11 +339,13 @@ def test_append_to_file(test_nested_file: Path, config):
assert content_after == append_text + append_text

- def test_append_to_file_uses_checksum_from_appended_file(test_file_path: Path, config):
+ def test_append_to_file_uses_checksum_from_appended_file(
+ test_file_path: Path, agent: Agent
+ ):
append_text = "This is appended text.\n"
- file_ops.append_to_file(test_file_path, append_text, config)
- file_ops.append_to_file(test_file_path, append_text, config)
- with open(config.file_logger_path, "r", encoding="utf-8") as f:
+ file_ops.append_to_file(test_file_path, append_text, agent=agent)
+ file_ops.append_to_file(test_file_path, append_text, agent=agent)
+ with open(agent.config.file_logger_path, "r", encoding="utf-8") as f:
log_contents = f.read()

digest = hashlib.md5()

@@ -351,25 +359,25 @@ def test_append_to_file_uses_checksum_from_appended_file(test_file_path: Path, c
)

- def test_delete_file(test_file_with_content_path: Path, config):
- result = file_ops.delete_file(str(test_file_with_content_path), config)
+ def test_delete_file(test_file_with_content_path: Path, agent: Agent):
+ result = file_ops.delete_file(str(test_file_with_content_path), agent=agent)
assert result == "File deleted successfully."
assert os.path.exists(test_file_with_content_path) is False

- def test_delete_missing_file(config):
+ def test_delete_missing_file(agent: Agent):
filename = "path/to/file/which/does/not/exist"
# confuse the log
- file_ops.log_operation("write", filename, config, checksum="fake")
+ file_ops.log_operation("write", filename, agent=agent, checksum="fake")
try:
os.remove(filename)
except FileNotFoundError as err:
- assert str(err) in file_ops.delete_file(filename, config)
+ assert str(err) in file_ops.delete_file(filename, agent=agent)
return
assert False, f"Failed to test delete_file; {filename} not expected to exist"

- def test_list_files(workspace: Workspace, test_directory: Path, config):
+ def test_list_files(workspace: Workspace, test_directory: Path, agent: Agent):
# Case 1: Create files A and B, search for A, and ensure we don't return A and B
file_a = workspace.get_path("file_a.txt")
file_b = workspace.get_path("file_b.txt")

@@ -387,7 +395,7 @@ def test_list_files(workspace: Workspace, test_directory: Path, config):
with open(os.path.join(test_directory, file_a.name), "w") as f:
f.write("This is file A in the subdirectory.")

- files = file_ops.list_files(str(workspace.root), config)
+ files = file_ops.list_files(str(workspace.root), agent=agent)
assert file_a.name in files
assert file_b.name in files
assert os.path.join(Path(test_directory).name, file_a.name) in files

@@ -400,17 +408,17 @@ def test_list_files(workspace: Workspace, test_directory: Path, config):

# Case 2: Search for a file that does not exist and make sure we don't throw
non_existent_file = "non_existent_file.txt"
- files = file_ops.list_files("", config)
+ files = file_ops.list_files("", agent=agent)
assert non_existent_file not in files

- def test_download_file(workspace: Workspace, config):
+ def test_download_file(workspace: Workspace, agent: Agent):
url = "https://github.com/Significant-Gravitas/Auto-GPT/archive/refs/tags/v0.2.2.tar.gz"
local_name = workspace.get_path("auto-gpt.tar.gz")
size = 365023
readable_size = readable_file_size(size)
assert (
- file_ops.download_file(url, local_name, config)
+ file_ops.download_file(url, local_name, agent=agent)
== f'Successfully downloaded and locally stored file: "{local_name}"! (Size: {readable_size})'
)
assert os.path.isfile(local_name) is True

@@ -418,10 +426,10 @@ def test_download_file(workspace: Workspace, config):

url = "https://github.com/Significant-Gravitas/Auto-GPT/archive/refs/tags/v0.0.0.tar.gz"
assert "Got an HTTP Error whilst trying to download file" in file_ops.download_file(
- url, local_name, config
+ url, local_name, agent=agent
)

url = "https://thiswebsiteiswrong.hmm/v0.0.0.tar.gz"
assert "Failed to establish a new connection:" in file_ops.download_file(
- url, local_name, config
+ url, local_name, agent=agent
)
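Note the split these file-operation tests expose: command-level helpers such as log_operation, write_to_file and delete_file now accept the agent directly, while the lower-level is_duplicate_operation check still works on a Config and is therefore called with agent.config. A short hypothetical helper combining the two call styles, assuming only the signatures shown above:

import autogpt.commands.file_operations as file_ops
from autogpt.agent.agent import Agent


def log_write_unless_duplicate(agent: Agent, path: str, text: str) -> bool:
    checksum = file_ops.text_checksum(text)
    # Lower-level state check: still config-based, so pass agent.config.
    if file_ops.is_duplicate_operation("write", path, agent.config, checksum):
        return False
    # Command-level logging: takes the agent itself.
    file_ops.log_operation("write", path, agent=agent, checksum=checksum)
    return True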
@@ -2,6 +2,7 @@ import pytest
from git.exc import GitCommandError
from git.repo.base import Repo
+ from autogpt.agent.agent import Agent
from autogpt.commands.git_operations import clone_repository

@@ -10,7 +11,7 @@ def mock_clone_from(mocker):
return mocker.patch.object(Repo, "clone_from")

- def test_clone_auto_gpt_repository(workspace, mock_clone_from, config):
+ def test_clone_auto_gpt_repository(workspace, mock_clone_from, agent: Agent):
mock_clone_from.return_value = None

repo = "github.com/Significant-Gravitas/Auto-GPT.git"

@@ -20,16 +21,16 @@ def test_clone_auto_gpt_repository(workspace, mock_clone_from, config):

expected_output = f"Cloned {url} to {clone_path}"

- clone_result = clone_repository(url=url, clone_path=clone_path, config=config)
+ clone_result = clone_repository(url=url, clone_path=clone_path, agent=agent)

assert clone_result == expected_output
mock_clone_from.assert_called_once_with(
- url=f"{scheme}{config.github_username}:{config.github_api_key}@{repo}",
+ url=f"{scheme}{agent.config.github_username}:{agent.config.github_api_key}@{repo}",
to_path=clone_path,
)

- def test_clone_repository_error(workspace, mock_clone_from, config):
+ def test_clone_repository_error(workspace, mock_clone_from, agent: Agent):
url = "https://github.com/this-repository/does-not-exist.git"
clone_path = str(workspace.get_path("does-not-exist"))

@@ -37,6 +38,6 @@ def test_clone_repository_error(workspace, mock_clone_from, config):
"clone", "fatal: repository not found", ""
)

- result = clone_repository(url=url, clone_path=clone_path, config=config)
+ result = clone_repository(url=url, clone_path=clone_path, agent=agent)

assert "Error: " in result
@@ -3,6 +3,7 @@ import json
import pytest
from googleapiclient.errors import HttpError
+ from autogpt.agent.agent import Agent
from autogpt.commands.google_search import (
google_official_search,
google_search,

@@ -39,13 +40,13 @@ def test_safe_google_results_invalid_input():
],
)
def test_google_search(
- query, num_results, expected_output, return_value, mocker, config
+ query, num_results, expected_output, return_value, mocker, agent: Agent
):
mock_ddg = mocker.Mock()
mock_ddg.return_value = return_value

mocker.patch("autogpt.commands.google_search.DDGS.text", mock_ddg)
- actual_output = google_search(query, config, num_results=num_results)
+ actual_output = google_search(query, agent=agent, num_results=num_results)
expected_output = safe_google_results(expected_output)
assert actual_output == expected_output

@@ -79,10 +80,15 @@ def mock_googleapiclient(mocker):
],
)
def test_google_official_search(
- query, num_results, expected_output, search_results, mock_googleapiclient, config
+ query,
+ num_results,
+ expected_output,
+ search_results,
+ mock_googleapiclient,
+ agent: Agent,
):
mock_googleapiclient.return_value = search_results
- actual_output = google_official_search(query, config, num_results=num_results)
+ actual_output = google_official_search(query, agent=agent, num_results=num_results)
assert actual_output == safe_google_results(expected_output)

@@ -113,7 +119,7 @@ def test_google_official_search_errors(
mock_googleapiclient,
http_code,
error_msg,
- config,
+ agent: Agent,
):
class resp:
def __init__(self, _status, _reason):

@@ -130,5 +136,5 @@ def test_google_official_search_errors(
)

mock_googleapiclient.side_effect = error
- actual_output = google_official_search(query, config, num_results=num_results)
+ actual_output = google_official_search(query, agent=agent, num_results=num_results)
assert actual_output == safe_google_results(expected_output)
@@ -2,11 +2,11 @@ from unittest.mock import MagicMock

from pytest_mock import MockerFixture

+ from autogpt.agent.agent import Agent
from autogpt.app import list_agents, start_agent
- from autogpt.config import Config

- def test_make_agent(config: Config, mocker: MockerFixture) -> None:
+ def test_make_agent(agent: Agent, mocker: MockerFixture) -> None:
"""Test that an agent can be created"""
mock = mocker.patch("openai.ChatCompletion.create")

@@ -16,9 +16,9 @@ def test_make_agent(config: Config, mocker: MockerFixture) -> None:
response.usage.prompt_tokens = 1
response.usage.completion_tokens = 1
mock.return_value = response
- start_agent("Test Agent", "chat", "Hello, how are you?", config, "gpt-3.5-turbo")
- agents = list_agents(config)
+ start_agent("Test Agent", "chat", "Hello, how are you?", agent, "gpt-3.5-turbo")
+ agents = list_agents(agent)
assert "List of agents:\n0: chat" == agents
- start_agent("Test Agent 2", "write", "Hello, how are you?", config, "gpt-3.5-turbo")
- agents = list_agents(config)
+ start_agent("Test Agent 2", "write", "Hello, how are you?", agent, "gpt-3.5-turbo")
+ agents = list_agents(agent.config)
assert "List of agents:\n0: chat\n1: write" == agents