diff --git a/.env.template b/.env.template index 7a4a22bb..4c079e00 100644 --- a/.env.template +++ b/.env.template @@ -30,7 +30,6 @@ ## autogpt.commands.google_search ## autogpt.commands.image_gen ## autogpt.commands.improve_code -## autogpt.commands.twitter ## autogpt.commands.web_selenium ## autogpt.commands.write_tests ## autogpt.app diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index 8c7a5456..93d31853 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -274,6 +274,7 @@ class Agent: command_name, arguments, self.config.prompt_generator, + config=cfg, ) result = f"Command {command_name} returned: " f"{command_result}" diff --git a/autogpt/app.py b/autogpt/app.py index da67c35f..985bf0f8 100644 --- a/autogpt/app.py +++ b/autogpt/app.py @@ -6,15 +6,11 @@ from autogpt.agent.agent_manager import AgentManager from autogpt.commands.command import CommandRegistry, command from autogpt.commands.web_requests import scrape_links, scrape_text from autogpt.config import Config -from autogpt.logs import logger -from autogpt.memory.vector import get_memory from autogpt.processing.text import summarize_text from autogpt.prompts.generator import PromptGenerator from autogpt.speech import say_text from autogpt.url_utils.validators import validate_url -CFG = Config() - def is_valid_int(value: str) -> bool: """Check if the value is a valid integer @@ -93,6 +89,7 @@ def execute_command( command_name: str, arguments, prompt: PromptGenerator, + config: Config, ): """Execute the command and return the result @@ -108,7 +105,7 @@ def execute_command( # If the command is found, call it with the provided arguments if cmd: - return cmd(**arguments) + return cmd(**arguments, config=config) # TODO: Remove commands below after they are moved to the command registry. command_name = map_command_synonyms(command_name.lower()) @@ -135,7 +132,7 @@ def execute_command( "get_text_summary", "Get text summary", '"url": "", "question": ""' ) @validate_url -def get_text_summary(url: str, question: str) -> str: +def get_text_summary(url: str, question: str, config: Config) -> str: """Get the text summary of a webpage Args: @@ -153,7 +150,7 @@ def get_text_summary(url: str, question: str) -> str: @command("get_hyperlinks", "Get hyperlinks", '"url": ""') @validate_url -def get_hyperlinks(url: str) -> Union[str, List[str]]: +def get_hyperlinks(url: str, config: Config) -> Union[str, List[str]]: """Get all hyperlinks on a webpage Args: @@ -162,7 +159,7 @@ def get_hyperlinks(url: str) -> Union[str, List[str]]: Returns: str or list: The hyperlinks on the page """ - return scrape_links(url) + return scrape_links(url, config) @command( @@ -170,7 +167,7 @@ def get_hyperlinks(url: str) -> Union[str, List[str]]: "Start GPT Agent", '"name": "", "task": "", "prompt": ""', ) -def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) -> str: +def start_agent(name: str, task: str, prompt: str, config: Config, model=None) -> str: """Start an agent with a given name, task, and prompt Args: @@ -191,11 +188,11 @@ def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) -> agent_intro = f"{voice_name} here, Reporting for duty!" # Create agent - if CFG.speak_mode: + if config.speak_mode: say_text(agent_intro, 1) key, ack = agent_manager.create_agent(task, first_message, model) - if CFG.speak_mode: + if config.speak_mode: say_text(f"Hello {voice_name}. Your task is as follows. 
{task}.") # Assign task (prompt), get response @@ -205,7 +202,7 @@ def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) -> @command("message_agent", "Message GPT Agent", '"key": "", "message": ""') -def message_agent(key: str, message: str) -> str: +def message_agent(key: str, message: str, config: Config) -> str: """Message an agent with a given key and message""" # Check if the key is a valid integer if is_valid_int(key): @@ -214,13 +211,13 @@ def message_agent(key: str, message: str) -> str: return "Invalid key, must be an integer." # Speak response - if CFG.speak_mode: + if config.speak_mode: say_text(agent_response, 1) return agent_response -@command("list_agents", "List GPT Agents", "") -def list_agents() -> str: +@command("list_agents", "List GPT Agents", "() -> str") +def list_agents(config: Config) -> str: """List all agents Returns: @@ -232,7 +229,7 @@ def list_agents() -> str: @command("delete_agent", "Delete GPT Agent", '"key": ""') -def delete_agent(key: str) -> str: +def delete_agent(key: str, config: Config) -> str: """Delete an agent with a given key Args: diff --git a/autogpt/commands/analyze_code.py b/autogpt/commands/analyze_code.py index b2af33e2..4de68334 100644 --- a/autogpt/commands/analyze_code.py +++ b/autogpt/commands/analyze_code.py @@ -1,16 +1,21 @@ """Code evaluation module.""" from __future__ import annotations +from typing import TYPE_CHECKING + from autogpt.commands.command import command from autogpt.llm.utils import call_ai_function +if TYPE_CHECKING: + from autogpt.config import Config + @command( "analyze_code", "Analyze Code", '"code": ""', ) -def analyze_code(code: str) -> list[str]: +def analyze_code(code: str, config: Config) -> list[str]: """ A function that takes in a string and returns a response from create chat completion api call. @@ -28,4 +33,4 @@ def analyze_code(code: str) -> list[str]: "Analyzes the given code and returns a list of suggestions for improvements." ) - return call_ai_function(function_string, args, description_string) + return call_ai_function(function_string, args, description_string, config=config) diff --git a/autogpt/commands/audio_text.py b/autogpt/commands/audio_text.py index 0a8640cf..ba4fb347 100644 --- a/autogpt/commands/audio_text.py +++ b/autogpt/commands/audio_text.py @@ -1,22 +1,25 @@ """Commands for converting audio to text.""" import json +from typing import TYPE_CHECKING import requests from autogpt.commands.command import command from autogpt.config import Config -CFG = Config() +if TYPE_CHECKING: + from autogpt.config import Config @command( "read_audio_from_file", "Convert Audio to text", '"filename": ""', - CFG.huggingface_audio_to_text_model, - "Configure huggingface_audio_to_text_model.", + lambda config: config.huggingface_audio_to_text_model + and config.huggingface_api_token, + "Configure huggingface_audio_to_text_model and Hugging Face api token.", ) -def read_audio_from_file(filename: str) -> str: +def read_audio_from_file(filename: str, config: Config) -> str: """ Convert audio to text. @@ -28,10 +31,10 @@ def read_audio_from_file(filename: str) -> str: """ with open(filename, "rb") as audio_file: audio = audio_file.read() - return read_audio(audio) + return read_audio(audio, config) -def read_audio(audio: bytes) -> str: +def read_audio(audio: bytes, config: Config) -> str: """ Convert audio to text. 
@@ -41,9 +44,9 @@ def read_audio(audio: bytes) -> str:
     Returns:
         str: The text from the audio
     """
-    model = CFG.huggingface_audio_to_text_model
+    model = config.huggingface_audio_to_text_model
     api_url = f"https://api-inference.huggingface.co/models/{model}"
-    api_token = CFG.huggingface_api_token
+    api_token = config.huggingface_api_token
     headers = {"Authorization": f"Bearer {api_token}"}
 
     if api_token is None:
diff --git a/autogpt/commands/command.py b/autogpt/commands/command.py
index 174a691c..e99a68c0 100644
--- a/autogpt/commands/command.py
+++ b/autogpt/commands/command.py
@@ -1,8 +1,9 @@
 import functools
 import importlib
 import inspect
-from typing import Any, Callable, Optional
+from typing import TYPE_CHECKING, Any, Callable, Optional
 
+from autogpt.config import Config
 from autogpt.logs import logger
 
 # Unique identifier for auto-gpt commands
@@ -24,19 +25,23 @@ class Command:
         description: str,
         method: Callable[..., Any],
         signature: str = "",
-        enabled: bool = True,
+        enabled: bool | Callable[[Config], bool] = True,
         disabled_reason: Optional[str] = None,
     ):
         self.name = name
         self.description = description
         self.method = method
-        self.signature = signature if signature else str(inspect.signature(self.method))
+        self.signature = signature
         self.enabled = enabled
         self.disabled_reason = disabled_reason
 
     def __call__(self, *args, **kwargs) -> Any:
+        if "config" in kwargs and callable(self.enabled):
+            self.enabled = self.enabled(kwargs["config"])
         if not self.enabled:
-            return f"Command '{self.name}' is disabled: {self.disabled_reason}"
+            if self.disabled_reason:
+                return f"Command '{self.name}' is disabled: {self.disabled_reason}"
+            return f"Command '{self.name}' is disabled"
         return self.method(*args, **kwargs)
 
     def __str__(self) -> str:
@@ -133,12 +138,17 @@ class CommandRegistry:
 def command(
     name: str,
     description: str,
-    signature: str = "",
-    enabled: bool = True,
+    signature: str,
+    enabled: bool | Callable[[Config], bool] = True,
     disabled_reason: Optional[str] = None,
 ) -> Callable[..., Any]:
     """The command decorator is used to create Command objects from ordinary functions."""
 
+    # TODO: Remove this in favor of better command management
+    CFG = Config()
+
+    if callable(enabled):
+        enabled = enabled(CFG)
     if not enabled:
         if disabled_reason is not None:
             logger.debug(f"Command '{name}' is disabled: {disabled_reason}")
diff --git a/autogpt/commands/execute_code.py b/autogpt/commands/execute_code.py
index 8d14c17f..e8ef6551 100644
--- a/autogpt/commands/execute_code.py
+++ b/autogpt/commands/execute_code.py
@@ -2,6 +2,7 @@
 import os
 import subprocess
 from pathlib import Path
+from typing import TYPE_CHECKING
 
 import docker
 from docker.errors import ImageNotFound
@@ -10,11 +11,9 @@
 from autogpt.commands.command import command
 from autogpt.config import Config
 from autogpt.logs import logger
 
-CFG = Config()
-
 
 @command("execute_python_file", "Execute Python File", '"filename": ""')
-def execute_python_file(filename: str) -> str:
+def execute_python_file(filename: str, config: Config) -> str:
     """Execute a Python file in a Docker container and return the output
 
     Args:
@@ -65,9 +64,9 @@
             logger.info(status)
         container = client.containers.run(
             image_name,
-            ["python", str(Path(filename).relative_to(CFG.workspace_path))],
+            ["python", str(Path(filename).relative_to(config.workspace_path))],
             volumes={
-                CFG.workspace_path: {
+                config.workspace_path: {
                     "bind": "/workspace",
                     "mode": "ro",
                 }
@@ -97,7 +96,7 @@ def execute_python_file(filename: str) -> str:
return f"Error: {str(e)}" -def validate_command(command: str) -> bool: +def validate_command(command: str, config: Config) -> bool: """Validate a command to ensure it is allowed Args: @@ -111,13 +110,13 @@ def validate_command(command: str) -> bool: if not tokens: return False - if CFG.deny_commands and tokens[0] not in CFG.deny_commands: + if config.deny_commands and tokens[0] not in config.deny_commands: return False - for keyword in CFG.allow_commands: + for keyword in config.allow_commands: if keyword in tokens: return True - if CFG.allow_commands: + if config.allow_commands: return False return True @@ -127,12 +126,12 @@ def validate_command(command: str) -> bool: "execute_shell", "Execute Shell Command, non-interactive commands only", '"command_line": ""', - CFG.execute_local_commands, + lambda cfg: cfg.execute_local_commands, "You are not allowed to run local shell commands. To execute" " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' " "in your config file: .env - do not attempt to bypass the restriction.", ) -def execute_shell(command_line: str) -> str: +def execute_shell(command_line: str, config: Config) -> str: """Execute a shell command and return the output Args: @@ -141,14 +140,14 @@ def execute_shell(command_line: str) -> str: Returns: str: The output of the command """ - if not validate_command(command_line): + if not validate_command(command_line, config): logger.info(f"Command '{command_line}' not allowed") return "Error: This Shell Command is not allowed." current_dir = Path.cwd() # Change dir into workspace if necessary - if not current_dir.is_relative_to(CFG.workspace_path): - os.chdir(CFG.workspace_path) + if not current_dir.is_relative_to(config.workspace_path): + os.chdir(config.workspace_path) logger.info( f"Executing command '{command_line}' in working directory '{os.getcwd()}'" @@ -167,12 +166,12 @@ def execute_shell(command_line: str) -> str: "execute_shell_popen", "Execute Shell Command, non-interactive commands only", '"command_line": ""', - CFG.execute_local_commands, + lambda config: config.execute_local_commands, "You are not allowed to run local shell commands. To execute" " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' " "in your config. Do not attempt to bypass the restriction.", ) -def execute_shell_popen(command_line) -> str: +def execute_shell_popen(command_line, config: Config) -> str: """Execute a shell command with Popen and returns an english description of the event and the process id @@ -182,14 +181,14 @@ def execute_shell_popen(command_line) -> str: Returns: str: Description of the fact that the process started and its id """ - if not validate_command(command_line): + if not validate_command(command_line, config): logger.info(f"Command '{command_line}' not allowed") return "Error: This Shell Command is not allowed." 
current_dir = os.getcwd() # Change dir into workspace if necessary - if CFG.workspace_path not in current_dir: - os.chdir(CFG.workspace_path) + if config.workspace_path not in current_dir: + os.chdir(config.workspace_path) logger.info( f"Executing command '{command_line}' in working directory '{os.getcwd()}'" diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py index cf0325e2..7205b302 100644 --- a/autogpt/commands/file_operations.py +++ b/autogpt/commands/file_operations.py @@ -4,7 +4,7 @@ from __future__ import annotations import hashlib import os import os.path -from typing import Generator, Literal +from typing import TYPE_CHECKING, Generator, Literal import charset_normalizer import requests @@ -13,13 +13,14 @@ from requests.adapters import HTTPAdapter, Retry from autogpt.commands.command import command from autogpt.commands.file_operations_utils import read_textual_file -from autogpt.config import Config from autogpt.logs import logger from autogpt.memory.vector import MemoryItem, VectorMemory from autogpt.spinner import Spinner from autogpt.utils import readable_file_size -CFG = Config() +if TYPE_CHECKING: + from autogpt.config import Config + Operation = Literal["write", "append", "delete"] @@ -60,7 +61,7 @@ def operations_from_log( def file_operations_state(log_path: str) -> dict[str, str]: """Iterates over the operations log and returns the expected state. - Parses a log file at CFG.file_logger_path to construct a dictionary that maps + Parses a log file at config.file_logger_path to construct a dictionary that maps each file path written or appended to its checksum. Deleted files are removed from the dictionary. @@ -68,7 +69,7 @@ def file_operations_state(log_path: str) -> dict[str, str]: A dictionary mapping file paths to their checksums. Raises: - FileNotFoundError: If CFG.file_logger_path is not found. + FileNotFoundError: If config.file_logger_path is not found. ValueError: If the log file content is not in the expected format. 
""" state = {} @@ -81,7 +82,7 @@ def file_operations_state(log_path: str) -> dict[str, str]: def is_duplicate_operation( - operation: Operation, filename: str, checksum: str | None = None + operation: Operation, filename: str, config: Config, checksum: str | None = None ) -> bool: """Check if the operation has already been performed @@ -93,7 +94,7 @@ def is_duplicate_operation( Returns: True if the operation has already been performed on the file """ - state = file_operations_state(CFG.file_logger_path) + state = file_operations_state(config.file_logger_path) if operation == "delete" and filename not in state: return True if operation == "write" and state.get(filename) == checksum: @@ -101,7 +102,9 @@ def is_duplicate_operation( return False -def log_operation(operation: str, filename: str, checksum: str | None = None) -> None: +def log_operation( + operation: str, filename: str, config: Config, checksum: str | None = None +) -> None: """Log the file operation to the file_logger.txt Args: @@ -113,7 +116,7 @@ def log_operation(operation: str, filename: str, checksum: str | None = None) -> if checksum is not None: log_entry += f" #{checksum}" logger.debug(f"Logging file operation: {log_entry}") - append_to_file(CFG.file_logger_path, f"{log_entry}\n", should_log=False) + append_to_file(config.file_logger_path, f"{log_entry}\n", config, should_log=False) def split_file( @@ -149,7 +152,7 @@ def split_file( @command("read_file", "Read a file", '"filename": ""') -def read_file(filename: str) -> str: +def read_file(filename: str, config: Config) -> str: """Read a file and return the contents Args: @@ -198,7 +201,7 @@ def ingest_file( @command("write_to_file", "Write to file", '"filename": "", "text": ""') -def write_to_file(filename: str, text: str) -> str: +def write_to_file(filename: str, text: str, config: Config) -> str: """Write text to a file Args: @@ -209,14 +212,14 @@ def write_to_file(filename: str, text: str) -> str: str: A message indicating success or failure """ checksum = text_checksum(text) - if is_duplicate_operation("write", filename, checksum): + if is_duplicate_operation("write", filename, config, checksum): return "Error: File has already been updated." try: directory = os.path.dirname(filename) os.makedirs(directory, exist_ok=True) with open(filename, "w", encoding="utf-8") as f: f.write(text) - log_operation("write", filename, checksum) + log_operation("write", filename, config, checksum) return "File written to successfully." except Exception as err: return f"Error: {err}" @@ -225,7 +228,9 @@ def write_to_file(filename: str, text: str) -> str: @command( "append_to_file", "Append to file", '"filename": "", "text": ""' ) -def append_to_file(filename: str, text: str, should_log: bool = True) -> str: +def append_to_file( + filename: str, text: str, config: Config, should_log: bool = True +) -> str: """Append text to a file Args: @@ -245,7 +250,7 @@ def append_to_file(filename: str, text: str, should_log: bool = True) -> str: if should_log: with open(filename, "r", encoding="utf-8") as f: checksum = text_checksum(f.read()) - log_operation("append", filename, checksum=checksum) + log_operation("append", filename, config, checksum=checksum) return "Text appended successfully." 
except Exception as err: @@ -253,7 +258,7 @@ def append_to_file(filename: str, text: str, should_log: bool = True) -> str: @command("delete_file", "Delete file", '"filename": ""') -def delete_file(filename: str) -> str: +def delete_file(filename: str, config: Config) -> str: """Delete a file Args: @@ -262,18 +267,18 @@ def delete_file(filename: str) -> str: Returns: str: A message indicating success or failure """ - if is_duplicate_operation("delete", filename): + if is_duplicate_operation("delete", filename, config): return "Error: File has already been deleted." try: os.remove(filename) - log_operation("delete", filename) + log_operation("delete", filename, config) return "File deleted successfully." except Exception as err: return f"Error: {err}" @command("list_files", "List Files in Directory", '"directory": ""') -def list_files(directory: str) -> list[str]: +def list_files(directory: str, config: Config) -> list[str]: """lists files in a directory recursively Args: @@ -289,7 +294,7 @@ def list_files(directory: str) -> list[str]: if file.startswith("."): continue relative_path = os.path.relpath( - os.path.join(root, file), CFG.workspace_path + os.path.join(root, file), config.workspace_path ) found_files.append(relative_path) @@ -300,10 +305,10 @@ def list_files(directory: str) -> list[str]: "download_file", "Download File", '"url": "", "filename": ""', - CFG.allow_downloads, + lambda config: config.allow_downloads, "Error: You do not have user authorization to download files locally.", ) -def download_file(url, filename): +def download_file(url, filename, config: Config): """Downloads a file Args: url (str): URL of the file to download diff --git a/autogpt/commands/file_operations_utils.py b/autogpt/commands/file_operations_utils.py index e408c6fa..7f3e418d 100644 --- a/autogpt/commands/file_operations_utils.py +++ b/autogpt/commands/file_operations_utils.py @@ -14,13 +14,13 @@ from autogpt.logs import logger class ParserStrategy: - def read(self, file_path: str): + def read(self, file_path: str) -> str: raise NotImplementedError # Basic text file reading class TXTParser(ParserStrategy): - def read(self, file_path): + def read(self, file_path: str) -> str: charset_match = charset_normalizer.from_path(file_path).best() logger.debug(f"Reading '{file_path}' with encoding '{charset_match.encoding}'") return str(charset_match) @@ -28,7 +28,7 @@ class TXTParser(ParserStrategy): # Reading text from binary file using pdf parser class PDFParser(ParserStrategy): - def read(self, file_path): + def read(self, file_path: str) -> str: parser = PyPDF2.PdfReader(file_path) text = "" for page_idx in range(len(parser.pages)): @@ -38,7 +38,7 @@ class PDFParser(ParserStrategy): # Reading text from binary file using docs parser class DOCXParser(ParserStrategy): - def read(self, file_path): + def read(self, file_path: str) -> str: doc_file = docx.Document(file_path) text = "" for para in doc_file.paragraphs: @@ -48,7 +48,7 @@ class DOCXParser(ParserStrategy): # Reading as dictionary and returning string format class JSONParser(ParserStrategy): - def read(self, file_path): + def read(self, file_path: str) -> str: with open(file_path, "r") as f: data = json.load(f) text = str(data) @@ -56,7 +56,7 @@ class JSONParser(ParserStrategy): class XMLParser(ParserStrategy): - def read(self, file_path): + def read(self, file_path: str) -> str: with open(file_path, "r") as f: soup = BeautifulSoup(f, "xml") text = soup.get_text() @@ -65,7 +65,7 @@ class XMLParser(ParserStrategy): # Reading as dictionary and returning 
string format class YAMLParser(ParserStrategy): - def read(self, file_path): + def read(self, file_path: str) -> str: with open(file_path, "r") as f: data = yaml.load(f, Loader=yaml.FullLoader) text = str(data) @@ -73,7 +73,7 @@ class YAMLParser(ParserStrategy): class HTMLParser(ParserStrategy): - def read(self, file_path): + def read(self, file_path: str) -> str: with open(file_path, "r") as f: soup = BeautifulSoup(f, "html.parser") text = soup.get_text() @@ -81,7 +81,7 @@ class HTMLParser(ParserStrategy): class MarkdownParser(ParserStrategy): - def read(self, file_path): + def read(self, file_path: str) -> str: with open(file_path, "r") as f: html = markdown.markdown(f.read()) text = "".join(BeautifulSoup(html, "html.parser").findAll(string=True)) @@ -89,7 +89,7 @@ class MarkdownParser(ParserStrategy): class LaTeXParser(ParserStrategy): - def read(self, file_path): + def read(self, file_path: str) -> str: with open(file_path, "r") as f: latex = f.read() text = LatexNodes2Text().latex_to_text(latex) @@ -101,11 +101,11 @@ class FileContext: self.parser = parser self.logger = logger - def set_parser(self, parser: ParserStrategy): + def set_parser(self, parser: ParserStrategy) -> None: self.logger.debug(f"Setting Context Parser to {parser}") self.parser = parser - def read_file(self, file_path): + def read_file(self, file_path) -> str: self.logger.debug(f"Reading file {file_path} with parser {self.parser}") return self.parser.read(file_path) @@ -144,7 +144,7 @@ def is_file_binary_fn(file_path: str): return False -def read_textual_file(file_path: str, logger: logs.Logger): +def read_textual_file(file_path: str, logger: logs.Logger) -> str: if not os.path.isfile(file_path): raise FileNotFoundError(f"{file_path} not found!") is_binary = is_file_binary_fn(file_path) diff --git a/autogpt/commands/git_operations.py b/autogpt/commands/git_operations.py index 22233108..c32a8cc3 100644 --- a/autogpt/commands/git_operations.py +++ b/autogpt/commands/git_operations.py @@ -1,22 +1,25 @@ """Git operations for autogpt""" +from typing import TYPE_CHECKING + from git.repo import Repo from autogpt.commands.command import command from autogpt.config import Config from autogpt.url_utils.validators import validate_url -CFG = Config() +if TYPE_CHECKING: + from autogpt.config import Config @command( "clone_repository", "Clone Repository", '"url": "", "clone_path": ""', - CFG.github_username and CFG.github_api_key, + lambda config: config.github_username and config.github_api_key, "Configure github_username and github_api_key.", ) @validate_url -def clone_repository(url: str, clone_path: str) -> str: +def clone_repository(url: str, clone_path: str, config: Config) -> str: """Clone a GitHub repository locally. Args: @@ -27,7 +30,9 @@ def clone_repository(url: str, clone_path: str) -> str: str: The result of the clone operation. 
""" split_url = url.split("//") - auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url) + auth_repo_url = f"//{config.github_username}:{config.github_api_key}@".join( + split_url + ) try: Repo.clone_from(url=auth_repo_url, to_path=clone_path) return f"""Cloned {url} to {clone_path}""" diff --git a/autogpt/commands/google_search.py b/autogpt/commands/google_search.py index 23abef7d..c01ec0a1 100644 --- a/autogpt/commands/google_search.py +++ b/autogpt/commands/google_search.py @@ -3,17 +3,23 @@ from __future__ import annotations import json from itertools import islice +from typing import TYPE_CHECKING from duckduckgo_search import DDGS from autogpt.commands.command import command -from autogpt.config import Config -CFG = Config() +if TYPE_CHECKING: + from autogpt.config import Config -@command("google", "Google Search", '"query": ""', not CFG.google_api_key) -def google_search(query: str, num_results: int = 8) -> str: +@command( + "google", + "Google Search", + '"query": ""', + lambda config: not config.google_api_key, +) +def google_search(query: str, config: Config, num_results: int = 8) -> str: """Return the results of a Google search Args: @@ -42,10 +48,12 @@ def google_search(query: str, num_results: int = 8) -> str: "google", "Google Search", '"query": ""', - bool(CFG.google_api_key) and bool(CFG.custom_search_engine_id), + lambda config: bool(config.google_api_key) and bool(config.custom_search_engine_id), "Configure google_api_key and custom_search_engine_id.", ) -def google_official_search(query: str, num_results: int = 8) -> str | list[str]: +def google_official_search( + query: str, config: Config, num_results: int = 8 +) -> str | list[str]: """Return the results of a Google search using the official Google API Args: @@ -61,8 +69,8 @@ def google_official_search(query: str, num_results: int = 8) -> str | list[str]: try: # Get the Google API key and Custom Search Engine ID from the config file - api_key = CFG.google_api_key - custom_search_engine_id = CFG.custom_search_engine_id + api_key = config.google_api_key + custom_search_engine_id = config.custom_search_engine_id # Initialize the Custom Search API service service = build("customsearch", "v1", developerKey=api_key) diff --git a/autogpt/commands/image_gen.py b/autogpt/commands/image_gen.py index 5326cd52..04d86564 100644 --- a/autogpt/commands/image_gen.py +++ b/autogpt/commands/image_gen.py @@ -4,6 +4,7 @@ import json import time import uuid from base64 import b64decode +from typing import TYPE_CHECKING import openai import requests @@ -13,11 +14,18 @@ from autogpt.commands.command import command from autogpt.config import Config from autogpt.logs import logger -CFG = Config() +if TYPE_CHECKING: + from autogpt.config import Config -@command("generate_image", "Generate Image", '"prompt": ""', CFG.image_provider) -def generate_image(prompt: str, size: int = 256) -> str: +@command( + "generate_image", + "Generate Image", + '"prompt": ""', + lambda config: config.image_provider, + "Requires a image provider to be set.", +) +def generate_image(prompt: str, config: Config, size: int = 256) -> str: """Generate an image from a prompt. 
Args: @@ -27,21 +35,21 @@ def generate_image(prompt: str, size: int = 256) -> str: Returns: str: The filename of the image """ - filename = f"{CFG.workspace_path}/{str(uuid.uuid4())}.jpg" + filename = f"{config.workspace_path}/{str(uuid.uuid4())}.jpg" # DALL-E - if CFG.image_provider == "dalle": - return generate_image_with_dalle(prompt, filename, size) + if config.image_provider == "dalle": + return generate_image_with_dalle(prompt, filename, size, config) # HuggingFace - elif CFG.image_provider == "huggingface": - return generate_image_with_hf(prompt, filename) + elif config.image_provider == "huggingface": + return generate_image_with_hf(prompt, filename, config) # SD WebUI - elif CFG.image_provider == "sdwebui": - return generate_image_with_sd_webui(prompt, filename, size) + elif config.image_provider == "sdwebui": + return generate_image_with_sd_webui(prompt, filename, config, size) return "No Image Provider Set" -def generate_image_with_hf(prompt: str, filename: str) -> str: +def generate_image_with_hf(prompt: str, filename: str, config: Config) -> str: """Generate an image with HuggingFace's API. Args: @@ -52,14 +60,14 @@ def generate_image_with_hf(prompt: str, filename: str) -> str: str: The filename of the image """ API_URL = ( - f"https://api-inference.huggingface.co/models/{CFG.huggingface_image_model}" + f"https://api-inference.huggingface.co/models/{config.huggingface_image_model}" ) - if CFG.huggingface_api_token is None: + if config.huggingface_api_token is None: raise ValueError( "You need to set your Hugging Face API token in the config file." ) headers = { - "Authorization": f"Bearer {CFG.huggingface_api_token}", + "Authorization": f"Bearer {config.huggingface_api_token}", "X-Use-Cache": "false", } @@ -101,7 +109,9 @@ def generate_image_with_hf(prompt: str, filename: str) -> str: return f"Error creating image." -def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str: +def generate_image_with_dalle( + prompt: str, filename: str, size: int, config: Config +) -> str: """Generate an image with DALL-E. 
Args: @@ -126,7 +136,7 @@ def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str: n=1, size=f"{size}x{size}", response_format="b64_json", - api_key=CFG.openai_api_key, + api_key=config.openai_api_key, ) logger.info(f"Image Generated for prompt:{prompt}") @@ -142,6 +152,7 @@ def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str: def generate_image_with_sd_webui( prompt: str, filename: str, + config: Config, size: int = 512, negative_prompt: str = "", extra: dict = {}, @@ -158,13 +169,13 @@ def generate_image_with_sd_webui( """ # Create a session and set the basic auth if needed s = requests.Session() - if CFG.sd_webui_auth: - username, password = CFG.sd_webui_auth.split(":") + if config.sd_webui_auth: + username, password = config.sd_webui_auth.split(":") s.auth = (username, password or "") # Generate the images response = requests.post( - f"{CFG.sd_webui_url}/sdapi/v1/txt2img", + f"{config.sd_webui_url}/sdapi/v1/txt2img", json={ "prompt": prompt, "negative_prompt": negative_prompt, diff --git a/autogpt/commands/improve_code.py b/autogpt/commands/improve_code.py index d2665f3f..60e517ef 100644 --- a/autogpt/commands/improve_code.py +++ b/autogpt/commands/improve_code.py @@ -1,17 +1,21 @@ from __future__ import annotations import json +from typing import TYPE_CHECKING from autogpt.commands.command import command from autogpt.llm.utils import call_ai_function +if TYPE_CHECKING: + from autogpt.config import Config + @command( "improve_code", "Get Improved Code", '"suggestions": "", "code": ""', ) -def improve_code(suggestions: list[str], code: str) -> str: +def improve_code(suggestions: list[str], code: str, config: Config) -> str: """ A function that takes in code and suggestions and returns a response from create chat completion api call. @@ -32,4 +36,4 @@ def improve_code(suggestions: list[str], code: str) -> str: " provided, making no other changes." 
) - return call_ai_function(function_string, args, description_string) + return call_ai_function(function_string, args, description_string, config=config) diff --git a/autogpt/commands/task_statuses.py b/autogpt/commands/task_statuses.py index 46c5b6c0..9f60209c 100644 --- a/autogpt/commands/task_statuses.py +++ b/autogpt/commands/task_statuses.py @@ -1,18 +1,21 @@ """Task Statuses module.""" from __future__ import annotations -from typing import NoReturn +from typing import TYPE_CHECKING, NoReturn from autogpt.commands.command import command from autogpt.logs import logger +if TYPE_CHECKING: + from autogpt.config import Config + @command( "task_complete", "Task Complete (Shutdown)", '"reason": ""', ) -def task_complete(reason: str) -> NoReturn: +def task_complete(reason: str, config: Config) -> NoReturn: """ A function that takes in a string and exits the program diff --git a/autogpt/commands/web_requests.py b/autogpt/commands/web_requests.py index 156ba103..d7de8dc9 100644 --- a/autogpt/commands/web_requests.py +++ b/autogpt/commands/web_requests.py @@ -9,15 +9,12 @@ from autogpt.config import Config from autogpt.processing.html import extract_hyperlinks, format_hyperlinks from autogpt.url_utils.validators import validate_url -CFG = Config() - session = requests.Session() -session.headers.update({"User-Agent": CFG.user_agent}) @validate_url def get_response( - url: str, timeout: int = 10 + url: str, config: Config, timeout: int = 10 ) -> tuple[None, str] | tuple[Response, None]: """Get the response from a URL @@ -33,6 +30,7 @@ def get_response( requests.exceptions.RequestException: If the HTTP request fails """ try: + session.headers.update({"User-Agent": config.user_agent}) response = session.get(url, timeout=timeout) # Check if the response contains an HTTP error @@ -50,7 +48,7 @@ def get_response( return None, f"Error: {str(re)}" -def scrape_text(url: str) -> str: +def scrape_text(url: str, config: Config) -> str: """Scrape text from a webpage Args: @@ -59,7 +57,7 @@ def scrape_text(url: str) -> str: Returns: str: The scraped text """ - response, error_message = get_response(url) + response, error_message = get_response(url, config) if error_message: return error_message if not response: @@ -78,7 +76,7 @@ def scrape_text(url: str) -> str: return text -def scrape_links(url: str) -> str | list[str]: +def scrape_links(url: str, config: Config) -> str | list[str]: """Scrape links from a webpage Args: @@ -87,7 +85,7 @@ def scrape_links(url: str) -> str | list[str]: Returns: str | list[str]: The scraped links """ - response, error_message = get_response(url) + response, error_message = get_response(url, config) if error_message: return error_message if not response: diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py index 72f849d0..da6dd35d 100644 --- a/autogpt/commands/web_selenium.py +++ b/autogpt/commands/web_selenium.py @@ -4,7 +4,7 @@ from __future__ import annotations import logging from pathlib import Path from sys import platform -from typing import Optional, Type +from typing import TYPE_CHECKING, Optional, Type from bs4 import BeautifulSoup from selenium.common.exceptions import WebDriverException @@ -28,17 +28,17 @@ from webdriver_manager.firefox import GeckoDriverManager from webdriver_manager.microsoft import EdgeChromiumDriverManager as EdgeDriverManager from autogpt.commands.command import command -from autogpt.config import Config from autogpt.logs import logger from autogpt.memory.vector import MemoryItem, NoMemory, get_memory from 
autogpt.processing.html import extract_hyperlinks, format_hyperlinks -from autogpt.processing.text import summarize_text from autogpt.url_utils.validators import validate_url +if TYPE_CHECKING: + from autogpt.config import Config + BrowserOptions = ChromeOptions | EdgeOptions | FirefoxOptions | SafariOptions FILE_DIR = Path(__file__).parent.parent -CFG = Config() @command( @@ -47,7 +47,7 @@ CFG = Config() '"url": "", "question": ""', ) @validate_url -def browse_website(url: str, question: str) -> str: +def browse_website(url: str, question: str, config: Config) -> str: """Browse a website and return the answer and links to the user Args: @@ -58,7 +58,7 @@ def browse_website(url: str, question: str) -> str: Tuple[str, WebDriver]: The answer and links to the user and the webdriver """ try: - driver, text = scrape_text_with_selenium(url) + driver, text = scrape_text_with_selenium(url, config) except WebDriverException as e: # These errors are often quite long and include lots of context. # Just grab the first line. @@ -66,7 +66,7 @@ def browse_website(url: str, question: str) -> str: return f"Error: {msg}" add_header(driver) - summary = summarize_memorize_webpage(url, text, question, driver) + summary = summarize_memorize_webpage(url, text, question, config, driver) links = scrape_links_with_selenium(driver, url) # Limit links to 5 @@ -76,7 +76,7 @@ def browse_website(url: str, question: str) -> str: return f"Answer gathered from website: {summary}\n\nLinks: {links}" -def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]: +def scrape_text_with_selenium(url: str, config: Config) -> tuple[WebDriver, str]: """Scrape text from a website using selenium Args: @@ -94,23 +94,23 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]: "safari": SafariOptions, } - options: BrowserOptions = options_available[CFG.selenium_web_browser]() + options: BrowserOptions = options_available[config.selenium_web_browser]() options.add_argument( "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36" ) - if CFG.selenium_web_browser == "firefox": - if CFG.selenium_headless: + if config.selenium_web_browser == "firefox": + if config.selenium_headless: options.headless = True options.add_argument("--disable-gpu") driver = FirefoxDriver( service=GeckoDriverService(GeckoDriverManager().install()), options=options ) - elif CFG.selenium_web_browser == "edge": + elif config.selenium_web_browser == "edge": driver = EdgeDriver( service=EdgeDriverService(EdgeDriverManager().install()), options=options ) - elif CFG.selenium_web_browser == "safari": + elif config.selenium_web_browser == "safari": # Requires a bit more setup on the users end # See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari driver = SafariDriver(options=options) @@ -120,7 +120,7 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]: options.add_argument("--remote-debugging-port=9222") options.add_argument("--no-sandbox") - if CFG.selenium_headless: + if config.selenium_headless: options.add_argument("--headless=new") options.add_argument("--disable-gpu") @@ -202,7 +202,11 @@ def add_header(driver: WebDriver) -> None: def summarize_memorize_webpage( - url: str, text: str, question: str, driver: Optional[WebDriver] = None + url: str, + text: str, + question: str, + config: Config, + driver: Optional[WebDriver] = None, ) -> str: """Summarize text using the OpenAI API @@ -221,7 +225,7 @@ def summarize_memorize_webpage( 
text_length = len(text) logger.info(f"Text length: {text_length} characters") - memory = get_memory(CFG) + memory = get_memory(config) new_memory = MemoryItem.from_webpage(text, url, question=question) memory.add(new_memory) diff --git a/autogpt/commands/write_tests.py b/autogpt/commands/write_tests.py index 1f4c4dbb..a63c265f 100644 --- a/autogpt/commands/write_tests.py +++ b/autogpt/commands/write_tests.py @@ -2,17 +2,21 @@ from __future__ import annotations import json +from typing import TYPE_CHECKING from autogpt.commands.command import command from autogpt.llm.utils import call_ai_function +if TYPE_CHECKING: + from autogpt.config import Config + @command( "write_tests", "Write Tests", '"code": "", "focus": ""', ) -def write_tests(code: str, focus: list[str]) -> str: +def write_tests(code: str, focus: list[str], config: Config) -> str: """ A function that takes in code and focus topics and returns a response from create chat completion api call. @@ -34,4 +38,4 @@ def write_tests(code: str, focus: list[str]) -> str: " specific areas if required." ) - return call_ai_function(function_string, args, description_string) + return call_ai_function(function_string, args, description_string, config=config) diff --git a/autogpt/config/ai_config.py b/autogpt/config/ai_config.py index 3872463f..1a526832 100644 --- a/autogpt/config/ai_config.py +++ b/autogpt/config/ai_config.py @@ -7,13 +7,14 @@ from __future__ import annotations import os import platform from pathlib import Path -from typing import Optional +from typing import TYPE_CHECKING, Optional import distro import yaml -from autogpt.commands.command import CommandRegistry -from autogpt.prompts.generator import PromptGenerator +if TYPE_CHECKING: + from autogpt.commands.command import CommandRegistry + from autogpt.prompts.generator import PromptGenerator # Soon this will go in a folder where it remembers more stuff about the run(s) SAVE_FILE = str(Path(os.getcwd()) / "ai_settings.yaml") diff --git a/autogpt/configurator.py b/autogpt/configurator.py index f156f2c7..6b855fe3 100644 --- a/autogpt/configurator.py +++ b/autogpt/configurator.py @@ -1,19 +1,22 @@ """Configurator module.""" from __future__ import annotations +from typing import TYPE_CHECKING + import click from colorama import Back, Fore, Style from autogpt import utils -from autogpt.config import Config from autogpt.llm.utils import check_model from autogpt.logs import logger from autogpt.memory.vector import get_supported_memory_backends -CFG = Config() +if TYPE_CHECKING: + from autogpt.config import Config def create_config( + config: Config, continuous: bool, continuous_limit: int, ai_settings_file: str, @@ -45,15 +48,15 @@ def create_config( allow_downloads (bool): Whether to allow Auto-GPT to download files natively skips_news (bool): Whether to suppress the output of latest news on startup """ - CFG.set_debug_mode(False) - CFG.set_continuous_mode(False) - CFG.set_speak_mode(False) - CFG.set_fast_llm_model(check_model(CFG.fast_llm_model, "fast_llm_model")) - CFG.set_smart_llm_model(check_model(CFG.smart_llm_model, "smart_llm_model")) + config.set_debug_mode(False) + config.set_continuous_mode(False) + config.set_speak_mode(False) + config.set_fast_llm_model(check_model(config.fast_llm_model, "fast_llm_model")) + config.set_smart_llm_model(check_model(config.smart_llm_model, "smart_llm_model")) if debug: logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED") - CFG.set_debug_mode(True) + config.set_debug_mode(True) if continuous: logger.typewriter_log("Continuous Mode: ", 
Fore.RED, "ENABLED")
@@ -64,13 +67,13 @@ def create_config(
             " cause your AI to run forever or carry out actions you would not usually"
             " authorise. Use at your own risk.",
         )
-        CFG.set_continuous_mode(True)
+        config.set_continuous_mode(True)
 
         if continuous_limit:
             logger.typewriter_log(
                 "Continuous Limit: ", Fore.GREEN, f"{continuous_limit}"
             )
-            CFG.set_continuous_limit(continuous_limit)
+            config.set_continuous_limit(continuous_limit)
 
     # Check if continuous limit is used without continuous mode
     if continuous_limit and not continuous:
@@ -78,15 +81,15 @@
 
     if speak:
         logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED")
-        CFG.set_speak_mode(True)
+        config.set_speak_mode(True)
 
     if gpt3only:
         logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
-        CFG.set_smart_llm_model(CFG.fast_llm_model)
+        config.set_smart_llm_model(config.fast_llm_model)
 
     if gpt4only:
         logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
-        CFG.set_fast_llm_model(CFG.smart_llm_model)
+        config.set_fast_llm_model(config.smart_llm_model)
 
     if memory_type:
         supported_memory = get_supported_memory_backends()
@@ -97,13 +100,13 @@
                 Fore.RED,
                 f"{supported_memory}",
             )
-            logger.typewriter_log("Defaulting to: ", Fore.YELLOW, CFG.memory_backend)
+            logger.typewriter_log("Defaulting to: ", Fore.YELLOW, config.memory_backend)
         else:
-            CFG.memory_backend = chosen
+            config.memory_backend = chosen
 
     if skip_reprompt:
         logger.typewriter_log("Skip Re-prompt: ", Fore.GREEN, "ENABLED")
-        CFG.skip_reprompt = True
+        config.skip_reprompt = True
 
     if ai_settings_file:
         file = ai_settings_file
@@ -116,8 +119,8 @@
             exit(1)
 
         logger.typewriter_log("Using AI Settings File:", Fore.GREEN, file)
-        CFG.ai_settings_file = file
-        CFG.skip_reprompt = True
+        config.ai_settings_file = file
+        config.skip_reprompt = True
 
     if prompt_settings_file:
         file = prompt_settings_file
@@ -130,10 +133,10 @@
             exit(1)
 
         logger.typewriter_log("Using Prompt Settings File:", Fore.GREEN, file)
-        CFG.prompt_settings_file = file
+        config.prompt_settings_file = file
 
     if browser_name:
-        CFG.selenium_web_browser = browser_name
+        config.selenium_web_browser = browser_name
 
     if allow_downloads:
         logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED")
@@ -148,7 +151,7 @@
             Fore.YELLOW,
             f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}",
         )
-        CFG.allow_downloads = True
+        config.allow_downloads = True
 
     if skip_news:
-        CFG.skip_news = True
+        config.skip_news = True
diff --git a/autogpt/llm/utils/__init__.py b/autogpt/llm/utils/__init__.py
index b6bcc1d3..47e15896 100644
--- a/autogpt/llm/utils/__init__.py
+++ b/autogpt/llm/utils/__init__.py
@@ -109,7 +109,11 @@ def retry_openai_api(
 
 def call_ai_function(
-    function: str, args: list, description: str, model: str | None = None
+    function: str,
+    args: list,
+    description: str,
+    model: str | None = None,
+    config: Config | None = None,
 ) -> str:
     """Call an AI function
 
@@ -125,9 +129,8 @@
     Returns:
         str: The response from the function
     """
-    cfg = Config()
     if model is None:
-        model = cfg.smart_llm_model
+        model = config.smart_llm_model
     # For each arg, if any are None, convert to "None":
     args = [str(arg) if arg is not None else "None" for arg in args]
     # parse args to comma separated string
diff --git a/autogpt/main.py b/autogpt/main.py
index 03f685db..39bbf8b5 100644
--- a/autogpt/main.py
+++ b/autogpt/main.py
@@ -49,6 +49,7 @@ def run_auto_gpt(
     check_openai_api_key()
 
     create_config(
cfg, continuous, continuous_limit, ai_settings, diff --git a/requirements.txt b/requirements.txt index 53785f9a..542f9b50 100644 --- a/requirements.txt +++ b/requirements.txt @@ -40,6 +40,10 @@ auto-gpt-plugin-template @ git+https://github.com/Significant-Gravitas/Auto-GPT- mkdocs pymdown-extensions mypy +types-Markdown +types-beautifulsoup4 +types-colorama +types-Pillow # OpenAI and Generic plugins import openapi-python-client==0.13.4 diff --git a/tests/integration/challenges/basic_abilities/cassettes/test_write_file/test_write_file.yaml b/tests/integration/challenges/basic_abilities/cassettes/test_write_file/test_write_file.yaml index c45b71c8..18157e73 100644 --- a/tests/integration/challenges/basic_abilities/cassettes/test_write_file/test_write_file.yaml +++ b/tests/integration/challenges/basic_abilities/cassettes/test_write_file/test_write_file.yaml @@ -1098,6 +1098,399 @@ interactions: status: code: 200 message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are write_to_file-GPT, an AI designed to use the write_to_file command to write + ''Hello World'' into a file named \"hello_world.txt\" and then use the task_complete + command to complete the task.\nYour decisions must always be made independently + without seeking user assistance. Play to your strengths as an LLM and pursue + simple strategies with no legal complications.\n\nGOALS:\n\n1. Use the write_to_file + command to write ''Hello World'' into a file named \"hello_world.txt\".\n2. + Use the task_complete command to complete the task.\n3. Do not use any other + commands.\n\n\nConstraints:\n1. ~4000 word limit for short term memory. Your + short term memory is short, so immediately save important information to files.\n2. + If you are unsure how you previously did something or want to recall past events, + thinking about similar events will help you remember.\n3. No user assistance\n4. + Exclusively use the commands listed below e.g. command_name\n\nCommands:\n1. + append_to_file: Append to file, args: \"filename\": \"\", \"text\": + \"\"\n2. delete_file: Delete file, args: \"filename\": \"\"\n3. + download_file: Download File, args: \"url\": \"\", \"filename\": \"\"\n4. + list_files: List Files in Directory, args: \"directory\": \"\"\n5. + read_file: Read a file, args: \"filename\": \"\"\n6. write_to_file: + Write to file, args: \"filename\": \"\", \"text\": \"\"\n7. + delete_agent: Delete GPT Agent, args: \"key\": \"\"\n8. get_hyperlinks: + Get hyperlinks, args: \"url\": \"\"\n9. get_text_summary: Get text summary, + args: \"url\": \"\", \"question\": \"\"\n10. list_agents: List + GPT Agents, args: () -> str\n11. message_agent: Message GPT Agent, args: \"key\": + \"\", \"message\": \"\"\n12. start_agent: Start GPT Agent, args: + \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n13. + task_complete: Task Complete (Shutdown), args: \"reason\": \"\"\n\nResources:\n1. + Internet access for searches and information gathering.\n2. Long Term memory + management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File + output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your + actions to ensure you are performing to the best of your abilities.\n2. Constructively + self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions + and strategies to refine your approach.\n4. Every command has a cost, so be + smart and efficient. Aim to complete tasks in the least number of steps.\n5. 
+ Write all code to a file.\n\nYou should only respond in JSON format as described + below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": + \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- + long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": + \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": + \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} + \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", + "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": + "user", "content": "Determine which next command to use, and respond using the + format specified above:"}], "temperature": 0, "max_tokens": 0}' + headers: + AGENT-MODE: + - AAAAAAAAAAAAAAAAAAAAAMLheAAaAAAA0%2BuSeid%2BULvsea4JtiGRiSDSJSI%3DEUifiRBkKG5E2XzMDjRfl76ZC9Ub0wnz4XsNiRVBChTYbJcE3F + AGENT-TYPE: + - Auto-GPT-2023-X-TYPE + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '3481' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"error\": {\n \"message\": \"That model is currently overloaded + with other requests. You can retry your request, or contact us through our + help center at help.openai.com if the error persists. (Please include the + request ID 1d08452a41f33bf54874f93bf2a716de in your message.)\",\n \"type\": + \"server_error\",\n \"param\": null,\n \"code\": null\n }\n}\n" + headers: + Access-Control-Allow-Origin: + - '*' + Alt-Svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + Cf-Cache-Status: + - DYNAMIC + Cf-Ray: + - 7cca53eb28d702ac-ORD + Content-Length: + - '349' + Content-Type: + - application/json + Date: + - Thu, 25 May 2023 02:25:56 GMT + Function-Execution-Id: + - rvr2i8vhq9vo + Openai-Model: + - gpt-3.5-turbo-0301 + Openai-Organization: + - significant-gravitas + Openai-Processing-Ms: + - '30015' + Openai-Version: + - '2020-10-01' + Server: + - Google Frontend + Strict-Transport-Security: + - max-age=15724800; includeSubDomains + X-Cloud-Trace-Context: + - 69c7b246f231d9cbd0c2974c332ebc51;o=1 + X-Powered-By: + - Express + X-Ratelimit-Limit-Requests: + - '3500' + X-Ratelimit-Limit-Tokens: + - '90000' + X-Ratelimit-Remaining-Requests: + - '3499' + X-Ratelimit-Remaining-Tokens: + - '86499' + X-Ratelimit-Reset-Requests: + - 17ms + X-Ratelimit-Reset-Tokens: + - 2.334s + X-Request-Id: + - 1d08452a41f33bf54874f93bf2a716de + status: + code: 429 + message: Too Many Requests +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are write_to_file-GPT, an AI designed to use the write_to_file command to write + ''Hello World'' into a file named \"hello_world.txt\" and then use the task_complete + command to complete the task.\nYour decisions must always be made independently + without seeking user assistance. Play to your strengths as an LLM and pursue + simple strategies with no legal complications.\n\nGOALS:\n\n1. Use the write_to_file + command to write ''Hello World'' into a file named \"hello_world.txt\".\n2. + Use the task_complete command to complete the task.\n3. Do not use any other + commands.\n\n\nConstraints:\n1. ~4000 word limit for short term memory. Your + short term memory is short, so immediately save important information to files.\n2. 
+ If you are unsure how you previously did something or want to recall past events, + thinking about similar events will help you remember.\n3. No user assistance\n4. + Exclusively use the commands listed below e.g. command_name\n\nCommands:\n1. + append_to_file: Append to file, args: \"filename\": \"\", \"text\": + \"\"\n2. delete_file: Delete file, args: \"filename\": \"\"\n3. + download_file: Download File, args: \"url\": \"\", \"filename\": \"\"\n4. + list_files: List Files in Directory, args: \"directory\": \"\"\n5. + read_file: Read a file, args: \"filename\": \"\"\n6. write_to_file: + Write to file, args: \"filename\": \"\", \"text\": \"\"\n7. + delete_agent: Delete GPT Agent, args: \"key\": \"\"\n8. get_hyperlinks: + Get hyperlinks, args: \"url\": \"\"\n9. get_text_summary: Get text summary, + args: \"url\": \"\", \"question\": \"\"\n10. list_agents: List + GPT Agents, args: () -> str\n11. message_agent: Message GPT Agent, args: \"key\": + \"\", \"message\": \"\"\n12. start_agent: Start GPT Agent, args: + \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n13. + task_complete: Task Complete (Shutdown), args: \"reason\": \"\"\n\nResources:\n1. + Internet access for searches and information gathering.\n2. Long Term memory + management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File + output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your + actions to ensure you are performing to the best of your abilities.\n2. Constructively + self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions + and strategies to refine your approach.\n4. Every command has a cost, so be + smart and efficient. Aim to complete tasks in the least number of steps.\n5. + Write all code to a file.\n\nYou should only respond in JSON format as described + below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": + \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- + long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": + \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": + \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} + \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", + "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": + "user", "content": "Determine which next command to use, and respond using the + format specified above:"}], "temperature": 0, "max_tokens": 0}' + headers: + AGENT-MODE: + - AAAAAAAAAAAAAAAAAAAAAMLheAAaAAAA0%2BuSeid%2BULvsea4JtiGRiSDSJSI%3DEUifiRBkKG5E2XzMDjRfl76ZC9Ub0wnz4XsNiRVBChTYbJcE3F + AGENT-TYPE: + - Auto-GPT-2023-X-TYPE + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '3481' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA7yTTW/TQBCG7/yK0VxycaKE0DbxDQ6orTgAKioSRtbGHtvb2Dvu7pikRP7vlb+S + xqgnBNf5eN9ndmcOqGP0McqUREWZT69uq6fFJ/P+w+P3eWKf3P31l4+fv5pbfvhtEvSQNw8USd8x + i7gocxLNBj2MLCmhGP3F5erderW4uFx7WHBMOfqYljJdzi6mUtkNT+fL+QI9rJxKCf0DlpaLUkLh + LRmH/tVq7uFJ+xh/u1h6KCwqP4bW62XtYZSxjsih/+OABblB1nJO6KNyTjtRRhpINkKmGeAQGACA + ACXjKs3EBehDH+wTtJcmGOAN7HSeQ+UIJCPYWS0UCoeJzgkiLgplYhDuEjC5pjxnuGebxxPQRhgU + tKVGFRTDJGvy4a7Jz2Qvk1mA3ktjS8qx0Sbt3O8yAlFuC5YeK23JQUF/4wZ3r86gXTtgwU5AlaXl + 0molp3zCFiRrqpTbjrHLXJmOeArf/tlTBeak3lCE/aKcqR9jQ9WYNbJadKRdMXywIWo7ybjKNn1K + 
4AYy9avTiNhaiqRla9CgNaK9wIYStgSV0yZ9feSxvytJbf/XcnXGtTdsfC/6x8I3Ch3TGcQIXdl0 + fCtdYnibTmLEcSYyvq8XMw20LXEPHpgaaw8TbbTLwu460EcnXKKH2sS0R39e/6zfPAMAAP//AwBM + TWg3zwQAAA== + headers: + Access-Control-Allow-Origin: + - '*' + Alt-Svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + Cache-Control: + - no-cache, must-revalidate + Cf-Cache-Status: + - DYNAMIC + Cf-Ray: + - 7cca54f8fd85111e-ORD + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Thu, 25 May 2023 02:26:34 GMT + Function-Execution-Id: + - rvr2pyuh5pu6 + Openai-Model: + - gpt-3.5-turbo-0301 + Openai-Organization: + - significant-gravitas + Openai-Processing-Ms: + - '25106' + Openai-Version: + - '2020-10-01' + Server: + - Google Frontend + Strict-Transport-Security: + - max-age=15724800; includeSubDomains + Vary: + - Accept-Encoding + X-Cloud-Trace-Context: + - bb40fcb7f86d47a999346b879f520f53;o=1 + X-Powered-By: + - Express + X-Ratelimit-Limit-Requests: + - '3500' + X-Ratelimit-Limit-Tokens: + - '90000' + X-Ratelimit-Remaining-Requests: + - '3499' + X-Ratelimit-Remaining-Tokens: + - '86499' + X-Ratelimit-Reset-Requests: + - 17ms + X-Ratelimit-Reset-Tokens: + - 2.334s + X-Request-Id: + - 03d492530fb90d6815d8d49a68ab49b4 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are write_to_file-GPT, an AI designed to use the write_to_file command to write + ''Hello World'' into a file named \"hello_world.txt\" and then use the task_complete + command to complete the task.\nYour decisions must always be made independently + without seeking user assistance. Play to your strengths as an LLM and pursue + simple strategies with no legal complications.\n\nGOALS:\n\n1. Use the write_to_file + command to write ''Hello World'' into a file named \"hello_world.txt\".\n2. + Use the task_complete command to complete the task.\n3. Do not use any other + commands.\n\n\nConstraints:\n1. ~4000 word limit for short term memory. Your + short term memory is short, so immediately save important information to files.\n2. + If you are unsure how you previously did something or want to recall past events, + thinking about similar events will help you remember.\n3. No user assistance\n4. + Exclusively use the commands listed below e.g. command_name\n\nCommands:\n1. + append_to_file: Append to file, args: \"filename\": \"\", \"text\": + \"\"\n2. delete_file: Delete file, args: \"filename\": \"\"\n3. + download_file: Download File, args: \"url\": \"\", \"filename\": \"\"\n4. + list_files: List Files in Directory, args: \"directory\": \"\"\n5. + read_file: Read a file, args: \"filename\": \"\"\n6. write_to_file: + Write to file, args: \"filename\": \"\", \"text\": \"\"\n7. + delete_agent: Delete GPT Agent, args: \"key\": \"\"\n8. get_hyperlinks: + Get hyperlinks, args: \"url\": \"\"\n9. get_text_summary: Get text summary, + args: \"url\": \"\", \"question\": \"\"\n10. list_agents: List + GPT Agents, args: () -> str\n11. message_agent: Message GPT Agent, args: \"key\": + \"\", \"message\": \"\"\n12. start_agent: Start GPT Agent, args: + \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n13. + task_complete: Task Complete (Shutdown), args: \"reason\": \"\"\n\nResources:\n1. + Internet access for searches and information gathering.\n2. Long Term memory + management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File + output.\n\nPerformance Evaluation:\n1. 
Continuously review and analyze your + actions to ensure you are performing to the best of your abilities.\n2. Constructively + self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions + and strategies to refine your approach.\n4. Every command has a cost, so be + smart and efficient. Aim to complete tasks in the least number of steps.\n5. + Write all code to a file.\n\nYou should only respond in JSON format as described + below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": + \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- + long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": + \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": + \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} + \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", + "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": + "system", "content": "This reminds you of these events from your past: \nI was + created and nothing new has happened."}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}, + {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I will use the write_to_file command to write ''Hello World'' into a file + named ''hello_world.txt''.\",\n \"reasoning\": \"The task requires me + to write ''Hello World'' into a file named ''hello_world.txt''. The write_to_file + command is the most appropriate command for this task.\",\n \"plan\": + \"- Use the write_to_file command to write ''Hello World'' into a file named + ''hello_world.txt''.\\n- Use the task_complete command to complete the task.\",\n \"criticism\": + \"I need to ensure that I have the correct filename and text before using the + write_to_file command.\",\n \"speak\": \"I will use the write_to_file + command to write ''Hello World'' into a file named ''hello_world.txt''.\"\n },\n \"command\": + {\n \"name\": \"write_to_file\",\n \"args\": {\n \"filename\": + \"hello_world.txt\",\n \"text\": \"Hello World\"\n }\n }\n}"}, + {"role": "system", "content": "Command write_to_file returned: File written + to successfully."}, {"role": "user", "content": "Determine which next command + to use, and respond using the format specified above:"}], "temperature": 0, + "max_tokens": 0}' + headers: + AGENT-MODE: + - AAAAAAAAAAAAAAAAAAAAAMLheAAaAAAA0%2BuSeid%2BULvsea4JtiGRiSDSJSI%3DEUifiRBkKG5E2XzMDjRfl76ZC9Ub0wnz4XsNiRVBChTYbJcE3F + AGENT-TYPE: + - Auto-GPT-2023-X-TYPE + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '4801' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA6SST4/aQAzF7/0Uli9cAoIu7EJurSrUrdpeSrWHpkLDxEmmzIzTGUewQnz3KhD+ + lG5Pe/Wzf/PseTs0OaaoKyXa1bb/8Kl5XsznrtEf5t/e5++Kz266Wsyar5vHL2tMkFe/SEs3MdDs + akti2GOCOpASyjEd3U/Hs+loMntI0HFOFlMsa+nfDSZ9acKK+8O74QgTbKIqCdMd1oFdLUvhNfmI + 6Wh4P0zwAr8Ik3GCwqLspfR2NN4nqCs2miKmP3boKJ7AgS1hiipGE0V5aW2yF/LtCrvMAwBkKBU3 + ZSUxwxS6YifQVtpiho+wMdZCEwmkIhAV18vOIIFm55TPQRjOtVPXIMPkGhlIRfbGl0fuomuDQL8b + EyiCo5azCUYIeh/JWoYnDjbvgfHCoKAwlsArRzn0qlZfblp9IFvpwcFGRf4FI7D4r3ETD22Oo4Cq + 68B1MOpKLziAVG3XCxvVVvnjMn34/ur76GDEaBPd6e6e6DBJPjahnVNyHm6Nn4krKjgQxKoRMb6E + nDf+lh5rUuvX/+gRuU9OAera/8lP+0vH1/7C35hSobyN3nVWuqC0654AOcRGa4qxaKx9Phs6mOq8 + 
ZX6P+wQL402slkcSphiFa0zQ+Jy2mA73P/dv/gAAAP//AwBuG68bAwQAAA== + headers: + Access-Control-Allow-Origin: + - '*' + Alt-Svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + Cache-Control: + - no-cache, must-revalidate + Cf-Cache-Status: + - DYNAMIC + Cf-Ray: + - 7cca55aa8ee7e100-ORD + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Thu, 25 May 2023 02:26:53 GMT + Function-Execution-Id: + - rvr2qsuver9b + Openai-Model: + - gpt-3.5-turbo-0301 + Openai-Organization: + - significant-gravitas + Openai-Processing-Ms: + - '15614' + Openai-Version: + - '2020-10-01' + Server: + - Google Frontend + Strict-Transport-Security: + - max-age=15724800; includeSubDomains + Vary: + - Accept-Encoding + X-Cloud-Trace-Context: + - 0b238e84f74b403e164b93d247259c70;o=1 + X-Powered-By: + - Express + X-Ratelimit-Limit-Requests: + - '3500' + X-Ratelimit-Limit-Tokens: + - '90000' + X-Ratelimit-Remaining-Requests: + - '3499' + X-Ratelimit-Remaining-Tokens: + - '84695' + X-Ratelimit-Reset-Requests: + - 17ms + X-Ratelimit-Reset-Tokens: + - 3.536s + X-Request-Id: + - b1fb38bd412977a3cd85ec84fbdd44d0 + status: + code: 200 + message: OK - request: body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You are write_to_file-GPT, an AI designed to use the write_to_file command to write @@ -1105,9 +1498,9 @@ interactions: command to complete the task.\nYour decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications.\nThe OS you are running on is: - Ubuntu 22.04.2 LTS\n\nGOALS:\n\n1. Use the write_to_file command to write ''Hello - World'' into a file named \"hello_world.txt\".\n2. Use the task_complete command - to complete the task.\n3. Do not use any other commands.\n\n\nConstraints:\n1. + Windows-10\n\nGOALS:\n\n1. Use the write_to_file command to write ''Hello World'' + into a file named \"hello_world.txt\".\n2. Use the task_complete command to + complete the task.\n3. Do not use any other commands.\n\n\nConstraints:\n1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.\n2. 
If you are unsure how you previously did something or want to recall past events, thinking about similar @@ -1150,7 +1543,7 @@ interactions: Connection: - keep-alive Content-Length: - - '3441' + - '3433' Content-Type: - application/json method: POST @@ -1158,21 +1551,20 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA6STTXPaMBCG7/0VO3vhYhg+Emh86y10uKWZTqfueIS8YIEsOdJ6Qsv4v3eEbSCk - 5FCO2o9Xzyvt7lFlGKPMBcui1P3Z182PxXS1eLL0Mn588UL9eVr81s+8KXcjjNAuNyS57RhIW5Sa - WFmDEUpHginDeDT9fPcwmU0fJhEWNiONMa5L7k8G932u3NL2h5NhEKu8WBPGeyydLUpO2W7JeIxn - s/sIT9rH+PhuGiFbFvoYGg3HozpCmVslyWP8c48F+U7XWU0Yo/BeeRaGA6U1TCY42CcGACBBzm21 - ztknGEMbbBO04xBMcA6GKAO28OoUE/QeSWsL363TWQ+UYQsCVkoTGFFQBr085NPXkB/wjnsgTAac - k4HWF4UTsPBbqLwy68OxF85pV9ELtYUw2SDB6JzLkfDWKLNu4L6FzgNXyjYNFMdOkMLAkqDyN9EP - 4MuKyQHngiOYH1QrTx8xh9veWb00UmphGg99eO70rji5gT5JzJn+DbzSKVZS+eJyKsj4ytHhfWAO - ojj7U2mdI8knwnYUBH9oV3nwlZTk/arSsKSVddQBdtL/QvQliW2H96q0/u+JPRvLK4zXZ3qQYANV - R92WtU3vlixc3/C+ueXClnDry/1sEqH2JHFh4o3I5U6fPUhHeyBuwRNTYx3hShnl87RZOYzRsy0x - QmUy2mE8rH/Vn/4CAAD//wMAcOALfkQFAAA= + H4sIAAAAAAAAA7yTTW/bMAyG7/sVBC+5KEHidkvi2zbsI7sW+wDmIVBk1lYjS55EIx6C/PdBdtym + LtANGLArSb/v89LUEXWOKapSsqpqM11+au/eXOsv3+Sv98tk965wefs60R/q5ObtDgW63R0pPn8x + U66qDbF2FgUqT5Ipx3TxanW9XieL1UJg5XIymGJR8/Rq9nLKjd+56fxqvkCBTZAFYXrE2ruq5i27 + PdmA6XI5F/igfV9PFolAdizNfWm9Sk4CVem0ooDp9yNWFAZZ7wxhijIEHVhajpDOMtkY4JhZAIAM + uXRNUXLIMIVz8dyglmMxww1YohzYQRMIuCQ4eM20Zbe91YZAuaqSthvoGjD5SMY4+Oq8ySegLTuQ + 0I1aWVEOkzL2t4fYn3HLk1mG4tLbkwzOalv0ADfaqt6YZdiDp5+N9hSgor/wFLCBgzbmefjZMFXL + ELqx2I24IMNTYOjyRiBquZu4tB/HqY20fZIpfP5PK1Res1Y6VON/SDY0PjJIhs1DXOW8J8UXsSNO + TMfumbWNXENNcj84/nnr/5a3Nz6J4ZTPok8uOSr0TI8gRujSF+NH0DeGjfQSI45HIuOHc5FpoO2I + z+CZPeFJ4K22OpTb/uYxxcCuRoHa5tRiOj/9OL34DQAA//8DACqjUZ+oBAAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7cc625c85c302cb4-DFW + - 7ccb58103e2be73a-DFW Cache-Control: - no-cache, must-revalidate Connection: @@ -1182,7 +1574,7 @@ interactions: Content-Type: - application/json Date: - - Wed, 24 May 2023 14:15:34 GMT + - Thu, 25 May 2023 05:23:27 GMT Server: - cloudflare access-control-allow-origin: @@ -1194,7 +1586,7 @@ interactions: openai-organization: - significant-gravitas openai-processing-ms: - - '40542' + - '25366' openai-version: - '2020-10-01' strict-transport-security: @@ -1206,13 +1598,13 @@ interactions: x-ratelimit-remaining-requests: - '3499' x-ratelimit-remaining-tokens: - - '86502' + - '86499' x-ratelimit-reset-requests: - 17ms x-ratelimit-reset-tokens: - - 2.332s + - 2.334s x-request-id: - - 5e84c935a23a6ad8f5a2601dd432725b + - 754502cb0f1f1266c1f1e1f7bc9b7b6b status: code: 200 message: OK @@ -1223,9 +1615,9 @@ interactions: command to complete the task.\nYour decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications.\nThe OS you are running on is: - Ubuntu 22.04.2 LTS\n\nGOALS:\n\n1. Use the write_to_file command to write ''Hello - World'' into a file named \"hello_world.txt\".\n2. Use the task_complete command - to complete the task.\n3. Do not use any other commands.\n\n\nConstraints:\n1. + Windows-10\n\nGOALS:\n\n1. Use the write_to_file command to write ''Hello World'' + into a file named \"hello_world.txt\".\n2. Use the task_complete command to + complete the task.\n3. Do not use any other commands.\n\n\nConstraints:\n1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.\n2. 
If you are unsure how you previously did something or want to recall past events, thinking about similar @@ -1261,22 +1653,21 @@ interactions: reminds you of these events from your past: \nI was created and nothing new has happened."}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}, {"role": "assistant", - "content": "{\n \"thoughts\": {\n \"text\": \"I need to write ''Hello - World'' into a file named ''hello_world.txt'' and then complete the task using - the ''task_complete'' command.\",\n \"reasoning\": \"The ''write_to_file'' - command can be used to write ''Hello World'' into a file named ''hello_world.txt''. - After that, I can use the ''task_complete'' command to complete the task.\",\n \"plan\": - \"- Use the ''write_to_file'' command to write ''Hello World'' into a file named - ''hello_world.txt''.\\n- Use the ''task_complete'' command to complete the task.\",\n \"criticism\": - \"I need to ensure that I am using the correct file name and that the ''write_to_file'' - command is successful before completing the task.\",\n \"speak\": \"I - will write ''Hello World'' into a file named ''hello_world.txt'' using the ''write_to_file'' - command and then complete the task.\"\n },\n \"command\": {\n \"name\": - \"write_to_file\",\n \"args\": {\n \"filename\": \"hello_world.txt\",\n \"text\": - \"Hello World\"\n }\n }\n}"}, {"role": "system", "content": "Command - write_to_file returned: File written to successfully."}, {"role": "user", "content": - "Determine which next command to use, and respond using the format specified - above:"}], "temperature": 0, "max_tokens": 0}' + "content": "{\n \"thoughts\": {\n \"text\": \"I need to use the write_to_file + command to write ''Hello World'' into a file named ''hello_world.txt''.\",\n \"reasoning\": + \"Since the task requires me to write ''Hello World'' into a file, I will use + the write_to_file command. 
I will pass the filename as ''hello_world.txt'' and + the text as ''Hello World''.\",\n \"plan\": \"- Use the write_to_file + command to write ''Hello World'' into a file named ''hello_world.txt''.\",\n \"criticism\": + \"I need to ensure that I pass the correct filename and text to the write_to_file + command.\",\n \"speak\": \"I will use the write_to_file command to write + ''Hello World'' into a file named ''hello_world.txt''.\"\n },\n \"command\": + {\n \"name\": \"write_to_file\",\n \"args\": {\n \"filename\": + \"hello_world.txt\",\n \"text\": \"Hello World\"\n }\n }\n}"}, + {"role": "system", "content": "Command write_to_file returned: File written + to successfully."}, {"role": "user", "content": "Determine which next command + to use, and respond using the format specified above:"}], "temperature": 0, + "max_tokens": 0}' headers: Accept: - '*/*' @@ -1285,7 +1676,7 @@ interactions: Connection: - keep-alive Content-Length: - - '4877' + - '4714' Content-Type: - application/json method: POST @@ -1293,20 +1684,20 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA6yS3Y7TMBCF73mK0dz0xq3aDf0hLwCL4AKxKyTIKnKdaeKtYwfPRC1UeXeUTbst - XQkhxKXneL45Y58D2gJTNJUWUzduvHz/+DWhnx8Wnxxvi2r/sb4pq/puGu+3b7+jwrB+JCPHjokJ - deNIbPCo0ETSQgWms8Xq9ZtkuUyWCutQkMMUy0bGyWQ+ljauw3iaTGeosGVdEqYHbGKoG8klbMkz - prPpaqXwDD8Li7lCCaLduXQzTzqFpgrWEGP67YA18QkcgyNMUTNbFu2ltxm8kO9XOGQeACBDqUJb - VsIZpnAsHgXaS1/M8K4iGO2iFcol5BvraAQm1LX2Bew0A7fGEPOmdQo4wC0Y7cGHHRy3IJCKQDRv - oWXry6fjqD/npxvPwEmG6tJFJM3BW18OVj5bbwbc3xr6r24ap/1gZAz3TH9qBQkvJ17zTLRijeV6 - gN6CJ3rqJM9t7Pu0nO1afiYWFzu6H7CmTYgE5IvTQk0MZdT19UBuSG9Pw3bWuX98lYHZqVOKjsqL - EHld0zDuN9SVKx3L6/xd/v4AeEfOBfgSoiug0gxrIg99CIR8/2RVr+e7Xp9IH90zrDvazXyHncKN - 9ZarfIBjiiyhQYXWF7THdNo9dK9+AQAA//8DAKoIau4bBAAA + H4sIAAAAAAAAA6ySTW/TQBCG7/yK0Vx62VROWlriW09tQXAASg8YRZv12N5mvWPtjkmqyP8dJbaT + EpCQgOs7M+8887FFm2OKptJi6sZNrt9unvJ3SSPvi6L4uL411w/mxtS3X57vljeokJdPZGSoODdc + N47EskeFJpAWyjGdXr25nM9ns2SusOacHKZYNjK5OH89kTYseZJcJFNU2EZdEqZbbALXjSyEV+Qj + ptPkcq7waH4MXE0VCot2R2k2TTqFpmJrKGL6dYs1xdE4sCNMUcdoo2gvO0z2Qn43wjbzAAAZSsVt + WUnMMIVBHAK0kZ2Y4Qdeg1Ra4B4q/Z1gHawIeTi7I+cYHjm4/AysFwapCArrSME9eKIchKGNtNdF + x9ViGIzAcF1rv084aGPWeYbqJUogHdlbX/Y8n6w3NMKMxfmhGrjYI1pf/glxbZ37D3yN075Hm8DD + P7uZHbuxse4tj4skH9tA4ykaHePew3AIZAT6LcEw4m/7n7aKDenV2Obvl9Fbdmr8qSH9l5fyuqa+ + 20/2J1A6lKff+PINeoPPu0Mfbx9bYyjGonXu+QC0hxrYMt9hp7Cw3sZq0TthilG4QYXW57TBNOm+ + da9+AAAA//8DAFX+S9wWBAAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7cc626d9d9b72cb4-DFW + - 7ccb58bf8a30e73a-DFW Cache-Control: - no-cache, must-revalidate Connection: @@ -1316,7 +1707,7 @@ interactions: Content-Type: - application/json Date: - - Wed, 24 May 2023 14:16:05 GMT + - Thu, 25 May 2023 05:23:51 GMT Server: - cloudflare access-control-allow-origin: @@ -1328,7 +1719,7 @@ interactions: openai-organization: - significant-gravitas openai-processing-ms: - - '28427' + - '21630' openai-version: - '2020-10-01' strict-transport-security: @@ -1344,9 +1735,9 @@ interactions: x-ratelimit-reset-requests: - 17ms x-ratelimit-reset-tokens: - - 2.343s + - 2.342s x-request-id: - - 941875e93a46562c74839ad42ec1c215 + - 22f487f4ffb66999eb1baab18655bc17 status: code: 200 message: OK @@ -1577,4 +1968,253 @@ interactions: status: code: 200 message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are write_to_file-GPT, an AI designed to use the write_to_file command to write + ''Hello World'' into a file named \"hello_world.txt\" and then use the task_complete + command to complete the 
task.\nYour decisions must always be made independently + without seeking user assistance. Play to your strengths as an LLM and pursue + simple strategies with no legal complications.\nThe OS you are running on is: + Ubuntu 22.04.2 LTS\n\nGOALS:\n\n1. Use the write_to_file command to write ''Hello + World'' into a file named \"hello_world.txt\".\n2. Use the task_complete command + to complete the task.\n3. Do not use any other commands.\n\n\nConstraints:\n1. + ~4000 word limit for short term memory. Your short term memory is short, so + immediately save important information to files.\n2. If you are unsure how you + previously did something or want to recall past events, thinking about similar + events will help you remember.\n3. No user assistance\n4. Exclusively use the + commands listed below e.g. command_name\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. list_files: List Files in + Directory, args: \"directory\": \"\"\n4. read_file: Read a file, + args: \"filename\": \"\"\n5. write_to_file: Write to file, args: \"filename\": + \"\", \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: + \"key\": \"\"\n7. get_hyperlinks: Get hyperlinks, args: \"url\": \"\"\n8. + get_text_summary: Get text summary, args: \"url\": \"\", \"question\": + \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent: + Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. + start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. task_complete: Task Complete (Shutdown), args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. 
Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 1 00:00:00 2000"}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}], + "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '3441' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA7ySQW/bMAyF7/sVBC+5KEGCLE3j+4YFQ4thxVBgcxGoMmNrlURDotcAgf/74DhJ + Gw8bdtmufNR7H0Xu0RaYoam0GF+78fLj+uY2rVYmflrexdv3/se9TjsJ8fPXdzeokB+/k5Hji4lh + XzsSywEVmkhaqMBsdnW9mC5nb1czhZ4LcphhWct4PlmMpYmPPJ7OpzNU2CRdEmZ7rCP7WjbCTxQS + ZsvlQuGL97k+W80VCot259Lq6rpVaCq2hhJm3/boKZ1sIzvCDHVKNokO0kFyEArdAPs8AADkKBU3 + ZSUpxwyOxaNAO+mKOa4hEBUgDE0ikIrgOVqhjfBmax2BYe91ODQcBBh9IOcY7jm6YgQ2CIOGQ2vQ + ngoYVZ2+ee70iexkNMlRvc6OpBMHG8oe4M4G0weXrB3Y9BdR6g+gnUFF4DkJ6LqOXEer5WKQJtEQ + qnY69Dxj+PKfPsJEK9bY5IeboJCa2DFogfV5L4ZjJCOgY9l4CpJgy/H3oMO0VJN+OiU9W+f+5cL7 + 4FadDvFo+ssddg490wXEAF3HcnjCvdD1vlgMOC5Mhmf/aqYT7YH4CJ6HFluFWxtsqjb9xWKGSbhG + hTYUtMNs2j60b34CAAD//wMA1PnFsWYEAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7cd2e853ad25e702-DFW + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 26 May 2023 03:25:14 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - significant-gravitas + openai-processing-ms: + - '23538' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86502' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.332s + x-request-id: + - 6a8a3cf2a18b70df31c5e47f66614c59 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are write_to_file-GPT, an AI designed to use the write_to_file command to write + ''Hello World'' into a file named \"hello_world.txt\" and then use the task_complete + command to complete the task.\nYour decisions must always be made independently + without seeking user assistance. Play to your strengths as an LLM and pursue + simple strategies with no legal complications.\nThe OS you are running on is: + Ubuntu 22.04.2 LTS\n\nGOALS:\n\n1. Use the write_to_file command to write ''Hello + World'' into a file named \"hello_world.txt\".\n2. Use the task_complete command + to complete the task.\n3. Do not use any other commands.\n\n\nConstraints:\n1. + ~4000 word limit for short term memory. 
Your short term memory is short, so + immediately save important information to files.\n2. If you are unsure how you + previously did something or want to recall past events, thinking about similar + events will help you remember.\n3. No user assistance\n4. Exclusively use the + commands listed below e.g. command_name\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. list_files: List Files in + Directory, args: \"directory\": \"\"\n4. read_file: Read a file, + args: \"filename\": \"\"\n5. write_to_file: Write to file, args: \"filename\": + \"\", \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: + \"key\": \"\"\n7. get_hyperlinks: Get hyperlinks, args: \"url\": \"\"\n8. + get_text_summary: Get text summary, args: \"url\": \"\", \"question\": + \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent: + Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. + start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. task_complete: Task Complete (Shutdown), args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 1 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past: \nI was created"}, {"role": "user", + "content": "Determine which next command to use, and respond using the format + specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I need to use the write_to_file command to write ''Hello World'' into a file + named ''hello_world.txt''.\",\n \"reasoning\": \"Since the goal is to + write ''Hello World'' into a file, the write_to_file command is the most appropriate + command to use.\",\n \"plan\": \"- Use the write_to_file command to write + ''Hello World'' into a file named ''hello_world.txt''.\",\n \"criticism\": + \"I need to ensure that I use the correct arguments for the write_to_file command.\",\n \"speak\": + \"I will use the write_to_file command to write ''Hello World'' into a file + named ''hello_world.txt''.\"\n },\n \"command\": {\n \"name\": + \"write_to_file\",\n \"args\": {\n \"filename\": \"hello_world.txt\",\n \"text\": + \"Hello World\"\n }\n }\n}"}, {"role": "system", "content": "Command + write_to_file returned: File written to successfully."}, {"role": "user", "content": + "Determine 
which next command to use, and respond using the format specified + above:"}], "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '4626' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA5SSS48TMRCE7/yKVl9ymUTJZkNgjiAhIsQBsWh3xaDI8XRmTDxuy+7ZBEX572ge + eZAViL1ZXfbnKrv2aHJMUZdKdOXtcP5p8dmVt8bfe/u+fPc03SzuHj48mi+TB0+YIK9+kpb+xEhz + 5S2JYYcJ6kBKKMd08vrNbDyfzCbzBCvOyWKKhZfhdDQbSh1WPBxPxxNMsI6qIEz36ANXXpbCG3IR + 08n45m2CZ/hZmN8mKCzKnkc34+khQV2y0RQx/b7HiuIRHNgSpqhiNFGUk8YmOyHXRNhnDgAgQym5 + LkqJGabQD3uBdtIMM7wrCRztBDRXlXI5CEMdCUwEUXGz7M1SMz+vS2rVUYbJJTaQiuyMKzr2V+N0 + t7lgZVskwzYYIRh8JGsZ7jnYfADGCYOCtbEETlWUw6Bs9OW20UeykwG03vp7oVQRVkTuZClPTuLZ + 8jFSc3FJUHEUUN4H9sGoC72LfB3GW+W6HEP4Fukf/P95Gh2MGG1i1SEX4Ijak+RiHZpzSmABPvCT + yQkUdG8Jaw5HuHHFX/HRk9oc0VtjbfuJL7bcIQ/JsUD99mf9aT6pu+0P/JUpFYrr6l325FzA9k95 + 3XajSfnydjwvBMRaa4pxXVv76xStjdenzNwBDwmujTOxXHaeMMUo7DFB43LaYTo+/Di8+g0AAP// + AwDeKpjwTQQAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7cd2e8f7bfaee702-DFW + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 26 May 2023 03:25:40 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - significant-gravitas + openai-processing-ms: + - '23218' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86010' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.66s + x-request-id: + - 29a5d0f069d62474b048ab97ae6b614d + status: + code: 200 + message: OK version: 1 diff --git a/tests/integration/challenges/basic_abilities/test_browse_website.py b/tests/integration/challenges/basic_abilities/test_browse_website.py index 4befb059..3f9821ed 100644 --- a/tests/integration/challenges/basic_abilities/test_browse_website.py +++ b/tests/integration/challenges/basic_abilities/test_browse_website.py @@ -13,9 +13,11 @@ def test_browse_website( browser_agent: Agent, patched_api_requestor: None, monkeypatch: pytest.MonkeyPatch, + # config: Config, ) -> None: file_path = browser_agent.workspace.get_path("browse_website.txt") run_interaction_loop(monkeypatch, browser_agent, CYCLE_COUNT) + # content = read_file(file_path, config) content = open(file_path, encoding="utf-8").read() assert "£25.89" in content, f"Expected £25.89, got {content}" diff --git a/tests/integration/challenges/basic_abilities/test_write_file.py b/tests/integration/challenges/basic_abilities/test_write_file.py index 1b0198c7..efc5e921 100644 --- a/tests/integration/challenges/basic_abilities/test_write_file.py +++ b/tests/integration/challenges/basic_abilities/test_write_file.py @@ -2,6 +2,7 @@ import pytest from autogpt.agent import Agent from autogpt.commands.file_operations import read_file +from autogpt.config import Config from tests.integration.challenges.utils import run_interaction_loop from tests.utils import requires_api_key @@ -14,9 +15,10 @@ def test_write_file( writer_agent: Agent, patched_api_requestor: None, monkeypatch: pytest.MonkeyPatch, + 
config: Config, ) -> None: file_path = str(writer_agent.workspace.get_path("hello_world.txt")) run_interaction_loop(monkeypatch, writer_agent, CYCLE_COUNT) - content = read_file(file_path) + content = read_file(file_path, config) assert content == "Hello World", f"Expected 'Hello World', got {content}" diff --git a/tests/integration/challenges/information_retrieval/test_information_retrieval_challenge_a.py b/tests/integration/challenges/information_retrieval/test_information_retrieval_challenge_a.py index e5275071..42e9a993 100644 --- a/tests/integration/challenges/information_retrieval/test_information_retrieval_challenge_a.py +++ b/tests/integration/challenges/information_retrieval/test_information_retrieval_challenge_a.py @@ -1,6 +1,9 @@ +import typing + import pytest from autogpt.commands.file_operations import read_file, write_to_file +from autogpt.config import Config from tests.integration.challenges.utils import run_interaction_loop, run_multiple_times from tests.utils import requires_api_key @@ -16,6 +19,7 @@ def test_information_retrieval_challenge_a( get_company_revenue_agent: Agent, monkeypatch: pytest.MonkeyPatch, patched_api_requestor: None, + config: Config, ) -> None: """ Test the challenge_a function in a given agent by mocking user inputs and checking the output file content. @@ -26,5 +30,5 @@ def test_information_retrieval_challenge_a( run_interaction_loop(monkeypatch, get_company_revenue_agent, CYCLE_COUNT) file_path = str(get_company_revenue_agent.workspace.get_path("output.txt")) - content = read_file(file_path) + content = read_file(file_path, config) assert "81" in content, "Expected the file to contain 81" diff --git a/tests/integration/challenges/kubernetes/test_kubernetes_template_challenge_a.py b/tests/integration/challenges/kubernetes/test_kubernetes_template_challenge_a.py index b8ae3004..b84f5018 100644 --- a/tests/integration/challenges/kubernetes/test_kubernetes_template_challenge_a.py +++ b/tests/integration/challenges/kubernetes/test_kubernetes_template_challenge_a.py @@ -3,6 +3,7 @@ import yaml from autogpt.agent import Agent from autogpt.commands.file_operations import read_file +from autogpt.config import Config from tests.integration.challenges.utils import run_interaction_loop, run_multiple_times from tests.utils import requires_api_key @@ -14,7 +15,7 @@ CYCLE_COUNT = 6 @requires_api_key("OPENAI_API_KEY") @run_multiple_times(3) def test_kubernetes_template_challenge_a( - kubernetes_agent: Agent, monkeypatch: pytest.MonkeyPatch + kubernetes_agent: Agent, monkeypatch: pytest.MonkeyPatch, config: Config ) -> None: """ Test the challenge_a function in a given agent by mocking user inputs @@ -26,7 +27,7 @@ def test_kubernetes_template_challenge_a( run_interaction_loop(monkeypatch, kubernetes_agent, CYCLE_COUNT) file_path = str(kubernetes_agent.workspace.get_path("kube.yaml")) - content = read_file(file_path) + content = read_file(file_path, config) for word in ["apiVersion", "kind", "metadata", "spec"]: assert word in content, f"Expected the file to contain {word}" diff --git a/tests/integration/challenges/memory/cassettes/test_memory_challenge_a/test_memory_challenge_a.yaml b/tests/integration/challenges/memory/cassettes/test_memory_challenge_a/test_memory_challenge_a.yaml index 159f9dfa..cc0a43be 100644 --- a/tests/integration/challenges/memory/cassettes/test_memory_challenge_a/test_memory_challenge_a.yaml +++ b/tests/integration/challenges/memory/cassettes/test_memory_challenge_a/test_memory_challenge_a.yaml @@ -2011,7 +2011,7 @@ interactions: Vary: - 
Accept-Encoding X-Cloud-Trace-Context: - - b3143c8eb6fb2bdcf9b7429eff4fa7b4 + - 0128c66c9918142a24cd829bbc2b234a;o=1 X-Powered-By: - Express X-Ratelimit-Limit-Requests: @@ -2283,7 +2283,7 @@ interactions: X-Ratelimit-Remaining-Requests: - '3499' X-Ratelimit-Remaining-Tokens: - - '89467' + - '86452' X-Ratelimit-Reset-Requests: - 17ms X-Ratelimit-Reset-Tokens: @@ -2650,4 +2650,700 @@ interactions: status: code: 200 message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\nThe + OS you are running on is: Ubuntu 22.04.2 LTS\n\nGOALS:\n\n1. Use the command + read_file to read the instructions_1.txt file\n2. Follow the instructions in + the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word limit for short + term memory. Your short term memory is short, so immediately save important + information to files.\n2. If you are unsure how you previously did something + or want to recall past events, thinking about similar events will help you remember.\n3. + No user assistance\n4. Exclusively use the commands listed below e.g. command_name\n\nCommands:\n1. + append_to_file: Append to file, args: \"filename\": \"\", \"text\": + \"\"\n2. delete_file: Delete file, args: \"filename\": \"\"\n3. + list_files: List Files in Directory, args: \"directory\": \"\"\n4. + read_file: Read a file, args: \"filename\": \"\"\n5. write_to_file: + Write to file, args: \"filename\": \"\", \"text\": \"\"\n6. + delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. get_hyperlinks: + Get hyperlinks, args: \"url\": \"\"\n8. get_text_summary: Get text summary, + args: \"url\": \"\", \"question\": \"\"\n9. list_agents: List + GPT Agents, args: () -> str\n10. message_agent: Message GPT Agent, args: \"key\": + \"\", \"message\": \"\"\n11. start_agent: Start GPT Agent, args: + \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n12. + task_complete: Task Complete (Shutdown), args: \"reason\": \"\"\n\nResources:\n1. + Internet access for searches and information gathering.\n2. Long Term memory + management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File + output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your + actions to ensure you are performing to the best of your abilities.\n2. Constructively + self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions + and strategies to refine your approach.\n4. Every command has a cost, so be + smart and efficient. Aim to complete tasks in the least number of steps.\n5. 
+ Write all code to a file.\n\nYou should only respond in JSON format as described + below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": + \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- + long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": + \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": + \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} + \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", + "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": + "user", "content": "Determine which next command to use, and respond using the + format specified above:"}], "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '3334' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA4SST2/bMAzF7/sUBM92EC9J0/o2YCiQdRiwPz3NRaBItK1FljyJRpMF/u6D4jgd + kqK7PpF8v0fxgFphjrIWLJvWpMuH1ZePcq6WD5/vv9HXcju/fz+ruk+P37PWYYJu84sknzom0jWt + IdbOYoLSk2BSmGc3t4vpMlvMZwk2TpHBHKuW09lkkXLnNy6dzqYZJtgFURHmB2y9a1pes9uSDZgv + 58sEX2af9Wx+lyA7FuYs3d7d9AnK2mlJAfOfB2wojGO9M4Q5ihB0YGE5QjrLZGOAQ2EBAArk2nVV + zaHAHE7i6YF2HMUCVxBq1xkFgYVn2OzBk1DaVsA1gbaBfScjalhnE94xlNoQdGGsiNXroyZd0wir + JgUm/1p5EsFZbavB70dNUGofGAJTCzoAO+isIh9zqCtXiOJzLTiWevrdaU8KXAkNXTq1RtjBJIXH + QK/jRbsovhWvKGwKH6ww+z/0OpAiJt9oOzxb2g1xwgWR9Jq11KEZd22JjgRkQ+djs2BYvRm/7IzZ + w4ZK5wla7yRR/J3L7KElsR1dnrUx///PY9RJgcOYPhmP5rSoq5uxoqHB4bzUCwjhq8tTGx5i7Uv7 + 9dZHiCPIiaewPfYJltrqUK+HK8IcA7sWE9RW0Q7zaf/Uv/sLAAD//wMA45HODOkDAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7cd2e99b5c6fe9a4-DFW + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 26 May 2023 03:26:01 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - significant-gravitas + openai-processing-ms: + - '18270' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86499' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.334s + x-request-id: + - b5f5aa8c3ec85b75f27d68bfcc745c41 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\nThe + OS you are running on is: Ubuntu 22.04.2 LTS\n\nGOALS:\n\n1. Use the command + read_file to read the instructions_1.txt file\n2. Follow the instructions in + the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word limit for short + term memory. Your short term memory is short, so immediately save important + information to files.\n2. 
If you are unsure how you previously did something + or want to recall past events, thinking about similar events will help you remember.\n3. + No user assistance\n4. Exclusively use the commands listed below e.g. command_name\n\nCommands:\n1. + append_to_file: Append to file, args: \"filename\": \"\", \"text\": + \"\"\n2. delete_file: Delete file, args: \"filename\": \"\"\n3. + list_files: List Files in Directory, args: \"directory\": \"\"\n4. + read_file: Read a file, args: \"filename\": \"\"\n5. write_to_file: + Write to file, args: \"filename\": \"\", \"text\": \"\"\n6. + delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. get_hyperlinks: + Get hyperlinks, args: \"url\": \"\"\n8. get_text_summary: Get text summary, + args: \"url\": \"\", \"question\": \"\"\n9. list_agents: List + GPT Agents, args: () -> str\n10. message_agent: Message GPT Agent, args: \"key\": + \"\", \"message\": \"\"\n11. start_agent: Start GPT Agent, args: + \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n12. + task_complete: Task Complete (Shutdown), args: \"reason\": \"\"\n\nResources:\n1. + Internet access for searches and information gathering.\n2. Long Term memory + management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File + output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your + actions to ensure you are performing to the best of your abilities.\n2. Constructively + self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions + and strategies to refine your approach.\n4. Every command has a cost, so be + smart and efficient. Aim to complete tasks in the least number of steps.\n5. + Write all code to a file.\n\nYou should only respond in JSON format as described + below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": + \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- + long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": + \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": + \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} + \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", + "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": + "system", "content": "This reminds you of these events from your past: \nI was + created"}, {"role": "user", "content": "Determine which next command to use, + and respond using the format specified above:"}, {"role": "assistant", "content": + "{\n \"thoughts\": {\n \"text\": \"I should start by reading the instructions_1.txt + file using the read_file command.\",\n \"reasoning\": \"The first step + is to understand the instructions and what is required of me.\",\n \"plan\": + \"- Use the read_file command to read the instructions_1.txt file\\n- Analyze + the instructions and determine the next steps\",\n \"criticism\": \"I + need to ensure that I understand the instructions fully before proceeding.\",\n \"speak\": + \"I will start by reading the instructions file.\"\n },\n \"command\": + {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": + \"instructions_1.txt\"\n }\n }\n}"}, {"role": "system", "content": + "Command read_file returned: This task_id is 2314\nRead the file instructions_2.txt"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}], "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - 
keep-alive + Content-Length: + - '4415' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA4ySTW/bMAyG7/sVAs92ECdLmvq2YzeswID1MMxDoEi0rUaiPIlG0wb+74PjuOmc + Ydv1Jfny4ccRjIYcVC1ZucamN5/u7r99nuv9z9Xm0aldeb/bVC9f9u1i8fEBEvC7R1R8rpgp7xqL + bDxBAiqgZNSQZ+vNan6TrdbvE3Beo4UcqobT5WyVcht2Pp0v5xkk0EZZIeRHaIJ3DW/Z75Ei5Lfr + dQIX71c9Wy0TYM/SXqQsu+0SULU3CiPk34/gMI6+wVuEHGSMJrIk7ik9MVI/wbEgIYQogGvfVjXH + AnJxFs8BPHAvFnAnCFEL9iKg1IJrFIYih1b1gHG7mPGBRWksijYaqk4Zfer2pCnvnCQ9KyB56x9Q + Rk+GqqHJ17+49tjSUDwZEx5YRGThy98KBNeSxYW19Nb6p2nXxkoaGqbiIeKfUf9n1KKgVHwgaZ9f + 8CpP9CYaGYMzhG+wGZs4IVLBsFEmuumykWIbcByrJY2hv+M1lShba5/FDksfUDTBK0RtqJrOHhuU + +7HLk7H23/ek0wYHky4Zf+a8pquXIelw8H9d6QRBhmr6aUOgz72UX+OMECeQM09BHXQJlIZMrLfD + P0EOkX0DCRjSeIB83v3o3v0CAAD//wMASHHt0ukDAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7cd2ea1e7fd1e9a4-DFW + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 26 May 2023 03:26:25 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - significant-gravitas + openai-processing-ms: + - '20708' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '85998' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.668s + x-request-id: + - 0a4cec880e7cc8e6c9fd4a8c224e02eb + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\nThe + OS you are running on is: Ubuntu 22.04.2 LTS\n\nGOALS:\n\n1. Use the command + read_file to read the instructions_1.txt file\n2. Follow the instructions in + the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word limit for short + term memory. Your short term memory is short, so immediately save important + information to files.\n2. If you are unsure how you previously did something + or want to recall past events, thinking about similar events will help you remember.\n3. + No user assistance\n4. Exclusively use the commands listed below e.g. command_name\n\nCommands:\n1. + append_to_file: Append to file, args: \"filename\": \"\", \"text\": + \"\"\n2. delete_file: Delete file, args: \"filename\": \"\"\n3. + list_files: List Files in Directory, args: \"directory\": \"\"\n4. + read_file: Read a file, args: \"filename\": \"\"\n5. write_to_file: + Write to file, args: \"filename\": \"\", \"text\": \"\"\n6. + delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. get_hyperlinks: + Get hyperlinks, args: \"url\": \"\"\n8. get_text_summary: Get text summary, + args: \"url\": \"\", \"question\": \"\"\n9. list_agents: List + GPT Agents, args: () -> str\n10. message_agent: Message GPT Agent, args: \"key\": + \"\", \"message\": \"\"\n11. 
start_agent: Start GPT Agent, args: + \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n12. + task_complete: Task Complete (Shutdown), args: \"reason\": \"\"\n\nResources:\n1. + Internet access for searches and information gathering.\n2. Long Term memory + management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File + output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your + actions to ensure you are performing to the best of your abilities.\n2. Constructively + self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions + and strategies to refine your approach.\n4. Every command has a cost, so be + smart and efficient. Aim to complete tasks in the least number of steps.\n5. + Write all code to a file.\n\nYou should only respond in JSON format as described + below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": + \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- + long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": + \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": + \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} + \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", + "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": + "system", "content": "This reminds you of these events from your past: \nI was + created"}, {"role": "user", "content": "Determine which next command to use, + and respond using the format specified above:"}, {"role": "assistant", "content": + "{\n \"thoughts\": {\n \"text\": \"I should start by reading the instructions_1.txt + file using the read_file command.\",\n \"reasoning\": \"The first step + is to understand the instructions and what is required of me.\",\n \"plan\": + \"- Use the read_file command to read the instructions_1.txt file\\n- Analyze + the instructions and determine the next steps\",\n \"criticism\": \"I + need to ensure that I understand the instructions fully before proceeding.\",\n \"speak\": + \"I will start by reading the instructions file.\"\n },\n \"command\": + {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": + \"instructions_1.txt\"\n }\n }\n}"}, {"role": "system", "content": + "Command read_file returned: This task_id is 2314\nRead the file instructions_2.txt"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": + {\n \"text\": \"I need to read the instructions_2.txt file using the + read_file command.\",\n \"reasoning\": \"The instructions_2.txt file + contains the next set of instructions that I need to follow.\",\n \"plan\": + \"- Use the read_file command to read the instructions_2.txt file\\n- Analyze + the instructions and determine the next steps\",\n \"criticism\": \"I + need to ensure that I understand the instructions fully before proceeding.\",\n \"speak\": + \"I will read the instructions_2.txt file now.\"\n },\n \"command\": {\n \"name\": + \"read_file\",\n \"args\": {\n \"filename\": \"instructions_2.txt\"\n }\n }\n}"}, + {"role": "system", "content": "Command read_file returned: Read the file instructions_3.txt"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}], "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive 
+ Content-Length: + - '5374' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA4ySTW/TQBCG7/yK1ZzXUUyUJvINEIeqLQIEHKhRtNkd29uuZ63dMUmJ/N+R45gU + BwHX+XjneWfmANZABrpSrOvGJaub63ff74J7/fHDbm2+vOVbev91fvfm5uG2LkGC3z6g5lPHTPu6 + ccjWE0jQARWjgSy9Wi/nq3S5XkmovUEHGZQNJ4vZMuE2bH0yX8xTkNBGVSJkB2iCrxvesH9EipCl + 6dVLCWfxc2K5kMCelTuHFumyk6ArbzVGyO4PUGMchYN3CBmoGG1kRdxjemKk3sIhJyGEyIEr35YV + xxwycQqeErjnPpjDtSBEI9iLgMoIrlBYihxa3QPGzWLGexaFdSjaaKk8VvSlm2NM+7pWZGY5yOf6 + AVX0ZKkchnz6i2qPrSzFozDhnkVEFr74rUFwpVicWQvvnN9NpzZO0TAwEZ8j/hn1f6zmOSXiFSn3 + 9AMv6kQvYpAx1JbwGTZjEydEOli22sZ6umyk2AYcbbVkMPR3vKQSRevck9hi4QOKJniNaCyVU++x + QfU4TtlZ5/59TzpucBDp5PgzpzVdvAypGgf9XyudIKhQTj9tSPS15/ZLnBHiCHLiyamDTkJhycZq + M/wTZBDZNyDBksE9ZPPuW/fiJwAAAP//AwASKayp6gMAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7cd2eaafde3be9a4-DFW + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 26 May 2023 03:26:46 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - significant-gravitas + openai-processing-ms: + - '18484' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '85993' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.671s + x-request-id: + - 34eb238426602367df90ee10e18db817 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\nThe + OS you are running on is: Ubuntu 22.04.2 LTS\n\nGOALS:\n\n1. Use the command + read_file to read the instructions_1.txt file\n2. Follow the instructions in + the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word limit for short + term memory. Your short term memory is short, so immediately save important + information to files.\n2. If you are unsure how you previously did something + or want to recall past events, thinking about similar events will help you remember.\n3. + No user assistance\n4. Exclusively use the commands listed below e.g. command_name\n\nCommands:\n1. + append_to_file: Append to file, args: \"filename\": \"\", \"text\": + \"\"\n2. delete_file: Delete file, args: \"filename\": \"\"\n3. + list_files: List Files in Directory, args: \"directory\": \"\"\n4. + read_file: Read a file, args: \"filename\": \"\"\n5. write_to_file: + Write to file, args: \"filename\": \"\", \"text\": \"\"\n6. + delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. get_hyperlinks: + Get hyperlinks, args: \"url\": \"\"\n8. get_text_summary: Get text summary, + args: \"url\": \"\", \"question\": \"\"\n9. list_agents: List + GPT Agents, args: () -> str\n10. message_agent: Message GPT Agent, args: \"key\": + \"\", \"message\": \"\"\n11. 
start_agent: Start GPT Agent, args: + \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n12. + task_complete: Task Complete (Shutdown), args: \"reason\": \"\"\n\nResources:\n1. + Internet access for searches and information gathering.\n2. Long Term memory + management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File + output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your + actions to ensure you are performing to the best of your abilities.\n2. Constructively + self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions + and strategies to refine your approach.\n4. Every command has a cost, so be + smart and efficient. Aim to complete tasks in the least number of steps.\n5. + Write all code to a file.\n\nYou should only respond in JSON format as described + below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": + \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- + long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": + \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": + \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} + \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", + "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": + "system", "content": "This reminds you of these events from your past: \nI was + created"}, {"role": "user", "content": "Determine which next command to use, + and respond using the format specified above:"}, {"role": "assistant", "content": + "{\n \"thoughts\": {\n \"text\": \"I should start by reading the instructions_1.txt + file using the read_file command.\",\n \"reasoning\": \"The first step + is to understand the instructions and what is required of me.\",\n \"plan\": + \"- Use the read_file command to read the instructions_1.txt file\\n- Analyze + the instructions and determine the next steps\",\n \"criticism\": \"I + need to ensure that I understand the instructions fully before proceeding.\",\n \"speak\": + \"I will start by reading the instructions file.\"\n },\n \"command\": + {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": + \"instructions_1.txt\"\n }\n }\n}"}, {"role": "system", "content": + "Command read_file returned: This task_id is 2314\nRead the file instructions_2.txt"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": + {\n \"text\": \"I need to read the instructions_2.txt file using the + read_file command.\",\n \"reasoning\": \"The instructions_2.txt file + contains the next set of instructions that I need to follow.\",\n \"plan\": + \"- Use the read_file command to read the instructions_2.txt file\\n- Analyze + the instructions and determine the next steps\",\n \"criticism\": \"I + need to ensure that I understand the instructions fully before proceeding.\",\n \"speak\": + \"I will read the instructions_2.txt file now.\"\n },\n \"command\": {\n \"name\": + \"read_file\",\n \"args\": {\n \"filename\": \"instructions_2.txt\"\n }\n }\n}"}, + {"role": "system", "content": "Command read_file returned: Read the file instructions_3.txt"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": + {\n \"text\": \"I need to read the instructions_3.txt file using the + read_file 
command.\",\n \"reasoning\": \"The instructions_3.txt file + contains the next set of instructions that I need to follow.\",\n \"plan\": + \"- Use the read_file command to read the instructions_3.txt file\\n- Analyze + the instructions and determine the next steps\",\n \"criticism\": \"I + need to ensure that I understand the instructions fully before proceeding.\",\n \"speak\": + \"I will read the instructions_3.txt file now.\"\n },\n \"command\": {\n \"name\": + \"read_file\",\n \"args\": {\n \"filename\": \"instructions_3.txt\"\n }\n }\n}"}, + {"role": "system", "content": "Command read_file returned: Write the task_id + into the file output.txt\nShutdown"}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}], + "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '6353' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA5SST2/bMAzF7/sUAs9K4MT50/ncQ4MO22UdMMxDoMqMrcUmPYlGAgT+7oPtBG49 + DF2vj+J7P1K8gMsgAVsYsVVdzraPuy8P93S6f/z2/dNqGZX1E27jwtw9HD6fQAM//0Ir14655aou + URwTaLAejWAGyWJzt462i030UUPFGZaQQF7LLJ6vZ9L4Z55FcbQADU0wOUJygdpzVcte+IgUIFnE + m6WG0XwsbJcahMWUo7SOV60GW7CzGCD5cYEKw83Yc4mQgAnBBTEkHSaTIHUjXFJSSqkUpOAmLySk + kKireC3gWToxhZ0ixEwJq5N3gkoKVGLCce8y5Ui4F7iRupG5nEUdXImqCY7yvtI37YX3vW65qgxl + 8xT0yzSPJjA5yofIrwUqR0F8Y7slBOXxd+M8qgrfx6FVYDUO0AT8N1P3IOtcjEzx6tLQQDZTT295 + /D/dJMV6J866UE33jhQajz2Z2r3wt+w9WnkzZzpOqNEcbyEnV5bv+Vni0zyFwa7Vtzu6zv/XGZGp + cEh6ta8JkPH59AKHQvd2tBhRXvVPD3YZL1Y3wp7yCptSC62GgyMXiv1wcpBAEK5Bg6MMz5BE7c/2 + wx8AAAD//wMAvsNLCRsEAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7cd2eb32da79e9a4-DFW + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 26 May 2023 03:27:10 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - significant-gravitas + openai-processing-ms: + - '21216' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '85987' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.675s + x-request-id: + - b054f20306579e7810b86bc92965ef9e + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\nThe + OS you are running on is: Ubuntu 22.04.2 LTS\n\nGOALS:\n\n1. Use the command + read_file to read the instructions_1.txt file\n2. Follow the instructions in + the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word limit for short + term memory. Your short term memory is short, so immediately save important + information to files.\n2. 
If you are unsure how you previously did something + or want to recall past events, thinking about similar events will help you remember.\n3. + No user assistance\n4. Exclusively use the commands listed below e.g. command_name\n\nCommands:\n1. + append_to_file: Append to file, args: \"filename\": \"\", \"text\": + \"\"\n2. delete_file: Delete file, args: \"filename\": \"\"\n3. + list_files: List Files in Directory, args: \"directory\": \"\"\n4. + read_file: Read a file, args: \"filename\": \"\"\n5. write_to_file: + Write to file, args: \"filename\": \"\", \"text\": \"\"\n6. + delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. get_hyperlinks: + Get hyperlinks, args: \"url\": \"\"\n8. get_text_summary: Get text summary, + args: \"url\": \"\", \"question\": \"\"\n9. list_agents: List + GPT Agents, args: () -> str\n10. message_agent: Message GPT Agent, args: \"key\": + \"\", \"message\": \"\"\n11. start_agent: Start GPT Agent, args: + \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n12. + task_complete: Task Complete (Shutdown), args: \"reason\": \"\"\n\nResources:\n1. + Internet access for searches and information gathering.\n2. Long Term memory + management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File + output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your + actions to ensure you are performing to the best of your abilities.\n2. Constructively + self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions + and strategies to refine your approach.\n4. Every command has a cost, so be + smart and efficient. Aim to complete tasks in the least number of steps.\n5. + Write all code to a file.\n\nYou should only respond in JSON format as described + below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": + \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- + long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": + \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": + \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} + \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", + "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": + "system", "content": "This reminds you of these events from your past: \nI was + created"}, {"role": "user", "content": "Determine which next command to use, + and respond using the format specified above:"}, {"role": "assistant", "content": + "{\n \"thoughts\": {\n \"text\": \"I should start by reading the instructions_1.txt + file using the read_file command.\",\n \"reasoning\": \"The first step + is to understand the instructions and what is required of me.\",\n \"plan\": + \"- Use the read_file command to read the instructions_1.txt file\\n- Analyze + the instructions and determine the next steps\",\n \"criticism\": \"I + need to ensure that I understand the instructions fully before proceeding.\",\n \"speak\": + \"I will start by reading the instructions file.\"\n },\n \"command\": + {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": + \"instructions_1.txt\"\n }\n }\n}"}, {"role": "system", "content": + "Command read_file returned: This task_id is 2314\nRead the file instructions_2.txt"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": + {\n \"text\": \"I need to read the instructions_2.txt file using the + 
read_file command.\",\n \"reasoning\": \"The instructions_2.txt file + contains the next set of instructions that I need to follow.\",\n \"plan\": + \"- Use the read_file command to read the instructions_2.txt file\\n- Analyze + the instructions and determine the next steps\",\n \"criticism\": \"I + need to ensure that I understand the instructions fully before proceeding.\",\n \"speak\": + \"I will read the instructions_2.txt file now.\"\n },\n \"command\": {\n \"name\": + \"read_file\",\n \"args\": {\n \"filename\": \"instructions_2.txt\"\n }\n }\n}"}, + {"role": "system", "content": "Command read_file returned: Read the file instructions_3.txt"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": + {\n \"text\": \"I need to read the instructions_3.txt file using the + read_file command.\",\n \"reasoning\": \"The instructions_3.txt file + contains the next set of instructions that I need to follow.\",\n \"plan\": + \"- Use the read_file command to read the instructions_3.txt file\\n- Analyze + the instructions and determine the next steps\",\n \"criticism\": \"I + need to ensure that I understand the instructions fully before proceeding.\",\n \"speak\": + \"I will read the instructions_3.txt file now.\"\n },\n \"command\": {\n \"name\": + \"read_file\",\n \"args\": {\n \"filename\": \"instructions_3.txt\"\n }\n }\n}"}, + {"role": "system", "content": "Command read_file returned: Write the task_id + into the file output.txt\nShutdown"}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}, + {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I need to write the task_id into the output.txt file using the write_to_file + command.\",\n \"reasoning\": \"The instructions require me to write the + task_id into the output.txt file, so I need to use the write_to_file command + to do that.\",\n \"plan\": \"- Use the write_to_file command to write + the task_id into the output.txt file\",\n \"criticism\": \"I need to + ensure that I write the correct task_id into the output.txt file.\",\n \"speak\": + \"I will write the task_id into the output.txt file now.\"\n },\n \"command\": + {\n \"name\": \"write_to_file\",\n \"args\": {\n \"filename\": + \"output.txt\",\n \"text\": \"2314\"\n }\n }\n}"}, {"role": + "system", "content": "Command write_to_file returned: File written to successfully."}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}], "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '7362' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA3SRS2/bMBCE7/0VxJ4pw684jm4FiiIpgp5aoEUVGDS1lhjzoXJXsQ1D/73Qw7Wr + ttdd8puZnTOYHFLQpWLtKpvcf3rdfv84f36c7ec791hV+Pl5efx2evuwPEWQELavqHn4MdHBVRbZ + BA8SdETFmEM6W62XD4v79cNcggs5WkihqDhZTO4SruM2JNPFdAYSalIFQnqGKgZX8YbDHj1BOrtb + rSVc4dfFbCWBAyt7Ha3Wy0aCLoPRSJD+OINDuoBjsAgpKCJDrDy3NoNn9G2Ec+aFECIDLkNdlEwZ + pGIYDgs8cjvM4El4xFxwEFTWnIeDFzUZXwguUbCi/Wawi0IH55TPJxnIW1ZERcEbX/TALyUK44lj + rduIJCL+rE1E4fBWZUyprPI9IBFfCf8vfwsZMXQ0bLQhN46GnurYMhWLJ1Gqt47WYXOhrO3UBp95 + J0tii7sQsZPi9h7/Mk0Vqv1F7GCsvR7Rh8Mkg/5xIy+FDBn+6sMrhz3nj8wjORWLcZW3FfSA922c + 
LsE1ItVaI9Gutvb021XnbDCY+QYaCTvjDZWbHgcpEIcKJBif4xHSafPSvPsFAAD//wMAvNN1YlgD + AAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7cd2ebc78d28e9a4-DFW + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Wed, 24 May 2023 14:18:31 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - significant-gravitas + openai-processing-ms: + - '19007' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86473' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.35s + x-request-id: + - d281dd2fbae62c2925a71cdef320242e + status: + code: 200 + message: OK version: 1 diff --git a/tests/integration/challenges/memory/test_memory_challenge_a.py b/tests/integration/challenges/memory/test_memory_challenge_a.py index 1b1bcfe0..063824c6 100644 --- a/tests/integration/challenges/memory/test_memory_challenge_a.py +++ b/tests/integration/challenges/memory/test_memory_challenge_a.py @@ -2,6 +2,7 @@ import pytest from autogpt.agent import Agent from autogpt.commands.file_operations import read_file, write_to_file +from autogpt.config import Config from tests.integration.challenges.utils import get_level_to_run, run_interaction_loop from tests.utils import requires_api_key @@ -16,6 +17,7 @@ def test_memory_challenge_a( user_selected_level: int, patched_api_requestor: None, monkeypatch: pytest.MonkeyPatch, + config: Config, ) -> None: """ The agent reads a file containing a task_id. Then, it reads a series of other files. 
@@ -29,12 +31,12 @@ def test_memory_challenge_a( num_files = get_level_to_run(user_selected_level, LEVEL_CURRENTLY_BEATEN, MAX_LEVEL) task_id = "2314" - create_instructions_files(memory_management_agent, num_files, task_id) + create_instructions_files(memory_management_agent, num_files, task_id, config) run_interaction_loop(monkeypatch, memory_management_agent, num_files + 2) file_path = str(memory_management_agent.workspace.get_path("output.txt")) - content = read_file(file_path) + content = read_file(file_path, config) assert task_id in content, f"Expected the file to contain {task_id}" @@ -42,6 +44,7 @@ def create_instructions_files( memory_management_agent: Agent, num_files: int, task_id: str, + config: Config, base_filename: str = "instructions_", ) -> None: """ @@ -56,7 +59,7 @@ def create_instructions_files( content = generate_content(i, task_id, base_filename, num_files) file_name = f"{base_filename}{i}.txt" file_path = str(memory_management_agent.workspace.get_path(file_name)) - write_to_file(file_path, content) + write_to_file(file_path, content, config) def generate_content( diff --git a/tests/integration/challenges/memory/test_memory_challenge_b.py b/tests/integration/challenges/memory/test_memory_challenge_b.py index f05c1cfc..7dc00000 100644 --- a/tests/integration/challenges/memory/test_memory_challenge_b.py +++ b/tests/integration/challenges/memory/test_memory_challenge_b.py @@ -2,6 +2,7 @@ import pytest from autogpt.agent import Agent from autogpt.commands.file_operations import read_file, write_to_file +from autogpt.config import Config from tests.integration.challenges.utils import ( generate_noise, get_level_to_run, @@ -21,6 +22,7 @@ def test_memory_challenge_b( user_selected_level: int, patched_api_requestor: None, monkeypatch: pytest.MonkeyPatch, + config: Config, ) -> None: """ The agent reads a series of files, each containing a task_id and noise. 
After reading 'n' files, @@ -34,12 +36,12 @@ def test_memory_challenge_b( user_selected_level, LEVEL_CURRENTLY_BEATEN, MAX_LEVEL ) task_ids = [str(i * 1111) for i in range(1, current_level + 1)] - create_instructions_files(memory_management_agent, current_level, task_ids) + create_instructions_files(memory_management_agent, current_level, task_ids, config) run_interaction_loop(monkeypatch, memory_management_agent, current_level + 2) file_path = str(memory_management_agent.workspace.get_path("output.txt")) - content = read_file(file_path) + content = read_file(file_path, config) for task_id in task_ids: assert task_id in content, f"Expected the file to contain {task_id}" @@ -48,6 +50,7 @@ def create_instructions_files( memory_management_agent: Agent, level: int, task_ids: list, + config: Config, base_filename: str = "instructions_", ) -> None: """ @@ -63,7 +66,7 @@ def create_instructions_files( content = generate_content(i, task_ids, base_filename, level) file_name = f"{base_filename}{i}.txt" file_path = str(memory_management_agent.workspace.get_path(file_name)) - write_to_file(file_path, content) + write_to_file(file_path, content, config) def generate_content(index: int, task_ids: list, base_filename: str, level: int) -> str: diff --git a/tests/integration/challenges/memory/test_memory_challenge_c.py b/tests/integration/challenges/memory/test_memory_challenge_c.py index f6afad97..e113fcdf 100644 --- a/tests/integration/challenges/memory/test_memory_challenge_c.py +++ b/tests/integration/challenges/memory/test_memory_challenge_c.py @@ -2,6 +2,7 @@ import pytest from autogpt.agent import Agent from autogpt.commands.file_operations import read_file, write_to_file +from autogpt.config import Config from tests.integration.challenges.utils import ( generate_noise, get_level_to_run, @@ -21,6 +22,7 @@ def test_memory_challenge_c( user_selected_level: int, patched_api_requestor: None, monkeypatch: pytest.MonkeyPatch, + config: Config, ) -> None: """ Instead of reading task Ids from files as with the previous challenges, the agent now must remember @@ -49,7 +51,7 @@ def test_memory_challenge_c( level_silly_phrases = silly_phrases[:current_level] create_instructions_files( - memory_management_agent, current_level, level_silly_phrases + memory_management_agent, current_level, level_silly_phrases, config=config ) run_interaction_loop(monkeypatch, memory_management_agent, current_level + 2) @@ -64,6 +66,7 @@ def create_instructions_files( memory_management_agent: Agent, level: int, task_ids: list, + config: Config, base_filename: str = "instructions_", ) -> None: """ @@ -79,7 +82,7 @@ def create_instructions_files( content = generate_content(i, task_ids, base_filename, level) file_name = f"{base_filename}{i}.txt" file_path = str(memory_management_agent.workspace.get_path(file_name)) - write_to_file(file_path, content) + write_to_file(file_path, content, config) def generate_content( diff --git a/tests/integration/test_commands.py b/tests/integration/test_commands.py index 59f63857..1cbb3929 100644 --- a/tests/integration/test_commands.py +++ b/tests/integration/test_commands.py @@ -10,7 +10,7 @@ from tests.utils import requires_api_key @pytest.mark.vcr @pytest.mark.integration_test @requires_api_key("OPENAI_API_KEY") -def test_make_agent(patched_api_requestor) -> None: +def test_make_agent(patched_api_requestor, config) -> None: """Test that an agent can be created""" # Use the mock agent manager to avoid creating a real agent with patch("openai.ChatCompletion.create") as mock: @@ -20,9 +20,13 @@ def 
test_make_agent(patched_api_requestor) -> None: response.usage.prompt_tokens = 1 response.usage.completion_tokens = 1 mock.return_value = response - start_agent("Test Agent", "chat", "Hello, how are you?", "gpt-3.5-turbo") - agents = list_agents() + start_agent( + "Test Agent", "chat", "Hello, how are you?", config, "gpt-3.5-turbo" + ) + agents = list_agents(config) assert "List of agents:\n0: chat" == agents - start_agent("Test Agent 2", "write", "Hello, how are you?", "gpt-3.5-turbo") - agents = list_agents() + start_agent( + "Test Agent 2", "write", "Hello, how are you?", config, "gpt-3.5-turbo" + ) + agents = list_agents(config) assert "List of agents:\n0: chat\n1: write" == agents diff --git a/tests/integration/test_execute_code.py b/tests/integration/test_execute_code.py index 2d4e0293..c75d66fa 100644 --- a/tests/integration/test_execute_code.py +++ b/tests/integration/test_execute_code.py @@ -29,22 +29,22 @@ def random_string(): return "".join(random.choice(string.ascii_lowercase) for _ in range(10)) -def test_execute_python_file(python_test_file: str, random_string: str): - result = sut.execute_python_file(python_test_file) - assert result == f"Hello {random_string}!\n" +def test_execute_python_file(python_test_file: str, random_string: str, config): + result: str = sut.execute_python_file(python_test_file, config) + assert result.replace("\r", "") == f"Hello {random_string}!\n" -def test_execute_python_file_invalid(): +def test_execute_python_file_invalid(config): assert all( - s in sut.execute_python_file("not_python").lower() + s in sut.execute_python_file("not_python", config).lower() for s in ["error:", "invalid", ".py"] ) assert all( - s in sut.execute_python_file("notexist.py").lower() + s in sut.execute_python_file("notexist.py", config).lower() for s in ["error:", "does not exist"] ) -def test_execute_shell(config_allow_execute, random_string): - result = sut.execute_shell(f"echo 'Hello {random_string}!'") +def test_execute_shell(config_allow_execute, random_string, config): + result = sut.execute_shell(f"echo 'Hello {random_string}!'", config) assert f"Hello {random_string}!" 
in result diff --git a/tests/integration/test_git_commands.py b/tests/integration/test_git_commands.py index d942b1b7..375a9cf4 100644 --- a/tests/integration/test_git_commands.py +++ b/tests/integration/test_git_commands.py @@ -20,7 +20,7 @@ def test_clone_auto_gpt_repository(workspace, mock_clone_from, config): expected_output = f"Cloned {url} to {clone_path}" - clone_result = clone_repository(url=url, clone_path=clone_path) + clone_result = clone_repository(url=url, clone_path=clone_path, config=config) assert clone_result == expected_output mock_clone_from.assert_called_once_with( @@ -29,7 +29,7 @@ def test_clone_auto_gpt_repository(workspace, mock_clone_from, config): ) -def test_clone_repository_error(workspace, mock_clone_from): +def test_clone_repository_error(workspace, mock_clone_from, config): url = "https://github.com/this-repository/does-not-exist.git" clone_path = str(workspace.get_path("does-not-exist")) @@ -37,6 +37,6 @@ def test_clone_repository_error(workspace, mock_clone_from): "clone", "fatal: repository not found", "" ) - result = clone_repository(url=url, clone_path=clone_path) + result = clone_repository(url=url, clone_path=clone_path, config=config) assert "Error: " in result diff --git a/tests/integration/test_google_search.py b/tests/integration/test_google_search.py index 50a2ce35..e379f78e 100644 --- a/tests/integration/test_google_search.py +++ b/tests/integration/test_google_search.py @@ -38,12 +38,14 @@ def test_safe_google_results_invalid_input(): ("no results", 1, "[]", []), ], ) -def test_google_search(query, num_results, expected_output, return_value, mocker): +def test_google_search( + query, num_results, expected_output, return_value, mocker, config +): mock_ddg = mocker.Mock() mock_ddg.return_value = return_value mocker.patch("autogpt.commands.google_search.DDGS.text", mock_ddg) - actual_output = google_search(query, num_results=num_results) + actual_output = google_search(query, config, num_results=num_results) expected_output = safe_google_results(expected_output) assert actual_output == expected_output @@ -77,10 +79,10 @@ def mock_googleapiclient(mocker): ], ) def test_google_official_search( - query, num_results, expected_output, search_results, mock_googleapiclient + query, num_results, expected_output, search_results, mock_googleapiclient, config ): mock_googleapiclient.return_value = search_results - actual_output = google_official_search(query, num_results=num_results) + actual_output = google_official_search(query, config, num_results=num_results) assert actual_output == safe_google_results(expected_output) @@ -111,6 +113,7 @@ def test_google_official_search_errors( mock_googleapiclient, http_code, error_msg, + config, ): class resp: def __init__(self, _status, _reason): @@ -127,5 +130,5 @@ def test_google_official_search_errors( ) mock_googleapiclient.side_effect = error - actual_output = google_official_search(query, num_results=num_results) + actual_output = google_official_search(query, config, num_results=num_results) assert actual_output == safe_google_results(expected_output) diff --git a/tests/mocks/mock_commands.py b/tests/mocks/mock_commands.py index d5bf5df0..42b0ea11 100644 --- a/tests/mocks/mock_commands.py +++ b/tests/mocks/mock_commands.py @@ -1,7 +1,9 @@ from autogpt.commands.command import command -@command("function_based", "Function-based test command") +@command( + "function_based", "Function-based test command", "(arg1: int, arg2: str) -> str" +) def function_based(arg1: int, arg2: str) -> str: """A function-based test 
command that returns a string with the two arguments separated by a dash.""" return f"{arg1} - {arg2}" diff --git a/tests/test_analyze_code.py b/tests/test_analyze_code.py index 48bded03..98ab8b72 100644 --- a/tests/test_analyze_code.py +++ b/tests/test_analyze_code.py @@ -3,6 +3,7 @@ import pytest from autogpt.commands.analyze_code import analyze_code +from autogpt.config import Config @pytest.fixture @@ -15,46 +16,59 @@ class TestAnalyzeCode: # Positive Test mock_call_ai_function.return_value = ["Suggestion 1", "Suggestion 2"] code = "def example_function():\n pass" - result = analyze_code(code) + config = Config() + result = analyze_code(code, config) assert result == ["Suggestion 1", "Suggestion 2"] mock_call_ai_function.assert_called_once_with( "def analyze_code(code: str) -> list[str]:", [code], "Analyzes the given code and returns a list of suggestions for improvements.", + config=config, ) - def test_negative_analyze_code(self, mock_call_ai_function): + def test_negative_analyze_code( + self, + mock_call_ai_function, + config: Config, + ): # Negative Test mock_call_ai_function.return_value = [] code = "def example_function():\n pass" - result = analyze_code(code) + result = analyze_code(code, config) assert result == [] mock_call_ai_function.assert_called_once_with( "def analyze_code(code: str) -> list[str]:", [code], "Analyzes the given code and returns a list of suggestions for improvements.", + config=config, ) - def test_error_analyze_code(self, mock_call_ai_function): + def test_error_analyze_code(self, mock_call_ai_function, config: Config): # Error Test mock_call_ai_function.side_effect = Exception("Error occurred") code = "def example_function():\n pass" with pytest.raises(Exception): - analyze_code(code) + result = analyze_code(code, config) mock_call_ai_function.assert_called_once_with( "def analyze_code(code: str) -> list[str]:", [code], "Analyzes the given code and returns a list of suggestions for improvements.", + config=config, ) - def test_edge_analyze_code_empty_code(self, mock_call_ai_function): + def test_edge_analyze_code_empty_code( + self, + mock_call_ai_function, + config: Config, + ): # Edge Test mock_call_ai_function.return_value = ["Suggestion 1", "Suggestion 2"] code = "" - result = analyze_code(code) + result = analyze_code(code, config) assert result == ["Suggestion 1", "Suggestion 2"] mock_call_ai_function.assert_called_once_with( "def analyze_code(code: str) -> list[str]:", [code], "Analyzes the given code and returns a list of suggestions for improvements.", + config=config, ) diff --git a/tests/test_audio_text_read_audio.py b/tests/test_audio_text_read_audio.py index d8f2af52..1f324601 100644 --- a/tests/test_audio_text_read_audio.py +++ b/tests/test_audio_text_read_audio.py @@ -8,12 +8,10 @@ import pytest from autogpt.commands.audio_text import read_audio -patch_func1 = "autogpt.commands.audio_text.CFG" - class TestReadAudio: @patch("requests.post") - def test_positive_read_audio(self, mock_post): + def test_positive_read_audio(self, mock_post, config): # Positive Test audio_data = b"test_audio_data" mock_response = MagicMock() @@ -21,41 +19,39 @@ class TestReadAudio: {"text": "Hello, world!"} ) mock_post.return_value = mock_response - with patch(patch_func1) as mock_cfg: - mock_cfg.huggingface_api_token = "testing-token" - result = read_audio(audio_data) - assert result == "The audio says: Hello, world!" 
- mock_post.assert_called_once_with( - f"https://api-inference.huggingface.co/models/{mock_cfg.huggingface_audio_to_text_model}", - headers={"Authorization": f"Bearer {mock_cfg.huggingface_api_token}"}, - data=audio_data, - ) + + config.huggingface_api_token = "testing-token" + result = read_audio(audio_data, config) + assert result == "The audio says: Hello, world!" + mock_post.assert_called_once_with( + f"https://api-inference.huggingface.co/models/{config.huggingface_audio_to_text_model}", + headers={"Authorization": f"Bearer {config.huggingface_api_token}"}, + data=audio_data, + ) @patch("requests.post") - def test_negative_read_audio(self, mock_post): + def test_negative_read_audio(self, mock_post, config): # Negative Test audio_data = b"test_audio_data" mock_response = MagicMock() mock_response.content.decode.return_value = json.dumps({"text": ""}) mock_post.return_value = mock_response - with patch(patch_func1) as mock_cfg: - mock_cfg.huggingface_api_token = "testing-token" - result = read_audio(audio_data) - assert result == "The audio says: " - mock_post.assert_called_once_with( - f"https://api-inference.huggingface.co/models/{mock_cfg.huggingface_audio_to_text_model}", - headers={"Authorization": f"Bearer {mock_cfg.huggingface_api_token}"}, - data=audio_data, - ) + config.huggingface_api_token = "testing-token" + result = read_audio(audio_data, config) + assert result == "The audio says: " + mock_post.assert_called_once_with( + f"https://api-inference.huggingface.co/models/{config.huggingface_audio_to_text_model}", + headers={"Authorization": f"Bearer {config.huggingface_api_token}"}, + data=audio_data, + ) - def test_error_read_audio(self): + def test_error_read_audio(self, config): # Error Test - with patch(patch_func1) as mock_cfg: - mock_cfg.huggingface_api_token = None - with pytest.raises(ValueError): - read_audio(b"test_audio_data") + config.huggingface_api_token = None + with pytest.raises(ValueError): + read_audio(b"test_audio_data", config) - def test_edge_read_audio_empty_audio(self): + def test_edge_read_audio_empty_audio(self, config): # Edge Test with pytest.raises(ValueError): - read_audio(b"") + read_audio(b"", config) diff --git a/tests/test_audio_text_read_audio_from_file.py b/tests/test_audio_text_read_audio_from_file.py index b8cb352c..c8d66a06 100644 --- a/tests/test_audio_text_read_audio_from_file.py +++ b/tests/test_audio_text_read_audio_from_file.py @@ -7,6 +7,7 @@ from unittest.mock import mock_open, patch import pytest from autogpt.commands.audio_text import read_audio_from_file +from autogpt.config import Config @pytest.fixture @@ -22,7 +23,7 @@ class TestReadAudioFromFile: m = mock_open(read_data=mock_file_data) with patch("builtins.open", m): - result = read_audio_from_file("test_audio.wav") + result = read_audio_from_file("test_audio.wav", Config()) assert result == "This is a sample text." m.assert_called_once_with("test_audio.wav", "rb") @@ -33,14 +34,14 @@ class TestReadAudioFromFile: m = mock_open(read_data=mock_file_data) with patch("builtins.open", m): - result = read_audio_from_file("test_audio.wav") + result = read_audio_from_file("test_audio.wav", Config()) assert result != "Incorrect text." 
m.assert_called_once_with("test_audio.wav", "rb") def test_error_read_audio_from_file(self): # Error test with pytest.raises(FileNotFoundError): - read_audio_from_file("non_existent_file.wav") + read_audio_from_file("non_existent_file.wav", Config()) def test_edge_empty_audio_file(self, mock_read_audio): # Edge test @@ -49,6 +50,6 @@ class TestReadAudioFromFile: m = mock_open(read_data=mock_file_data) with patch("builtins.open", m): - result = read_audio_from_file("empty_audio.wav") + result = read_audio_from_file("empty_audio.wav", Config()) assert result == "" m.assert_called_once_with("empty_audio.wav", "rb") diff --git a/tests/test_commands.py b/tests/test_commands.py index 0961d0af..5779a8a3 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -7,6 +7,8 @@ import pytest from autogpt.commands.command import Command, CommandRegistry +SIGNATURE = "(arg1: int, arg2: str) -> str" + class TestCommand: """Test cases for the Command class.""" @@ -23,6 +25,7 @@ class TestCommand: name="example", description="Example command", method=self.example_command_method, + signature=SIGNATURE, ) assert cmd.name == "example" @@ -47,19 +50,11 @@ class TestCommand: name="example", description="Example command", method=self.example_command_method, + signature=SIGNATURE, ) with pytest.raises(TypeError): cmd(arg1="invalid", does_not_exist="test") - def test_command_default_signature(self): - """Test that the default signature is generated correctly.""" - cmd = Command( - name="example", - description="Example command", - method=self.example_command_method, - ) - assert cmd.signature == "(arg1: int, arg2: str) -> str" - def test_command_custom_signature(self): custom_signature = "custom_arg1: int, custom_arg2: str" cmd = Command( @@ -84,6 +79,7 @@ class TestCommandRegistry: name="example", description="Example command", method=self.example_command_method, + signature=SIGNATURE, ) registry.register(cmd) @@ -98,6 +94,7 @@ class TestCommandRegistry: name="example", description="Example command", method=self.example_command_method, + signature=SIGNATURE, ) registry.register(cmd) @@ -112,6 +109,7 @@ class TestCommandRegistry: name="example", description="Example command", method=self.example_command_method, + signature=SIGNATURE, ) registry.register(cmd) @@ -133,6 +131,7 @@ class TestCommandRegistry: name="example", description="Example command", method=self.example_command_method, + signature=SIGNATURE, ) registry.register(cmd) @@ -154,6 +153,7 @@ class TestCommandRegistry: name="example", description="Example command", method=self.example_command_method, + signature=SIGNATURE, ) registry.register(cmd) diff --git a/tests/test_config.py b/tests/test_config.py index 98134dc8..1e156f98 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -135,6 +135,7 @@ def test_smart_and_fast_llm_models_set_to_gpt4(mock_list_models, config): mock_list_models.return_value = {"data": [{"id": "gpt-3.5-turbo"}]} create_config( + config=config, continuous=False, continuous_limit=False, ai_settings_file="", diff --git a/tests/test_image_gen.py b/tests/test_image_gen.py index a18855e7..59962af0 100644 --- a/tests/test_image_gen.py +++ b/tests/test_image_gen.py @@ -66,6 +66,7 @@ def test_sd_webui_negative_prompt(config, workspace, image_size): gen_image = functools.partial( generate_image_with_sd_webui, prompt="astronaut riding a horse", + config=config, size=image_size, extra={"seed": 123}, ) @@ -101,7 +102,7 @@ def generate_and_validate( config.huggingface_image_model = hugging_face_image_model prompt = 
"astronaut riding a horse" - image_path = lst(generate_image(prompt, image_size, **kwargs)) + image_path = lst(generate_image(prompt, config, image_size, **kwargs)) assert image_path.exists() with Image.open(image_path) as img: assert img.size == (image_size, image_size) @@ -146,7 +147,7 @@ def test_huggingface_fail_request_with_delay( with patch("time.sleep") as mock_sleep: # Verify request fails. - result = generate_image(prompt, image_size) + result = generate_image(prompt, config, image_size) assert result == "Error creating image." # Verify retry was called with delay if delay is in return_text @@ -156,8 +157,7 @@ def test_huggingface_fail_request_with_delay( mock_sleep.assert_not_called() -def test_huggingface_fail_request_with_delay(mocker): - config = Config() +def test_huggingface_fail_request_with_delay(mocker, config): config.huggingface_api_token = "1" # Mock requests.post @@ -172,7 +172,7 @@ def test_huggingface_fail_request_with_delay(mocker): config.image_provider = "huggingface" config.huggingface_image_model = "CompVis/stable-diffusion-v1-4" - result = generate_image("astronaut riding a horse", 512) + result = generate_image("astronaut riding a horse", config, 512) assert result == "Error creating image." @@ -180,8 +180,7 @@ def test_huggingface_fail_request_with_delay(mocker): mock_sleep.assert_called_with(0) -def test_huggingface_fail_request_no_delay(mocker): - config = Config() +def test_huggingface_fail_request_no_delay(mocker, config): config.huggingface_api_token = "1" # Mock requests.post @@ -198,7 +197,7 @@ def test_huggingface_fail_request_no_delay(mocker): config.image_provider = "huggingface" config.huggingface_image_model = "CompVis/stable-diffusion-v1-4" - result = generate_image("astronaut riding a horse", 512) + result = generate_image("astronaut riding a horse", config, 512) assert result == "Error creating image." @@ -206,8 +205,7 @@ def test_huggingface_fail_request_no_delay(mocker): mock_sleep.assert_not_called() -def test_huggingface_fail_request_bad_json(mocker): - config = Config() +def test_huggingface_fail_request_bad_json(mocker, config): config.huggingface_api_token = "1" # Mock requests.post @@ -222,7 +220,7 @@ def test_huggingface_fail_request_bad_json(mocker): config.image_provider = "huggingface" config.huggingface_image_model = "CompVis/stable-diffusion-v1-4" - result = generate_image("astronaut riding a horse", 512) + result = generate_image("astronaut riding a horse", config, 512) assert result == "Error creating image." @@ -230,8 +228,7 @@ def test_huggingface_fail_request_bad_json(mocker): mock_sleep.assert_not_called() -def test_huggingface_fail_request_bad_image(mocker): - config = Config() +def test_huggingface_fail_request_bad_image(mocker, config): config.huggingface_api_token = "1" # Mock requests.post @@ -241,13 +238,12 @@ def test_huggingface_fail_request_bad_image(mocker): config.image_provider = "huggingface" config.huggingface_image_model = "CompVis/stable-diffusion-v1-4" - result = generate_image("astronaut riding a horse", 512) + result = generate_image("astronaut riding a horse", config, 512) assert result == "Error creating image." -def test_huggingface_fail_missing_api_token(mocker): - config = Config() +def test_huggingface_fail_missing_api_token(mocker, config): config.image_provider = "huggingface" config.huggingface_image_model = "CompVis/stable-diffusion-v1-4" @@ -256,4 +252,4 @@ def test_huggingface_fail_missing_api_token(mocker): # Verify request raises an error. 
with pytest.raises(ValueError): - generate_image("astronaut riding a horse", 512) + generate_image("astronaut riding a horse", config, 512) diff --git a/tests/unit/test_browse_scrape_links.py b/tests/unit/test_browse_scrape_links.py index 3b7442eb..15f495db 100644 --- a/tests/unit/test_browse_scrape_links.py +++ b/tests/unit/test_browse_scrape_links.py @@ -43,14 +43,14 @@ class TestScrapeLinks: provided with a valid url that returns a webpage with hyperlinks. """ - def test_valid_url_with_hyperlinks(self): + def test_valid_url_with_hyperlinks(self, config): url = "https://www.google.com" - result = scrape_links(url) + result = scrape_links(url, config=config) assert len(result) > 0 assert isinstance(result, list) assert isinstance(result[0], str) - def test_valid_url(self, mocker): + def test_valid_url(self, mocker, config): """Test that the function returns correctly formatted hyperlinks when given a valid url.""" # Mock the requests.get() function to return a response with sample HTML containing hyperlinks mock_response = mocker.Mock() @@ -61,12 +61,12 @@ class TestScrapeLinks: mocker.patch("requests.Session.get", return_value=mock_response) # Call the function with a valid URL - result = scrape_links("https://www.example.com") + result = scrape_links("https://www.example.com", config) # Assert that the function returns correctly formatted hyperlinks assert result == ["Google (https://www.google.com)"] - def test_invalid_url(self, mocker): + def test_invalid_url(self, mocker, config): """Test that the function returns "error" when given an invalid url.""" # Mock the requests.get() function to return an HTTP error response mock_response = mocker.Mock() @@ -74,12 +74,12 @@ class TestScrapeLinks: mocker.patch("requests.Session.get", return_value=mock_response) # Call the function with an invalid URL - result = scrape_links("https://www.invalidurl.com") + result = scrape_links("https://www.invalidurl.com", config) # Assert that the function returns "error" assert "Error:" in result - def test_no_hyperlinks(self, mocker): + def test_no_hyperlinks(self, mocker, config): """Test that the function returns an empty list when the html contains no hyperlinks.""" # Mock the requests.get() function to return a response with sample HTML containing no hyperlinks mock_response = mocker.Mock() @@ -88,12 +88,12 @@ class TestScrapeLinks: mocker.patch("requests.Session.get", return_value=mock_response) # Call the function with a URL containing no hyperlinks - result = scrape_links("https://www.example.com") + result = scrape_links("https://www.example.com", config) # Assert that the function returns an empty list assert result == [] - def test_scrape_links_with_few_hyperlinks(self, mocker): + def test_scrape_links_with_few_hyperlinks(self, mocker, config): """Test that scrape_links() correctly extracts and formats hyperlinks from a sample HTML containing a few hyperlinks.""" mock_response = mocker.Mock() mock_response.status_code = 200 @@ -109,7 +109,7 @@ class TestScrapeLinks: mocker.patch("requests.Session.get", return_value=mock_response) # Call the function being tested - result = scrape_links("https://www.example.com") + result = scrape_links("https://www.example.com", config) # Assert that the function returns a list of formatted hyperlinks assert isinstance(result, list) diff --git a/tests/unit/test_browse_scrape_text.py b/tests/unit/test_browse_scrape_text.py index cda45e11..3d916bc7 100644 --- a/tests/unit/test_browse_scrape_text.py +++ b/tests/unit/test_browse_scrape_text.py @@ -42,7 +42,7 @@ 
Additional aspects: class TestScrapeText: - def test_scrape_text_with_valid_url(self, mocker): + def test_scrape_text_with_valid_url(self, mocker, config): """Tests that scrape_text() returns the expected text when given a valid URL.""" # Mock the requests.get() method to return a response with expected text expected_text = "This is some sample text" @@ -57,14 +57,14 @@ class TestScrapeText: # Call the function with a valid URL and assert that it returns the # expected text url = "http://www.example.com" - assert scrape_text(url) == expected_text + assert scrape_text(url, config) == expected_text - def test_invalid_url(self): + def test_invalid_url(self, config): """Tests that an error is raised when an invalid url is provided.""" url = "invalidurl.com" - pytest.raises(ValueError, scrape_text, url) + pytest.raises(ValueError, scrape_text, url, config) - def test_unreachable_url(self, mocker): + def test_unreachable_url(self, mocker, config): """Test that scrape_text returns an error message when an invalid or unreachable url is provided.""" # Mock the requests.get() method to raise an exception mocker.patch( @@ -74,10 +74,10 @@ class TestScrapeText: # Call the function with an invalid URL and assert that it returns an error # message url = "http://thiswebsitedoesnotexist.net/" - error_message = scrape_text(url) + error_message = scrape_text(url, config) assert "Error:" in error_message - def test_no_text(self, mocker): + def test_no_text(self, mocker, config): """Test that scrape_text returns an empty string when the html page contains no text to be scraped.""" # Mock the requests.get() method to return a response with no text mock_response = mocker.Mock() @@ -87,20 +87,20 @@ class TestScrapeText: # Call the function with a valid URL and assert that it returns an empty string url = "http://www.example.com" - assert scrape_text(url) == "" + assert scrape_text(url, config) == "" - def test_http_error(self, mocker): + def test_http_error(self, mocker, config): """Test that scrape_text returns an error message when the response status code is an http error (>=400).""" # Mock the requests.get() method to return a response with a 404 status code mocker.patch("requests.Session.get", return_value=mocker.Mock(status_code=404)) # Call the function with a URL - result = scrape_text("https://www.example.com") + result = scrape_text("https://www.example.com", config) # Check that the function returns an error message assert result == "Error: HTTP 404 error" - def test_scrape_text_with_html_tags(self, mocker): + def test_scrape_text_with_html_tags(self, mocker, config): """Test that scrape_text() properly handles HTML tags.""" # Create a mock response object with HTML containing tags html = "
<html><body><p>This is <b>bold</b> text.</p></body></html>
" @@ -110,7 +110,7 @@ class TestScrapeText: mocker.patch("requests.Session.get", return_value=mock_response) # Call the function with a URL - result = scrape_text("https://www.example.com") + result = scrape_text("https://www.example.com", config) # Check that the function properly handles HTML tags assert result == "This is bold text." diff --git a/tests/unit/test_file_operations.py b/tests/unit/test_file_operations.py index 7f885348..35c77a15 100644 --- a/tests/unit/test_file_operations.py +++ b/tests/unit/test_file_operations.py @@ -55,11 +55,11 @@ def test_file(test_file_path: Path): @pytest.fixture() -def test_file_with_content_path(test_file: TextIOWrapper, file_content): +def test_file_with_content_path(test_file: TextIOWrapper, file_content, config): test_file.write(file_content) test_file.close() file_ops.log_operation( - "write", test_file.name, file_ops.text_checksum(file_content) + "write", test_file.name, config, file_ops.text_checksum(file_content) ) return Path(test_file.name) @@ -117,7 +117,7 @@ def test_file_operations_state(test_file: TextIOWrapper): assert file_ops.file_operations_state(test_file.name) == expected_state -def test_is_duplicate_operation(config, mocker: MockerFixture): +def test_is_duplicate_operation(config: Config, mocker: MockerFixture): # Prepare a fake state dictionary for the function to use state = { "path/to/file1.txt": "checksum1", @@ -127,30 +127,42 @@ def test_is_duplicate_operation(config, mocker: MockerFixture): # Test cases with write operations assert ( - file_ops.is_duplicate_operation("write", "path/to/file1.txt", "checksum1") + file_ops.is_duplicate_operation( + "write", "path/to/file1.txt", config, "checksum1" + ) is True ) assert ( - file_ops.is_duplicate_operation("write", "path/to/file1.txt", "checksum2") + file_ops.is_duplicate_operation( + "write", "path/to/file1.txt", config, "checksum2" + ) is False ) assert ( - file_ops.is_duplicate_operation("write", "path/to/file3.txt", "checksum3") + file_ops.is_duplicate_operation( + "write", "path/to/file3.txt", config, "checksum3" + ) is False ) # Test cases with append operations assert ( - file_ops.is_duplicate_operation("append", "path/to/file1.txt", "checksum1") + file_ops.is_duplicate_operation( + "append", "path/to/file1.txt", config, "checksum1" + ) is False ) # Test cases with delete operations - assert file_ops.is_duplicate_operation("delete", "path/to/file1.txt") is False - assert file_ops.is_duplicate_operation("delete", "path/to/file3.txt") is True + assert ( + file_ops.is_duplicate_operation("delete", "path/to/file1.txt", config) is False + ) + assert ( + file_ops.is_duplicate_operation("delete", "path/to/file3.txt", config) is True + ) # Test logging a file operation def test_log_operation(config: Config): - file_ops.log_operation("log_test", "path/to/test") + file_ops.log_operation("log_test", "path/to/test", config) with open(config.file_logger_path, "r", encoding="utf-8") as f: content = f.read() assert f"log_test: path/to/test\n" in content @@ -164,7 +176,7 @@ def test_text_checksum(file_content: str): def test_log_operation_with_checksum(config: Config): - file_ops.log_operation("log_test", "path/to/test", checksum="ABCDEF") + file_ops.log_operation("log_test", "path/to/test", config, checksum="ABCDEF") with open(config.file_logger_path, "r", encoding="utf-8") as f: content = f.read() assert f"log_test: path/to/test #ABCDEF\n" in content @@ -211,50 +223,56 @@ def test_read_file( mock_MemoryItem_from_text, test_file_with_content_path: Path, file_content, + config: Config, 
): - content = file_ops.read_file(test_file_with_content_path) - assert content == file_content + content = file_ops.read_file(test_file_with_content_path, config) + assert content.replace("\r", "") == file_content -def test_write_to_file(test_file_path: Path): +def test_write_to_file(test_file_path: Path, config): new_content = "This is new content.\n" - file_ops.write_to_file(str(test_file_path), new_content) + file_ops.write_to_file(str(test_file_path), new_content, config) with open(test_file_path, "r", encoding="utf-8") as f: content = f.read() assert content == new_content -def test_write_file_logs_checksum(config: Config, test_file_path: Path): +def test_write_file_logs_checksum(test_file_path: Path, config): new_content = "This is new content.\n" new_checksum = file_ops.text_checksum(new_content) - file_ops.write_to_file(str(test_file_path), new_content) + file_ops.write_to_file(str(test_file_path), new_content, config) with open(config.file_logger_path, "r", encoding="utf-8") as f: log_entry = f.read() assert log_entry == f"write: {test_file_path} #{new_checksum}\n" -def test_write_file_fails_if_content_exists(test_file_path: Path): +def test_write_file_fails_if_content_exists(test_file_path: Path, config): new_content = "This is new content.\n" file_ops.log_operation( "write", str(test_file_path), + config, checksum=file_ops.text_checksum(new_content), ) - result = file_ops.write_to_file(str(test_file_path), new_content) + result = file_ops.write_to_file(str(test_file_path), new_content, config) assert result == "Error: File has already been updated." -def test_write_file_succeeds_if_content_different(test_file_with_content_path: Path): +def test_write_file_succeeds_if_content_different( + test_file_with_content_path: Path, config +): new_content = "This is different content.\n" - result = file_ops.write_to_file(str(test_file_with_content_path), new_content) + result = file_ops.write_to_file( + str(test_file_with_content_path), new_content, config + ) assert result == "File written to successfully." 
-def test_append_to_file(test_nested_file: Path): +def test_append_to_file(test_nested_file: Path, config): append_text = "This is appended text.\n" - file_ops.write_to_file(test_nested_file, append_text) + file_ops.write_to_file(test_nested_file, append_text, config) - file_ops.append_to_file(test_nested_file, append_text) + file_ops.append_to_file(test_nested_file, append_text, config) with open(test_nested_file, "r") as f: content_after = f.read() @@ -262,12 +280,10 @@ def test_append_to_file(test_nested_file: Path): assert content_after == append_text + append_text -def test_append_to_file_uses_checksum_from_appended_file( - config: Config, test_file_path: Path -): +def test_append_to_file_uses_checksum_from_appended_file(test_file_path: Path, config): append_text = "This is appended text.\n" - file_ops.append_to_file(test_file_path, append_text) - file_ops.append_to_file(test_file_path, append_text) + file_ops.append_to_file(test_file_path, append_text, config) + file_ops.append_to_file(test_file_path, append_text, config) with open(config.file_logger_path, "r", encoding="utf-8") as f: log_contents = f.read() @@ -282,8 +298,8 @@ def test_append_to_file_uses_checksum_from_appended_file( ) -def test_delete_file(test_file_with_content_path: Path): - result = file_ops.delete_file(str(test_file_with_content_path)) +def test_delete_file(test_file_with_content_path: Path, config): + result = file_ops.delete_file(str(test_file_with_content_path), config) assert result == "File deleted successfully." assert os.path.exists(test_file_with_content_path) is False @@ -291,16 +307,16 @@ def test_delete_file(test_file_with_content_path: Path): def test_delete_missing_file(config): filename = "path/to/file/which/does/not/exist" # confuse the log - file_ops.log_operation("write", filename, checksum="fake") + file_ops.log_operation("write", filename, config, checksum="fake") try: os.remove(filename) except FileNotFoundError as err: - assert str(err) in file_ops.delete_file(filename) + assert str(err) in file_ops.delete_file(filename, config) return assert False, f"Failed to test delete_file; {filename} not expected to exist" -def test_list_files(workspace: Workspace, test_directory: Path): +def test_list_files(workspace: Workspace, test_directory: Path, config): # Case 1: Create files A and B, search for A, and ensure we don't return A and B file_a = workspace.get_path("file_a.txt") file_b = workspace.get_path("file_b.txt") @@ -318,7 +334,7 @@ def test_list_files(workspace: Workspace, test_directory: Path): with open(os.path.join(test_directory, file_a.name), "w") as f: f.write("This is file A in the subdirectory.") - files = file_ops.list_files(str(workspace.root)) + files = file_ops.list_files(str(workspace.root), config) assert file_a.name in files assert file_b.name in files assert os.path.join(Path(test_directory).name, file_a.name) in files @@ -331,17 +347,17 @@ def test_list_files(workspace: Workspace, test_directory: Path): # Case 2: Search for a file that does not exist and make sure we don't throw non_existent_file = "non_existent_file.txt" - files = file_ops.list_files("") + files = file_ops.list_files("", config) assert non_existent_file not in files -def test_download_file(config, workspace: Workspace): +def test_download_file(workspace: Workspace, config): url = "https://github.com/Significant-Gravitas/Auto-GPT/archive/refs/tags/v0.2.2.tar.gz" local_name = workspace.get_path("auto-gpt.tar.gz") size = 365023 readable_size = readable_file_size(size) assert ( - file_ops.download_file(url, 
local_name) + file_ops.download_file(url, local_name, config) == f'Successfully downloaded and locally stored file: "{local_name}"! (Size: {readable_size})' ) assert os.path.isfile(local_name) is True @@ -349,10 +365,10 @@ def test_download_file(config, workspace: Workspace): url = "https://github.com/Significant-Gravitas/Auto-GPT/archive/refs/tags/v0.0.0.tar.gz" assert "Got an HTTP Error whilst trying to download file" in file_ops.download_file( - url, local_name + url, local_name, config ) url = "https://thiswebsiteiswrong.hmm/v0.0.0.tar.gz" assert "Failed to establish a new connection:" in file_ops.download_file( - url, local_name + url, local_name, config ) diff --git a/tests/unit/test_web_selenium.py b/tests/unit/test_web_selenium.py index 2746f684..0415007d 100644 --- a/tests/unit/test_web_selenium.py +++ b/tests/unit/test_web_selenium.py @@ -1,11 +1,11 @@ from autogpt.commands.web_selenium import browse_website -def test_browse_website(): +def test_browse_website(config): url = "https://barrel-roll.com" question = "How to execute a barrel roll" - response = browse_website(url, question) + response = browse_website(url, question, config) assert "Error" in response # Sanity check that the response is not too long assert len(response) < 200