From 3a80e2f399b3d802f0c962487f8071d97ee72bb1 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Tue, 2 May 2023 13:26:30 +0200 Subject: [PATCH] Revert "Revert "Merge branch 'master' into stable"" This reverts commit 999990b614f9c5e32dbfb560ab9516755d212884. --- .env.template | 7 + .github/workflows/pr-label.yml | 20 ++ .gitignore | 2 + .isort.cfg | 10 + README.md | 31 +- autogpt/agent/agent.py | 95 ++++--- autogpt/agent/agent_manager.py | 56 +++- autogpt/app.py | 148 +++------- autogpt/chat.py | 24 +- autogpt/cli.py | 30 +- autogpt/commands/analyze_code.py | 6 + autogpt/commands/audio_text.py | 41 ++- autogpt/commands/command.py | 153 ++++++++++ autogpt/commands/execute_code.py | 38 ++- autogpt/commands/file_operations.py | 17 ++ autogpt/commands/git_operations.py | 20 +- autogpt/commands/google_search.py | 34 ++- autogpt/commands/image_gen.py | 3 +- autogpt/commands/improve_code.py | 6 + autogpt/commands/twitter.py | 24 +- autogpt/commands/web_selenium.py | 6 + autogpt/commands/write_tests.py | 6 + autogpt/config/ai_config.py | 40 ++- autogpt/config/config.py | 18 ++ autogpt/llm_utils.py | 63 +++-- autogpt/models/base_open_ai_plugin.py | 199 +++++++++++++ autogpt/plugins.py | 265 ++++++++++++++++++ autogpt/prompts/__init__.py | 0 .../generator.py} | 39 ++- autogpt/{ => prompts}/prompt.py | 107 +------ autogpt/token_counter.py | 5 +- autogpt/types/openai.py | 9 + autogpt/utils.py | 2 +- plugin.png | Bin 0 -> 33356 bytes plugins/__PUT_PLUGIN_ZIPS_HERE__ | 0 requirements.txt | 5 + scripts/__init__.py | 0 tests/mocks/__init__.py | 0 tests/mocks/mock_commands.py | 6 + tests/test_commands.py | 177 ++++++++++++ tests/test_prompt_generator.py | 3 +- .../Auto-GPT-Plugin-Test-master.zip | Bin 0 -> 14927 bytes .../unit/models/test_base_open_api_plugin.py | 79 ++++++ tests/unit/test_browse_scrape_text.py | 41 ++- tests/unit/test_plugins.py | 112 ++++++++ 45 files changed, 1601 insertions(+), 346 deletions(-) create mode 100644 .isort.cfg create mode 100644 autogpt/commands/command.py create mode 100644 autogpt/models/base_open_ai_plugin.py create mode 100644 autogpt/plugins.py create mode 100644 autogpt/prompts/__init__.py rename autogpt/{promptgenerator.py => prompts/generator.py} (78%) rename autogpt/{ => prompts}/prompt.py (50%) create mode 100644 autogpt/types/openai.py create mode 100644 plugin.png create mode 100644 plugins/__PUT_PLUGIN_ZIPS_HERE__ create mode 100644 scripts/__init__.py create mode 100644 tests/mocks/__init__.py create mode 100644 tests/mocks/mock_commands.py create mode 100644 tests/test_commands.py create mode 100644 tests/unit/data/test_plugins/Auto-GPT-Plugin-Test-master.zip create mode 100644 tests/unit/models/test_base_open_api_plugin.py create mode 100644 tests/unit/test_plugins.py diff --git a/.env.template b/.env.template index f1b511c2..60edecd6 100644 --- a/.env.template +++ b/.env.template @@ -188,3 +188,10 @@ OPENAI_API_KEY=your-openai-api-key # TW_CONSUMER_SECRET= # TW_ACCESS_TOKEN= # TW_ACCESS_TOKEN_SECRET= + +################################################################################ +### ALLOWLISTED PLUGINS +################################################################################ + +#ALLOWLISTED_PLUGINS - Sets the listed plugins that are allowed (Example: plugin1,plugin2,plugin3) +ALLOWLISTED_PLUGINS= diff --git a/.github/workflows/pr-label.yml b/.github/workflows/pr-label.yml index 92c5a66b..f1b200b0 100644 --- a/.github/workflows/pr-label.yml +++ b/.github/workflows/pr-label.yml @@ -26,3 +26,23 @@ jobs: repoToken: "${{ 
secrets.GITHUB_TOKEN }}" commentOnDirty: "This pull request has conflicts with the base branch, please resolve those so we can evaluate the pull request." commentOnClean: "Conflicts have been resolved! 🎉 A maintainer will review the pull request shortly." + + size: + if: ${{ github.event_name == 'pull_request_target' }} + permissions: + issues: write + pull-requests: write + runs-on: ubuntu-latest + steps: + - uses: codelytv/pr-size-labeler@v1.7.0 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + s_label: "size/s" + s_max_size: "10" + m_label: "size/m" + m_max_size: "50" + l_label: "size/l" + l_max_size: "200" + xl_label: "size/xl" + fail_if_xl: "false" + github_api_url: "api.github.com" diff --git a/.gitignore b/.gitignore index a6b3b80f..0c277d89 100644 --- a/.gitignore +++ b/.gitignore @@ -157,5 +157,7 @@ vicuna-* # mac .DS_Store +openai/ + # news CURRENT_BULLETIN.md \ No newline at end of file diff --git a/.isort.cfg b/.isort.cfg new file mode 100644 index 00000000..17eab482 --- /dev/null +++ b/.isort.cfg @@ -0,0 +1,10 @@ +[settings] +profile = black +multi_line_output = 3 +include_trailing_comma = true +force_grid_wrap = 0 +use_parentheses = true +ensure_newline_before_comments = true +line_length = 88 +sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER +skip = .tox,__pycache__,*.pyc,venv*/*,reports,venv,env,node_modules,.env,.venv,dist diff --git a/README.md b/README.md index 6d636c56..3e811a1c 100644 --- a/README.md +++ b/README.md @@ -31,14 +31,25 @@ Your support is greatly appreciated Development of this free, open-source project is made possible by all the contributors and sponsors. If you'd like to sponsor this project and have your avatar or company logo appear below click here.

+

-InfluxData    Roost.AI    NucleiAI    Algohash    TypingMind    

+
+InfluxData +Roost.AI +NucleiAI +Algohash +TypingMind + + +
+

robinicus  prompthero  crizzler  tob-le-rone  FSTatSBS  toverly1  ddtarazona  Nalhos  Kazamario  pingbotan  indoor47  AuroraHolding  kreativai  hunteraraujo  Explorergt92  judegomila   thepok   SpacingLily  merwanehamadi  m  zkonduit  maxxflyer  tekelsey  digisomni  nocodeclarity  tjarmain -Josecodesalot  saten-private  kenndanielso  johnculkin  Daniel1357  0xmatchmaker  belharethsami  nicoguyon  josephcmiller2  KiaArmani  Mobivs  rocks6  Odin519Tomas  ChrisDMT  thisisjeffchen  RealChrisSean  AIdevelopersAI  scryptedinc  jun784  goldenrecursion  allenstecat  LeeRobidas  cfarquhar  avy-ai  omphos  sunchongren  CrazySwami  fruition  Web3Capital  jazgarewal  rejunity  dexterityx  shawnharmsen  tommygeee  abhinav-pandey29  ColinConwell  kMag410  lucas-chu  Heitechsoft  bentoml  MediConCenHK  nnkostov  founderblocks-sils  CarmenCocoa  angiaou  fabrietech  Partender  RThaweewat  GalaxyVideoAgency  Brodie0  sultanmeghji  CatsMeow492  caitlynmeeks  garythebat  concreit  Pythagora-io  ASmithOWL  Cameron-Fulton  joaomdmoura  Dradstone  st617  wenfengwang  morcos  CrypteorCapital  jd3655  mathewhawkins  ZERO-A-ONE  MayurVirkar  SwftCoins  marv-technology  cxs  iddelacruz  AryaXAI  lmaugustin  Mr-Bishop42  vixul-accelerator  TheStoneMX  ciscodebs  ntwrite  DataMetis  ikarosai  refinery1  MetaPath01  ternary5  arjunb023  yx3110  vkozacek  eelbaz  rapidstartup  txtr99  tob-le-rone  neverinstall  projectonegames  DailyBotHQ  comet-ml  rickscode  webbcolton  MBassi91  

+CrypteorCapital  eelbaz  lucas-chu  tob-le-rone  jazgarewal  yx3110  MetaPath01  tommi-joentakanen  GalaxyVideoAgency  ciscodebs  josephjacks  mathewhawkins  cxs  txtr99  ChrisDMT  MayurVirkar  comet-ml  RThaweewat  DailyBotHQ  jacobyoby  jd3655  MBassi91  chatgpt-prompts  marv-technology  CrazySwami  tullytim  sultanmeghji  cfarquhar  goldenrecursion  ikarosai  avy-ai  MediConCenHK  RealChrisSean  DataMetis  CarmenCocoa  Cameron-Fulton  rejunity  belharethsami  AcountoOU  CatsMeow492  fabrietech  kenndanielso  arthur-x88  Heitechsoft  knifour  rickscode  Brodie0  ternary5  dexterityx  turintech  projectonegames  ZERO-A-ONE  KiaArmani  caitlynmeeks  tommygeee  st617  webbcolton  jondwillis  Mr-Bishop42  ASmithOWL  omphos  neverinstall  lmaugustin  vkozacek  abhinav-pandey29  ColinConwell  Partender  shawnharmsen  TheStoneMX  wenfengwang  allenstecat  johnculkin  Web3Capital  sunchongren  bentoml  angiaou  Dradstone  jun784  ZoneSixGames  Pythagora-io  nnkostov  AIdevelopersAI  josephcmiller2  VoiceBeer  AryaXAI  saten-private  refinery1  Mobivs  ntwrite  Josecodesalot  horazius  nicoguyon  0xmatchmaker  rocks6  SwftCoins  iddelacruz  scryptedinc  thisisjeffchen  kMag410  rapidstartup  founderblocks-sils  concreit  Odin519Tomas  Daniel1357  fruition  SparkplanAI  anvarazizov  joaomdmoura  morcos  LeeRobidas  arjunb023  garythebat  humungasaurus  

+ ## 🚀 Features @@ -254,6 +265,22 @@ export GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY" export CUSTOM_SEARCH_ENGINE_ID="YOUR_CUSTOM_SEARCH_ENGINE_ID" ``` +## Plugins + +See https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template for the plugin template. + +⚠️💀 WARNING 💀⚠️: Review the code of any plugin you use before installing it. Plugins can execute arbitrary Python code, including malicious actions such as stealing your API keys. + +Drop the plugin repo's zipfile into the plugins folder. + +![Download Zip](https://raw.githubusercontent.com/BillSchumacher/Auto-GPT/master/plugin.png) + +If you add a plugin's class name to `ALLOWLISTED_PLUGINS` in your `.env`, it will be loaded without prompting; otherwise you will be asked to confirm before the plugin is loaded: + +``` +ALLOWLISTED_PLUGINS=example-plugin1,example-plugin2,example-plugin3 +``` + ## Setting Your Cache Type By default, Auto-GPT is going to use LocalCache instead of redis or Pinecone. diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index ee7885f8..189338f5 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -19,18 +19,25 @@ class Agent: memory: The memory object to use. full_message_history: The full message history. next_action_count: The number of actions to execute. - system_prompt: The system prompt is the initial prompt that defines everything the AI needs to know to achieve its task successfully. - Currently, the dynamic and customizable information in the system prompt are ai_name, description and goals. + system_prompt: The system prompt is the initial prompt that defines everything + the AI needs to know to achieve its task successfully. + Currently, the dynamic and customizable information in the system prompt are + ai_name, description and goals. - triggering_prompt: The last sentence the AI will see before answering. For Auto-GPT, this prompt is: - Determine which next command to use, and respond using the format specified above: - The triggering prompt is not part of the system prompt because between the system prompt and the triggering - prompt we have contextual information that can distract the AI and make it forget that its goal is to find the next task to achieve. + triggering_prompt: The last sentence the AI will see before answering. + For Auto-GPT, this prompt is: + Determine which next command to use, and respond using the format specified + above: + The triggering prompt is not part of the system prompt because between the + system prompt and the triggering + prompt we have contextual information that can distract the AI and make it + forget that its goal is to find the next task to achieve. SYSTEM PROMPT CONTEXTUAL INFORMATION (memory, previous conversations, anything relevant) TRIGGERING PROMPT - The triggering prompt reminds the AI about its short term meta task (defining the next task) + The triggering prompt reminds the AI about its short term meta task + (defining the next task) """ def __init__( @@ -39,6 +46,8 @@ class Agent: memory, full_message_history, next_action_count, + command_registry, + config, system_prompt, triggering_prompt, ): @@ -46,6 +55,8 @@ class Agent: self.memory = memory self.full_message_history = full_message_history self.next_action_count = next_action_count + self.command_registry = command_registry + self.config = config self.system_prompt = system_prompt self.triggering_prompt = triggering_prompt @@ -73,6 +84,7 @@ class Agent: # Send message to AI, get response with Spinner("Thinking... 
"): assistant_reply = chat_with_ai( + self, self.system_prompt, self.triggering_prompt, self.full_message_history, @@ -81,6 +93,10 @@ class Agent: ) # TODO: This hardcodes the model to use GPT3.5. Make this an argument assistant_reply_json = fix_json_using_multiple_techniques(assistant_reply) + for plugin in cfg.plugins: + if not plugin.can_handle_post_planning(): + continue + assistant_reply_json = plugin.post_planning(self, assistant_reply_json) # Print Assistant thoughts if assistant_reply_json != {}: @@ -89,14 +105,13 @@ class Agent: try: print_assistant_thoughts(self.ai_name, assistant_reply_json) command_name, arguments = get_command(assistant_reply_json) - # command_name, arguments = assistant_reply_json_valid["command"]["name"], assistant_reply_json_valid["command"]["args"] if cfg.speak_mode: say_text(f"I want to execute {command_name}") except Exception as e: logger.error("Error: \n", str(e)) if not cfg.continuous_mode and self.next_action_count == 0: - ### GET USER AUTHORIZATION TO EXECUTE COMMAND ### + # ### GET USER AUTHORIZATION TO EXECUTE COMMAND ### # Get key press: Prompt the user to press enter to continue or escape # to exit logger.typewriter_log( @@ -168,30 +183,46 @@ class Agent: elif command_name == "human_feedback": result = f"Human feedback: {user_input}" else: - result = ( - f"Command {command_name} returned: " - f"{execute_command(command_name, arguments)}" + for plugin in cfg.plugins: + if not plugin.can_handle_pre_command(): + continue + command_name, arguments = plugin.pre_command( + command_name, arguments + ) + command_result = execute_command( + self.command_registry, + command_name, + arguments, + self.config.prompt_generator, ) + result = f"Command {command_name} returned: " f"{command_result}" + + for plugin in cfg.plugins: + if not plugin.can_handle_post_command(): + continue + result = plugin.post_command(command_name, result) if self.next_action_count > 0: self.next_action_count -= 1 - - memory_to_add = ( - f"Assistant Reply: {assistant_reply} " - f"\nResult: {result} " - f"\nHuman Feedback: {user_input} " - ) - - self.memory.add(memory_to_add) - - # Check if there's a result from the command append it to the message - # history - if result is not None: - self.full_message_history.append(create_chat_message("system", result)) - logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result) - else: - self.full_message_history.append( - create_chat_message("system", "Unable to execute command") - ) - logger.typewriter_log( - "SYSTEM: ", Fore.YELLOW, "Unable to execute command" + if command_name != "do_nothing": + memory_to_add = ( + f"Assistant Reply: {assistant_reply} " + f"\nResult: {result} " + f"\nHuman Feedback: {user_input} " ) + + self.memory.add(memory_to_add) + + # Check if there's a result from the command append it to the message + # history + if result is not None: + self.full_message_history.append( + create_chat_message("system", result) + ) + logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result) + else: + self.full_message_history.append( + create_chat_message("system", "Unable to execute command") + ) + logger.typewriter_log( + "SYSTEM: ", Fore.YELLOW, "Unable to execute command" + ) diff --git a/autogpt/agent/agent_manager.py b/autogpt/agent/agent_manager.py index 898767a4..9a62ef61 100644 --- a/autogpt/agent/agent_manager.py +++ b/autogpt/agent/agent_manager.py @@ -1,10 +1,11 @@ """Agent manager for managing GPT agents""" from __future__ import annotations -from typing import Union +from typing import List, Union -from autogpt.config.config 
import Singleton +from autogpt.config.config import Config, Singleton from autogpt.llm_utils import create_chat_completion +from autogpt.types.openai import Message class AgentManager(metaclass=Singleton): @@ -13,6 +14,7 @@ class AgentManager(metaclass=Singleton): def __init__(self): self.next_key = 0 self.agents = {} # key, (task, full_message_history, model) + self.cfg = Config() # Create new GPT agent # TODO: Centralise use of create_chat_completion() to globally enforce token limit @@ -28,19 +30,32 @@ class AgentManager(metaclass=Singleton): Returns: The key of the new agent """ - messages = [ + messages: List[Message] = [ {"role": "user", "content": prompt}, ] - + for plugin in self.cfg.plugins: + if not plugin.can_handle_pre_instruction(): + continue + if plugin_messages := plugin.pre_instruction(messages): + messages.extend(iter(plugin_messages)) # Start GPT instance agent_reply = create_chat_completion( model=model, messages=messages, ) - # Update full message history messages.append({"role": "assistant", "content": agent_reply}) + plugins_reply = "" + for i, plugin in enumerate(self.cfg.plugins): + if not plugin.can_handle_on_instruction(): + continue + if plugin_result := plugin.on_instruction(messages): + sep = "\n" if i else "" + plugins_reply = f"{plugins_reply}{sep}{plugin_result}" + + if plugins_reply and plugins_reply != "": + messages.append({"role": "assistant", "content": plugins_reply}) key = self.next_key # This is done instead of len(agents) to make keys unique even if agents # are deleted @@ -48,6 +63,11 @@ class AgentManager(metaclass=Singleton): self.agents[key] = (task, messages, model) + for plugin in self.cfg.plugins: + if not plugin.can_handle_post_instruction(): + continue + agent_reply = plugin.post_instruction(agent_reply) + return key, agent_reply def message_agent(self, key: str | int, message: str) -> str: @@ -65,15 +85,37 @@ class AgentManager(metaclass=Singleton): # Add user message to message history before sending to agent messages.append({"role": "user", "content": message}) + for plugin in self.cfg.plugins: + if not plugin.can_handle_pre_instruction(): + continue + if plugin_messages := plugin.pre_instruction(messages): + for plugin_message in plugin_messages: + messages.append(plugin_message) + # Start GPT instance agent_reply = create_chat_completion( model=model, messages=messages, ) - # Update full message history messages.append({"role": "assistant", "content": agent_reply}) + plugins_reply = agent_reply + for i, plugin in enumerate(self.cfg.plugins): + if not plugin.can_handle_on_instruction(): + continue + if plugin_result := plugin.on_instruction(messages): + sep = "\n" if i else "" + plugins_reply = f"{plugins_reply}{sep}{plugin_result}" + # Update full message history + if plugins_reply and plugins_reply != "": + messages.append({"role": "assistant", "content": plugins_reply}) + + for plugin in self.cfg.plugins: + if not plugin.can_handle_post_instruction(): + continue + agent_reply = plugin.post_instruction(agent_reply) + return agent_reply def list_agents(self) -> list[tuple[str | int, str]]: @@ -86,7 +128,7 @@ class AgentManager(metaclass=Singleton): # Return a list of agent keys and their tasks return [(key, task) for key, (task, _, _) in self.agents.items()] - def delete_agent(self, key: Union[str, int]) -> bool: + def delete_agent(self, key: str | int) -> bool: """Delete an agent from the agent manager Args: diff --git a/autogpt/app.py b/autogpt/app.py index 58d9f716..cf8e29a3 100644 --- a/autogpt/app.py +++ b/autogpt/app.py @@ -3,33 
+3,12 @@ import json from typing import Dict, List, NoReturn, Union from autogpt.agent.agent_manager import AgentManager -from autogpt.commands.analyze_code import analyze_code -from autogpt.commands.audio_text import read_audio_from_file -from autogpt.commands.execute_code import ( - execute_python_file, - execute_shell, - execute_shell_popen, -) -from autogpt.commands.file_operations import ( - append_to_file, - delete_file, - download_file, - read_file, - search_files, - write_to_file, -) -from autogpt.commands.git_operations import clone_repository -from autogpt.commands.google_search import google_official_search, google_search -from autogpt.commands.image_gen import generate_image -from autogpt.commands.improve_code import improve_code -from autogpt.commands.twitter import send_tweet +from autogpt.commands.command import CommandRegistry, command from autogpt.commands.web_requests import scrape_links, scrape_text -from autogpt.commands.web_selenium import browse_website -from autogpt.commands.write_tests import write_tests from autogpt.config import Config -from autogpt.json_utils.json_fix_llm import fix_and_parse_json from autogpt.memory import get_memory from autogpt.processing.text import summarize_text +from autogpt.prompts.generator import PromptGenerator from autogpt.speech import say_text CFG = Config() @@ -108,7 +87,12 @@ def map_command_synonyms(command_name: str): return command_name -def execute_command(command_name: str, arguments): +def execute_command( + command_registry: CommandRegistry, + command_name: str, + arguments, + prompt: PromptGenerator, +): """Execute the command and return the result Args: @@ -119,105 +103,29 @@ def execute_command(command_name: str, arguments): str: The result of the command """ try: + cmd = command_registry.commands.get(command_name) + + # If the command is found, call it with the provided arguments + if cmd: + return cmd(**arguments) + + # TODO: Remove commands below after they are moved to the command registry. 
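For orientation: the hunk above replaces `app.py`'s monolithic command imports with registry-based dispatch, so `execute_command` now consults `command_registry.commands` first and only then falls through to the legacy chain below. A minimal sketch of that lookup, using the `Command`/`CommandRegistry` API defined later in this patch; the `greet` command is hypothetical, for illustration only:

```python
from autogpt.commands.command import Command, CommandRegistry

def greet(name: str) -> str:
    """Hypothetical command used only to illustrate registration."""
    return f"Hello, {name}!"

registry = CommandRegistry()
registry.register(Command("greet", "Greet someone", greet, '"name": "<name>"'))

# The same lookup execute_command performs before its legacy fallback:
cmd = registry.commands.get("greet")
result = cmd(**{"name": "Auto-GPT"}) if cmd else "Unknown command 'greet'"
print(result)  # -> Hello, Auto-GPT!
```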
command_name = map_command_synonyms(command_name.lower()) - if command_name == "google": - # Check if the Google API key is set and use the official search method - # If the API key is not set or has only whitespaces, use the unofficial - # search method - key = CFG.google_api_key - if key and key.strip() and key != "your-google-api-key": - google_result = google_official_search(arguments["input"]) - return google_result - else: - google_result = google_search(arguments["input"]) - # google_result can be a list or a string depending on the search results - if isinstance(google_result, list): - safe_message = [ - google_result_single.encode("utf-8", "ignore") - for google_result_single in google_result - ] - else: - safe_message = google_result.encode("utf-8", "ignore") + if command_name == "memory_add": + return get_memory(CFG).add(arguments["string"]) - return safe_message.decode("utf-8") - elif command_name == "memory_add": - memory = get_memory(CFG) - return memory.add(arguments["string"]) - elif command_name == "start_agent": - return start_agent( - arguments["name"], arguments["task"], arguments["prompt"] - ) - elif command_name == "message_agent": - return message_agent(arguments["key"], arguments["message"]) - elif command_name == "list_agents": - return list_agents() - elif command_name == "delete_agent": - return delete_agent(arguments["key"]) - elif command_name == "get_text_summary": - return get_text_summary(arguments["url"], arguments["question"]) - elif command_name == "get_hyperlinks": - return get_hyperlinks(arguments["url"]) - elif command_name == "clone_repository": - return clone_repository( - arguments["repository_url"], arguments["clone_path"] - ) - elif command_name == "read_file": - return read_file(arguments["file"]) - elif command_name == "write_to_file": - return write_to_file(arguments["file"], arguments["text"]) - elif command_name == "append_to_file": - return append_to_file(arguments["file"], arguments["text"]) - elif command_name == "delete_file": - return delete_file(arguments["file"]) - elif command_name == "search_files": - return search_files(arguments["directory"]) - elif command_name == "download_file": - if not CFG.allow_downloads: - return "Error: You do not have user authorization to download files locally." - return download_file(arguments["url"], arguments["file"]) - elif command_name == "browse_website": - return browse_website(arguments["url"], arguments["question"]) # TODO: Change these to take in a file rather than pasted code, if # non-file is given, return instructions "Input should be a python - # filepath, write your code to file and try again" - elif command_name == "analyze_code": - return analyze_code(arguments["code"]) - elif command_name == "improve_code": - return improve_code(arguments["suggestions"], arguments["code"]) - elif command_name == "write_tests": - return write_tests(arguments["code"], arguments.get("focus")) - elif command_name == "execute_python_file": # Add this command - return execute_python_file(arguments["file"]) - elif command_name == "execute_shell": - if CFG.execute_local_commands: - return execute_shell(arguments["command_line"]) - else: - return ( - "You are not allowed to run local shell commands. To execute" - " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' " - "in your config. Do not attempt to bypass the restriction." 
- ) - elif command_name == "execute_shell_popen": - if CFG.execute_local_commands: - return execute_shell_popen(arguments["command_line"]) - else: - return ( - "You are not allowed to run local shell commands. To execute" - " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' " - "in your config. Do not attempt to bypass the restriction." - ) - elif command_name == "read_audio_from_file": - return read_audio_from_file(arguments["file"]) - elif command_name == "generate_image": - return generate_image(arguments["prompt"]) - elif command_name == "send_tweet": - return send_tweet(arguments["text"]) + # filepath, write your code to file and try again elif command_name == "do_nothing": return "No action performed." elif command_name == "task_complete": shutdown() else: + for command in prompt.commands: + if command_name == command["label"] or command_name == command["name"]: + return command["function"](*arguments.values()) return ( f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'" " list for available commands and only respond in the specified JSON" @@ -227,6 +135,9 @@ def execute_command(command_name: str, arguments): return f"Error: {str(e)}" +@command( + "get_text_summary", "Get text summary", '"url": "", "question": ""' +) def get_text_summary(url: str, question: str) -> str: """Return the results of a Google search @@ -242,6 +153,7 @@ def get_text_summary(url: str, question: str) -> str: return f""" "Result" : {summary}""" +@command("get_hyperlinks", "Get hyperlinks", '"url": ""') def get_hyperlinks(url: str) -> Union[str, List[str]]: """Return the results of a Google search @@ -260,6 +172,11 @@ def shutdown() -> NoReturn: quit() +@command( + "start_agent", + "Start GPT Agent", + '"name": "", "task": "", "prompt": ""', +) def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) -> str: """Start an agent with a given name, task, and prompt @@ -292,6 +209,7 @@ def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) -> return f"Agent {name} created with key {key}. First response: {agent_response}" +@command("message_agent", "Message GPT Agent", '"key": "", "message": ""') def message_agent(key: str, message: str) -> str: """Message an agent with a given key and message""" # Check if the key is a valid integer @@ -306,7 +224,8 @@ def message_agent(key: str, message: str) -> str: return agent_response -def list_agents(): +@command("list_agents", "List GPT Agents", "") +def list_agents() -> str: """List all agents Returns: @@ -317,6 +236,7 @@ ) +@command("delete_agent", "Delete GPT Agent", '"key": ""') def delete_agent(key: str) -> str: """Delete an agent with a given key diff --git a/autogpt/chat.py b/autogpt/chat.py index 1f6bca96..f9fc9471 100644 --- a/autogpt/chat.py +++ b/autogpt/chat.py @@ -6,11 +6,12 @@ from autogpt import token_counter from autogpt.config import Config from autogpt.llm_utils import create_chat_completion from autogpt.logs import logger +from autogpt.types.openai import Message cfg = Config() -def create_chat_message(role, content): +def create_chat_message(role, content) -> Message: """ Create a chat message with the given role and content. 
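`create_chat_message` now returns the `Message` TypedDict introduced in `autogpt/types/openai.py`, the same role/content shape that `BaseOpenAIPlugin` declares later in this patch. A self-contained sketch of that type and the helper:

```python
from typing import TypedDict

class Message(TypedDict):
    role: str  # "system", "user", or "assistant"
    content: str

def create_chat_message(role: str, content: str) -> Message:
    """Build one chat message in the OpenAI role/content shape."""
    return {"role": role, "content": content}

print(create_chat_message("user", "Hello"))  # {'role': 'user', 'content': 'Hello'}
```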
@@ -51,7 +52,7 @@ def generate_context(prompt, relevant_memory, full_message_history, model): # TODO: Change debug from hardcode to argument def chat_with_ai( - prompt, user_input, full_message_history, permanent_memory, token_limit + agent, prompt, user_input, full_message_history, permanent_memory, token_limit ): """Interact with the OpenAI API, sending the prompt, user input, message history, and permanent memory.""" @@ -135,6 +136,25 @@ def chat_with_ai( # Append user input, the length of this is accounted for above current_context.extend([create_chat_message("user", user_input)]) + plugin_count = len(cfg.plugins) + for i, plugin in enumerate(cfg.plugins): + if not plugin.can_handle_on_planning(): + continue + plugin_response = plugin.on_planning( + agent.prompt_generator, current_context + ) + if not plugin_response or plugin_response == "": + continue + tokens_to_add = token_counter.count_message_tokens( + [create_chat_message("system", plugin_response)], model + ) + if current_tokens_used + tokens_to_add > send_token_limit: + if cfg.debug_mode: + print("Plugin response too long, skipping:", plugin_response) + print("Plugins remaining at stop:", plugin_count - i) + break + current_context.append(create_chat_message("system", plugin_response)) + # Calculate remaining tokens tokens_remaining = token_limit - current_tokens_used # assert tokens_remaining >= 0, "Tokens remaining is negative. diff --git a/autogpt/cli.py b/autogpt/cli.py index 6fe9ecbb..51a946a7 100644 --- a/autogpt/cli.py +++ b/autogpt/cli.py @@ -75,11 +75,13 @@ def main( from colorama import Fore from autogpt.agent.agent import Agent + from autogpt.commands.command import CommandRegistry from autogpt.config import Config, check_openai_api_key from autogpt.configurator import create_config from autogpt.logs import logger from autogpt.memory import get_memory - from autogpt.prompt import construct_prompt + from autogpt.plugins import scan_plugins + from autogpt.prompts.prompt import construct_main_ai_config from autogpt.utils import get_current_git_branch, get_latest_bulletin if ctx.invoked_subcommand is None: @@ -123,7 +125,26 @@ def main( "parts of Auto-GPT with this version. 
" "Please consider upgrading to Python 3.10 or higher.", ) - system_prompt = construct_prompt() + + cfg = Config() + cfg.set_plugins(scan_plugins(cfg, cfg.debug_mode)) + # Create a CommandRegistry instance and scan default folder + command_registry = CommandRegistry() + command_registry.import_commands("autogpt.commands.analyze_code") + command_registry.import_commands("autogpt.commands.audio_text") + command_registry.import_commands("autogpt.commands.execute_code") + command_registry.import_commands("autogpt.commands.file_operations") + command_registry.import_commands("autogpt.commands.git_operations") + command_registry.import_commands("autogpt.commands.google_search") + command_registry.import_commands("autogpt.commands.image_gen") + command_registry.import_commands("autogpt.commands.improve_code") + command_registry.import_commands("autogpt.commands.twitter") + command_registry.import_commands("autogpt.commands.web_selenium") + command_registry.import_commands("autogpt.commands.write_tests") + command_registry.import_commands("autogpt.app") + ai_name = "" + ai_config = construct_main_ai_config() + ai_config.command_registry = command_registry # print(prompt) # Initialize variables full_message_history = [] @@ -140,11 +161,16 @@ def main( "Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}" ) logger.typewriter_log("Using Browser:", Fore.GREEN, cfg.selenium_web_browser) + system_prompt = ai_config.construct_full_prompt() + if cfg.debug_mode: + logger.typewriter_log("Prompt:", Fore.GREEN, system_prompt) agent = Agent( ai_name=ai_name, memory=memory, full_message_history=full_message_history, next_action_count=next_action_count, + command_registry=command_registry, + config=ai_config, system_prompt=system_prompt, triggering_prompt=triggering_prompt, ) diff --git a/autogpt/commands/analyze_code.py b/autogpt/commands/analyze_code.py index e02ea4c5..b87d73e1 100644 --- a/autogpt/commands/analyze_code.py +++ b/autogpt/commands/analyze_code.py @@ -1,9 +1,15 @@ """Code evaluation module.""" from __future__ import annotations +from autogpt.commands.command import command from autogpt.llm_utils import call_ai_function +@command( + "analyze_code", + "Analyze Code", + '"code": ""', +) def analyze_code(code: str) -> list[str]: """ A function that takes in a string and returns a response from create chat diff --git a/autogpt/commands/audio_text.py b/autogpt/commands/audio_text.py index cae32d4e..b409fefd 100644 --- a/autogpt/commands/audio_text.py +++ b/autogpt/commands/audio_text.py @@ -1,24 +1,51 @@ +"""Commands for converting audio to text.""" import json import requests +from autogpt.commands.command import command from autogpt.config import Config from autogpt.workspace import path_in_workspace -cfg = Config() +CFG = Config() -def read_audio_from_file(audio_path): - audio_path = path_in_workspace(audio_path) +@command( + "read_audio_from_file", + "Convert Audio to text", + '"filename": ""', + CFG.huggingface_audio_to_text_model, + "Configure huggingface_audio_to_text_model.", +) +def read_audio_from_file(filename: str) -> str: + """ + Convert audio to text. + + Args: + audio_path (str): The path to the audio file + + Returns: + str: The text from the audio + """ + audio_path = path_in_workspace(filename) with open(audio_path, "rb") as audio_file: audio = audio_file.read() return read_audio(audio) -def read_audio(audio): - model = cfg.huggingface_audio_to_text_model +def read_audio(audio: bytes) -> str: + """ + Convert audio to text. 
+ + Args: + audio (bytes): The audio to convert + + Returns: + str: The text from the audio + """ + model = CFG.huggingface_audio_to_text_model api_url = f"https://api-inference.huggingface.co/models/{model}" - api_token = cfg.huggingface_api_token + api_token = CFG.huggingface_api_token headers = {"Authorization": f"Bearer {api_token}"} if api_token is None: @@ -33,4 +60,4 @@ def read_audio(audio): ) text = json.loads(response.content.decode("utf-8"))["text"] - return "The audio says: " + text + return f"The audio says: {text}" diff --git a/autogpt/commands/command.py b/autogpt/commands/command.py new file mode 100644 index 00000000..e97af008 --- /dev/null +++ b/autogpt/commands/command.py @@ -0,0 +1,153 @@ +import importlib +import inspect +from typing import Any, Callable, Optional + +# Unique identifier for auto-gpt commands +AUTO_GPT_COMMAND_IDENTIFIER = "auto_gpt_command" + + +class Command: + """A class representing a command. + + Attributes: + name (str): The name of the command. + description (str): A brief description of what the command does. + signature (str): The signature of the function that the command executes. Defaults to None. + """ + + def __init__( + self, + name: str, + description: str, + method: Callable[..., Any], + signature: str = "", + enabled: bool = True, + disabled_reason: Optional[str] = None, + ): + self.name = name + self.description = description + self.method = method + self.signature = signature if signature else str(inspect.signature(self.method)) + self.enabled = enabled + self.disabled_reason = disabled_reason + + def __call__(self, *args, **kwargs) -> Any: + if not self.enabled: + return f"Command '{self.name}' is disabled: {self.disabled_reason}" + return self.method(*args, **kwargs) + + def __str__(self) -> str: + return f"{self.name}: {self.description}, args: {self.signature}" + + +class CommandRegistry: + """ + The CommandRegistry class is a manager for a collection of Command objects. + It allows the registration, modification, and retrieval of Command objects, + as well as the scanning and loading of command plugins from a specified + directory. + """ + + def __init__(self): + self.commands = {} + + def _import_module(self, module_name: str) -> Any: + return importlib.import_module(module_name) + + def _reload_module(self, module: Any) -> Any: + return importlib.reload(module) + + def register(self, cmd: Command) -> None: + self.commands[cmd.name] = cmd + + def unregister(self, command_name: str): + if command_name in self.commands: + del self.commands[command_name] + else: + raise KeyError(f"Command '{command_name}' not found in registry.") + + def reload_commands(self) -> None: + """Reloads all loaded command plugins.""" + for cmd_name in self.commands: + cmd = self.commands[cmd_name] + module = self._import_module(cmd.__module__) + reloaded_module = self._reload_module(module) + if hasattr(reloaded_module, "register"): + reloaded_module.register(self) + + def get_command(self, name: str) -> Callable[..., Any]: + return self.commands[name] + + def call(self, command_name: str, **kwargs) -> Any: + if command_name not in self.commands: + raise KeyError(f"Command '{command_name}' not found in registry.") + command = self.commands[command_name] + return command(**kwargs) + + def command_prompt(self) -> str: + """ + Returns a string representation of all registered `Command` objects for use in a prompt + """ + commands_list = [ + f"{idx + 1}. 
{str(cmd)}" for idx, cmd in enumerate(self.commands.values()) + ] + return "\n".join(commands_list) + + def import_commands(self, module_name: str) -> None: + """ + Imports the specified Python module containing command plugins. + + This method imports the associated module and registers any functions or + classes that are decorated with the `AUTO_GPT_COMMAND_IDENTIFIER` attribute + as `Command` objects. The registered `Command` objects are then added to the + `commands` dictionary of the `CommandRegistry` object. + + Args: + module_name (str): The name of the module to import for command plugins. + """ + + module = importlib.import_module(module_name) + + for attr_name in dir(module): + attr = getattr(module, attr_name) + # Register decorated functions + if hasattr(attr, AUTO_GPT_COMMAND_IDENTIFIER) and getattr( + attr, AUTO_GPT_COMMAND_IDENTIFIER + ): + self.register(attr.command) + # Register command classes + elif ( + inspect.isclass(attr) and issubclass(attr, Command) and attr != Command + ): + cmd_instance = attr() + self.register(cmd_instance) + + +def command( + name: str, + description: str, + signature: str = "", + enabled: bool = True, + disabled_reason: Optional[str] = None, +) -> Callable[..., Any]: + """The command decorator is used to create Command objects from ordinary functions.""" + + def decorator(func: Callable[..., Any]) -> Command: + cmd = Command( + name=name, + description=description, + method=func, + signature=signature, + enabled=enabled, + disabled_reason=disabled_reason, + ) + + def wrapper(*args, **kwargs) -> Any: + return func(*args, **kwargs) + + wrapper.command = cmd + + setattr(wrapper, AUTO_GPT_COMMAND_IDENTIFIER, True) + return wrapper + + return decorator diff --git a/autogpt/commands/execute_code.py b/autogpt/commands/execute_code.py index 11266f85..ff35d428 100644 --- a/autogpt/commands/execute_code.py +++ b/autogpt/commands/execute_code.py @@ -5,19 +5,24 @@ import subprocess import docker from docker.errors import ImageNotFound +from autogpt.commands.command import command +from autogpt.config import Config from autogpt.workspace import WORKSPACE_PATH, path_in_workspace +CFG = Config() -def execute_python_file(file: str) -> str: + +@command("execute_python_file", "Execute Python File", '"filename": ""') +def execute_python_file(filename: str) -> str: """Execute a Python file in a Docker container and return the output Args: - file (str): The name of the file to execute + filename (str): The name of the file to execute Returns: str: The output of the file """ - + file = filename print(f"Executing file '{file}' in workspace '{WORKSPACE_PATH}'") if not file.endswith(".py"): @@ -94,6 +99,15 @@ def execute_python_file(file: str) -> str: return f"Error: {str(e)}" +@command( + "execute_shell", + "Execute Shell Command, non-interactive commands only", + '"command_line": ""', + CFG.execute_local_commands, + "You are not allowed to run local shell commands. To execute" + " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' " + "in your config. Do not attempt to bypass the restriction.", +) def execute_shell(command_line: str) -> str: """Execute a shell command and return the output @@ -103,6 +117,13 @@ def execute_shell(command_line: str) -> str: Returns: str: The output of the command """ + + if not CFG.execute_local_commands: + return ( + "You are not allowed to run local shell commands. To execute" + " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' " + "in your config. Do not attempt to bypass the restriction." 
+ ) current_dir = os.getcwd() # Change dir into workspace if necessary if str(WORKSPACE_PATH) not in current_dir: @@ -117,9 +138,16 @@ def execute_shell(command_line: str) -> str: os.chdir(current_dir) - return output - +@command( + "execute_shell_popen", + "Execute Shell Command, non-interactive commands only", + '"command_line": ""', + CFG.execute_local_commands, + "You are not allowed to run local shell commands. To execute" + " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' " + "in your config. Do not attempt to bypass the restriction.", +) def execute_shell_popen(command_line) -> str: """Execute a shell command with Popen and returns an english description of the event and the process id diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py index ad145ec9..b73fb987 100644 --- a/autogpt/commands/file_operations.py +++ b/autogpt/commands/file_operations.py @@ -9,10 +9,13 @@ import requests from colorama import Back, Fore from requests.adapters import HTTPAdapter, Retry +from autogpt.commands.command import command +from autogpt.config import Config from autogpt.spinner import Spinner from autogpt.utils import readable_file_size from autogpt.workspace import WORKSPACE_PATH, path_in_workspace +CFG = Config() LOG_FILE = "file_logger.txt" LOG_FILE_PATH = WORKSPACE_PATH / LOG_FILE @@ -81,6 +84,7 @@ def split_file( start += max_length - overlap +@command("read_file", "Read file", '"filename": ""') def read_file(filename: str) -> str: """Read a file and return the contents @@ -133,6 +137,7 @@ def ingest_file( print(f"Error while ingesting file '{filename}': {str(e)}") +@command("write_to_file", "Write to file", '"filename": "", "text": ""') def write_to_file(filename: str, text: str) -> str: """Write text to a file @@ -158,6 +163,9 @@ def write_to_file(filename: str, text: str) -> str: return f"Error: {str(e)}" +@command( + "append_to_file", "Append to file", '"filename": "", "text": ""' +) def append_to_file(filename: str, text: str, shouldLog: bool = True) -> str: """Append text to a file @@ -181,6 +189,7 @@ def append_to_file(filename: str, text: str, shouldLog: bool = True) -> str: return f"Error: {str(e)}" +@command("delete_file", "Delete file", '"filename": ""') def delete_file(filename: str) -> str: """Delete a file @@ -201,6 +210,7 @@ def delete_file(filename: str) -> str: return f"Error: {str(e)}" +@command("search_files", "Search Files", '"directory": ""') def search_files(directory: str) -> list[str]: """Search for files in a directory @@ -227,6 +237,13 @@ return found_files +@command( + "download_file", + "Download File", + '"url": "", "filename": ""', + CFG.allow_downloads, + "Error: You do not have user authorization to download files locally.", +) def download_file(url, filename): """Downloads a file Args: diff --git a/autogpt/commands/git_operations.py b/autogpt/commands/git_operations.py index 028f3b8d..1fb99e5b 100644 --- a/autogpt/commands/git_operations.py +++ b/autogpt/commands/git_operations.py @@ -1,26 +1,34 @@ """Git operations for autogpt""" -import git +from git.repo import Repo +from autogpt.commands.command import command from autogpt.config import Config from autogpt.workspace import path_in_workspace CFG = Config() -def clone_repository(repo_url: str, clone_path: str) -> str: +@command( + "clone_repository", + "Clone Repository", + '"repository_url": "", "clone_path": ""', + CFG.github_username and CFG.github_api_key, + "Configure github_username and github_api_key.", +) 
+def clone_repository(repository_url: str, clone_path: str) -> str: """Clone a GitHub repository locally Args: - repo_url (str): The URL of the repository to clone + repository_url (str): The URL of the repository to clone clone_path (str): The path to clone the repository to Returns: str: The result of the clone operation""" - split_url = repo_url.split("//") + split_url = repository_url.split("//") auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url) safe_clone_path = path_in_workspace(clone_path) try: - git.Repo.clone_from(auth_repo_url, safe_clone_path) - return f"""Cloned {repo_url} to {safe_clone_path}""" + Repo.clone_from(auth_repo_url, safe_clone_path) + return f"""Cloned {repository_url} to {safe_clone_path}""" except Exception as e: return f"Error: {str(e)}" diff --git a/autogpt/commands/google_search.py b/autogpt/commands/google_search.py index 7d38ce75..fcc1a9f4 100644 --- a/autogpt/commands/google_search.py +++ b/autogpt/commands/google_search.py @@ -5,11 +5,13 @@ import json from duckduckgo_search import ddg +from autogpt.commands.command import command from autogpt.config import Config CFG = Config() +@command("google", "Google Search", '"query": ""', not CFG.google_api_key) def google_search(query: str, num_results: int = 8) -> str: """Return the results of a Google search @@ -31,9 +33,17 @@ def google_search(query: str, num_results: int = 8) -> str: for j in results: search_results.append(j) - return json.dumps(search_results, ensure_ascii=False, indent=4) + results = json.dumps(search_results, ensure_ascii=False, indent=4) + return safe_google_results(results) +@command( + "google", + "Google Search", + '"query": ""', + bool(CFG.google_api_key), + "Configure google_api_key.", +) def google_official_search(query: str, num_results: int = 8) -> str | list[str]: """Return the results of a Google search using the official Google API @@ -82,6 +92,26 @@ def google_official_search(query: str, num_results: int = 8) -> str | list[str]: return "Error: The provided Google API key is invalid or missing." else: return f"Error: {e}" + # google_result can be a list or a string depending on the search results # Return the list of search result URLs - return search_results_links + return safe_google_results(search_results_links) + + +def safe_google_results(results: str | list) -> str: + """ + Return the results of a google search in a safe format. + + Args: + results (str | list): The search results. + + Returns: + str: The results of the search. + """ + if isinstance(results, list): + safe_message = json.dumps( + [result.encode("utf-8", "ignore").decode("utf-8") for result in results] + ) + else: + safe_message = results.encode("utf-8", "ignore").decode("utf-8") + return safe_message diff --git a/autogpt/commands/image_gen.py b/autogpt/commands/image_gen.py index 0809fcdd..60cdaec0 100644 --- a/autogpt/commands/image_gen.py +++ b/autogpt/commands/image_gen.py @@ -1,6 +1,5 @@ """ Image Generation Module for AutoGPT.""" import io -import os.path import uuid from base64 import b64decode @@ -8,12 +7,14 @@ import openai import requests from PIL import Image +from autogpt.commands.command import command +from autogpt.config import Config from autogpt.workspace import path_in_workspace CFG = Config() +@command("generate_image", "Generate Image", '"prompt": ""', CFG.image_provider) def generate_image(prompt: str, size: int = 256) -> str: """Generate an image from a prompt. 
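The extra decorator arguments seen above (`generate_image` gates on `CFG.image_provider`, `google_official_search` on `bool(CFG.google_api_key)`) map to the `enabled`/`disabled_reason` parameters of `@command`. A hedged sketch of the resulting behavior; the `ping` command and its config flag are invented for illustration:

```python
from autogpt.commands.command import command

@command(
    "ping",  # hypothetical command, not part of this patch
    "Ping a host",
    '"host": "<host>"',
    enabled=False,  # in practice the result of a config check, e.g. CFG.allow_ping (assumed)
    disabled_reason="Configure ALLOW_PING to enable.",
)
def ping(host: str) -> str:
    return f"pong from {host}"

# Invoking the registered Command object returns the disabled message
# instead of running the function:
print(ping.command())
# -> Command 'ping' is disabled: Configure ALLOW_PING to enable.
```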
diff --git a/autogpt/commands/improve_code.py b/autogpt/commands/improve_code.py index e3440d8b..41a369b4 100644 --- a/autogpt/commands/improve_code.py +++ b/autogpt/commands/improve_code.py @@ -2,9 +2,15 @@ from __future__ import annotations import json +from autogpt.commands.command import command from autogpt.llm_utils import call_ai_function +@command( + "improve_code", + "Get Improved Code", + '"suggestions": "", "code": ""', +) def improve_code(suggestions: list[str], code: str) -> str: """ A function that takes in code and suggestions and returns a response from create diff --git a/autogpt/commands/twitter.py b/autogpt/commands/twitter.py index 3eaed36e..f0502271 100644 --- a/autogpt/commands/twitter.py +++ b/autogpt/commands/twitter.py @@ -1,12 +1,30 @@ +"""A module that contains a command to send a tweet.""" import os import tweepy from dotenv import load_dotenv +from autogpt.commands.command import command + load_dotenv() -def send_tweet(tweet_text): +@command( + "send_tweet", + "Send Tweet", + '"tweet_text": ""', +) +def send_tweet(tweet_text: str) -> str: + """ + A function that takes in a string and returns a response from create chat + completion api call. + + Args: + tweet_text (str): Text to be tweeted. + + Returns: + A result from sending the tweet. + """ consumer_key = os.environ.get("TW_CONSUMER_KEY") consumer_secret = os.environ.get("TW_CONSUMER_SECRET") access_token = os.environ.get("TW_ACCESS_TOKEN") @@ -21,6 +39,6 @@ def send_tweet(tweet_text): # Send tweet try: api.update_status(tweet_text) - print("Tweet sent successfully!") + return "Tweet sent successfully!" except tweepy.TweepyException as e: - print("Error sending tweet: {}".format(e.reason)) + return f"Error sending tweet: {e.reason}" diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py index 11bdfeb1..e0e0d70a 100644 --- a/autogpt/commands/web_selenium.py +++ b/autogpt/commands/web_selenium.py @@ -18,6 +18,7 @@ from webdriver_manager.chrome import ChromeDriverManager from webdriver_manager.firefox import GeckoDriverManager import autogpt.processing.text as summary +from autogpt.commands.command import command from autogpt.config import Config from autogpt.processing.html import extract_hyperlinks, format_hyperlinks @@ -25,6 +26,11 @@ FILE_DIR = Path(__file__).parent.parent CFG = Config() +@command( + "browse_website", + "Browse Website", + '"url": "", "question": ""', +) def browse_website(url: str, question: str) -> tuple[str, WebDriver]: """Browse a website and return the answer and links to the user diff --git a/autogpt/commands/write_tests.py b/autogpt/commands/write_tests.py index 35a08653..91cd9304 100644 --- a/autogpt/commands/write_tests.py +++ b/autogpt/commands/write_tests.py @@ -3,9 +3,15 @@ from __future__ import annotations import json +from autogpt.commands.command import command from autogpt.llm_utils import call_ai_function +@command( + "write_tests", + "Write Tests", + '"code": "", "focus": ""', +) def write_tests(code: str, focus: list[str]) -> str: """ A function that takes in code and focus topics and returns a response from create diff --git a/autogpt/config/ai_config.py b/autogpt/config/ai_config.py index d50c30be..1e48ab4d 100644 --- a/autogpt/config/ai_config.py +++ b/autogpt/config/ai_config.py @@ -5,10 +5,16 @@ A module that contains the AIConfig class object that contains the configuration from __future__ import annotations import os -from typing import Type +from pathlib import Path +from typing import Optional, Type import yaml +from 
autogpt.prompts.generator import PromptGenerator + +# Soon this will go in a folder where it remembers more stuff about the run(s) +SAVE_FILE = str(Path(os.getcwd()) / "ai_settings.yaml") + class AIConfig: """ @@ -38,9 +44,8 @@ class AIConfig: self.ai_name = ai_name self.ai_role = ai_role self.ai_goals = ai_goals - - # Soon this will go in a folder where it remembers more stuff about the run(s) - SAVE_FILE = os.path.join(os.path.dirname(__file__), "..", "ai_settings.yaml") + self.prompt_generator = None + self.command_registry = None @staticmethod def load(config_file: str = SAVE_FILE) -> "AIConfig": @@ -89,7 +94,9 @@ class AIConfig: with open(config_file, "w", encoding="utf-8") as file: yaml.dump(config, file, allow_unicode=True) - def construct_full_prompt(self) -> str: + def construct_full_prompt( + self, prompt_generator: Optional[PromptGenerator] = None + ) -> str: """ Returns a prompt to the user with the class information in an organized fashion. @@ -108,14 +115,25 @@ class AIConfig: "" ) - from autogpt.prompt import get_prompt + from autogpt.config import Config + from autogpt.prompts.prompt import build_default_prompt_generator + + cfg = Config() + if prompt_generator is None: + prompt_generator = build_default_prompt_generator() + prompt_generator.goals = self.ai_goals + prompt_generator.name = self.ai_name + prompt_generator.role = self.ai_role + prompt_generator.command_registry = self.command_registry + for plugin in cfg.plugins: + if not plugin.can_handle_post_prompt(): + continue + prompt_generator = plugin.post_prompt(prompt_generator) # Construct full prompt - full_prompt = ( - f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n" - ) + full_prompt = f"You are {prompt_generator.name}, {prompt_generator.role}\n{prompt_start}\n\nGOALS:\n\n" for i, goal in enumerate(self.ai_goals): full_prompt += f"{i+1}. {goal}\n" - - full_prompt += f"\n\n{get_prompt()}" + self.prompt_generator = prompt_generator + full_prompt += f"\n\n{prompt_generator.generate_prompt_string()}" return full_prompt diff --git a/autogpt/config/config.py b/autogpt/config/config.py index c284a4ac..801df2bb 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -1,8 +1,10 @@ """Configuration class to store the state of bools for different scripts access.""" import os +from typing import List import openai import yaml +from auto_gpt_plugin_template import AutoGPTPluginTemplate from colorama import Fore from dotenv import load_dotenv @@ -123,6 +125,18 @@ class Config(metaclass=Singleton): # Initialize the OpenAI API client openai.api_key = self.openai_api_key + self.plugins_dir = os.getenv("PLUGINS_DIR", "plugins") + self.plugins: List[AutoGPTPluginTemplate] = [] + self.plugins_openai = [] + + plugins_allowlist = os.getenv("ALLOWLISTED_PLUGINS") + if plugins_allowlist: + plugins_allowlist = plugins_allowlist.split(",") + self.plugins_whitelist = plugins_allowlist + else: + self.plugins_whitelist = [] + self.plugins_blacklist = [] + def get_azure_deployment_id_for_model(self, model: str) -> str: """ Returns the relevant deployment id for the model specified. 
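The plugin hooks threaded through `Agent`, `AgentManager`, and `chat_with_ai` all follow the contract established here: probe with `can_handle_<hook>()`, then call the hook. An illustrative stub, not from the patch; a real plugin subclasses `AutoGPTPluginTemplate` and must implement every hook pair, as `BaseOpenAIPlugin` below does:

```python
class ShoutPlugin:
    """Toy plugin; a real one derives from AutoGPTPluginTemplate."""

    def can_handle_post_command(self) -> bool:
        return True

    def post_command(self, command_name: str, response: str) -> str:
        return response.upper()

# The dispatch loop pattern used throughout this patch:
result = "cloned repo"
for plugin in [ShoutPlugin()]:
    if not plugin.can_handle_post_command():
        continue
    result = plugin.post_command("clone_repository", result)
print(result)  # -> CLONED REPO
```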
@@ -241,6 +255,10 @@ class Config(metaclass=Singleton): """Set the debug mode value.""" self.debug_mode = value + def set_plugins(self, value: list) -> None: + """Set the plugins value.""" + self.plugins = value + def check_openai_api_key() -> None: """Check if the OpenAI API key is set in config.py or as an environment variable.""" diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py index 821820ff..8b85959c 100644 --- a/autogpt/llm_utils.py +++ b/autogpt/llm_utils.py @@ -1,7 +1,7 @@ from __future__ import annotations import time -from ast import List +from typing import List, Optional import openai from colorama import Fore, Style @@ -9,6 +9,7 @@ from openai.error import APIError, RateLimitError from autogpt.config import Config from autogpt.logs import logger +from autogpt.types.openai import Message CFG = Config() @@ -37,8 +38,8 @@ def call_ai_function( # For each arg, if any are None, convert to "None": args = [str(arg) if arg is not None else "None" for arg in args] # parse args to comma separated string - args = ", ".join(args) - messages = [ + args: str = ", ".join(args) + messages: List[Message] = [ { "role": "system", "content": f"You are now the following python function: ```# {description}" @@ -53,15 +54,15 @@ def call_ai_function( # Overly simple abstraction until we create something better # simple retry mechanism when getting a rate error or a bad gateway def create_chat_completion( - messages: list, # type: ignore - model: str | None = None, + messages: List[Message], # type: ignore + model: Optional[str] = None, temperature: float = CFG.temperature, - max_tokens: int | None = None, + max_tokens: Optional[int] = None, ) -> str: """Create a chat completion using the OpenAI API Args: - messages (list[dict[str, str]]): The messages to send to the chat completion + messages (List[Message]): The messages to send to the chat completion model (str, optional): The model to use. Defaults to None. temperature (float, optional): The temperature to use. Defaults to 0.9. max_tokens (int, optional): The max tokens to use. Defaults to None. @@ -69,15 +70,28 @@ def create_chat_completion( Returns: str: The response from the chat completion """ - response = None num_retries = 10 warned_user = False if CFG.debug_mode: print( - Fore.GREEN - + f"Creating chat completion with model {model}, temperature {temperature}," - f" max_tokens {max_tokens}" + Fore.RESET + f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}" ) + for plugin in CFG.plugins: + if plugin.can_handle_chat_completion( + messages=messages, + model=model, + temperature=temperature, + max_tokens=max_tokens, + ): + message = plugin.handle_chat_completion( + messages=messages, + model=model, + temperature=temperature, + max_tokens=max_tokens, + ) + if message is not None: + return message + response = None for attempt in range(num_retries): backoff = 2 ** (attempt + 2) try: @@ -100,8 +114,7 @@ def create_chat_completion( except RateLimitError: if CFG.debug_mode: print( - Fore.RED + "Error: ", - f"Reached rate limit, passing..." + Fore.RESET, + f"{Fore.RED}Error: ", f"Reached rate limit, passing...{Fore.RESET}" ) if not warned_user: logger.double_check( @@ -110,16 +123,14 @@ def create_chat_completion( ) warned_user = True except APIError as e: - if e.http_status == 502: - pass - else: + if e.http_status != 502: raise if attempt == num_retries - 1: raise if CFG.debug_mode: print( - Fore.RED + "Error: ", - f"API Bad gateway. Waiting {backoff} seconds..." 
+ Fore.RESET,
+                f"{Fore.RED}Error: ",
+                f"API Bad gateway. Waiting {backoff} seconds...{Fore.RESET}",
             )
         time.sleep(backoff)
     if response is None:
@@ -134,8 +145,12 @@
             raise RuntimeError(f"Failed to get response after {num_retries} retries")
         else:
             quit(1)
-
-    return response.choices[0].message["content"]
+    resp = response.choices[0].message["content"]
+    for plugin in CFG.plugins:
+        if not plugin.can_handle_on_response():
+            continue
+        resp = plugin.on_response(resp)
+    return resp


 def create_embedding_with_ada(text) -> list:
@@ -158,15 +173,13 @@
         except RateLimitError:
             pass
         except APIError as e:
-            if e.http_status == 502:
-                pass
-            else:
+            if e.http_status != 502:
                 raise
             if attempt == num_retries - 1:
                 raise
         if CFG.debug_mode:
             print(
-                Fore.RED + "Error: ",
-                f"API Bad gateway. Waiting {backoff} seconds..." + Fore.RESET,
+                f"{Fore.RED}Error: ",
+                f"API Bad gateway. Waiting {backoff} seconds...{Fore.RESET}",
             )
         time.sleep(backoff)
diff --git a/autogpt/models/base_open_ai_plugin.py b/autogpt/models/base_open_ai_plugin.py
new file mode 100644
index 00000000..046295c0
--- /dev/null
+++ b/autogpt/models/base_open_ai_plugin.py
@@ -0,0 +1,199 @@
+"""Base class that wraps manifest-based OpenAI plugins as Auto-GPT plugins."""
+from typing import Any, Dict, List, Optional, Tuple, TypedDict, TypeVar
+
+from auto_gpt_plugin_template import AutoGPTPluginTemplate
+
+PromptGenerator = TypeVar("PromptGenerator")
+
+
+class Message(TypedDict):
+    role: str
+    content: str
+
+
+class BaseOpenAIPlugin(AutoGPTPluginTemplate):
+    """
+    Adapts an OpenAI plugin (manifest, OpenAPI spec, and generated client)
+    to the Auto-GPT plugin interface.
+    """
+
+    def __init__(self, manifests_specs_clients: dict):
+        # super().__init__()
+        self._name = manifests_specs_clients["manifest"]["name_for_model"]
+        self._version = manifests_specs_clients["manifest"]["schema_version"]
+        self._description = manifests_specs_clients["manifest"]["description_for_model"]
+        self._client = manifests_specs_clients["client"]
+        self._manifest = manifests_specs_clients["manifest"]
+        self._openapi_spec = manifests_specs_clients["openapi_spec"]
+
+    def can_handle_on_response(self) -> bool:
+        """This method is called to check that the plugin can
+        handle the on_response method.
+        Returns:
+            bool: True if the plugin can handle the on_response method."""
+        return False
+
+    def on_response(self, response: str, *args, **kwargs) -> str:
+        """This method is called when a response is received from the model."""
+        return response
+
+    def can_handle_post_prompt(self) -> bool:
+        """This method is called to check that the plugin can
+        handle the post_prompt method.
+        Returns:
+            bool: True if the plugin can handle the post_prompt method."""
+        return False
+
+    def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator:
+        """This method is called just after the generate_prompt is called,
+        but actually before the prompt is generated.
+        Args:
+            prompt (PromptGenerator): The prompt generator.
+        Returns:
+            PromptGenerator: The prompt generator.
+        """
+        return prompt
+
+    def can_handle_on_planning(self) -> bool:
+        """This method is called to check that the plugin can
+        handle the on_planning method.
+        Returns:
+            bool: True if the plugin can handle the on_planning method."""
+        return False
+
+    def on_planning(
+        self, prompt: PromptGenerator, messages: List[Message]
+    ) -> Optional[str]:
+        """This method is called before the planning chat completion is done.
+        Args:
+            prompt (PromptGenerator): The prompt generator.
+            messages (List[Message]): The list of messages.
+        Returns:
+            Optional[str]: The resulting message.
+ """ + pass + + def can_handle_post_planning(self) -> bool: + """This method is called to check that the plugin can + handle the post_planning method. + Returns: + bool: True if the plugin can handle the post_planning method.""" + return False + + def post_planning(self, response: str) -> str: + """This method is called after the planning chat completion is done. + Args: + response (str): The response. + Returns: + str: The resulting response. + """ + return response + + def can_handle_pre_instruction(self) -> bool: + """This method is called to check that the plugin can + handle the pre_instruction method. + Returns: + bool: True if the plugin can handle the pre_instruction method.""" + return False + + def pre_instruction(self, messages: List[Message]) -> List[Message]: + """This method is called before the instruction chat is done. + Args: + messages (List[Message]): The list of context messages. + Returns: + List[Message]: The resulting list of messages. + """ + return messages + + def can_handle_on_instruction(self) -> bool: + """This method is called to check that the plugin can + handle the on_instruction method. + Returns: + bool: True if the plugin can handle the on_instruction method.""" + return False + + def on_instruction(self, messages: List[Message]) -> Optional[str]: + """This method is called when the instruction chat is done. + Args: + messages (List[Message]): The list of context messages. + Returns: + Optional[str]: The resulting message. + """ + pass + + def can_handle_post_instruction(self) -> bool: + """This method is called to check that the plugin can + handle the post_instruction method. + Returns: + bool: True if the plugin can handle the post_instruction method.""" + return False + + def post_instruction(self, response: str) -> str: + """This method is called after the instruction chat is done. + Args: + response (str): The response. + Returns: + str: The resulting response. + """ + return response + + def can_handle_pre_command(self) -> bool: + """This method is called to check that the plugin can + handle the pre_command method. + Returns: + bool: True if the plugin can handle the pre_command method.""" + return False + + def pre_command( + self, command_name: str, arguments: Dict[str, Any] + ) -> Tuple[str, Dict[str, Any]]: + """This method is called before the command is executed. + Args: + command_name (str): The command name. + arguments (Dict[str, Any]): The arguments. + Returns: + Tuple[str, Dict[str, Any]]: The command name and the arguments. + """ + return command_name, arguments + + def can_handle_post_command(self) -> bool: + """This method is called to check that the plugin can + handle the post_command method. + Returns: + bool: True if the plugin can handle the post_command method.""" + return False + + def post_command(self, command_name: str, response: str) -> str: + """This method is called after the command is executed. + Args: + command_name (str): The command name. + response (str): The response. + Returns: + str: The resulting response. + """ + return response + + def can_handle_chat_completion( + self, messages: Dict[Any, Any], model: str, temperature: float, max_tokens: int + ) -> bool: + """This method is called to check that the plugin can + handle the chat_completion method. + Args: + messages (List[Message]): The messages. + model (str): The model name. + temperature (float): The temperature. + max_tokens (int): The max tokens. 
+        Returns:
+            bool: True if the plugin can handle the chat_completion method."""
+        return False
+
+    def handle_chat_completion(
+        self, messages: List[Message], model: str, temperature: float, max_tokens: int
+    ) -> str:
+        """This method is called to handle the chat completion.
+        Args:
+            messages (List[Message]): The messages.
+            model (str): The model name.
+            temperature (float): The temperature.
+            max_tokens (int): The max tokens.
+        Returns:
+            str: The resulting response.
+        """
+        pass
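BaseOpenAIPlugin is constructed from a single dictionary. A minimal sketch of the shape it expects, with only the keys read in `__init__` above (all values are illustrative, and the `auto-gpt-plugin-template` package is assumed to be installed):

    from autogpt.models.base_open_ai_plugin import BaseOpenAIPlugin

    manifests_specs_clients = {
        "manifest": {
            "name_for_model": "todo",              # hypothetical plugin name
            "schema_version": "v1",
            "description_for_model": "Manage a TODO list",
        },
        "openapi_spec": {},  # parsed OpenAPI document for the plugin's API
        "client": None,      # generated client instance; None for illustration
    }

    plugin = BaseOpenAIPlugin(manifests_specs_clients)
    assert plugin._name == "todo"
    assert not plugin.can_handle_on_response()  # every hook defaults to False

Since every `can_handle_*` method returns False, a wrapped OpenAI plugin is inert until a subclass opts in to specific hooks.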
+ """ + # TODO add directory scan + manifests = {} + for url in cfg.plugins_openai: + openai_plugin_client_dir = f"{cfg.plugins_dir}/openai/{urlparse(url).netloc}" + create_directory_if_not_exists(openai_plugin_client_dir) + if not os.path.exists(f"{openai_plugin_client_dir}/ai-plugin.json"): + try: + response = requests.get(f"{url}/.well-known/ai-plugin.json") + if response.status_code == 200: + manifest = response.json() + if manifest["schema_version"] != "v1": + print( + f"Unsupported manifest version: {manifest['schem_version']} for {url}" + ) + continue + if manifest["api"]["type"] != "openapi": + print( + f"Unsupported API type: {manifest['api']['type']} for {url}" + ) + continue + write_dict_to_json_file( + manifest, f"{openai_plugin_client_dir}/ai-plugin.json" + ) + else: + print(f"Failed to fetch manifest for {url}: {response.status_code}") + except requests.exceptions.RequestException as e: + print(f"Error while requesting manifest from {url}: {e}") + else: + print(f"Manifest for {url} already exists") + manifest = json.load(open(f"{openai_plugin_client_dir}/ai-plugin.json")) + if not os.path.exists(f"{openai_plugin_client_dir}/openapi.json"): + openapi_spec = openapi_python_client._get_document( + url=manifest["api"]["url"], path=None, timeout=5 + ) + write_dict_to_json_file( + openapi_spec, f"{openai_plugin_client_dir}/openapi.json" + ) + else: + print(f"OpenAPI spec for {url} already exists") + openapi_spec = json.load(open(f"{openai_plugin_client_dir}/openapi.json")) + manifests[url] = {"manifest": manifest, "openapi_spec": openapi_spec} + return manifests + + +def create_directory_if_not_exists(directory_path: str) -> bool: + """ + Create a directory if it does not exist. + Args: + directory_path (str): Path to the directory. + Returns: + bool: True if the directory was created, else False. + """ + if not os.path.exists(directory_path): + try: + os.makedirs(directory_path) + print(f"Created directory: {directory_path}") + return True + except OSError as e: + print(f"Error creating directory {directory_path}: {e}") + return False + else: + print(f"Directory {directory_path} already exists") + return True + + +def initialize_openai_plugins( + manifests_specs: dict, cfg: Config, debug: bool = False +) -> dict: + """ + Initialize OpenAI plugins. + Args: + manifests_specs (dict): per url dictionary of manifest and spec. + cfg (Config): Config instance including plugins config + debug (bool, optional): Enable debug logging. Defaults to False. + Returns: + dict: per url dictionary of manifest, spec and client. 
+ """ + openai_plugins_dir = f"{cfg.plugins_dir}/openai" + if create_directory_if_not_exists(openai_plugins_dir): + for url, manifest_spec in manifests_specs.items(): + openai_plugin_client_dir = f"{openai_plugins_dir}/{urlparse(url).hostname}" + _meta_option = (openapi_python_client.MetaType.SETUP,) + _config = OpenAPIConfig( + **{ + "project_name_override": "client", + "package_name_override": "client", + } + ) + prev_cwd = Path.cwd() + os.chdir(openai_plugin_client_dir) + Path("ai-plugin.json") + if not os.path.exists("client"): + client_results = openapi_python_client.create_new_client( + url=manifest_spec["manifest"]["api"]["url"], + path=None, + meta=_meta_option, + config=_config, + ) + if client_results: + print( + f"Error creating OpenAPI client: {client_results[0].header} \n" + f" details: {client_results[0].detail}" + ) + continue + spec = importlib.util.spec_from_file_location( + "client", "client/client/client.py" + ) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + client = module.Client(base_url=url) + os.chdir(prev_cwd) + manifest_spec["client"] = client + return manifests_specs + + +def instantiate_openai_plugin_clients( + manifests_specs_clients: dict, cfg: Config, debug: bool = False +) -> dict: + """ + Instantiates BaseOpenAIPlugin instances for each OpenAI plugin. + Args: + manifests_specs_clients (dict): per url dictionary of manifest, spec and client. + cfg (Config): Config instance including plugins config + debug (bool, optional): Enable debug logging. Defaults to False. + Returns: + plugins (dict): per url dictionary of BaseOpenAIPlugin instances. + + """ + plugins = {} + for url, manifest_spec_client in manifests_specs_clients.items(): + plugins[url] = BaseOpenAIPlugin(manifest_spec_client) + return plugins + + +def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate]: + """Scan the plugins directory for plugins and loads them. + + Args: + cfg (Config): Config instance including plugins config + debug (bool, optional): Enable debug logging. Defaults to False. + + Returns: + List[Tuple[str, Path]]: List of plugins. 
+ """ + loaded_plugins = [] + # Generic plugins + plugins_path_path = Path(cfg.plugins_dir) + for plugin in plugins_path_path.glob("*.zip"): + if module := inspect_zip_for_module(str(plugin), debug): + plugin = Path(plugin) + module = Path(module) + if debug: + print(f"Plugin: {plugin} Module: {module}") + zipped_package = zipimporter(str(plugin)) + zipped_module = zipped_package.load_module(str(module.parent)) + for key in dir(zipped_module): + if key.startswith("__"): + continue + a_module = getattr(zipped_module, key) + a_keys = dir(a_module) + if ( + "_abc_impl" in a_keys + and a_module.__name__ != "AutoGPTPluginTemplate" + and blacklist_whitelist_check(a_module.__name__, cfg) + ): + loaded_plugins.append(a_module()) + # OpenAI plugins + if cfg.plugins_openai: + manifests_specs = fetch_openai_plugins_manifest_and_spec(cfg) + if manifests_specs.keys(): + manifests_specs_clients = initialize_openai_plugins( + manifests_specs, cfg, debug + ) + for url, openai_plugin_meta in manifests_specs_clients.items(): + if blacklist_whitelist_check(url, cfg): + plugin = BaseOpenAIPlugin(openai_plugin_meta) + loaded_plugins.append(plugin) + + if loaded_plugins: + print(f"\nPlugins found: {len(loaded_plugins)}\n" "--------------------") + for plugin in loaded_plugins: + print(f"{plugin._name}: {plugin._version} - {plugin._description}") + return loaded_plugins + + +def blacklist_whitelist_check(plugin_name: str, cfg: Config) -> bool: + """Check if the plugin is in the whitelist or blacklist. + + Args: + plugin_name (str): Name of the plugin. + cfg (Config): Config object. + + Returns: + True or False + """ + if plugin_name in cfg.plugins_blacklist: + return False + if plugin_name in cfg.plugins_whitelist: + return True + ack = input( + f"WARNNG Plugin {plugin_name} found. But not in the" + " whitelist... Load? (y/n): " + ) + return ack.lower() == "y" diff --git a/autogpt/prompts/__init__.py b/autogpt/prompts/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/autogpt/promptgenerator.py b/autogpt/prompts/generator.py similarity index 78% rename from autogpt/promptgenerator.py rename to autogpt/prompts/generator.py index 0ad7046a..c9a441d8 100644 --- a/autogpt/promptgenerator.py +++ b/autogpt/prompts/generator.py @@ -1,8 +1,6 @@ """ A module for generating custom prompt strings.""" -from __future__ import annotations - import json -from typing import Any +from typing import Any, Callable, Dict, List, Optional class PromptGenerator: @@ -20,6 +18,10 @@ class PromptGenerator: self.commands = [] self.resources = [] self.performance_evaluation = [] + self.goals = [] + self.command_registry = None + self.name = "Bob" + self.role = "AI" self.response_format = { "thoughts": { "text": "thought", @@ -40,7 +42,13 @@ class PromptGenerator: """ self.constraints.append(constraint) - def add_command(self, command_label: str, command_name: str, args=None) -> None: + def add_command( + self, + command_label: str, + command_name: str, + args=None, + function: Optional[Callable] = None, + ) -> None: """ Add a command to the commands list with a label, name, and optional arguments. @@ -49,6 +57,8 @@ class PromptGenerator: command_name (str): The name of the command. args (dict, optional): A dictionary containing argument names and their values. Defaults to None. + function (callable, optional): A callable function to be called when + the command is executed. Defaults to None. 
""" if args is None: args = {} @@ -59,11 +69,12 @@ class PromptGenerator: "label": command_label, "name": command_name, "args": command_args, + "function": function, } self.commands.append(command) - def _generate_command_string(self, command: dict[str, Any]) -> str: + def _generate_command_string(self, command: Dict[str, Any]) -> str: """ Generate a formatted string representation of a command. @@ -96,7 +107,7 @@ class PromptGenerator: """ self.performance_evaluation.append(evaluation) - def _generate_numbered_list(self, items: list[Any], item_type="list") -> str: + def _generate_numbered_list(self, items: List[Any], item_type="list") -> str: """ Generate a numbered list from given items based on the item_type. @@ -109,10 +120,16 @@ class PromptGenerator: str: The formatted numbered list. """ if item_type == "command": - return "\n".join( - f"{i+1}. {self._generate_command_string(item)}" - for i, item in enumerate(items) - ) + command_strings = [] + if self.command_registry: + command_strings += [ + str(item) + for item in self.command_registry.commands.values() + if item.enabled + ] + # These are the commands that are added manually, do_nothing and terminate + command_strings += [self._generate_command_string(item) for item in items] + return "\n".join(f"{i+1}. {item}" for i, item in enumerate(command_strings)) else: return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items)) @@ -134,5 +151,5 @@ class PromptGenerator: f"{self._generate_numbered_list(self.performance_evaluation)}\n\n" "You should only respond in JSON format as described below \nResponse" f" Format: \n{formatted_response_format} \nEnsure the response can be" - " parsed by Python json.loads" + "parsed by Python json.loads" ) diff --git a/autogpt/prompt.py b/autogpt/prompts/prompt.py similarity index 50% rename from autogpt/prompt.py rename to autogpt/prompts/prompt.py index 08754605..79de04ea 100644 --- a/autogpt/prompt.py +++ b/autogpt/prompts/prompt.py @@ -1,17 +1,16 @@ from colorama import Fore -from autogpt.config import Config from autogpt.config.ai_config import AIConfig from autogpt.config.config import Config from autogpt.logs import logger -from autogpt.promptgenerator import PromptGenerator +from autogpt.prompts.generator import PromptGenerator from autogpt.setup import prompt_user from autogpt.utils import clean_input CFG = Config() -def get_prompt() -> str: +def build_default_prompt_generator() -> PromptGenerator: """ This function generates a prompt string that includes various constraints, commands, resources, and performance evaluations. @@ -20,9 +19,6 @@ def get_prompt() -> str: str: The generated prompt string. """ - # Initialize the Config object - cfg = Config() - # Initialize the PromptGenerator object prompt_generator = PromptGenerator() @@ -39,96 +35,12 @@ def get_prompt() -> str: prompt_generator.add_constraint( 'Exclusively use the commands listed in double quotes e.g. 
"command name"' ) - prompt_generator.add_constraint( - "Use subprocesses for commands that will not terminate within a few minutes" - ) # Define the command list commands = [ - ("Google Search", "google", {"input": ""}), - ( - "Browse Website", - "browse_website", - {"url": "", "question": ""}, - ), - ( - "Start GPT Agent", - "start_agent", - {"name": "", "task": "", "prompt": ""}, - ), - ( - "Message GPT Agent", - "message_agent", - {"key": "", "message": ""}, - ), - ("List GPT Agents", "list_agents", {}), - ("Delete GPT Agent", "delete_agent", {"key": ""}), - ( - "Clone Repository", - "clone_repository", - {"repository_url": "", "clone_path": ""}, - ), - ("Write to file", "write_to_file", {"file": "", "text": ""}), - ("Read file", "read_file", {"file": ""}), - ("Append to file", "append_to_file", {"file": "", "text": ""}), - ("Delete file", "delete_file", {"file": ""}), - ("Search Files", "search_files", {"directory": ""}), - ("Analyze Code", "analyze_code", {"code": ""}), - ( - "Get Improved Code", - "improve_code", - {"suggestions": "", "code": ""}, - ), - ( - "Write Tests", - "write_tests", - {"code": "", "focus": ""}, - ), - ("Execute Python File", "execute_python_file", {"file": ""}), - ("Generate Image", "generate_image", {"prompt": ""}), - ("Send Tweet", "send_tweet", {"text": ""}), - ] - - # Only add the audio to text command if the model is specified - if cfg.huggingface_audio_to_text_model: - commands.append( - ("Convert Audio to text", "read_audio_from_file", {"file": ""}), - ) - - # Only add shell command to the prompt if the AI is allowed to execute it - if cfg.execute_local_commands: - commands.append( - ( - "Execute Shell Command, non-interactive commands only", - "execute_shell", - {"command_line": ""}, - ), - ) - commands.append( - ( - "Execute Shell Command Popen, non-interactive commands only", - "execute_shell_popen", - {"command_line": ""}, - ), - ) - - # Only add the download file command if the AI is allowed to execute it - if cfg.allow_downloads: - commands.append( - ( - "Downloads a file from the internet, and stores it locally", - "download_file", - {"url": "", "file": ""}, - ), - ) - - # Add these command last. - commands.append( ("Do Nothing", "do_nothing", {}), - ) - commands.append( ("Task Complete (Shutdown)", "task_complete", {"reason": ""}), - ) + ] # Add commands to the PromptGenerator object for command_label, command_name, args in commands: @@ -159,12 +71,11 @@ def get_prompt() -> str: "Every command has a cost, so be smart and efficient. Aim to complete tasks in" " the least number of steps." 
) - - # Generate the prompt string - return prompt_generator.generate_prompt_string() + prompt_generator.add_performance_evaluation("Write all code to a file.") + return prompt_generator -def construct_prompt() -> str: +def construct_main_ai_config() -> AIConfig: """Construct the prompt for the AI to respond to Returns: @@ -196,8 +107,4 @@ Continue (y/n): """ config = prompt_user() config.save(CFG.ai_settings_file) - # Get rid of this global: - global ai_name - ai_name = config.ai_name - - return config.construct_full_prompt() + return config diff --git a/autogpt/token_counter.py b/autogpt/token_counter.py index 338fe6be..2d50547b 100644 --- a/autogpt/token_counter.py +++ b/autogpt/token_counter.py @@ -1,13 +1,16 @@ """Functions for counting the number of tokens in a message or string.""" from __future__ import annotations +from typing import List + import tiktoken from autogpt.logs import logger +from autogpt.types.openai import Message def count_message_tokens( - messages: list[dict[str, str]], model: str = "gpt-3.5-turbo-0301" + messages: List[Message], model: str = "gpt-3.5-turbo-0301" ) -> int: """ Returns the number of tokens used by a list of messages. diff --git a/autogpt/types/openai.py b/autogpt/types/openai.py new file mode 100644 index 00000000..2af85785 --- /dev/null +++ b/autogpt/types/openai.py @@ -0,0 +1,9 @@ +"""Type helpers for working with the OpenAI library""" +from typing import TypedDict + + +class Message(TypedDict): + """OpenAI Message object containing a role and the message content""" + + role: str + content: str diff --git a/autogpt/utils.py b/autogpt/utils.py index e93d5ac7..dffd0662 100644 --- a/autogpt/utils.py +++ b/autogpt/utils.py @@ -3,7 +3,7 @@ import os import requests import yaml from colorama import Fore -from git import Repo +from git.repo import Repo def clean_input(prompt: str = ""): diff --git a/plugin.png b/plugin.png new file mode 100644 index 0000000000000000000000000000000000000000..865ce3c922d7783efde0ebaa273519eca202d654 GIT binary patch literal 33356 zcmd42Ra9I-*DVTxU_pZh4M76|f;&xc3GVLh?vg+P!QGt%clXBK-QC?9X!LHr@0@?! 
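The new plugin.png is documentation artwork; the code-level change in this stretch is the shared Message TypedDict, which replaces the loose list[dict[str, str]] annotations across llm_utils and token_counter. A small sketch of how both helpers consume it (illustrative only; the completion call requires a configured OpenAI API key):

    from typing import List

    from autogpt.llm_utils import create_chat_completion
    from autogpt.token_counter import count_message_tokens
    from autogpt.types.openai import Message

    messages: List[Message] = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Say hello."},
    ]
    n_tokens = count_message_tokens(messages)  # model defaults to gpt-3.5-turbo-0301
    reply = create_chat_completion(messages, model="gpt-3.5-turbo")
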
zTRR4wHg~&=pb?D*j8`S9$b?Ot-R<_z31(jT+IsZgfIFsi*+KkRLIXGd*K9%^wkyR= z!@b@`48VtkmGL`_kx1hgeRA%Y4TrlWhlAP?Lbl9nY^psal!?4bcbBO5mndz9MpolB zidwdU{a<-b1V>k6se$dnC!>{C$un#K2Q#lS_H@&I$R6GDq3W>kyGVWwJ6ljM+Rdya zO1)6EZAsKpN6iK#(z028636kV*^u)Ywr3SmY`FTj*^2?c#mFIrZ?x^D0#dKC1T`V< zb79+=U`=IK2D06!T)f;14!@MXmS$QCYq1=p5sn%gNs$ng#nHNhPp(UfJmN9ZVt+d3o)aCVOt^PGc6ALbA;fm^GqvWN4|1!=HY)jD5%{*()0khB^wxEojT=TA(JgXAB`jvpn#D>~W=^&MI`MI*JxuVp2 ze*1Xx6$ze8@0iga-8i^#>#FJ`?GuC&T4v>B@U)S+N^=H0dlmJT+|F$J%=hZaZ@+JH z;3OYAOc}cbS3vKP=xXrHv$v$ zq#3O`1Bzel&Jh-S+p>6chFOWSbP+;3fu;qF&|5?$k52_|e;3=5J|J`?B($U@#_uuH zjoexk7^~(Na4+St6Vth*$3uf9zLk~mObXszBsnWIMnwgN#>r*a-F7nccA~-prk=YS zfIQ-4Wb^4jZ^cYEC=QvrAw?nd_uEvnAeeT@MR1GIqY0uRi*+ lMi+*@qeB(3HS2fd&&hUnqbl#H0d5&iR!UK_MEqmG{{rW%L>K@7 literal 0 HcmV?d00001 diff --git a/plugins/__PUT_PLUGIN_ZIPS_HERE__ b/plugins/__PUT_PLUGIN_ZIPS_HERE__ new file mode 100644 index 00000000..e69de29b diff --git a/requirements.txt b/requirements.txt index 66c90c79..2052a9ac 100644 --- a/requirements.txt +++ b/requirements.txt @@ -31,6 +31,7 @@ pre-commit black isort gitpython==3.1.31 +auto-gpt-plugin-template # Items below this point will not be included in the Docker Image @@ -42,3 +43,7 @@ pytest-benchmark pytest-cov pytest-integration pytest-mock + + +# OpenAI and Generic plugins import +openapi-python-client==0.13.4 diff --git a/scripts/__init__.py b/scripts/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/mocks/__init__.py b/tests/mocks/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/mocks/mock_commands.py b/tests/mocks/mock_commands.py new file mode 100644 index 00000000..d64284bc --- /dev/null +++ b/tests/mocks/mock_commands.py @@ -0,0 +1,6 @@ +from autogpt.commands.command import command + + +@command("function_based", "Function-based test command") +def function_based(arg1: int, arg2: str) -> str: + return f"{arg1} - {arg2}" diff --git a/tests/test_commands.py b/tests/test_commands.py new file mode 100644 index 00000000..8a7771f6 --- /dev/null +++ b/tests/test_commands.py @@ -0,0 +1,177 @@ +import os +import shutil +import sys +from pathlib import Path + +import pytest + +from autogpt.commands.command import Command, CommandRegistry + + +class TestCommand: + @staticmethod + def example_function(arg1: int, arg2: str) -> str: + return f"{arg1} - {arg2}" + + def test_command_creation(self): + cmd = Command( + name="example", description="Example command", method=self.example_function + ) + + assert cmd.name == "example" + assert cmd.description == "Example command" + assert cmd.method == self.example_function + assert cmd.signature == "(arg1: int, arg2: str) -> str" + + def test_command_call(self): + cmd = Command( + name="example", description="Example command", method=self.example_function + ) + + result = cmd(arg1=1, arg2="test") + assert result == "1 - test" + + def test_command_call_with_invalid_arguments(self): + cmd = Command( + name="example", description="Example command", method=self.example_function + ) + + with pytest.raises(TypeError): + cmd(arg1="invalid", does_not_exist="test") + + def test_command_default_signature(self): + cmd = Command( + name="example", description="Example command", method=self.example_function + ) + + assert cmd.signature == "(arg1: int, arg2: str) -> str" + + def test_command_custom_signature(self): + custom_signature = "custom_arg1: int, custom_arg2: str" + cmd = Command( + name="example", + description="Example command", + method=self.example_function, + signature=custom_signature, + ) + + assert 
diff --git a/tests/test_commands.py b/tests/test_commands.py
new file mode 100644
index 00000000..8a7771f6
--- /dev/null
+++ b/tests/test_commands.py
@@ -0,0 +1,177 @@
+import os
+import shutil
+import sys
+from pathlib import Path
+
+import pytest
+
+from autogpt.commands.command import Command, CommandRegistry
+
+
+class TestCommand:
+    @staticmethod
+    def example_function(arg1: int, arg2: str) -> str:
+        return f"{arg1} - {arg2}"
+
+    def test_command_creation(self):
+        cmd = Command(
+            name="example", description="Example command", method=self.example_function
+        )
+
+        assert cmd.name == "example"
+        assert cmd.description == "Example command"
+        assert cmd.method == self.example_function
+        assert cmd.signature == "(arg1: int, arg2: str) -> str"
+
+    def test_command_call(self):
+        cmd = Command(
+            name="example", description="Example command", method=self.example_function
+        )
+
+        result = cmd(arg1=1, arg2="test")
+        assert result == "1 - test"
+
+    def test_command_call_with_invalid_arguments(self):
+        cmd = Command(
+            name="example", description="Example command", method=self.example_function
+        )
+
+        with pytest.raises(TypeError):
+            cmd(arg1="invalid", does_not_exist="test")
+
+    def test_command_default_signature(self):
+        cmd = Command(
+            name="example", description="Example command", method=self.example_function
+        )
+
+        assert cmd.signature == "(arg1: int, arg2: str) -> str"
+
+    def test_command_custom_signature(self):
+        custom_signature = "custom_arg1: int, custom_arg2: str"
+        cmd = Command(
+            name="example",
+            description="Example command",
+            method=self.example_function,
+            signature=custom_signature,
+        )
+
+        assert cmd.signature == custom_signature
+
+
+class TestCommandRegistry:
+    @staticmethod
+    def example_function(arg1: int, arg2: str) -> str:
+        return f"{arg1} - {arg2}"
+
+    def test_register_command(self):
+        """Test that a command can be registered to the registry."""
+        registry = CommandRegistry()
+        cmd = Command(
+            name="example", description="Example command", method=self.example_function
+        )
+
+        registry.register(cmd)
+
+        assert cmd.name in registry.commands
+        assert registry.commands[cmd.name] == cmd
+
+    def test_unregister_command(self):
+        """Test that a command can be unregistered from the registry."""
+        registry = CommandRegistry()
+        cmd = Command(
+            name="example", description="Example command", method=self.example_function
+        )
+
+        registry.register(cmd)
+        registry.unregister(cmd.name)
+
+        assert cmd.name not in registry.commands
+
+    def test_get_command(self):
+        """Test that a command can be retrieved from the registry."""
+        registry = CommandRegistry()
+        cmd = Command(
+            name="example", description="Example command", method=self.example_function
+        )
+
+        registry.register(cmd)
+        retrieved_cmd = registry.get_command(cmd.name)
+
+        assert retrieved_cmd == cmd
+
+    def test_get_nonexistent_command(self):
+        """Test that attempting to get a nonexistent command raises a KeyError."""
+        registry = CommandRegistry()
+
+        with pytest.raises(KeyError):
+            registry.get_command("nonexistent_command")
+
+    def test_call_command(self):
+        """Test that a command can be called through the registry."""
+        registry = CommandRegistry()
+        cmd = Command(
+            name="example", description="Example command", method=self.example_function
+        )
+
+        registry.register(cmd)
+        result = registry.call("example", arg1=1, arg2="test")
+
+        assert result == "1 - test"
+
+    def test_call_nonexistent_command(self):
+        """Test that attempting to call a nonexistent command raises a KeyError."""
+        registry = CommandRegistry()
+
+        with pytest.raises(KeyError):
+            registry.call("nonexistent_command", arg1=1, arg2="test")
+
+    def test_get_command_prompt(self):
+        """Test that the command prompt is correctly formatted."""
+        registry = CommandRegistry()
+        cmd = Command(
+            name="example", description="Example command", method=self.example_function
+        )
+
+        registry.register(cmd)
+        command_prompt = registry.command_prompt()
+
+        assert "(arg1: int, arg2: str)" in command_prompt
+
+    def test_import_mock_commands_module(self):
+        """Test that the registry can import a module with mock command plugins."""
+        registry = CommandRegistry()
+        mock_commands_module = "tests.mocks.mock_commands"
+
+        registry.import_commands(mock_commands_module)
+
+        assert "function_based" in registry.commands
+        assert registry.commands["function_based"].name == "function_based"
+        assert (
+            registry.commands["function_based"].description
+            == "Function-based test command"
+        )
+
+    def test_import_temp_command_file_module(self, tmp_path):
+        """Test that the registry can import a command plugins module from a temp file."""
+        registry = CommandRegistry()
+
+        # Create a temp command file
+        src = Path(os.getcwd()) / "tests/mocks/mock_commands.py"
+        temp_commands_file = tmp_path / "mock_commands.py"
+        shutil.copyfile(src, temp_commands_file)
+
+        # Add the temp directory to sys.path to make the module importable
+        sys.path.append(str(tmp_path))
+
+        temp_commands_module = "mock_commands"
+        registry.import_commands(temp_commands_module)
+
+        # Remove the temp directory from sys.path
+        sys.path.remove(str(tmp_path))
+
+        assert "function_based" in registry.commands
+        assert registry.commands["function_based"].name == "function_based"
+        assert (
+            registry.commands["function_based"].description
+            == "Function-based test command"
+        )
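One fragility worth noting in test_import_temp_command_file_module: if import_commands raises, the tmp_path entry is never removed from sys.path and can leak into later tests. A safer pattern, shown here as a hedged alternative rather than a change to the test (tmp_path is the pytest fixture used above):

    import sys

    sys.path.append(str(tmp_path))
    try:
        registry.import_commands("mock_commands")
    finally:
        sys.path.remove(str(tmp_path))  # clean up even when the import fails
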
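diff --git a/tests/test_prompt_generator.py b/tests/test_prompt_generator.py
index 6a0bfd6c..1fa1754d 100644
--- a/tests/test_prompt_generator.py
+++ b/tests/test_prompt_generator.py
@@ -1,6 +1,6 @@
 from unittest import TestCase

-from autogpt.promptgenerator import PromptGenerator
+from autogpt.prompts.generator import PromptGenerator


 class TestPromptGenerator(TestCase):
@@ -38,6 +38,7 @@ class TestPromptGenerator(TestCase):
             "label": command_label,
             "name": command_name,
             "args": args,
+            "function": None,
         }
         self.assertIn(command, self.generator.commands)
diff --git a/tests/unit/data/test_plugins/Auto-GPT-Plugin-Test-master.zip b/tests/unit/data/test_plugins/Auto-GPT-Plugin-Test-master.zip
new file mode 100644
index 0000000000000000000000000000000000000000..00bc1f4f58dc1c8e07c7ca3adce6be605fe6a3ce
GIT binary patch
literal 14927
[14,927 bytes of base85-encoded archive data omitted]

The zip above is the fixture fed to the plugin-loader tests. Based on inspect_zip_for_module() earlier in the patch, the archive is expected to contain a package whose __init__.py defines an AutoGPTPluginTemplate subclass; a sketch of how the loader probes it (the path is the repository-relative fixture path):

    from autogpt.plugins import inspect_zip_for_module

    module = inspect_zip_for_module(
        "tests/unit/data/test_plugins/Auto-GPT-Plugin-Test-master.zip", debug=True
    )
    # Expected to be a path ending in __init__.py inside the archive, else None
    print(module)
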
z<%^~TPx?IE)$|2-wLY=JNiInG#zt>L7jAteREoh=7b*%72FI&d$WQKj)5y|UKRmy$ z1ab<_VcJ5?;mIqFd!zB0FUw~@a)3>aiHU7AEYh=R9eP~s>rfdf3uB{x{LJTG23h(v z>x@nZsq1YKxS4HyCtY0FL;q72F3pcLyUUlQ@ztM6smi57B4L7$a=w8KkaO7it$WcL z?|;TL<>bkzORdeA3>uLOB_cJHJ!|E?D+Q!riXF?nslqZEL%+_F<#aQY8LOlG(A7e7 z8y13SfVfpciB_i0dFh5SoBLU;J=m0e5uui2Qrpp;!dNHan?fM?VHtI+m-@H+cd@4l zapu&9TwhMQ={FaQ*YQU2qM!lc@AZ~it*xiz*#Y00oc;8JpVI1&wCc^mq~A2jI_Q+b zD!FJQ>9#d*}rkk3_msaUoi!mVaKl_h8pfI7E4Kp$dF2EeAf2w^$j z7wnvGq$l`=Icv!r8cS!L>4XaFz;!%Zd zGp>Jo?DHz;G~LMuxPt%zv4Q>+ zuP2D!s7s(BJ9ApIS;xych+cMQ-X^3pf_Cto12aS$TVbObE0DhNcPA2r7X#!-d{bRG z+6QlJ+X)N~Avn^$1VMmf5gP`xb`Y%B;?wzS6^NWf6vrUGTb4ED>E@1I8oy+37dfD% zExPYsv%>&bEwXR`T`pe6x(b+*U!&4;43+WN;aZ88ZyjhcgsyoJF=DgrDeRSk7?7s`dz&@sd zQE6~~dayykoU_A*jITMD-M2HYUSpUSEkb(OxnI(ydbb`tqnLG)&J<93x8gI;HFVe? z5M_30;)h^++q~C;-q?7&s&@=4qN8o|sGb(HA=2H|)EFE_VA}F*$e;Q2PB2%^q^}l8 zvqQXCJvkH(qq*~%*e{*cqL$4UYFMTkvKHk1(P}VlHCXp(1coZOG>fLCJyiwy5bLr@ zm8T!sNCM3BKvgt^R0If0v$B*TvL=yZV=sm7pmvH^r`kk(cY_i(r{!06=EW`TE|X%8 zA{5FDA}gw)1aWl5kO|&Q>KVc2HP9lqQHNLq&#)u5+W-}-t9Pv>@Fl9Bp=s&Cxz~rZ z7jhU?QXDi%qfhXEDVv`&HO3`K`dVm? z3#+|Dht76d`;a1KTZYDg@s91m0r|t2hgK+}a6i~+OmUX}#~TE110SHjE-bJuizkjR z=~38ABm2$5q9i6HDkny7W%TDPrW>wA2w;R08|Ty2FJv(~hLBMwEjj|2hmggI&IK*o zeB>5Lcd=polz7C%*MuoH1WaC+P~`yXzy)pq15;_4pjc?+7so1elerGk;=n>oI!*0z zJj*`cWYZbd&D&?B6$0(z6ZjO9dq7yAwi>N}Xp>ux6c;=A`tqx_BkhL*X&~8XzBeb*VkdG<51&Rl-Aevh2Oas?(Q}w zy8&)(k?Fi>{oMuADXF7x#0tT0#mIUH>0fneI(=p{CK?b>KRXZ*>2F-Jb+ZImJO0Pk zORbiN!@PLxb4CV;aXxj9Tzgeq>EK@w<+HZ}uHhA6L?;hU>AkcI4f}74yZKx{r(!}i%*16@%9)Y0K))ou5y!&32 zwVI$gn_Cn<954vsR9Oi?QNiJJr=PZM?tJ{)VDWdyA_M<0PbB~mfs}z3eT~qJP z_qrcIk+kn-$;TOh$)VU)Rlg>UelB&>tYM$ddoSM0zn;*1qFK4QJf1f=F>YwdQ>Kkn zys$;C2GD~|4<4B@E(w_G^{=pNgXAC24wuNzSjKy=Ju>QprGJcr)CXP%hoZdqwJNtm zeN4X))W!ItFa>gdVZGn{9QKEf8znBh4eCj2?2S4Y7*#86R_6VMM0aO~F z3MM#HK?O5qOto{*%@ehLc=_A->41y9E0?nD9UPC81%GRpO6)sJUAp^OgUoZ(QfYD+ zs{=ZQsg^n!MElS@Ax{gtb9yI-SwYGmButO~=#4ILpqVI+un(o~Iw*bGG{6`}JCBkM zY#cgFJJU~8$5kpy{JBbo<35ocFR!p=wJUT*P$5A^IiY9)Lc*`Lhc7+T1M>We<-FYp zyvDx=8!`kX`a-upD*{>vBX+dv6W$zB$x_@j#*5{wS>d>|ZY0M5R(UVF4>FIKivS(>+T_7V;^4krd`h z-$Kc@eezQubIRc-xouSwW5)4;1^fLTTi*0BXfYjN#&79#l8KVWDUS&QjxQTV;xuX> zUHkz^m~?lBDWR=cMgeZ1A3`4U%vE%S_*Ai6eJgw6>1!aY@WP}|DB)1oLl~oY*-NbM z)Qklos~$C2zJKyBH{Q$;6b1rCZW9&02Eqo3?eho@I*LLLwl>KRzD zRwUqGOY2RV{jK_aKmxT0w2bgf-qOdkK;MARNC++T=Lin7x{&u@<+%{R&N5Fe z&Z_aBMf(=f0!yL%u}fv|SVXFZvk^e7t(!Vz(RB^fd!31cj+~hA$5HC{&%uT?Zq%Ld ztACKOW3cU)jwcudpsX=~#Osu8jWLaOM3>F9Dfx_~`Y+|c>jzs(I}3BxvOuEyl_b8? 
zIYd+@Hau%}Gc%DkUKnGnb~a6uQ$*s;LsFr5Om1=p5kdQA5S)~iPKCmy5uM)g5J!Lb zSz{hJAdD9_%hsx2{4jD0J286h2c|;`(3>6gL0);6{F|%HdE()HDcFXGaR2Fw5!g=) zcv~1NPHZ`)U}2=|Wvubky88@Lx`oyl*E_OYIj6aA_lWK_%{Qn*Os>GdP|;M6Z7Q7j z84r%C)ksHSsxn(p_jh>#(piHa=!Sv%8wFv27*apT9n1_{>3k~Gl9WZ<9Qm}MpF|vbc#N7cdSbwjs%r*V#T@TujJi%w z1hpaW1F5z_$Mo0(Cc_gB#8({_PH0UzDK!T#5oCZ((TH3#)(U<^7bBwFM#_YYk)34j zA_8SOuMyORRY@4WgFX<_5>F*NV|HO}&r^xc6D)x9iiD6|cJGlbLi_LySf~Q!nHkuZ z)F!F{F;`tvsrO9{^LSQ=y#mP|cu;2(=eOT?Z#Thb#pxY;#;H5uq>-b)85Mo>@3M$a zBeBAYnQHKF$&f~pa!{t`otyB?GGlX?MT?5m-#0h9FDpVdq`+-duFW3?BZFK)6TdYI zfkEH!$@6=!Y+p|%F1VUyNdv*0#QhzH{pFW8W-jdNh_g*Z+NXvy7HCE(Wb{vkX;({KFn;PYG;aB z_$Qao4sk`%YArQiM(`L5VJdoGiG2uU-lkLU23)*2#+nMy~`Rc+$Pg zzi9xSMII--!BUSv6EzV&wIAFSwoal}RzE{d=aOqsM4soRBr~AZan!?CS&h5`dJowV+b-*E%-)3XSt%)zJy3UZR?|Ln z02x9}r(NRIp#?oO3Qyq3m@qX`g_%LV3Xc?6gXK15pD9b_W)(D~o`vhRza8hudd=H@ zVatM6Io6~-5vkglEy7XevJ_S#E?{h|@A!j_(RvG1Zp0?_xE{AS=()L2f!-vP*$4PF zMRpwE;YoH=MpNG2u&~Lbj(ju;RMazvX#C0K*Z5suuMon-;aA4;uc|YZ^O^cfj8tM%gGvtxz!PZ!833|<}{ zWUQ+*^p%f1LHG@P-1CrJ30`x^RlWVOw2^VU&*Kbt_{B5bGf#&JD6f5V_ZYL>n;-6Z zKPf}?2D%Ngv+e0jbmptJ&aN|jo*O&{ZH2+vv_6f3fs&|A|= zS56&LXtT%|7+^dWZzrCH?8T;L%KZlvrpaxh^!!kg0!y9+1ieOj`sl)XWn79u_FK*} zDBABcRVlQeT>(4DG1ju+5nd$3Yi%-TJ<()X6LI&Y1CixhzVQQdn&MhlWbIAwX;w*j zhs$eUqCp)Q-dDwWbk`3{VPY3xMr2Q3(nAGgD+Z%1*io(^V=9$6-bPj5jz^tm`#f(F zEhVq%*g`oDB4;F|N98?*e9JY#x{dNiZ`xblEdaKeRbA%e!al@DaRxHEHefz9dB2eS zWAwe}CVjMd!pm-v<2s8&n2#8Hdq zpD5qN;jV*8s5(hAlUpq(tKa-cPPNLM;S0Jfg@KLiuPZtF<`_l?&B&NI1J$Ew_#t0% z+_n6eQ2-Q@GTphgt-C!2%Y7AORaP+{cZi%#3bR(XAW5u6VZ_0lRHeSXg2a#jeIpA$ z;zO}AE{bxizJZiqnh?32Y?UAv4_R ztrYFh3GYHPAZ5v@!!fjZQs|vqkNQI~O&W;WLV(_Ru{JDQr^$0}@ec*HHhp|)8zGVmqxQ^^f6tgJCjV2ic7UC?TkH3i`fBq~HuS44U^ z&-P$`y16lAbi1rq_iR%Cy3`kzzub1W=fKiNZFM!y*F*V~-W)rK!7nb~mg0!Wv(ua_ zSNBjwLeWIVd5cg>0@_L;|Pq&I?;3i0L zVVwbe0nofpK!Pi8H_k^o{QNEHF4fJEM4NUBdn8YbWyw!oa+iJOp`6D}Yq6iKJY#F+ zfP_ZE-_^u#7kkR(z=1JXKa;)>cmOsR<3pIkOCN&$a%)BOb{G?c_q@N{?ow7_zh58@ zYjWB>E)HOO7Bs@mE#QV4Q4>r`Oe~YKM%=c+)ykQYk%rrvk>UMy4qEjqkVauG+Bp(T z>;`MPY2t^~EZ81%z5DP3pmbY1H}8ihx&V`|X6fk^XvH7z6)Gl2A8N^=gbd3IeoU)O z&D{-5=FPCzDiwh&VX=~ZuA)1=?8qlZP(0V&qU%rgXPVUKgZx$wpCNiU0cbK5oM?RrA z#>l;R$C$$C3wOR(p+iSC-baBu7KN$ng`t9(;kIZnk>tCC%3-&9psZsd>SY*zv@LTs zcH(fj*c8RSi!P;H+f*k$n9>|0opdO!Py2Bj!8$9h z$fd#tOhb*5=YxE91%@o5d)1Fa&c`OeJ%%{o@L5vTi;1xqHDm6tUB8z*!{8) zFsvL26F-;NecgBuYLiLzRI@vnmZ&!tgR|%wNHetgz027YV=$~S5L!B$iuHZ1_hf%L zfa!WU-+XtX;8v+}K&N9vyq7)Ok}Glmii2oEQ9 z+R*vfGVg|**5P9C%a&y0GgwgscuG%%)+J*j*r||02)yeXq_&y1j%Ln)MeDC_ zitXg%z1@s7J5>3YH+9R?Z-KXD_AU*tCOUhyPf_b}wBADtG^Bp~q{|Iwi(OH})iRT? 
zG~IvOCG7td=Xlx85cX4O(nSYS8|2V(jLWxf!ijUeSQ)aYQ&Ihtq!lqhS)uBL&dsuh zDYT%1AbodN-N~bSFBhq6bw?|9Z8@^yhI&KG0L}xQn~V2AJX~6sly{l)v*K` zWLCUSjv&3HJ2RgOR`A&R3KcQ0fcNYrhpxp}=#Iki)%3=&{0kdLiC7Hb;x8vcW$&F2 zXO>1cA08G@cY9VA5G#autd^JLrOF#2Obebny|6Y^&;zCSmC$jlFUEPKj^}>7i95?8 zH&1I1CQQrbXc0Gf7Ye=MMm6bq;E}a$3b~BmjKs2>)AIy9X|$0#>Una2ul*1sLgBs2 zdmQTWHYZpNk+ZhtGQ)wIFX21SWkl|CKr_SY z4;2VHHkml3lJpegfh{uZ&;xcFk>u}BtA^;)UnMI~TewW;(>j%j~maou_egIzLI8487qacmVZ$By7J-3&tBJ_FEphQO%_%K54D&E9*dgwVoBm{R4Pl+7FxpYU%{Egt`3&Ba z+XF2I)WcE6*5H*lAeTz=aPBKvf^G}O85cBB$P(IJh%>a74&%}YOnYOI4L5xwi z2w=yV6ztWUc%onLSFOSX{6+86?2*!sDmNJoG`}(JJP1T>nI`q+AB&-FKj4Ojsg~ey zK==iVpE+}EYndN)7GT4mcifh$q${Dx)kaIu#Y>QMcjuBu7W(XqwwqcRd9BJ#cwS>2 zrDr(1!QKwZtb#c;d0w>rb}tr}|Jvwebc2+6>96T*VO7h~@j)}t(H z%9q;>AM6|SU@b8WdS4&c&*~o8xG7h~Q6|r?SLp;JRx`C3h?*n&|DfX35v0;`)p8qW zEr4%9&6Cao;ZC0r(~a;kWIrf$k@kniyQ@f>vhc`N(BE?O|CCZ1f@Ggh(5t;Nj3bSaIg9e^;qher)gv^vNOQ$`H zD>a1}qoM(fswI_&M#5Pq=-mftO_%M5eVDgj2nQw82WR#>5Rb0*)>DD~wnHY2Pdq=& z0sLWR3~Lla(7VJk3E4p) zS=YEcydY9#=XC!%$0`}XN4Q@$)!)7};@|9I+v(FA=sW(orS2g4_S=9WWdZF#|ETiV zu&U&c)VH^_3S-Izg^H4;{j`c9Qc8>)_JV4vDk+${3dwQF>RIXvp+yLPZlwQ|y}>K{ zZ1KOC|DzH8-i80z)F&ZgFaTIH80b5g!8jN@64DtvnGn+15ZVH4jZO3cmM}lnYlQSL zwr-AQHr9mpPS(0V_u^l4a)0dz=4x6~`4zgLny8 zByn7rQ0W|amOA^oL^&Kv)+i4)naq~`C!YT5;h}#e{N0Opg)cge-^m02@}hr^m{kHl(B%;(a{wXcAW=y>AU`@YH8@gJ`xYg&Kh~YQcA%}?90x@h5v*~&<#-XA=#4xZ z(yaof*q65%iySNCUkP#-RXoL@Sh;aNCe`Oxl?=`rINuaw(G84TF?Y*Hy%m8%$xeVm z8;`sqU1g4ymHa5guP&QYVRwcQx=b6sTeuC6NfQG(72IVK_gU}5tovK7OCAn4P%tSU zk^;1p;i5L_8ahPKH()9bx#uFr9i2)0>Rbjx~b-d$A$zOyZY zJaq3D&B-ni{mk;!?BrUaNXHcJ;&9>+8Sjk#BwllQRk;{t^f~we{NFwKckM0VZ^qie z-th1G+rMKN{|^}b7m2T~sjZ`~Gr-WvTL16Zf601h%!L$R&YEFyfPi@ZKd`#G0BeAw zuI|ee|8wN0HI{AW*^#`jb?qjHHaeLaXMy&S^fVOoL;^Z?WEi1JJnL%5atM{I9+rXE9E+8io|tCt949GZDlzg0i0RnpWaxzIYO(=pu)Yt?Z` zq&Tx5E+uGw02A-DXXfWOLrDc^MHRlOT=cFG5nL)~n!K7-hY@vF_#kz2j(aF~paM%MDqOBrj#!$}Sx8y|$^5}x!o%FZPT0dw2R*Ko! 
zGj7~`vdd^Px$C#8{MxgMLdlBzyO4*hb%s?n=T9KlVR+Eirc77xZG?8sAqX^T)h^Cg zcY_6xP<+C3KD5?dH0nRqdJs6(_ z_A^3v(k!3i9ce|R?Gz4KkVQdR&V5*TjD2BqRcd$+^BOrZL3B;GI(;|SwL_FMgwAbY z6=Nzti5gP0pWDdwB$TqMvG+PKgb6RCw~&0H3k zM{g2U62Xzc*;4K(BFH5vN0h;hirDx@jhGR2^@^7Ew3xicx_{`{rlGe|B@ zl6C1ywj!;0cY;F7M?+OQ%QD3IRn!yM(9Nt-_;@b2&4O`Otd!PxkWuMsg;`^;!qI~1 z^$R%rP1A5Awd(5fp3=|tVQ6*Z4?iFY%^gYZ-l=yi^`b4mQC@G;H05xHqem@NE@)t9 zax7XeP$a^e?0=u0is#^FfAN@}ir+OOPuLFx7eLl)u%6MYn#jm3ldNoJ-%PooWac>| zWao`4W9s7&cqyP#E^lWp-BqtX>{WO}zI@5}Pc6+!o7>vMf7EK+jX@ ztg`YxHMI511$09JZd=t_)llBwJj2*+a-GEXE7xLo_4qwTYCMIdqwgeWIC(NX;v<=t zOfO|9(!AFWiw+4a_|R6@*g@|_p-%j!)+VXJ{QGMp;FW}jznhMZg+mPB5{~W8UjS+M zx$JpR%!Nb^BS1pgzHwoGT0Y$hH{KTbcR>9avY;5ml0;sXCh?c{J9&YFqnoAiUlNHS z+7Sk7#W7_CWywJ)*%7s|X}MC#5h^*UTGffkkK?lwe;n2qQppU{6pw?(krnokHHsDX z|1+cb>CUGXIJWmcjsd@9@c;X@j~5SL9;BZi|JO?(|3?06y?AvE^xH~r@#_xe)8AKahxKT4B-r2p3|3cvV2y?=ch;TQi8&DB5hU)@c3O@4h> z;1`+wrJ(vp^51U`yoSHl8~=jKzg$iDNBG~>$FJe9)w;jn)=5>1L z{a?jLuko*?L%;AHzrp`aNc0;0de7h&nuPqHJ^x>J5MCR2T`>MK5KQ^c240nqzra5O z&FeDo7Z`@>x8VPX-@mv&1I+8f?iV-uH{AcV%zJI(b!GO;1T*zNyZ+znv|s$c7aD)m zD?gt<77b6Df9AicUj7=||Hb*Ak3Z^{pU)rIuKxp8|9`J${%z)8>(i@x<_|MJR}gI4 z-+TYs%zvwHe)ZQ+i?7p>UlxZMe`E1)$;oT>>)hfOTbt?s!v4!l1%J``0|8;Y{NTO> z_RALlB@iiG2dl9$8!M-=zVZJ9CNEMY literal 0 HcmV?d00001 diff --git a/tests/unit/models/test_base_open_api_plugin.py b/tests/unit/models/test_base_open_api_plugin.py new file mode 100644 index 00000000..950a3266 --- /dev/null +++ b/tests/unit/models/test_base_open_api_plugin.py @@ -0,0 +1,79 @@ +from typing import Any, Dict, List, Optional, Tuple + +import pytest + +from autogpt.models.base_open_ai_plugin import ( + BaseOpenAIPlugin, + Message, + PromptGenerator, +) + + +class DummyPlugin(BaseOpenAIPlugin): + pass + + +@pytest.fixture +def dummy_plugin(): + manifests_specs_clients = { + "manifest": { + "name_for_model": "Dummy", + "schema_version": "1.0", + "description_for_model": "A dummy plugin for testing purposes", + }, + "client": None, + "openapi_spec": None, + } + return DummyPlugin(manifests_specs_clients) + + +def test_dummy_plugin_inheritance(dummy_plugin): + assert isinstance(dummy_plugin, BaseOpenAIPlugin) + + +def test_dummy_plugin_name(dummy_plugin): + assert dummy_plugin._name == "Dummy" + + +def test_dummy_plugin_version(dummy_plugin): + assert dummy_plugin._version == "1.0" + + +def test_dummy_plugin_description(dummy_plugin): + assert dummy_plugin._description == "A dummy plugin for testing purposes" + + +def test_dummy_plugin_default_methods(dummy_plugin): + assert not dummy_plugin.can_handle_on_response() + assert not dummy_plugin.can_handle_post_prompt() + assert not dummy_plugin.can_handle_on_planning() + assert not dummy_plugin.can_handle_post_planning() + assert not dummy_plugin.can_handle_pre_instruction() + assert not dummy_plugin.can_handle_on_instruction() + assert not dummy_plugin.can_handle_post_instruction() + assert not dummy_plugin.can_handle_pre_command() + assert not dummy_plugin.can_handle_post_command() + assert not dummy_plugin.can_handle_chat_completion(None, None, None, None) + + assert dummy_plugin.on_response("hello") == "hello" + assert dummy_plugin.post_prompt(None) is None + assert dummy_plugin.on_planning(None, None) is None + assert dummy_plugin.post_planning("world") == "world" + pre_instruction = dummy_plugin.pre_instruction( + [{"role": "system", "content": "Beep, bop, boop"}] + ) + assert isinstance(pre_instruction, list) + assert len(pre_instruction) == 1 + assert pre_instruction[0]["role"] == "system" + assert pre_instruction[0]["content"] == "Beep, 
bop, boop" + assert dummy_plugin.on_instruction(None) is None + assert dummy_plugin.post_instruction("I'm a robot") == "I'm a robot" + pre_command = dummy_plugin.pre_command("evolve", {"continuously": True}) + assert isinstance(pre_command, tuple) + assert len(pre_command) == 2 + assert pre_command[0] == "evolve" + assert pre_command[1]["continuously"] == True + post_command = dummy_plugin.post_command("evolve", "upgraded successfully!") + assert isinstance(post_command, str) + assert post_command == "upgraded successfully!" + assert dummy_plugin.handle_chat_completion(None, None, None, None) is None diff --git a/tests/unit/test_browse_scrape_text.py b/tests/unit/test_browse_scrape_text.py index fea5ebfc..1a36e19b 100644 --- a/tests/unit/test_browse_scrape_text.py +++ b/tests/unit/test_browse_scrape_text.py @@ -9,16 +9,20 @@ Code Analysis Objective: The objective of the "scrape_text" function is to scrape the text content from -a given URL and return it as a string, after removing any unwanted HTML tags and scripts. +a given URL and return it as a string, after removing any unwanted HTML tags and + scripts. Inputs: - url: a string representing the URL of the webpage to be scraped. Flow: -1. Send a GET request to the given URL using the requests library and the user agent header from the config file. +1. Send a GET request to the given URL using the requests library and the user agent + header from the config file. 2. Check if the response contains an HTTP error. If it does, return an error message. -3. Use BeautifulSoup to parse the HTML content of the response and extract all script and style tags. -4. Get the text content of the remaining HTML using the get_text() method of BeautifulSoup. +3. Use BeautifulSoup to parse the HTML content of the response and extract all script + and style tags. +4. Get the text content of the remaining HTML using the get_text() method of + BeautifulSoup. 5. Split the text into lines and then into chunks, removing any extra whitespace. 6. Join the chunks into a single string with newline characters between them. 7. Return the cleaned text. @@ -27,9 +31,12 @@ Outputs: - A string representing the cleaned text content of the webpage. Additional aspects: -- The function uses the requests library and BeautifulSoup to handle the HTTP request and HTML parsing, respectively. -- The function removes script and style tags from the HTML to avoid including unwanted content in the text output. -- The function uses a generator expression to split the text into lines and chunks, which can improve performance for large amounts of text. +- The function uses the requests library and BeautifulSoup to handle the HTTP request + and HTML parsing, respectively. +- The function removes script and style tags from the HTML to avoid including unwanted + content in the text output. +- The function uses a generator expression to split the text into lines and chunks, + which can improve performance for large amounts of text. """ @@ -40,26 +47,33 @@ class TestScrapeText: expected_text = "This is some sample text" mock_response = mocker.Mock() mock_response.status_code = 200 - mock_response.text = f"
<html><body><div><p>{expected_text}</p></div></body></html>
" + mock_response.text = ( + "<html><body><div><p>
" + f"{expected_text}</p></div></body></html>
" + ) mocker.patch("requests.Session.get", return_value=mock_response) - # Call the function with a valid URL and assert that it returns the expected text + # Call the function with a valid URL and assert that it returns the + # expected text url = "http://www.example.com" assert scrape_text(url) == expected_text - # Tests that the function returns an error message when an invalid or unreachable url is provided. + # Tests that the function returns an error message when an invalid or unreachable + # url is provided. def test_invalid_url(self, mocker): # Mock the requests.get() method to raise an exception mocker.patch( "requests.Session.get", side_effect=requests.exceptions.RequestException ) - # Call the function with an invalid URL and assert that it returns an error message + # Call the function with an invalid URL and assert that it returns an error + # message url = "http://www.invalidurl.com" error_message = scrape_text(url) assert "Error:" in error_message - # Tests that the function returns an empty string when the html page contains no text to be scraped. + # Tests that the function returns an empty string when the html page contains no + # text to be scraped. def test_no_text(self, mocker): # Mock the requests.get() method to return a response with no text mock_response = mocker.Mock() @@ -71,7 +85,8 @@ class TestScrapeText: url = "http://www.example.com" assert scrape_text(url) == "" - # Tests that the function returns an error message when the response status code is an http error (>=400). + # Tests that the function returns an error message when the response status code is + # an http error (>=400). def test_http_error(self, mocker): # Mock the requests.get() method to return a response with a 404 status code mocker.patch("requests.Session.get", return_value=mocker.Mock(status_code=404)) diff --git a/tests/unit/test_plugins.py b/tests/unit/test_plugins.py new file mode 100644 index 00000000..739e69bb --- /dev/null +++ b/tests/unit/test_plugins.py @@ -0,0 +1,112 @@ +import pytest + +from autogpt.config import Config +from autogpt.plugins import ( + blacklist_whitelist_check, + inspect_zip_for_module, + scan_plugins, +) + +PLUGINS_TEST_DIR = "tests/unit/data/test_plugins" +PLUGIN_TEST_ZIP_FILE = "Auto-GPT-Plugin-Test-master.zip" +PLUGIN_TEST_INIT_PY = "Auto-GPT-Plugin-Test-master/src/auto_gpt_vicuna/__init__.py" +PLUGIN_TEST_OPENAI = "https://weathergpt.vercel.app/" + + +def test_inspect_zip_for_module(): + result = inspect_zip_for_module(str(f"{PLUGINS_TEST_DIR}/{PLUGIN_TEST_ZIP_FILE}")) + assert result == PLUGIN_TEST_INIT_PY + + +@pytest.fixture +def mock_config_blacklist_whitelist_check(): + class MockConfig: + plugins_blacklist = ["BadPlugin"] + plugins_whitelist = ["GoodPlugin"] + + return MockConfig() + + +def test_blacklist_whitelist_check_blacklist( + mock_config_blacklist_whitelist_check, monkeypatch +): + monkeypatch.setattr("builtins.input", lambda _: "y") + assert not blacklist_whitelist_check( + "BadPlugin", mock_config_blacklist_whitelist_check + ) + + +def test_blacklist_whitelist_check_whitelist( + mock_config_blacklist_whitelist_check, monkeypatch +): + monkeypatch.setattr("builtins.input", lambda _: "y") + assert blacklist_whitelist_check( + "GoodPlugin", mock_config_blacklist_whitelist_check + ) + + +def test_blacklist_whitelist_check_user_input_yes( + mock_config_blacklist_whitelist_check, monkeypatch +): + monkeypatch.setattr("builtins.input", lambda _: "y") + assert blacklist_whitelist_check( + "UnknownPlugin", mock_config_blacklist_whitelist_check + ) + + 
+def test_blacklist_whitelist_check_user_input_no( + mock_config_blacklist_whitelist_check, monkeypatch +): + monkeypatch.setattr("builtins.input", lambda _: "n") + assert not blacklist_whitelist_check( + "UnknownPlugin", mock_config_blacklist_whitelist_check + ) + + +def test_blacklist_whitelist_check_user_input_invalid( + mock_config_blacklist_whitelist_check, monkeypatch +): + monkeypatch.setattr("builtins.input", lambda _: "invalid") + assert not blacklist_whitelist_check( + "UnknownPlugin", mock_config_blacklist_whitelist_check + ) + + +@pytest.fixture +def config_with_plugins(): + cfg = Config() + cfg.plugins_dir = PLUGINS_TEST_DIR + cfg.plugins_openai = ["https://weathergpt.vercel.app/"] + return cfg + + +@pytest.fixture +def mock_config_openai_plugin(): + class MockConfig: + plugins_dir = PLUGINS_TEST_DIR + plugins_openai = [PLUGIN_TEST_OPENAI] + plugins_blacklist = ["AutoGPTPVicuna"] + plugins_whitelist = [PLUGIN_TEST_OPENAI] + + return MockConfig() + + +def test_scan_plugins_openai(mock_config_openai_plugin): + result = scan_plugins(mock_config_openai_plugin, debug=True) + assert len(result) == 1 + + +@pytest.fixture +def mock_config_generic_plugin(): + class MockConfig: + plugins_dir = PLUGINS_TEST_DIR + plugins_openai = [] + plugins_blacklist = [] + plugins_whitelist = ["AutoGPTPVicuna"] + + return MockConfig() + + +def test_scan_plugins_generic(mock_config_generic_plugin): + result = scan_plugins(mock_config_generic_plugin, debug=True) + assert len(result) == 1
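
The command registry tests earlier in this patch pin down the surface of the new Command/CommandRegistry API: registration and unregistration by name, keyword-argument dispatch through call(), prompt rendering via command_prompt(), and plugin-style module imports via import_commands(). A minimal usage sketch inferred only from those tests follows; it assumes Command and CommandRegistry live in autogpt.commands.command, and the greet command is a made-up example, not part of the patch.

# Usage sketch based on the behavior exercised in tests/test_commands.py.
from autogpt.commands.command import Command, CommandRegistry


def greet(name: str) -> str:
    return f"Hello, {name}!"


registry = CommandRegistry()
registry.register(Command(name="greet", description="Greet someone", method=greet))

# Dispatch by name with keyword arguments, as test_call_command does.
assert registry.call("greet", name="world") == "Hello, world!"

# command_prompt() renders each command with its signature, e.g. "(name: str)".
print(registry.command_prompt())

# Whole command modules can be pulled in, as test_import_mock_commands_module does.
registry.import_commands("tests.mocks.mock_commands")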
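The DummyPlugin tests show that BaseOpenAIPlugin reads its identity from the manifest and leaves every hook disabled by default: each can_handle_* method returns False and each hook is a pass-through. A hypothetical subclass that opts in to a single hook might look like the sketch below; the *args/**kwargs signature of on_response is an assumption, since the tests only exercise the one-argument call.

# Hypothetical plugin built on the defaults verified by the DummyPlugin tests;
# every can_handle_* hook it does not override keeps returning False.
from autogpt.models.base_open_ai_plugin import BaseOpenAIPlugin


class ShoutingPlugin(BaseOpenAIPlugin):
    def can_handle_on_response(self) -> bool:
        return True

    def on_response(self, response: str, *args, **kwargs) -> str:
        return response.upper()


plugin = ShoutingPlugin(
    {
        "manifest": {
            "name_for_model": "Shouting",
            "schema_version": "1.0",
            "description_for_model": "Uppercases every model response",
        },
        "client": None,
        "openapi_spec": None,
    }
)
assert plugin._name == "Shouting"
assert plugin.on_response("hello") == "HELLO"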
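The docstring added to tests/unit/test_browse_scrape_text.py spells out scrape_text's flow step by step: fetch the page, bail out on HTTP errors, strip script and style tags, then normalize the remaining text line by line. A condensed sketch of that flow, written from the docstring and the assertions in the tests rather than from the actual autogpt implementation:

# Sketch of the scrape_text flow described in the test module's docstring.
import requests
from bs4 import BeautifulSoup


def scrape_text_sketch(url: str) -> str:
    session = requests.Session()
    try:
        response = session.get(url)
    except requests.exceptions.RequestException as e:
        return f"Error: {e}"
    if response.status_code >= 400:
        return f"Error: HTTP {response.status_code} error"

    # Remove script and style tags so their contents don't leak into the text.
    soup = BeautifulSoup(response.text, "html.parser")
    for element in soup(["script", "style"]):
        element.extract()

    # Split into lines, then chunks, dropping extra whitespace (steps 5-7).
    text = soup.get_text()
    lines = (line.strip() for line in text.splitlines())
    chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
    return "\n".join(chunk for chunk in chunks if chunk)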
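test_inspect_zip_for_module asserts that scanning the bundled test zip yields the path of the plugin package's __init__.py inside the archive. A sketch of behavior consistent with that assertion, not the patch's actual implementation:

# Behavioral sketch matching test_inspect_zip_for_module: return the path of
# the first module __init__.py found in a plugin zip, or None if there is none.
import zipfile
from typing import Optional


def inspect_zip_for_module_sketch(zip_path: str) -> Optional[str]:
    with zipfile.ZipFile(zip_path, "r") as zfile:
        for name in zfile.namelist():
            if name.endswith("__init__.py"):
                return name
    return None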
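Taken together, the blacklist_whitelist_check tests fix its contract: a blacklisted plugin is rejected even if the user would approve it, a whitelisted plugin is accepted without prompting, and any other plugin is loaded only on an explicit "y" from the user, with every other answer treated as a refusal. A behavioral sketch of that contract (the function name is suffixed _sketch because this is an illustration, not the function shipped in autogpt.plugins):

# Contract pinned down by the blacklist_whitelist_check tests above.
def blacklist_whitelist_check_sketch(plugin_name: str, cfg) -> bool:
    if plugin_name in cfg.plugins_blacklist:
        return False
    if plugin_name in cfg.plugins_whitelist:
        return True
    answer = input(f"Load unknown plugin {plugin_name}? (y/n) ")
    return answer.lower() == "y"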