From 3a80e2f399b3d802f0c962487f8071d97ee72bb1 Mon Sep 17 00:00:00 2001
From: Reinier van der Leer
Date: Tue, 2 May 2023 13:26:30 +0200
Subject: [PATCH] Revert "Revert "Merge branch 'master' into stable""
This reverts commit 999990b614f9c5e32dbfb560ab9516755d212884.
---
.env.template | 7 +
.github/workflows/pr-label.yml | 20 ++
.gitignore | 2 +
.isort.cfg | 10 +
README.md | 31 +-
autogpt/agent/agent.py | 95 ++++---
autogpt/agent/agent_manager.py | 56 +++-
autogpt/app.py | 148 +++-------
autogpt/chat.py | 24 +-
autogpt/cli.py | 30 +-
autogpt/commands/analyze_code.py | 6 +
autogpt/commands/audio_text.py | 41 ++-
autogpt/commands/command.py | 153 ++++++++++
autogpt/commands/execute_code.py | 38 ++-
autogpt/commands/file_operations.py | 17 ++
autogpt/commands/git_operations.py | 20 +-
autogpt/commands/google_search.py | 34 ++-
autogpt/commands/image_gen.py | 3 +-
autogpt/commands/improve_code.py | 6 +
autogpt/commands/twitter.py | 24 +-
autogpt/commands/web_selenium.py | 6 +
autogpt/commands/write_tests.py | 6 +
autogpt/config/ai_config.py | 40 ++-
autogpt/config/config.py | 18 ++
autogpt/llm_utils.py | 63 +++--
autogpt/models/base_open_ai_plugin.py | 199 +++++++++++++
autogpt/plugins.py | 265 ++++++++++++++++++
autogpt/prompts/__init__.py | 0
.../generator.py} | 39 ++-
autogpt/{ => prompts}/prompt.py | 107 +------
autogpt/token_counter.py | 5 +-
autogpt/types/openai.py | 9 +
autogpt/utils.py | 2 +-
plugin.png | Bin 0 -> 33356 bytes
plugins/__PUT_PLUGIN_ZIPS_HERE__ | 0
requirements.txt | 5 +
scripts/__init__.py | 0
tests/mocks/__init__.py | 0
tests/mocks/mock_commands.py | 6 +
tests/test_commands.py | 177 ++++++++++++
tests/test_prompt_generator.py | 3 +-
.../Auto-GPT-Plugin-Test-master.zip | Bin 0 -> 14927 bytes
.../unit/models/test_base_open_api_plugin.py | 79 ++++++
tests/unit/test_browse_scrape_text.py | 41 ++-
tests/unit/test_plugins.py | 112 ++++++++
45 files changed, 1601 insertions(+), 346 deletions(-)
create mode 100644 .isort.cfg
create mode 100644 autogpt/commands/command.py
create mode 100644 autogpt/models/base_open_ai_plugin.py
create mode 100644 autogpt/plugins.py
create mode 100644 autogpt/prompts/__init__.py
rename autogpt/{promptgenerator.py => prompts/generator.py} (78%)
rename autogpt/{ => prompts}/prompt.py (50%)
create mode 100644 autogpt/types/openai.py
create mode 100644 plugin.png
create mode 100644 plugins/__PUT_PLUGIN_ZIPS_HERE__
create mode 100644 scripts/__init__.py
create mode 100644 tests/mocks/__init__.py
create mode 100644 tests/mocks/mock_commands.py
create mode 100644 tests/test_commands.py
create mode 100644 tests/unit/data/test_plugins/Auto-GPT-Plugin-Test-master.zip
create mode 100644 tests/unit/models/test_base_open_api_plugin.py
create mode 100644 tests/unit/test_plugins.py
diff --git a/.env.template b/.env.template
index f1b511c2..60edecd6 100644
--- a/.env.template
+++ b/.env.template
@@ -188,3 +188,10 @@ OPENAI_API_KEY=your-openai-api-key
# TW_CONSUMER_SECRET=
# TW_ACCESS_TOKEN=
# TW_ACCESS_TOKEN_SECRET=
+
+################################################################################
+### ALLOWLISTED PLUGINS
+################################################################################
+
+#ALLOWLISTED_PLUGINS - Comma-separated list of plugins that are allowed to load (Example: plugin1,plugin2,plugin3)
+ALLOWLISTED_PLUGINS=
diff --git a/.github/workflows/pr-label.yml b/.github/workflows/pr-label.yml
index 92c5a66b..f1b200b0 100644
--- a/.github/workflows/pr-label.yml
+++ b/.github/workflows/pr-label.yml
@@ -26,3 +26,23 @@ jobs:
repoToken: "${{ secrets.GITHUB_TOKEN }}"
commentOnDirty: "This pull request has conflicts with the base branch, please resolve those so we can evaluate the pull request."
commentOnClean: "Conflicts have been resolved! 🎉 A maintainer will review the pull request shortly."
+
+ size:
+ if: ${{ github.event_name == 'pull_request_target' }}
+ permissions:
+ issues: write
+ pull-requests: write
+ runs-on: ubuntu-latest
+ steps:
+ - uses: codelytv/pr-size-labeler@v1.7.0
+ with:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ s_label: "size/s"
+ s_max_size: "10"
+ m_label: "size/m"
+ m_max_size: "50"
+ l_label: "size/l"
+ l_max_size: "200"
+ xl_label: "size/xl"
+ fail_if_xl: "false"
+ github_api_url: "api.github.com"
diff --git a/.gitignore b/.gitignore
index a6b3b80f..0c277d89 100644
--- a/.gitignore
+++ b/.gitignore
@@ -157,5 +157,7 @@ vicuna-*
# mac
.DS_Store
+openai/
+
# news
CURRENT_BULLETIN.md
\ No newline at end of file
diff --git a/.isort.cfg b/.isort.cfg
new file mode 100644
index 00000000..17eab482
--- /dev/null
+++ b/.isort.cfg
@@ -0,0 +1,10 @@
+[settings]
+profile = black
+multi_line_output = 3
+include_trailing_comma = true
+force_grid_wrap = 0
+use_parentheses = true
+ensure_newline_before_comments = true
+line_length = 88
+sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER
+skip = .tox,__pycache__,*.pyc,venv*/*,reports,venv,env,node_modules,.env,.venv,dist
diff --git a/README.md b/README.md
index 6d636c56..3e811a1c 100644
--- a/README.md
+++ b/README.md
@@ -31,14 +31,25 @@ Your support is greatly appreciated
Development of this free, open-source project is made possible by all the contributors and sponsors. If you'd like to sponsor this project and have your avatar or company logo appear below, click here.
+
+
+
+

+

+

+
+
+
+
-
+

+
## 🚀 Features
@@ -254,6 +265,22 @@ export GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY"
export CUSTOM_SEARCH_ENGINE_ID="YOUR_CUSTOM_SEARCH_ENGINE_ID"
```
+## Plugins
+
+See https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template for the plugin template.
+
+⚠️💀 WARNING 💀⚠️: Review the code of any plugin you use; plugins can execute arbitrary Python code and do malicious things, like stealing your API keys.
+
+Drop the plugin repo's zipfile into the `plugins` folder.
+
+
+
+If you add a plugin's class name to `ALLOWLISTED_PLUGINS` in your `.env`, it will be loaded without prompting; otherwise you'll be warned before the plugin is loaded:
+
+```
+ALLOWLISTED_PLUGINS=example-plugin1,example-plugin2,example-plugin3
+```
+
## Setting Your Cache Type
By default, Auto-GPT uses LocalCache instead of Redis or Pinecone.
diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py
index ee7885f8..189338f5 100644
--- a/autogpt/agent/agent.py
+++ b/autogpt/agent/agent.py
@@ -19,18 +19,25 @@ class Agent:
memory: The memory object to use.
full_message_history: The full message history.
next_action_count: The number of actions to execute.
- system_prompt: The system prompt is the initial prompt that defines everything the AI needs to know to achieve its task successfully.
- Currently, the dynamic and customizable information in the system prompt are ai_name, description and goals.
+ system_prompt: The system prompt is the initial prompt that defines everything
+ the AI needs to know to achieve its task successfully.
+ Currently, the dynamic and customizable information in the system prompt are
+ ai_name, description and goals.
- triggering_prompt: The last sentence the AI will see before answering. For Auto-GPT, this prompt is:
- Determine which next command to use, and respond using the format specified above:
- The triggering prompt is not part of the system prompt because between the system prompt and the triggering
- prompt we have contextual information that can distract the AI and make it forget that its goal is to find the next task to achieve.
+ triggering_prompt: The last sentence the AI will see before answering.
+ For Auto-GPT, this prompt is:
+ Determine which next command to use, and respond using the format specified
+ above:
+ The triggering prompt is not part of the system prompt because between the
+ system prompt and the triggering
+ prompt we have contextual information that can distract the AI and make it
+ forget that its goal is to find the next task to achieve.
SYSTEM PROMPT
CONTEXTUAL INFORMATION (memory, previous conversations, anything relevant)
TRIGGERING PROMPT
- The triggering prompt reminds the AI about its short term meta task (defining the next task)
+ The triggering prompt reminds the AI about its short term meta task
+ (defining the next task)
"""
def __init__(
@@ -39,6 +46,8 @@ class Agent:
memory,
full_message_history,
next_action_count,
+ command_registry,
+ config,
system_prompt,
triggering_prompt,
):
@@ -46,6 +55,8 @@ class Agent:
self.memory = memory
self.full_message_history = full_message_history
self.next_action_count = next_action_count
+ self.command_registry = command_registry
+ self.config = config
self.system_prompt = system_prompt
self.triggering_prompt = triggering_prompt
@@ -73,6 +84,7 @@ class Agent:
# Send message to AI, get response
with Spinner("Thinking... "):
assistant_reply = chat_with_ai(
+ self,
self.system_prompt,
self.triggering_prompt,
self.full_message_history,
@@ -81,6 +93,10 @@ class Agent:
) # TODO: This hardcodes the model to use GPT3.5. Make this an argument
assistant_reply_json = fix_json_using_multiple_techniques(assistant_reply)
+ for plugin in cfg.plugins:
+ if not plugin.can_handle_post_planning():
+ continue
+ assistant_reply_json = plugin.post_planning(self, assistant_reply_json)
# Print Assistant thoughts
if assistant_reply_json != {}:
@@ -89,14 +105,13 @@ class Agent:
try:
print_assistant_thoughts(self.ai_name, assistant_reply_json)
command_name, arguments = get_command(assistant_reply_json)
- # command_name, arguments = assistant_reply_json_valid["command"]["name"], assistant_reply_json_valid["command"]["args"]
if cfg.speak_mode:
say_text(f"I want to execute {command_name}")
except Exception as e:
logger.error("Error: \n", str(e))
if not cfg.continuous_mode and self.next_action_count == 0:
- ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
+ # ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
# Get key press: Prompt the user to press enter to continue or escape
# to exit
logger.typewriter_log(
@@ -168,30 +183,46 @@ class Agent:
elif command_name == "human_feedback":
result = f"Human feedback: {user_input}"
else:
- result = (
- f"Command {command_name} returned: "
- f"{execute_command(command_name, arguments)}"
+ for plugin in cfg.plugins:
+ if not plugin.can_handle_pre_command():
+ continue
+ command_name, arguments = plugin.pre_command(
+ command_name, arguments
+ )
+ command_result = execute_command(
+ self.command_registry,
+ command_name,
+ arguments,
+ self.config.prompt_generator,
)
+ result = f"Command {command_name} returned: " f"{command_result}"
+
+ for plugin in cfg.plugins:
+ if not plugin.can_handle_post_command():
+ continue
+ result = plugin.post_command(command_name, result)
if self.next_action_count > 0:
self.next_action_count -= 1
-
- memory_to_add = (
- f"Assistant Reply: {assistant_reply} "
- f"\nResult: {result} "
- f"\nHuman Feedback: {user_input} "
- )
-
- self.memory.add(memory_to_add)
-
- # Check if there's a result from the command append it to the message
- # history
- if result is not None:
- self.full_message_history.append(create_chat_message("system", result))
- logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
- else:
- self.full_message_history.append(
- create_chat_message("system", "Unable to execute command")
- )
- logger.typewriter_log(
- "SYSTEM: ", Fore.YELLOW, "Unable to execute command"
+ if command_name != "do_nothing":
+ memory_to_add = (
+ f"Assistant Reply: {assistant_reply} "
+ f"\nResult: {result} "
+ f"\nHuman Feedback: {user_input} "
)
+
+ self.memory.add(memory_to_add)
+
+            # If the command returned a result, append it to the message
+            # history
+ if result is not None:
+ self.full_message_history.append(
+ create_chat_message("system", result)
+ )
+ logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
+ else:
+ self.full_message_history.append(
+ create_chat_message("system", "Unable to execute command")
+ )
+ logger.typewriter_log(
+ "SYSTEM: ", Fore.YELLOW, "Unable to execute command"
+ )
diff --git a/autogpt/agent/agent_manager.py b/autogpt/agent/agent_manager.py
index 898767a4..9a62ef61 100644
--- a/autogpt/agent/agent_manager.py
+++ b/autogpt/agent/agent_manager.py
@@ -1,10 +1,11 @@
"""Agent manager for managing GPT agents"""
from __future__ import annotations
-from typing import Union
+from typing import List, Union
-from autogpt.config.config import Singleton
+from autogpt.config.config import Config, Singleton
from autogpt.llm_utils import create_chat_completion
+from autogpt.types.openai import Message
class AgentManager(metaclass=Singleton):
@@ -13,6 +14,7 @@ class AgentManager(metaclass=Singleton):
def __init__(self):
self.next_key = 0
self.agents = {} # key, (task, full_message_history, model)
+ self.cfg = Config()
# Create new GPT agent
# TODO: Centralise use of create_chat_completion() to globally enforce token limit
@@ -28,19 +30,32 @@ class AgentManager(metaclass=Singleton):
Returns:
The key of the new agent
"""
- messages = [
+ messages: List[Message] = [
{"role": "user", "content": prompt},
]
-
+ for plugin in self.cfg.plugins:
+ if not plugin.can_handle_pre_instruction():
+ continue
+ if plugin_messages := plugin.pre_instruction(messages):
+ messages.extend(iter(plugin_messages))
# Start GPT instance
agent_reply = create_chat_completion(
model=model,
messages=messages,
)
- # Update full message history
messages.append({"role": "assistant", "content": agent_reply})
+ plugins_reply = ""
+ for i, plugin in enumerate(self.cfg.plugins):
+ if not plugin.can_handle_on_instruction():
+ continue
+ if plugin_result := plugin.on_instruction(messages):
+ sep = "\n" if i else ""
+ plugins_reply = f"{plugins_reply}{sep}{plugin_result}"
+
+ if plugins_reply and plugins_reply != "":
+ messages.append({"role": "assistant", "content": plugins_reply})
key = self.next_key
# This is done instead of len(agents) to make keys unique even if agents
# are deleted
@@ -48,6 +63,11 @@ class AgentManager(metaclass=Singleton):
self.agents[key] = (task, messages, model)
+ for plugin in self.cfg.plugins:
+ if not plugin.can_handle_post_instruction():
+ continue
+ agent_reply = plugin.post_instruction(agent_reply)
+
return key, agent_reply
def message_agent(self, key: str | int, message: str) -> str:
@@ -65,15 +85,37 @@ class AgentManager(metaclass=Singleton):
# Add user message to message history before sending to agent
messages.append({"role": "user", "content": message})
+ for plugin in self.cfg.plugins:
+ if not plugin.can_handle_pre_instruction():
+ continue
+ if plugin_messages := plugin.pre_instruction(messages):
+ for plugin_message in plugin_messages:
+ messages.append(plugin_message)
+
# Start GPT instance
agent_reply = create_chat_completion(
model=model,
messages=messages,
)
- # Update full message history
messages.append({"role": "assistant", "content": agent_reply})
+ plugins_reply = agent_reply
+ for i, plugin in enumerate(self.cfg.plugins):
+ if not plugin.can_handle_on_instruction():
+ continue
+ if plugin_result := plugin.on_instruction(messages):
+ sep = "\n" if i else ""
+ plugins_reply = f"{plugins_reply}{sep}{plugin_result}"
+ # Update full message history
+ if plugins_reply and plugins_reply != "":
+ messages.append({"role": "assistant", "content": plugins_reply})
+
+ for plugin in self.cfg.plugins:
+ if not plugin.can_handle_post_instruction():
+ continue
+ agent_reply = plugin.post_instruction(agent_reply)
+
return agent_reply
def list_agents(self) -> list[tuple[str | int, str]]:
@@ -86,7 +128,7 @@ class AgentManager(metaclass=Singleton):
# Return a list of agent keys and their tasks
return [(key, task) for key, (task, _, _) in self.agents.items()]
- def delete_agent(self, key: Union[str, int]) -> bool:
+ def delete_agent(self, key: str | int) -> bool:
"""Delete an agent from the agent manager
Args:
diff --git a/autogpt/app.py b/autogpt/app.py
index 58d9f716..cf8e29a3 100644
--- a/autogpt/app.py
+++ b/autogpt/app.py
@@ -3,33 +3,12 @@ import json
from typing import Dict, List, NoReturn, Union
from autogpt.agent.agent_manager import AgentManager
-from autogpt.commands.analyze_code import analyze_code
-from autogpt.commands.audio_text import read_audio_from_file
-from autogpt.commands.execute_code import (
- execute_python_file,
- execute_shell,
- execute_shell_popen,
-)
-from autogpt.commands.file_operations import (
- append_to_file,
- delete_file,
- download_file,
- read_file,
- search_files,
- write_to_file,
-)
-from autogpt.commands.git_operations import clone_repository
-from autogpt.commands.google_search import google_official_search, google_search
-from autogpt.commands.image_gen import generate_image
-from autogpt.commands.improve_code import improve_code
-from autogpt.commands.twitter import send_tweet
+from autogpt.commands.command import CommandRegistry, command
from autogpt.commands.web_requests import scrape_links, scrape_text
-from autogpt.commands.web_selenium import browse_website
-from autogpt.commands.write_tests import write_tests
from autogpt.config import Config
-from autogpt.json_utils.json_fix_llm import fix_and_parse_json
from autogpt.memory import get_memory
from autogpt.processing.text import summarize_text
+from autogpt.prompts.generator import PromptGenerator
from autogpt.speech import say_text
CFG = Config()
@@ -108,7 +87,12 @@ def map_command_synonyms(command_name: str):
return command_name
-def execute_command(command_name: str, arguments):
+def execute_command(
+ command_registry: CommandRegistry,
+ command_name: str,
+ arguments,
+ prompt: PromptGenerator,
+):
"""Execute the command and return the result
Args:
@@ -119,105 +103,29 @@ def execute_command(command_name: str, arguments):
str: The result of the command
"""
try:
+ cmd = command_registry.commands.get(command_name)
+
+ # If the command is found, call it with the provided arguments
+ if cmd:
+ return cmd(**arguments)
+
+ # TODO: Remove commands below after they are moved to the command registry.
command_name = map_command_synonyms(command_name.lower())
- if command_name == "google":
- # Check if the Google API key is set and use the official search method
- # If the API key is not set or has only whitespaces, use the unofficial
- # search method
- key = CFG.google_api_key
- if key and key.strip() and key != "your-google-api-key":
- google_result = google_official_search(arguments["input"])
- return google_result
- else:
- google_result = google_search(arguments["input"])
- # google_result can be a list or a string depending on the search results
- if isinstance(google_result, list):
- safe_message = [
- google_result_single.encode("utf-8", "ignore")
- for google_result_single in google_result
- ]
- else:
- safe_message = google_result.encode("utf-8", "ignore")
+ if command_name == "memory_add":
+ return get_memory(CFG).add(arguments["string"])
- return safe_message.decode("utf-8")
- elif command_name == "memory_add":
- memory = get_memory(CFG)
- return memory.add(arguments["string"])
- elif command_name == "start_agent":
- return start_agent(
- arguments["name"], arguments["task"], arguments["prompt"]
- )
- elif command_name == "message_agent":
- return message_agent(arguments["key"], arguments["message"])
- elif command_name == "list_agents":
- return list_agents()
- elif command_name == "delete_agent":
- return delete_agent(arguments["key"])
- elif command_name == "get_text_summary":
- return get_text_summary(arguments["url"], arguments["question"])
- elif command_name == "get_hyperlinks":
- return get_hyperlinks(arguments["url"])
- elif command_name == "clone_repository":
- return clone_repository(
- arguments["repository_url"], arguments["clone_path"]
- )
- elif command_name == "read_file":
- return read_file(arguments["file"])
- elif command_name == "write_to_file":
- return write_to_file(arguments["file"], arguments["text"])
- elif command_name == "append_to_file":
- return append_to_file(arguments["file"], arguments["text"])
- elif command_name == "delete_file":
- return delete_file(arguments["file"])
- elif command_name == "search_files":
- return search_files(arguments["directory"])
- elif command_name == "download_file":
- if not CFG.allow_downloads:
- return "Error: You do not have user authorization to download files locally."
- return download_file(arguments["url"], arguments["file"])
- elif command_name == "browse_website":
- return browse_website(arguments["url"], arguments["question"])
# TODO: Change these to take in a file rather than pasted code, if
# non-file is given, return instructions "Input should be a python
- # filepath, write your code to file and try again"
- elif command_name == "analyze_code":
- return analyze_code(arguments["code"])
- elif command_name == "improve_code":
- return improve_code(arguments["suggestions"], arguments["code"])
- elif command_name == "write_tests":
- return write_tests(arguments["code"], arguments.get("focus"))
- elif command_name == "execute_python_file": # Add this command
- return execute_python_file(arguments["file"])
- elif command_name == "execute_shell":
- if CFG.execute_local_commands:
- return execute_shell(arguments["command_line"])
- else:
- return (
- "You are not allowed to run local shell commands. To execute"
- " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
- "in your config. Do not attempt to bypass the restriction."
- )
- elif command_name == "execute_shell_popen":
- if CFG.execute_local_commands:
- return execute_shell_popen(arguments["command_line"])
- else:
- return (
- "You are not allowed to run local shell commands. To execute"
- " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
- "in your config. Do not attempt to bypass the restriction."
- )
- elif command_name == "read_audio_from_file":
- return read_audio_from_file(arguments["file"])
- elif command_name == "generate_image":
- return generate_image(arguments["prompt"])
- elif command_name == "send_tweet":
- return send_tweet(arguments["text"])
+ # filepath, write your code to file and try again
elif command_name == "do_nothing":
return "No action performed."
elif command_name == "task_complete":
shutdown()
else:
+ for command in prompt.commands:
+ if command_name == command["label"] or command_name == command["name"]:
+ return command["function"](*arguments.values())
return (
f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'"
" list for available commands and only respond in the specified JSON"
@@ -227,6 +135,9 @@ def execute_command(command_name: str, arguments):
return f"Error: {str(e)}"
+@command(
+    "get_text_summary", "Get text summary", '"url": "<url>", "question": "<question>"'
+)
def get_text_summary(url: str, question: str) -> str:
"""Return the results of a Google search
@@ -242,6 +153,7 @@ def get_text_summary(url: str, question: str) -> str:
return f""" "Result" : {summary}"""
+@command("get_hyperlinks", "Get text summary", '"url": ""')
def get_hyperlinks(url: str) -> Union[str, List[str]]:
"""Return the results of a Google search
@@ -260,6 +172,11 @@ def shutdown() -> NoReturn:
quit()
+@command(
+ "start_agent",
+ "Start GPT Agent",
+ '"name": "", "task": "", "prompt": ""',
+)
def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) -> str:
"""Start an agent with a given name, task, and prompt
@@ -292,6 +209,7 @@ def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) ->
return f"Agent {name} created with key {key}. First response: {agent_response}"
+@command("message_agent", "Message GPT Agent", '"key": "", "message": ""')
def message_agent(key: str, message: str) -> str:
"""Message an agent with a given key and message"""
# Check if the key is a valid integer
@@ -306,7 +224,8 @@ def message_agent(key: str, message: str) -> str:
return agent_response
-def list_agents():
+@command("list_agents", "List GPT Agents", "")
+def list_agents() -> str:
"""List all agents
Returns:
@@ -317,6 +236,7 @@ def list_agents():
)
+@command("delete_agent", "Delete GPT Agent", '"key": ""')
def delete_agent(key: str) -> str:
"""Delete an agent with a given key
diff --git a/autogpt/chat.py b/autogpt/chat.py
index 1f6bca96..f9fc9471 100644
--- a/autogpt/chat.py
+++ b/autogpt/chat.py
@@ -6,11 +6,12 @@ from autogpt import token_counter
from autogpt.config import Config
from autogpt.llm_utils import create_chat_completion
from autogpt.logs import logger
+from autogpt.types.openai import Message
cfg = Config()
-def create_chat_message(role, content):
+def create_chat_message(role, content) -> Message:
"""
Create a chat message with the given role and content.
@@ -51,7 +52,7 @@ def generate_context(prompt, relevant_memory, full_message_history, model):
# TODO: Change debug from hardcode to argument
def chat_with_ai(
- prompt, user_input, full_message_history, permanent_memory, token_limit
+ agent, prompt, user_input, full_message_history, permanent_memory, token_limit
):
"""Interact with the OpenAI API, sending the prompt, user input, message history,
and permanent memory."""
@@ -135,6 +136,25 @@ def chat_with_ai(
# Append user input, the length of this is accounted for above
current_context.extend([create_chat_message("user", user_input)])
+ plugin_count = len(cfg.plugins)
+ for i, plugin in enumerate(cfg.plugins):
+ if not plugin.can_handle_on_planning():
+ continue
+ plugin_response = plugin.on_planning(
+ agent.prompt_generator, current_context
+ )
+ if not plugin_response or plugin_response == "":
+ continue
+ tokens_to_add = token_counter.count_message_tokens(
+ [create_chat_message("system", plugin_response)], model
+ )
+ if current_tokens_used + tokens_to_add > send_token_limit:
+ if cfg.debug_mode:
+ print("Plugin response too long, skipping:", plugin_response)
+ print("Plugins remaining at stop:", plugin_count - i)
+ break
+ current_context.append(create_chat_message("system", plugin_response))
+
# Calculate remaining tokens
tokens_remaining = token_limit - current_tokens_used
# assert tokens_remaining >= 0, "Tokens remaining is negative.
diff --git a/autogpt/cli.py b/autogpt/cli.py
index 6fe9ecbb..51a946a7 100644
--- a/autogpt/cli.py
+++ b/autogpt/cli.py
@@ -75,11 +75,13 @@ def main(
from colorama import Fore
from autogpt.agent.agent import Agent
+ from autogpt.commands.command import CommandRegistry
from autogpt.config import Config, check_openai_api_key
from autogpt.configurator import create_config
from autogpt.logs import logger
from autogpt.memory import get_memory
- from autogpt.prompt import construct_prompt
+ from autogpt.plugins import scan_plugins
+ from autogpt.prompts.prompt import construct_main_ai_config
from autogpt.utils import get_current_git_branch, get_latest_bulletin
if ctx.invoked_subcommand is None:
@@ -123,7 +125,26 @@ def main(
"parts of Auto-GPT with this version. "
"Please consider upgrading to Python 3.10 or higher.",
)
- system_prompt = construct_prompt()
+
+ cfg = Config()
+ cfg.set_plugins(scan_plugins(cfg, cfg.debug_mode))
+ # Create a CommandRegistry instance and scan default folder
+ command_registry = CommandRegistry()
+ command_registry.import_commands("autogpt.commands.analyze_code")
+ command_registry.import_commands("autogpt.commands.audio_text")
+ command_registry.import_commands("autogpt.commands.execute_code")
+ command_registry.import_commands("autogpt.commands.file_operations")
+ command_registry.import_commands("autogpt.commands.git_operations")
+ command_registry.import_commands("autogpt.commands.google_search")
+ command_registry.import_commands("autogpt.commands.image_gen")
+ command_registry.import_commands("autogpt.commands.improve_code")
+ command_registry.import_commands("autogpt.commands.twitter")
+ command_registry.import_commands("autogpt.commands.web_selenium")
+ command_registry.import_commands("autogpt.commands.write_tests")
+ command_registry.import_commands("autogpt.app")
+ ai_name = ""
+ ai_config = construct_main_ai_config()
+ ai_config.command_registry = command_registry
# print(prompt)
# Initialize variables
full_message_history = []
@@ -140,11 +161,16 @@ def main(
"Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
)
logger.typewriter_log("Using Browser:", Fore.GREEN, cfg.selenium_web_browser)
+ system_prompt = ai_config.construct_full_prompt()
+ if cfg.debug_mode:
+ logger.typewriter_log("Prompt:", Fore.GREEN, system_prompt)
agent = Agent(
ai_name=ai_name,
memory=memory,
full_message_history=full_message_history,
next_action_count=next_action_count,
+ command_registry=command_registry,
+ config=ai_config,
system_prompt=system_prompt,
triggering_prompt=triggering_prompt,
)
diff --git a/autogpt/commands/analyze_code.py b/autogpt/commands/analyze_code.py
index e02ea4c5..b87d73e1 100644
--- a/autogpt/commands/analyze_code.py
+++ b/autogpt/commands/analyze_code.py
@@ -1,9 +1,15 @@
"""Code evaluation module."""
from __future__ import annotations
+from autogpt.commands.command import command
from autogpt.llm_utils import call_ai_function
+@command(
+ "analyze_code",
+ "Analyze Code",
+ '"code": ""',
+)
def analyze_code(code: str) -> list[str]:
"""
A function that takes in a string and returns a response from create chat
diff --git a/autogpt/commands/audio_text.py b/autogpt/commands/audio_text.py
index cae32d4e..b409fefd 100644
--- a/autogpt/commands/audio_text.py
+++ b/autogpt/commands/audio_text.py
@@ -1,24 +1,51 @@
+"""Commands for converting audio to text."""
import json
import requests
+from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.workspace import path_in_workspace
-cfg = Config()
+CFG = Config()
-def read_audio_from_file(audio_path):
- audio_path = path_in_workspace(audio_path)
+@command(
+ "read_audio_from_file",
+ "Convert Audio to text",
+ '"filename": ""',
+ CFG.huggingface_audio_to_text_model,
+ "Configure huggingface_audio_to_text_model.",
+)
+def read_audio_from_file(filename: str) -> str:
+ """
+ Convert audio to text.
+
+ Args:
+ audio_path (str): The path to the audio file
+
+ Returns:
+ str: The text from the audio
+ """
+ audio_path = path_in_workspace(filename)
with open(audio_path, "rb") as audio_file:
audio = audio_file.read()
return read_audio(audio)
-def read_audio(audio):
- model = cfg.huggingface_audio_to_text_model
+def read_audio(audio: bytes) -> str:
+ """
+ Convert audio to text.
+
+ Args:
+ audio (bytes): The audio to convert
+
+ Returns:
+ str: The text from the audio
+ """
+ model = CFG.huggingface_audio_to_text_model
api_url = f"https://api-inference.huggingface.co/models/{model}"
- api_token = cfg.huggingface_api_token
+ api_token = CFG.huggingface_api_token
headers = {"Authorization": f"Bearer {api_token}"}
if api_token is None:
@@ -33,4 +60,4 @@ def read_audio(audio):
)
text = json.loads(response.content.decode("utf-8"))["text"]
- return "The audio says: " + text
+ return f"The audio says: {text}"
diff --git a/autogpt/commands/command.py b/autogpt/commands/command.py
new file mode 100644
index 00000000..e97af008
--- /dev/null
+++ b/autogpt/commands/command.py
@@ -0,0 +1,153 @@
+import importlib
+import inspect
+from typing import Any, Callable, Optional
+
+# Unique identifier for auto-gpt commands
+AUTO_GPT_COMMAND_IDENTIFIER = "auto_gpt_command"
+
+
+class Command:
+ """A class representing a command.
+
+ Attributes:
+ name (str): The name of the command.
+ description (str): A brief description of what the command does.
+        signature (str): The signature of the function that the command executes. Defaults to the method's signature if not provided.
+ """
+
+ def __init__(
+ self,
+ name: str,
+ description: str,
+ method: Callable[..., Any],
+ signature: str = "",
+ enabled: bool = True,
+ disabled_reason: Optional[str] = None,
+ ):
+ self.name = name
+ self.description = description
+ self.method = method
+ self.signature = signature if signature else str(inspect.signature(self.method))
+ self.enabled = enabled
+ self.disabled_reason = disabled_reason
+
+ def __call__(self, *args, **kwargs) -> Any:
+ if not self.enabled:
+ return f"Command '{self.name}' is disabled: {self.disabled_reason}"
+ return self.method(*args, **kwargs)
+
+ def __str__(self) -> str:
+ return f"{self.name}: {self.description}, args: {self.signature}"
+
+
+class CommandRegistry:
+ """
+ The CommandRegistry class is a manager for a collection of Command objects.
+ It allows the registration, modification, and retrieval of Command objects,
+ as well as the scanning and loading of command plugins from a specified
+ directory.
+ """
+
+ def __init__(self):
+ self.commands = {}
+
+ def _import_module(self, module_name: str) -> Any:
+ return importlib.import_module(module_name)
+
+ def _reload_module(self, module: Any) -> Any:
+ return importlib.reload(module)
+
+ def register(self, cmd: Command) -> None:
+ self.commands[cmd.name] = cmd
+
+ def unregister(self, command_name: str):
+ if command_name in self.commands:
+ del self.commands[command_name]
+ else:
+ raise KeyError(f"Command '{command_name}' not found in registry.")
+
+ def reload_commands(self) -> None:
+ """Reloads all loaded command plugins."""
+ for cmd_name in self.commands:
+ cmd = self.commands[cmd_name]
+ module = self._import_module(cmd.__module__)
+ reloaded_module = self._reload_module(module)
+ if hasattr(reloaded_module, "register"):
+ reloaded_module.register(self)
+
+ def get_command(self, name: str) -> Callable[..., Any]:
+ return self.commands[name]
+
+ def call(self, command_name: str, **kwargs) -> Any:
+ if command_name not in self.commands:
+ raise KeyError(f"Command '{command_name}' not found in registry.")
+ command = self.commands[command_name]
+ return command(**kwargs)
+
+ def command_prompt(self) -> str:
+ """
+ Returns a string representation of all registered `Command` objects for use in a prompt
+ """
+ commands_list = [
+ f"{idx + 1}. {str(cmd)}" for idx, cmd in enumerate(self.commands.values())
+ ]
+ return "\n".join(commands_list)
+
+ def import_commands(self, module_name: str) -> None:
+ """
+ Imports the specified Python module containing command plugins.
+
+ This method imports the associated module and registers any functions or
+ classes that are decorated with the `AUTO_GPT_COMMAND_IDENTIFIER` attribute
+ as `Command` objects. The registered `Command` objects are then added to the
+ `commands` dictionary of the `CommandRegistry` object.
+
+ Args:
+ module_name (str): The name of the module to import for command plugins.
+ """
+
+ module = importlib.import_module(module_name)
+
+ for attr_name in dir(module):
+ attr = getattr(module, attr_name)
+ # Register decorated functions
+ if hasattr(attr, AUTO_GPT_COMMAND_IDENTIFIER) and getattr(
+ attr, AUTO_GPT_COMMAND_IDENTIFIER
+ ):
+ self.register(attr.command)
+ # Register command classes
+ elif (
+ inspect.isclass(attr) and issubclass(attr, Command) and attr != Command
+ ):
+ cmd_instance = attr()
+ self.register(cmd_instance)
+
+
+def command(
+ name: str,
+ description: str,
+ signature: str = "",
+ enabled: bool = True,
+ disabled_reason: Optional[str] = None,
+) -> Callable[..., Any]:
+ """The command decorator is used to create Command objects from ordinary functions."""
+
+ def decorator(func: Callable[..., Any]) -> Command:
+ cmd = Command(
+ name=name,
+ description=description,
+ method=func,
+ signature=signature,
+ enabled=enabled,
+ disabled_reason=disabled_reason,
+ )
+
+ def wrapper(*args, **kwargs) -> Any:
+ return func(*args, **kwargs)
+
+ wrapper.command = cmd
+
+ setattr(wrapper, AUTO_GPT_COMMAND_IDENTIFIER, True)
+ return wrapper
+
+ return decorator
diff --git a/autogpt/commands/execute_code.py b/autogpt/commands/execute_code.py
index 11266f85..ff35d428 100644
--- a/autogpt/commands/execute_code.py
+++ b/autogpt/commands/execute_code.py
@@ -5,19 +5,24 @@ import subprocess
import docker
from docker.errors import ImageNotFound
+from autogpt.commands.command import command
+from autogpt.config import Config
from autogpt.workspace import WORKSPACE_PATH, path_in_workspace
+CFG = Config()
-def execute_python_file(file: str) -> str:
+
+@command("execute_python_file", "Execute Python File", '"filename": ""')
+def execute_python_file(filename: str) -> str:
"""Execute a Python file in a Docker container and return the output
Args:
- file (str): The name of the file to execute
+ filename (str): The name of the file to execute
Returns:
str: The output of the file
"""
-
+ file = filename
print(f"Executing file '{file}' in workspace '{WORKSPACE_PATH}'")
if not file.endswith(".py"):
@@ -94,6 +99,15 @@ def execute_python_file(file: str) -> str:
return f"Error: {str(e)}"
+@command(
+ "execute_shell",
+ "Execute Shell Command, non-interactive commands only",
+ '"command_line": ""',
+ CFG.execute_local_commands,
+ "You are not allowed to run local shell commands. To execute"
+ " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
+ "in your config. Do not attempt to bypass the restriction.",
+)
def execute_shell(command_line: str) -> str:
"""Execute a shell command and return the output
@@ -103,6 +117,13 @@ def execute_shell(command_line: str) -> str:
Returns:
str: The output of the command
"""
+
+ if not CFG.execute_local_commands:
+ return (
+ "You are not allowed to run local shell commands. To execute"
+ " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
+ "in your config. Do not attempt to bypass the restriction."
+ )
current_dir = os.getcwd()
# Change dir into workspace if necessary
if str(WORKSPACE_PATH) not in current_dir:
@@ -117,9 +138,16 @@ def execute_shell(command_line: str) -> str:
os.chdir(current_dir)
- return output
-
+@command(
+ "execute_shell_popen",
+ "Execute Shell Command, non-interactive commands only",
+ '"command_line": ""',
+ CFG.execute_local_commands,
+ "You are not allowed to run local shell commands. To execute"
+ " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
+ "in your config. Do not attempt to bypass the restriction.",
+)
def execute_shell_popen(command_line) -> str:
"""Execute a shell command with Popen and returns an english description
of the event and the process id
diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py
index ad145ec9..b73fb987 100644
--- a/autogpt/commands/file_operations.py
+++ b/autogpt/commands/file_operations.py
@@ -9,10 +9,13 @@ import requests
from colorama import Back, Fore
from requests.adapters import HTTPAdapter, Retry
+from autogpt.commands.command import command
+from autogpt.config import Config
from autogpt.spinner import Spinner
from autogpt.utils import readable_file_size
from autogpt.workspace import WORKSPACE_PATH, path_in_workspace
+CFG = Config()
LOG_FILE = "file_logger.txt"
LOG_FILE_PATH = WORKSPACE_PATH / LOG_FILE
@@ -81,6 +84,7 @@ def split_file(
start += max_length - overlap
+@command("read_file", "Read file", '"filename": ""')
def read_file(filename: str) -> str:
"""Read a file and return the contents
@@ -133,6 +137,7 @@ def ingest_file(
print(f"Error while ingesting file '{filename}': {str(e)}")
+@command("write_to_file", "Write to file", '"filename": "", "text": ""')
def write_to_file(filename: str, text: str) -> str:
"""Write text to a file
@@ -158,6 +163,9 @@ def write_to_file(filename: str, text: str) -> str:
return f"Error: {str(e)}"
+@command(
+ "append_to_file", "Append to file", '"filename": "", "text": ""'
+)
def append_to_file(filename: str, text: str, shouldLog: bool = True) -> str:
"""Append text to a file
@@ -181,6 +189,7 @@ def append_to_file(filename: str, text: str, shouldLog: bool = True) -> str:
return f"Error: {str(e)}"
+@command("delete_file", "Delete file", '"filename": ""')
def delete_file(filename: str) -> str:
"""Delete a file
@@ -201,6 +210,7 @@ def delete_file(filename: str) -> str:
return f"Error: {str(e)}"
+@command("search_files", "Search Files", '"directory": ""')
def search_files(directory: str) -> list[str]:
"""Search for files in a directory
@@ -227,6 +237,13 @@ def search_files(directory: str) -> list[str]:
return found_files
+@command(
+ "download_file",
+ "Search Files",
+ '"url": "", "filename": ""',
+ CFG.allow_downloads,
+ "Error: You do not have user authorization to download files locally.",
+)
def download_file(url, filename):
"""Downloads a file
Args:
diff --git a/autogpt/commands/git_operations.py b/autogpt/commands/git_operations.py
index 028f3b8d..1fb99e5b 100644
--- a/autogpt/commands/git_operations.py
+++ b/autogpt/commands/git_operations.py
@@ -1,26 +1,34 @@
"""Git operations for autogpt"""
-import git
+from git.repo import Repo
+from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.workspace import path_in_workspace
CFG = Config()
-def clone_repository(repo_url: str, clone_path: str) -> str:
+@command(
+ "clone_repository",
+ "Clone Repositoryy",
+ '"repository_url": "", "clone_path": ""',
+ CFG.github_username and CFG.github_api_key,
+ "Configure github_username and github_api_key.",
+)
+def clone_repository(repository_url: str, clone_path: str) -> str:
"""Clone a GitHub repository locally
Args:
- repo_url (str): The URL of the repository to clone
+ repository_url (str): The URL of the repository to clone
clone_path (str): The path to clone the repository to
Returns:
str: The result of the clone operation"""
- split_url = repo_url.split("//")
+ split_url = repository_url.split("//")
auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url)
safe_clone_path = path_in_workspace(clone_path)
try:
- git.Repo.clone_from(auth_repo_url, safe_clone_path)
- return f"""Cloned {repo_url} to {safe_clone_path}"""
+ Repo.clone_from(auth_repo_url, safe_clone_path)
+ return f"""Cloned {repository_url} to {safe_clone_path}"""
except Exception as e:
return f"Error: {str(e)}"
diff --git a/autogpt/commands/google_search.py b/autogpt/commands/google_search.py
index 7d38ce75..fcc1a9f4 100644
--- a/autogpt/commands/google_search.py
+++ b/autogpt/commands/google_search.py
@@ -5,11 +5,13 @@ import json
from duckduckgo_search import ddg
+from autogpt.commands.command import command
from autogpt.config import Config
CFG = Config()
+@command("google", "Google Search", '"query": ""', not CFG.google_api_key)
def google_search(query: str, num_results: int = 8) -> str:
"""Return the results of a Google search
@@ -31,9 +33,17 @@ def google_search(query: str, num_results: int = 8) -> str:
for j in results:
search_results.append(j)
- return json.dumps(search_results, ensure_ascii=False, indent=4)
+ results = json.dumps(search_results, ensure_ascii=False, indent=4)
+ return safe_google_results(results)
+@command(
+ "google",
+ "Google Search",
+ '"query": ""',
+ bool(CFG.google_api_key),
+ "Configure google_api_key.",
+)
def google_official_search(query: str, num_results: int = 8) -> str | list[str]:
"""Return the results of a Google search using the official Google API
@@ -82,6 +92,26 @@ def google_official_search(query: str, num_results: int = 8) -> str | list[str]:
return "Error: The provided Google API key is invalid or missing."
else:
return f"Error: {e}"
+ # google_result can be a list or a string depending on the search results
# Return the list of search result URLs
- return search_results_links
+ return safe_google_results(search_results_links)
+
+
+def safe_google_results(results: str | list) -> str:
+ """
+ Return the results of a google search in a safe format.
+
+ Args:
+ results (str | list): The search results.
+
+ Returns:
+ str: The results of the search.
+ """
+ if isinstance(results, list):
+ safe_message = json.dumps(
+            [result.encode("utf-8", "ignore").decode("utf-8") for result in results]
+ )
+ else:
+ safe_message = results.encode("utf-8", "ignore").decode("utf-8")
+ return safe_message
diff --git a/autogpt/commands/image_gen.py b/autogpt/commands/image_gen.py
index 0809fcdd..60cdaec0 100644
--- a/autogpt/commands/image_gen.py
+++ b/autogpt/commands/image_gen.py
@@ -1,6 +1,5 @@
""" Image Generation Module for AutoGPT."""
import io
-import os.path
import uuid
from base64 import b64decode
@@ -8,12 +7,14 @@ import openai
import requests
from PIL import Image
+from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.workspace import path_in_workspace
CFG = Config()
+@command("generate_image", "Generate Image", '"prompt": ""', CFG.image_provider)
def generate_image(prompt: str, size: int = 256) -> str:
"""Generate an image from a prompt.
diff --git a/autogpt/commands/improve_code.py b/autogpt/commands/improve_code.py
index e3440d8b..41a369b4 100644
--- a/autogpt/commands/improve_code.py
+++ b/autogpt/commands/improve_code.py
@@ -2,9 +2,15 @@ from __future__ import annotations
import json
+from autogpt.commands.command import command
from autogpt.llm_utils import call_ai_function
+@command(
+ "improve_code",
+ "Get Improved Code",
+ '"suggestions": "", "code": ""',
+)
def improve_code(suggestions: list[str], code: str) -> str:
"""
A function that takes in code and suggestions and returns a response from create
diff --git a/autogpt/commands/twitter.py b/autogpt/commands/twitter.py
index 3eaed36e..f0502271 100644
--- a/autogpt/commands/twitter.py
+++ b/autogpt/commands/twitter.py
@@ -1,12 +1,30 @@
+"""A module that contains a command to send a tweet."""
import os
import tweepy
from dotenv import load_dotenv
+from autogpt.commands.command import command
+
load_dotenv()
-def send_tweet(tweet_text):
+@command(
+ "send_tweet",
+ "Send Tweet",
+ '"tweet_text": ""',
+)
+def send_tweet(tweet_text: str) -> str:
+ """
+    A function that takes in a string and sends it as a tweet via the Twitter API.
+
+ Args:
+ tweet_text (str): Text to be tweeted.
+
+ Returns:
+ A result from sending the tweet.
+ """
consumer_key = os.environ.get("TW_CONSUMER_KEY")
consumer_secret = os.environ.get("TW_CONSUMER_SECRET")
access_token = os.environ.get("TW_ACCESS_TOKEN")
@@ -21,6 +39,6 @@ def send_tweet(tweet_text):
# Send tweet
try:
api.update_status(tweet_text)
- print("Tweet sent successfully!")
+ return "Tweet sent successfully!"
except tweepy.TweepyException as e:
- print("Error sending tweet: {}".format(e.reason))
+ return f"Error sending tweet: {e.reason}"
diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py
index 11bdfeb1..e0e0d70a 100644
--- a/autogpt/commands/web_selenium.py
+++ b/autogpt/commands/web_selenium.py
@@ -18,6 +18,7 @@ from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.firefox import GeckoDriverManager
import autogpt.processing.text as summary
+from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
@@ -25,6 +26,11 @@ FILE_DIR = Path(__file__).parent.parent
CFG = Config()
+@command(
+ "browse_website",
+ "Browse Website",
+ '"url": "", "question": ""',
+)
def browse_website(url: str, question: str) -> tuple[str, WebDriver]:
"""Browse a website and return the answer and links to the user
diff --git a/autogpt/commands/write_tests.py b/autogpt/commands/write_tests.py
index 35a08653..91cd9304 100644
--- a/autogpt/commands/write_tests.py
+++ b/autogpt/commands/write_tests.py
@@ -3,9 +3,15 @@ from __future__ import annotations
import json
+from autogpt.commands.command import command
from autogpt.llm_utils import call_ai_function
+@command(
+ "write_tests",
+ "Write Tests",
+ '"code": "", "focus": ""',
+)
def write_tests(code: str, focus: list[str]) -> str:
"""
A function that takes in code and focus topics and returns a response from create
diff --git a/autogpt/config/ai_config.py b/autogpt/config/ai_config.py
index d50c30be..1e48ab4d 100644
--- a/autogpt/config/ai_config.py
+++ b/autogpt/config/ai_config.py
@@ -5,10 +5,16 @@ A module that contains the AIConfig class object that contains the configuration
from __future__ import annotations
import os
-from typing import Type
+from pathlib import Path
+from typing import Optional, Type
import yaml
+from autogpt.prompts.generator import PromptGenerator
+
+# Soon this will go in a folder where it remembers more stuff about the run(s)
+SAVE_FILE = str(Path(os.getcwd()) / "ai_settings.yaml")
+
class AIConfig:
"""
@@ -38,9 +44,8 @@ class AIConfig:
self.ai_name = ai_name
self.ai_role = ai_role
self.ai_goals = ai_goals
-
- # Soon this will go in a folder where it remembers more stuff about the run(s)
- SAVE_FILE = os.path.join(os.path.dirname(__file__), "..", "ai_settings.yaml")
+ self.prompt_generator = None
+ self.command_registry = None
@staticmethod
def load(config_file: str = SAVE_FILE) -> "AIConfig":
@@ -89,7 +94,9 @@ class AIConfig:
with open(config_file, "w", encoding="utf-8") as file:
yaml.dump(config, file, allow_unicode=True)
- def construct_full_prompt(self) -> str:
+ def construct_full_prompt(
+ self, prompt_generator: Optional[PromptGenerator] = None
+ ) -> str:
"""
Returns a prompt to the user with the class information in an organized fashion.
@@ -108,14 +115,25 @@ class AIConfig:
""
)
- from autogpt.prompt import get_prompt
+ from autogpt.config import Config
+ from autogpt.prompts.prompt import build_default_prompt_generator
+
+ cfg = Config()
+ if prompt_generator is None:
+ prompt_generator = build_default_prompt_generator()
+ prompt_generator.goals = self.ai_goals
+ prompt_generator.name = self.ai_name
+ prompt_generator.role = self.ai_role
+ prompt_generator.command_registry = self.command_registry
+ for plugin in cfg.plugins:
+ if not plugin.can_handle_post_prompt():
+ continue
+ prompt_generator = plugin.post_prompt(prompt_generator)
# Construct full prompt
- full_prompt = (
- f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
- )
+ full_prompt = f"You are {prompt_generator.name}, {prompt_generator.role}\n{prompt_start}\n\nGOALS:\n\n"
for i, goal in enumerate(self.ai_goals):
full_prompt += f"{i+1}. {goal}\n"
-
- full_prompt += f"\n\n{get_prompt()}"
+ self.prompt_generator = prompt_generator
+ full_prompt += f"\n\n{prompt_generator.generate_prompt_string()}"
return full_prompt
diff --git a/autogpt/config/config.py b/autogpt/config/config.py
index c284a4ac..801df2bb 100644
--- a/autogpt/config/config.py
+++ b/autogpt/config/config.py
@@ -1,8 +1,10 @@
"""Configuration class to store the state of bools for different scripts access."""
import os
+from typing import List
import openai
import yaml
+from auto_gpt_plugin_template import AutoGPTPluginTemplate
from colorama import Fore
from dotenv import load_dotenv
@@ -123,6 +125,18 @@ class Config(metaclass=Singleton):
# Initialize the OpenAI API client
openai.api_key = self.openai_api_key
+ self.plugins_dir = os.getenv("PLUGINS_DIR", "plugins")
+ self.plugins: List[AutoGPTPluginTemplate] = []
+ self.plugins_openai = []
+
+ plugins_allowlist = os.getenv("ALLOWLISTED_PLUGINS")
+ if plugins_allowlist:
+ plugins_allowlist = plugins_allowlist.split(",")
+ self.plugins_whitelist = plugins_allowlist
+ else:
+ self.plugins_whitelist = []
+ self.plugins_blacklist = []
+
def get_azure_deployment_id_for_model(self, model: str) -> str:
"""
Returns the relevant deployment id for the model specified.
@@ -241,6 +255,10 @@ class Config(metaclass=Singleton):
"""Set the debug mode value."""
self.debug_mode = value
+ def set_plugins(self, value: list) -> None:
+ """Set the plugins value."""
+ self.plugins = value
+
def check_openai_api_key() -> None:
"""Check if the OpenAI API key is set in config.py or as an environment variable."""
diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py
index 821820ff..8b85959c 100644
--- a/autogpt/llm_utils.py
+++ b/autogpt/llm_utils.py
@@ -1,7 +1,7 @@
from __future__ import annotations
import time
-from ast import List
+from typing import List, Optional
import openai
from colorama import Fore, Style
@@ -9,6 +9,7 @@ from openai.error import APIError, RateLimitError
from autogpt.config import Config
from autogpt.logs import logger
+from autogpt.types.openai import Message
CFG = Config()
@@ -37,8 +38,8 @@ def call_ai_function(
# For each arg, if any are None, convert to "None":
args = [str(arg) if arg is not None else "None" for arg in args]
# parse args to comma separated string
- args = ", ".join(args)
- messages = [
+ args: str = ", ".join(args)
+ messages: List[Message] = [
{
"role": "system",
"content": f"You are now the following python function: ```# {description}"
@@ -53,15 +54,15 @@ def call_ai_function(
# Overly simple abstraction until we create something better
# simple retry mechanism when getting a rate error or a bad gateway
def create_chat_completion(
- messages: list, # type: ignore
- model: str | None = None,
+ messages: List[Message], # type: ignore
+ model: Optional[str] = None,
temperature: float = CFG.temperature,
- max_tokens: int | None = None,
+ max_tokens: Optional[int] = None,
) -> str:
"""Create a chat completion using the OpenAI API
Args:
- messages (list[dict[str, str]]): The messages to send to the chat completion
+ messages (List[Message]): The messages to send to the chat completion
model (str, optional): The model to use. Defaults to None.
temperature (float, optional): The temperature to use. Defaults to 0.9.
max_tokens (int, optional): The max tokens to use. Defaults to None.
@@ -69,15 +70,28 @@ def create_chat_completion(
Returns:
str: The response from the chat completion
"""
- response = None
num_retries = 10
warned_user = False
if CFG.debug_mode:
print(
- Fore.GREEN
- + f"Creating chat completion with model {model}, temperature {temperature},"
- f" max_tokens {max_tokens}" + Fore.RESET
+ f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}"
)
+ for plugin in CFG.plugins:
+ if plugin.can_handle_chat_completion(
+ messages=messages,
+ model=model,
+ temperature=temperature,
+ max_tokens=max_tokens,
+ ):
+ message = plugin.handle_chat_completion(
+ messages=messages,
+ model=model,
+ temperature=temperature,
+ max_tokens=max_tokens,
+ )
+ if message is not None:
+ return message
+ response = None
for attempt in range(num_retries):
backoff = 2 ** (attempt + 2)
try:
@@ -100,8 +114,7 @@ def create_chat_completion(
except RateLimitError:
if CFG.debug_mode:
print(
- Fore.RED + "Error: ",
- f"Reached rate limit, passing..." + Fore.RESET,
+ f"{Fore.RED}Error: ", f"Reached rate limit, passing...{Fore.RESET}"
)
if not warned_user:
logger.double_check(
@@ -110,16 +123,14 @@ def create_chat_completion(
)
warned_user = True
except APIError as e:
- if e.http_status == 502:
- pass
- else:
+ if e.http_status != 502:
raise
if attempt == num_retries - 1:
raise
if CFG.debug_mode:
print(
- Fore.RED + "Error: ",
- f"API Bad gateway. Waiting {backoff} seconds..." + Fore.RESET,
+ f"{Fore.RED}Error: ",
+ f"API Bad gateway. Waiting {backoff} seconds...{Fore.RESET}",
)
time.sleep(backoff)
if response is None:
@@ -134,8 +145,12 @@ def create_chat_completion(
raise RuntimeError(f"Failed to get response after {num_retries} retries")
else:
quit(1)
-
- return response.choices[0].message["content"]
+ resp = response.choices[0].message["content"]
+ for plugin in CFG.plugins:
+ if not plugin.can_handle_on_response():
+ continue
+ resp = plugin.on_response(resp)
+ return resp
def create_embedding_with_ada(text) -> list:
@@ -158,15 +173,13 @@ def create_embedding_with_ada(text) -> list:
except RateLimitError:
pass
except APIError as e:
- if e.http_status == 502:
- pass
- else:
+ if e.http_status != 502:
raise
if attempt == num_retries - 1:
raise
if CFG.debug_mode:
print(
- Fore.RED + "Error: ",
- f"API Bad gateway. Waiting {backoff} seconds..." + Fore.RESET,
+ f"{Fore.RED}Error: ",
+ f"API Bad gateway. Waiting {backoff} seconds...{Fore.RESET}",
)
time.sleep(backoff)
diff --git a/autogpt/models/base_open_ai_plugin.py b/autogpt/models/base_open_ai_plugin.py
new file mode 100644
index 00000000..046295c0
--- /dev/null
+++ b/autogpt/models/base_open_ai_plugin.py
@@ -0,0 +1,199 @@
+"""Handles loading of plugins."""
+from typing import Any, Dict, List, Optional, Tuple, TypedDict, TypeVar
+
+from auto_gpt_plugin_template import AutoGPTPluginTemplate
+
+PromptGenerator = TypeVar("PromptGenerator")
+
+
+class Message(TypedDict):
+ role: str
+ content: str
+
+
+class BaseOpenAIPlugin(AutoGPTPluginTemplate):
+ """
+    A base plugin class that wraps an OpenAI plugin (manifest, OpenAPI spec, and client) as an Auto-GPT plugin.
+ """
+
+ def __init__(self, manifests_specs_clients: dict):
+ # super().__init__()
+ self._name = manifests_specs_clients["manifest"]["name_for_model"]
+ self._version = manifests_specs_clients["manifest"]["schema_version"]
+ self._description = manifests_specs_clients["manifest"]["description_for_model"]
+ self._client = manifests_specs_clients["client"]
+ self._manifest = manifests_specs_clients["manifest"]
+ self._openapi_spec = manifests_specs_clients["openapi_spec"]
+
+ def can_handle_on_response(self) -> bool:
+ """This method is called to check that the plugin can
+ handle the on_response method.
+ Returns:
+ bool: True if the plugin can handle the on_response method."""
+ return False
+
+ def on_response(self, response: str, *args, **kwargs) -> str:
+ """This method is called when a response is received from the model."""
+ return response
+
+ def can_handle_post_prompt(self) -> bool:
+ """This method is called to check that the plugin can
+ handle the post_prompt method.
+ Returns:
+ bool: True if the plugin can handle the post_prompt method."""
+ return False
+
+ def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator:
+ """This method is called just after the generate_prompt is called,
+ but actually before the prompt is generated.
+ Args:
+ prompt (PromptGenerator): The prompt generator.
+ Returns:
+ PromptGenerator: The prompt generator.
+ """
+ return prompt
+
+ def can_handle_on_planning(self) -> bool:
+ """This method is called to check that the plugin can
+ handle the on_planning method.
+ Returns:
+ bool: True if the plugin can handle the on_planning method."""
+ return False
+
+ def on_planning(
+ self, prompt: PromptGenerator, messages: List[Message]
+ ) -> Optional[str]:
+ """This method is called before the planning chat completion is done.
+ Args:
+ prompt (PromptGenerator): The prompt generator.
+            messages (List[Message]): The list of messages.
+        Returns:
+            Optional[str]: The resulting message.
+        """
+ pass
+
+ def can_handle_post_planning(self) -> bool:
+ """This method is called to check that the plugin can
+ handle the post_planning method.
+ Returns:
+ bool: True if the plugin can handle the post_planning method."""
+ return False
+
+ def post_planning(self, response: str) -> str:
+ """This method is called after the planning chat completion is done.
+ Args:
+ response (str): The response.
+ Returns:
+ str: The resulting response.
+ """
+ return response
+
+ def can_handle_pre_instruction(self) -> bool:
+ """This method is called to check that the plugin can
+ handle the pre_instruction method.
+ Returns:
+ bool: True if the plugin can handle the pre_instruction method."""
+ return False
+
+ def pre_instruction(self, messages: List[Message]) -> List[Message]:
+ """This method is called before the instruction chat is done.
+ Args:
+ messages (List[Message]): The list of context messages.
+ Returns:
+ List[Message]: The resulting list of messages.
+ """
+ return messages
+
+ def can_handle_on_instruction(self) -> bool:
+ """This method is called to check that the plugin can
+ handle the on_instruction method.
+ Returns:
+ bool: True if the plugin can handle the on_instruction method."""
+ return False
+
+ def on_instruction(self, messages: List[Message]) -> Optional[str]:
+ """This method is called when the instruction chat is done.
+ Args:
+ messages (List[Message]): The list of context messages.
+ Returns:
+ Optional[str]: The resulting message.
+ """
+ pass
+
+ def can_handle_post_instruction(self) -> bool:
+ """This method is called to check that the plugin can
+ handle the post_instruction method.
+ Returns:
+ bool: True if the plugin can handle the post_instruction method."""
+ return False
+
+ def post_instruction(self, response: str) -> str:
+ """This method is called after the instruction chat is done.
+ Args:
+ response (str): The response.
+ Returns:
+ str: The resulting response.
+ """
+ return response
+
+ def can_handle_pre_command(self) -> bool:
+ """This method is called to check that the plugin can
+ handle the pre_command method.
+ Returns:
+ bool: True if the plugin can handle the pre_command method."""
+ return False
+
+ def pre_command(
+ self, command_name: str, arguments: Dict[str, Any]
+ ) -> Tuple[str, Dict[str, Any]]:
+ """This method is called before the command is executed.
+ Args:
+ command_name (str): The command name.
+ arguments (Dict[str, Any]): The arguments.
+ Returns:
+ Tuple[str, Dict[str, Any]]: The command name and the arguments.
+ """
+ return command_name, arguments
+
+ def can_handle_post_command(self) -> bool:
+ """This method is called to check that the plugin can
+ handle the post_command method.
+ Returns:
+ bool: True if the plugin can handle the post_command method."""
+ return False
+
+ def post_command(self, command_name: str, response: str) -> str:
+ """This method is called after the command is executed.
+ Args:
+ command_name (str): The command name.
+ response (str): The response.
+ Returns:
+ str: The resulting response.
+ """
+ return response
+
+ def can_handle_chat_completion(
+        self, messages: List[Message], model: str, temperature: float, max_tokens: int
+ ) -> bool:
+ """This method is called to check that the plugin can
+ handle the chat_completion method.
+ Args:
+ messages (List[Message]): The messages.
+ model (str): The model name.
+ temperature (float): The temperature.
+ max_tokens (int): The max tokens.
+ Returns:
+ bool: True if the plugin can handle the chat_completion method."""
+ return False
+
+ def handle_chat_completion(
+ self, messages: List[Message], model: str, temperature: float, max_tokens: int
+ ) -> str:
+ """This method is called when the chat completion is done.
+ Args:
+ messages (List[Message]): The messages.
+ model (str): The model name.
+ temperature (float): The temperature.
+ max_tokens (int): The max tokens.
+ Returns:
+ str: The resulting response.
+ """
+ pass
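
For illustration, a concrete plugin only needs to flip the relevant
can_handle_* switch and override the matching hook; everything else inherits
the no-op defaults above. The subclass and manifest dict below are made up
for the example:

    from autogpt.models.base_open_ai_plugin import BaseOpenAIPlugin

    class ShoutyPlugin(BaseOpenAIPlugin):
        """Hypothetical plugin that upper-cases every model response."""

        def can_handle_on_response(self) -> bool:
            return True

        def on_response(self, response: str, *args, **kwargs) -> str:
            return response.upper()

    plugin = ShoutyPlugin(
        {
            "manifest": {
                "name_for_model": "shouty",
                "schema_version": "v1",
                "description_for_model": "Upper-cases responses.",
            },
            "client": None,  # no HTTP client needed for this hook
            "openapi_spec": None,
        }
    )
    assert plugin.on_response("hello") == "HELLO"
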
diff --git a/autogpt/plugins.py b/autogpt/plugins.py
new file mode 100644
index 00000000..b536acbd
--- /dev/null
+++ b/autogpt/plugins.py
@@ -0,0 +1,265 @@
+"""Handles loading of plugins."""
+
+import importlib
+import json
+import os
+import zipfile
+from pathlib import Path
+from typing import List, Optional
+from urllib.parse import urlparse
+from zipimport import zipimporter
+
+import openapi_python_client
+import requests
+from auto_gpt_plugin_template import AutoGPTPluginTemplate
+from openapi_python_client.cli import Config as OpenAPIConfig
+
+from autogpt.config import Config
+from autogpt.models.base_open_ai_plugin import BaseOpenAIPlugin
+
+
+def inspect_zip_for_module(zip_path: str, debug: bool = False) -> Optional[str]:
+ """
+ Inspect a zipfile for a module.
+
+ Args:
+ zip_path (str): Path to the zipfile.
+ debug (bool, optional): Enable debug logging. Defaults to False.
+
+ Returns:
+ Optional[str]: The name of the module if found, else None.
+ """
+ with zipfile.ZipFile(zip_path, "r") as zfile:
+ for name in zfile.namelist():
+ if name.endswith("__init__.py"):
+ if debug:
+ print(f"Found module '{name}' in the zipfile at: {name}")
+ return name
+ if debug:
+ print(f"Module '__init__.py' not found in the zipfile @ {zip_path}.")
+ return None
+
+
+def write_dict_to_json_file(data: dict, file_path: str) -> None:
+ """
+ Write a dictionary to a JSON file.
+ Args:
+ data (dict): Dictionary to write.
+ file_path (str): Path to the file.
+ """
+ with open(file_path, "w") as file:
+ json.dump(data, file, indent=4)
+
+
+def fetch_openai_plugins_manifest_and_spec(cfg: Config) -> dict:
+ """
+ Fetch the manifest for a list of OpenAI plugins.
+ Args:
+        cfg (Config): Config instance listing the OpenAI plugin URLs.
+ Returns:
+ dict: per url dictionary of manifest and spec.
+ """
+ # TODO add directory scan
+ manifests = {}
+ for url in cfg.plugins_openai:
+ openai_plugin_client_dir = f"{cfg.plugins_dir}/openai/{urlparse(url).netloc}"
+ create_directory_if_not_exists(openai_plugin_client_dir)
+ if not os.path.exists(f"{openai_plugin_client_dir}/ai-plugin.json"):
+ try:
+ response = requests.get(f"{url}/.well-known/ai-plugin.json")
+ if response.status_code == 200:
+ manifest = response.json()
+ if manifest["schema_version"] != "v1":
+ print(
+ f"Unsupported manifest version: {manifest['schem_version']} for {url}"
+ )
+ continue
+ if manifest["api"]["type"] != "openapi":
+ print(
+ f"Unsupported API type: {manifest['api']['type']} for {url}"
+ )
+ continue
+ write_dict_to_json_file(
+ manifest, f"{openai_plugin_client_dir}/ai-plugin.json"
+ )
+ else:
+ print(f"Failed to fetch manifest for {url}: {response.status_code}")
+ except requests.exceptions.RequestException as e:
+ print(f"Error while requesting manifest from {url}: {e}")
+ else:
+ print(f"Manifest for {url} already exists")
+            with open(f"{openai_plugin_client_dir}/ai-plugin.json") as manifest_file:
+                manifest = json.load(manifest_file)
+ if not os.path.exists(f"{openai_plugin_client_dir}/openapi.json"):
+ openapi_spec = openapi_python_client._get_document(
+ url=manifest["api"]["url"], path=None, timeout=5
+ )
+ write_dict_to_json_file(
+ openapi_spec, f"{openai_plugin_client_dir}/openapi.json"
+ )
+ else:
+ print(f"OpenAPI spec for {url} already exists")
+            with open(f"{openai_plugin_client_dir}/openapi.json") as spec_file:
+                openapi_spec = json.load(spec_file)
+ manifests[url] = {"manifest": manifest, "openapi_spec": openapi_spec}
+ return manifests
+
+
+def create_directory_if_not_exists(directory_path: str) -> bool:
+ """
+ Create a directory if it does not exist.
+ Args:
+ directory_path (str): Path to the directory.
+ Returns:
+ bool: True if the directory was created, else False.
+ """
+ if not os.path.exists(directory_path):
+ try:
+ os.makedirs(directory_path)
+ print(f"Created directory: {directory_path}")
+ return True
+ except OSError as e:
+ print(f"Error creating directory {directory_path}: {e}")
+ return False
+ else:
+ print(f"Directory {directory_path} already exists")
+ return True
+
+
+def initialize_openai_plugins(
+ manifests_specs: dict, cfg: Config, debug: bool = False
+) -> dict:
+ """
+ Initialize OpenAI plugins.
+ Args:
+ manifests_specs (dict): per url dictionary of manifest and spec.
+ cfg (Config): Config instance including plugins config
+ debug (bool, optional): Enable debug logging. Defaults to False.
+ Returns:
+ dict: per url dictionary of manifest, spec and client.
+ """
+ openai_plugins_dir = f"{cfg.plugins_dir}/openai"
+ if create_directory_if_not_exists(openai_plugins_dir):
+ for url, manifest_spec in manifests_specs.items():
+ openai_plugin_client_dir = f"{openai_plugins_dir}/{urlparse(url).hostname}"
+ _meta_option = (openapi_python_client.MetaType.SETUP,)
+            _config = OpenAPIConfig(
+                project_name_override="client",
+                package_name_override="client",
+            )
+ prev_cwd = Path.cwd()
+ os.chdir(openai_plugin_client_dir)
+ Path("ai-plugin.json")
+ if not os.path.exists("client"):
+ client_results = openapi_python_client.create_new_client(
+ url=manifest_spec["manifest"]["api"]["url"],
+ path=None,
+ meta=_meta_option,
+ config=_config,
+ )
+ if client_results:
+ print(
+ f"Error creating OpenAPI client: {client_results[0].header} \n"
+ f" details: {client_results[0].detail}"
+ )
+ continue
+ spec = importlib.util.spec_from_file_location(
+ "client", "client/client/client.py"
+ )
+ module = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(module)
+ client = module.Client(base_url=url)
+ os.chdir(prev_cwd)
+ manifest_spec["client"] = client
+ return manifests_specs
+
+
+def instantiate_openai_plugin_clients(
+ manifests_specs_clients: dict, cfg: Config, debug: bool = False
+) -> dict:
+ """
+ Instantiates BaseOpenAIPlugin instances for each OpenAI plugin.
+ Args:
+ manifests_specs_clients (dict): per url dictionary of manifest, spec and client.
+ cfg (Config): Config instance including plugins config
+ debug (bool, optional): Enable debug logging. Defaults to False.
+ Returns:
+ plugins (dict): per url dictionary of BaseOpenAIPlugin instances.
+
+ """
+ plugins = {}
+ for url, manifest_spec_client in manifests_specs_clients.items():
+ plugins[url] = BaseOpenAIPlugin(manifest_spec_client)
+ return plugins
+
+
+def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate]:
+ """Scan the plugins directory for plugins and loads them.
+
+ Args:
+ cfg (Config): Config instance including plugins config
+ debug (bool, optional): Enable debug logging. Defaults to False.
+
+ Returns:
+        List[AutoGPTPluginTemplate]: List of loaded plugin instances.
+ """
+ loaded_plugins = []
+ # Generic plugins
+    plugins_path = Path(cfg.plugins_dir)
+    for plugin in plugins_path.glob("*.zip"):
+ if module := inspect_zip_for_module(str(plugin), debug):
+ plugin = Path(plugin)
+ module = Path(module)
+ if debug:
+ print(f"Plugin: {plugin} Module: {module}")
+ zipped_package = zipimporter(str(plugin))
+ zipped_module = zipped_package.load_module(str(module.parent))
+ for key in dir(zipped_module):
+ if key.startswith("__"):
+ continue
+ a_module = getattr(zipped_module, key)
+ a_keys = dir(a_module)
+ if (
+ "_abc_impl" in a_keys
+ and a_module.__name__ != "AutoGPTPluginTemplate"
+ and blacklist_whitelist_check(a_module.__name__, cfg)
+ ):
+ loaded_plugins.append(a_module())
+ # OpenAI plugins
+ if cfg.plugins_openai:
+ manifests_specs = fetch_openai_plugins_manifest_and_spec(cfg)
+        if manifests_specs:
+ manifests_specs_clients = initialize_openai_plugins(
+ manifests_specs, cfg, debug
+ )
+ for url, openai_plugin_meta in manifests_specs_clients.items():
+ if blacklist_whitelist_check(url, cfg):
+ plugin = BaseOpenAIPlugin(openai_plugin_meta)
+ loaded_plugins.append(plugin)
+
+ if loaded_plugins:
+ print(f"\nPlugins found: {len(loaded_plugins)}\n" "--------------------")
+ for plugin in loaded_plugins:
+ print(f"{plugin._name}: {plugin._version} - {plugin._description}")
+ return loaded_plugins
+
+
+def blacklist_whitelist_check(plugin_name: str, cfg: Config) -> bool:
+ """Check if the plugin is in the whitelist or blacklist.
+
+ Args:
+ plugin_name (str): Name of the plugin.
+ cfg (Config): Config object.
+
+ Returns:
+        bool: True if the plugin is allowed to load, else False.
+ """
+ if plugin_name in cfg.plugins_blacklist:
+ return False
+ if plugin_name in cfg.plugins_whitelist:
+ return True
+ ack = input(
+ f"WARNNG Plugin {plugin_name} found. But not in the"
+ " whitelist... Load? (y/n): "
+ )
+ return ack.lower() == "y"
diff --git a/autogpt/prompts/__init__.py b/autogpt/prompts/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/autogpt/promptgenerator.py b/autogpt/prompts/generator.py
similarity index 78%
rename from autogpt/promptgenerator.py
rename to autogpt/prompts/generator.py
index 0ad7046a..c9a441d8 100644
--- a/autogpt/promptgenerator.py
+++ b/autogpt/prompts/generator.py
@@ -1,8 +1,6 @@
""" A module for generating custom prompt strings."""
-from __future__ import annotations
-
import json
-from typing import Any
+from typing import Any, Callable, Dict, List, Optional
class PromptGenerator:
@@ -20,6 +18,10 @@ class PromptGenerator:
self.commands = []
self.resources = []
self.performance_evaluation = []
+ self.goals = []
+ self.command_registry = None
+ self.name = "Bob"
+ self.role = "AI"
self.response_format = {
"thoughts": {
"text": "thought",
@@ -40,7 +42,13 @@ class PromptGenerator:
"""
self.constraints.append(constraint)
- def add_command(self, command_label: str, command_name: str, args=None) -> None:
+ def add_command(
+ self,
+ command_label: str,
+ command_name: str,
+ args=None,
+ function: Optional[Callable] = None,
+ ) -> None:
"""
Add a command to the commands list with a label, name, and optional arguments.
@@ -49,6 +57,8 @@ class PromptGenerator:
command_name (str): The name of the command.
args (dict, optional): A dictionary containing argument names and their
values. Defaults to None.
+ function (callable, optional): A callable function to be called when
+ the command is executed. Defaults to None.
"""
if args is None:
args = {}
@@ -59,11 +69,12 @@ class PromptGenerator:
"label": command_label,
"name": command_name,
"args": command_args,
+ "function": function,
}
self.commands.append(command)
- def _generate_command_string(self, command: dict[str, Any]) -> str:
+ def _generate_command_string(self, command: Dict[str, Any]) -> str:
"""
Generate a formatted string representation of a command.
@@ -96,7 +107,7 @@ class PromptGenerator:
"""
self.performance_evaluation.append(evaluation)
- def _generate_numbered_list(self, items: list[Any], item_type="list") -> str:
+ def _generate_numbered_list(self, items: List[Any], item_type="list") -> str:
"""
Generate a numbered list from given items based on the item_type.
@@ -109,10 +120,16 @@ class PromptGenerator:
str: The formatted numbered list.
"""
if item_type == "command":
- return "\n".join(
- f"{i+1}. {self._generate_command_string(item)}"
- for i, item in enumerate(items)
- )
+ command_strings = []
+ if self.command_registry:
+ command_strings += [
+ str(item)
+ for item in self.command_registry.commands.values()
+ if item.enabled
+ ]
+            # These are the commands added manually, i.e. do_nothing and task_complete
+ command_strings += [self._generate_command_string(item) for item in items]
+ return "\n".join(f"{i+1}. {item}" for i, item in enumerate(command_strings))
else:
return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items))
@@ -134,5 +151,5 @@ class PromptGenerator:
f"{self._generate_numbered_list(self.performance_evaluation)}\n\n"
"You should only respond in JSON format as described below \nResponse"
f" Format: \n{formatted_response_format} \nEnsure the response can be"
- " parsed by Python json.loads"
+ "parsed by Python json.loads"
)
diff --git a/autogpt/prompt.py b/autogpt/prompts/prompt.py
similarity index 50%
rename from autogpt/prompt.py
rename to autogpt/prompts/prompt.py
index 08754605..79de04ea 100644
--- a/autogpt/prompt.py
+++ b/autogpt/prompts/prompt.py
@@ -1,17 +1,16 @@
from colorama import Fore
-from autogpt.config import Config
from autogpt.config.ai_config import AIConfig
from autogpt.config.config import Config
from autogpt.logs import logger
-from autogpt.promptgenerator import PromptGenerator
+from autogpt.prompts.generator import PromptGenerator
from autogpt.setup import prompt_user
from autogpt.utils import clean_input
CFG = Config()
-def get_prompt() -> str:
+def build_default_prompt_generator() -> PromptGenerator:
"""
     This function builds a PromptGenerator preloaded with the default constraints,
         commands, resources, and performance evaluations.
@@ -20,9 +19,6 @@ def get_prompt() -> str:
     PromptGenerator: The populated prompt generator.
"""
- # Initialize the Config object
- cfg = Config()
-
# Initialize the PromptGenerator object
prompt_generator = PromptGenerator()
@@ -39,96 +35,12 @@ def get_prompt() -> str:
prompt_generator.add_constraint(
'Exclusively use the commands listed in double quotes e.g. "command name"'
)
- prompt_generator.add_constraint(
- "Use subprocesses for commands that will not terminate within a few minutes"
- )
# Define the command list
commands = [
- ("Google Search", "google", {"input": ""}),
- (
- "Browse Website",
- "browse_website",
- {"url": "", "question": ""},
- ),
- (
- "Start GPT Agent",
- "start_agent",
- {"name": "", "task": "", "prompt": ""},
- ),
- (
- "Message GPT Agent",
- "message_agent",
- {"key": "", "message": ""},
- ),
- ("List GPT Agents", "list_agents", {}),
- ("Delete GPT Agent", "delete_agent", {"key": ""}),
- (
- "Clone Repository",
- "clone_repository",
- {"repository_url": "", "clone_path": ""},
- ),
- ("Write to file", "write_to_file", {"file": "", "text": ""}),
- ("Read file", "read_file", {"file": ""}),
- ("Append to file", "append_to_file", {"file": "", "text": ""}),
- ("Delete file", "delete_file", {"file": ""}),
- ("Search Files", "search_files", {"directory": ""}),
- ("Analyze Code", "analyze_code", {"code": ""}),
- (
- "Get Improved Code",
- "improve_code",
- {"suggestions": "", "code": ""},
- ),
- (
- "Write Tests",
- "write_tests",
- {"code": "", "focus": ""},
- ),
- ("Execute Python File", "execute_python_file", {"file": ""}),
- ("Generate Image", "generate_image", {"prompt": ""}),
- ("Send Tweet", "send_tweet", {"text": ""}),
- ]
-
- # Only add the audio to text command if the model is specified
- if cfg.huggingface_audio_to_text_model:
- commands.append(
- ("Convert Audio to text", "read_audio_from_file", {"file": ""}),
- )
-
- # Only add shell command to the prompt if the AI is allowed to execute it
- if cfg.execute_local_commands:
- commands.append(
- (
- "Execute Shell Command, non-interactive commands only",
- "execute_shell",
- {"command_line": ""},
- ),
- )
- commands.append(
- (
- "Execute Shell Command Popen, non-interactive commands only",
- "execute_shell_popen",
- {"command_line": ""},
- ),
- )
-
- # Only add the download file command if the AI is allowed to execute it
- if cfg.allow_downloads:
- commands.append(
- (
- "Downloads a file from the internet, and stores it locally",
- "download_file",
- {"url": "", "file": ""},
- ),
- )
-
- # Add these command last.
- commands.append(
("Do Nothing", "do_nothing", {}),
- )
- commands.append(
("Task Complete (Shutdown)", "task_complete", {"reason": ""}),
- )
+ ]
# Add commands to the PromptGenerator object
for command_label, command_name, args in commands:
@@ -159,12 +71,11 @@ def get_prompt() -> str:
"Every command has a cost, so be smart and efficient. Aim to complete tasks in"
" the least number of steps."
)
-
- # Generate the prompt string
- return prompt_generator.generate_prompt_string()
+ prompt_generator.add_performance_evaluation("Write all code to a file.")
+ return prompt_generator
-def construct_prompt() -> str:
+def construct_main_ai_config() -> AIConfig:
"""Construct the prompt for the AI to respond to
Returns:
@@ -196,8 +107,4 @@ Continue (y/n): """
config = prompt_user()
config.save(CFG.ai_settings_file)
- # Get rid of this global:
- global ai_name
- ai_name = config.ai_name
-
- return config.construct_full_prompt()
+ return config
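
Callers now fetch the default generator and attach the identity and goals
themselves before AIConfig renders the full prompt. A sketch with made-up
values, assuming a configured environment:

    from autogpt.prompts.prompt import build_default_prompt_generator

    prompt_generator = build_default_prompt_generator()
    prompt_generator.name = "ResearchGPT"  # overrides the "Bob" default
    prompt_generator.role = "an AI research assistant"
    prompt_generator.goals = ["Summarize the latest plugin changes"]
    print(prompt_generator.generate_prompt_string())
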
diff --git a/autogpt/token_counter.py b/autogpt/token_counter.py
index 338fe6be..2d50547b 100644
--- a/autogpt/token_counter.py
+++ b/autogpt/token_counter.py
@@ -1,13 +1,16 @@
"""Functions for counting the number of tokens in a message or string."""
from __future__ import annotations
+from typing import List
+
import tiktoken
from autogpt.logs import logger
+from autogpt.types.openai import Message
def count_message_tokens(
- messages: list[dict[str, str]], model: str = "gpt-3.5-turbo-0301"
+ messages: List[Message], model: str = "gpt-3.5-turbo-0301"
) -> int:
"""
Returns the number of tokens used by a list of messages.
diff --git a/autogpt/types/openai.py b/autogpt/types/openai.py
new file mode 100644
index 00000000..2af85785
--- /dev/null
+++ b/autogpt/types/openai.py
@@ -0,0 +1,9 @@
+"""Type helpers for working with the OpenAI library"""
+from typing import TypedDict
+
+
+class Message(TypedDict):
+ """OpenAI Message object containing a role and the message content"""
+
+ role: str
+ content: str
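
The shared Message type keeps chat code and token counting on the same shape.
For example:

    from autogpt.token_counter import count_message_tokens
    from autogpt.types.openai import Message

    messages = [
        Message(role="system", content="You are a helpful assistant."),
        Message(role="user", content="How many tokens is this?"),
    ]
    # Counts tokens with tiktoken, including per-message overhead.
    print(count_message_tokens(messages, model="gpt-3.5-turbo-0301"))
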
diff --git a/autogpt/utils.py b/autogpt/utils.py
index e93d5ac7..dffd0662 100644
--- a/autogpt/utils.py
+++ b/autogpt/utils.py
@@ -3,7 +3,7 @@ import os
import requests
import yaml
from colorama import Fore
-from git import Repo
+from git.repo import Repo
def clean_input(prompt: str = ""):
diff --git a/plugin.png b/plugin.png
new file mode 100644
index 0000000000000000000000000000000000000000..865ce3c922d7783efde0ebaa273519eca202d654
GIT binary patch
literal 33356