From 239aa3aa0239b2ce1b13d08a170454b42db0c44d Mon Sep 17 00:00:00 2001
From: Taylor Beeston
Date: Mon, 17 Apr 2023 12:38:46 -0700
Subject: [PATCH 1/7] :art: Bring in plugin_template

This would ideally be a shared package
---
 autogpt/plugin_template.py | 255 +++++++++++++++++++++++++++++++++++++
 requirements.txt           |   1 +
 2 files changed, 256 insertions(+)
 create mode 100644 autogpt/plugin_template.py

diff --git a/autogpt/plugin_template.py b/autogpt/plugin_template.py
new file mode 100644
index 00000000..90e9fa32
--- /dev/null
+++ b/autogpt/plugin_template.py
@@ -0,0 +1,255 @@
+"""This is a template for Auto-GPT plugins."""
+
+# TODO: Move to shared package
+
+import abc
+from typing import Any, Dict, List, Optional, Tuple, TypedDict
+from abstract_singleton import AbstractSingleton, Singleton
+
+from prompts.generator import PromptGenerator
+
+
+class Message(TypedDict):
+    role: str
+    content: str
+
+
+class AutoGPTPluginTemplate(AbstractSingleton, metaclass=Singleton):
+    """
+    This is a template for Auto-GPT plugins.
+    """
+
+    def __init__(self):
+        super().__init__()
+        self._name = "Auto-GPT-Plugin-Template"
+        self._version = "0.1.0"
+        self._description = "This is a template for Auto-GPT plugins."
+
+    @abc.abstractmethod
+    def can_handle_on_response(self) -> bool:
+        """This method is called to check that the plugin can
+        handle the on_response method.
+
+        Returns:
+            bool: True if the plugin can handle the on_response method."""
+        return False
+
+    @abc.abstractmethod
+    def on_response(self, response: str, *args, **kwargs) -> str:
+        """This method is called when a response is received from the model."""
+        pass
+
+    @abc.abstractmethod
+    def can_handle_post_prompt(self) -> bool:
+        """This method is called to check that the plugin can
+        handle the post_prompt method.
+
+        Returns:
+            bool: True if the plugin can handle the post_prompt method."""
+        return False
+
+    @abc.abstractmethod
+    def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator:
+        """This method is called just after generate_prompt is called,
+        but before the prompt is actually generated.
+
+        Args:
+            prompt (PromptGenerator): The prompt generator.
+
+        Returns:
+            PromptGenerator: The prompt generator.
+        """
+        pass
+
+    @abc.abstractmethod
+    def can_handle_on_planning(self) -> bool:
+        """This method is called to check that the plugin can
+        handle the on_planning method.
+
+        Returns:
+            bool: True if the plugin can handle the on_planning method."""
+        return False
+
+    @abc.abstractmethod
+    def on_planning(
+        self, prompt: PromptGenerator, messages: List[Message]
+    ) -> Optional[str]:
+        """This method is called before the planning chat completion is done.
+
+        Args:
+            prompt (PromptGenerator): The prompt generator.
+            messages (List[Message]): The list of messages.
+        """
+        pass
+
+    @abc.abstractmethod
+    def can_handle_post_planning(self) -> bool:
+        """This method is called to check that the plugin can
+        handle the post_planning method.
+
+        Returns:
+            bool: True if the plugin can handle the post_planning method."""
+        return False
+
+    @abc.abstractmethod
+    def post_planning(self, response: str) -> str:
+        """This method is called after the planning chat completion is done.
+
+        Args:
+            response (str): The response.
+
+        Returns:
+            str: The resulting response.
+        """
+        pass
+
+    @abc.abstractmethod
+    def can_handle_pre_instruction(self) -> bool:
+        """This method is called to check that the plugin can
+        handle the pre_instruction method.
+
+        Returns:
+            bool: True if the plugin can handle the pre_instruction method."""
+        return False
+
+    @abc.abstractmethod
+    def pre_instruction(self, messages: List[Message]) -> List[Message]:
+        """This method is called before the instruction chat is done.
+
+        Args:
+            messages (List[Message]): The list of context messages.
+
+        Returns:
+            List[Message]: The resulting list of messages.
+        """
+        pass
+
+    @abc.abstractmethod
+    def can_handle_on_instruction(self) -> bool:
+        """This method is called to check that the plugin can
+        handle the on_instruction method.
+
+        Returns:
+            bool: True if the plugin can handle the on_instruction method."""
+        return False
+
+    @abc.abstractmethod
+    def on_instruction(self, messages: List[Message]) -> Optional[str]:
+        """This method is called when the instruction chat is done.
+
+        Args:
+            messages (List[Message]): The list of context messages.
+
+        Returns:
+            Optional[str]: The resulting message.
+        """
+        pass
+
+    @abc.abstractmethod
+    def can_handle_post_instruction(self) -> bool:
+        """This method is called to check that the plugin can
+        handle the post_instruction method.
+
+        Returns:
+            bool: True if the plugin can handle the post_instruction method."""
+        return False
+
+    @abc.abstractmethod
+    def post_instruction(self, response: str) -> str:
+        """This method is called after the instruction chat is done.
+
+        Args:
+            response (str): The response.
+
+        Returns:
+            str: The resulting response.
+        """
+        pass
+
+    @abc.abstractmethod
+    def can_handle_pre_command(self) -> bool:
+        """This method is called to check that the plugin can
+        handle the pre_command method.
+
+        Returns:
+            bool: True if the plugin can handle the pre_command method."""
+        return False
+
+    @abc.abstractmethod
+    def pre_command(
+        self, command_name: str, arguments: Dict[str, Any]
+    ) -> Tuple[str, Dict[str, Any]]:
+        """This method is called before the command is executed.
+
+        Args:
+            command_name (str): The command name.
+            arguments (Dict[str, Any]): The arguments.
+
+        Returns:
+            Tuple[str, Dict[str, Any]]: The command name and the arguments.
+        """
+        pass
+
+    @abc.abstractmethod
+    def can_handle_post_command(self) -> bool:
+        """This method is called to check that the plugin can
+        handle the post_command method.
+
+        Returns:
+            bool: True if the plugin can handle the post_command method."""
+        return False
+
+    @abc.abstractmethod
+    def post_command(self, command_name: str, response: str) -> str:
+        """This method is called after the command is executed.
+
+        Args:
+            command_name (str): The command name.
+            response (str): The response.
+
+        Returns:
+            str: The resulting response.
+        """
+        pass
+
+    @abc.abstractmethod
+    def can_handle_chat_completion(
+        self,
+        messages: List[Message],
+        model: Optional[str],
+        temperature: float,
+        max_tokens: Optional[int],
+    ) -> bool:
+        """This method is called to check that the plugin can
+        handle the chat_completion method.
+
+        Args:
+            messages (List[Message]): The messages.
+            model (str): The model name.
+            temperature (float): The temperature.
+            max_tokens (int): The max tokens.
+
+        Returns:
+            bool: True if the plugin can handle the chat_completion method."""
+        return False
+
+    @abc.abstractmethod
+    def handle_chat_completion(
+        self,
+        messages: List[Message],
+        model: Optional[str],
+        temperature: float,
+        max_tokens: Optional[int],
+    ) -> str:
+        """This method is called when the chat completion is done.
+
+        Args:
+            messages (List[Message]): The messages.
+            model (str): The model name.
+            temperature (float): The temperature.
+            max_tokens (int): The max tokens.
+
+        Returns:
+            str: The resulting response.
+        """
+        pass
diff --git a/requirements.txt b/requirements.txt
index 843b66bf..5e8f1000 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -29,6 +29,7 @@ black
 sourcery
 isort
 gitpython==3.1.31
+abstract-singleton
 
 # Testing dependencies
 pytest
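Note for readers of the series: every hook on the template above comes as a can_handle_*/hook pair, and all of them are abstract, so a concrete plugin must override each pair. A hypothetical sketch of a subclass (the LoggingPlugin name is invented here, the remaining hook pairs are elided, and this assumes the module imports cleanly at this point in the series — it cannot be instantiated until every abstract method is overridden):

    from autogpt.plugin_template import AutoGPTPluginTemplate

    class LoggingPlugin(AutoGPTPluginTemplate):
        """Hypothetical plugin that inspects every model response."""

        def can_handle_on_response(self) -> bool:
            return True

        def on_response(self, response: str, *args, **kwargs) -> str:
            # Log the raw model output, then pass it through unchanged.
            print(f"[LoggingPlugin] model said: {response[:80]}")
            return response

        # The remaining can_handle_* hooks would return False and their
        # corresponding handlers would simply pass.
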
From dea5000a014ea69d352400638e2f26dd77eacb20 Mon Sep 17 00:00:00 2001
From: Taylor Beeston
Date: Mon, 17 Apr 2023 12:40:46 -0700
Subject: [PATCH 2/7] :bug: Fix pre_instruction

---
 autogpt/agent/agent_manager.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/autogpt/agent/agent_manager.py b/autogpt/agent/agent_manager.py
index d2648150..286b8ebd 100644
--- a/autogpt/agent/agent_manager.py
+++ b/autogpt/agent/agent_manager.py
@@ -1,9 +1,12 @@
 """Agent manager for managing GPT agents"""
 from __future__ import annotations
+from typing import List
 
 from autogpt.config.config import Config, Singleton
 from autogpt.llm_utils import create_chat_completion
 
+from plugin_template import Message
+
 
 class AgentManager(metaclass=Singleton):
     """Agent manager for managing GPT agents"""
@@ -27,7 +30,7 @@ class AgentManager(metaclass=Singleton):
         Returns:
             The key of the new agent
         """
-        messages = [
+        messages: List[Message] = [
             {"role": "user", "content": prompt},
         ]
         for plugin in self.cfg.plugins:
@@ -36,7 +39,7 @@ class AgentManager(metaclass=Singleton):
             plugin_messages = plugin.pre_instruction(messages)
             if plugin_messages:
                 for plugin_message in plugin_messages:
-                    messages.append({"role": "system", "content": plugin_message})
+                    messages.append(plugin_message)
 
         # Start GPT instance
         agent_reply = create_chat_completion(
@@ -92,7 +95,7 @@ class AgentManager(metaclass=Singleton):
             plugin_messages = plugin.pre_instruction(messages)
             if plugin_messages:
                 for plugin_message in plugin_messages:
-                    messages.append(plugin_message)
 
         # Start GPT instance
         agent_reply = create_chat_completion(
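The fix in patch 2 works because pre_instruction is declared to return List[Message]: the hooks hand back fully formed Message dicts, so the agent manager can append them verbatim instead of re-wrapping each one in a new {"role": "system", ...} envelope (which is what the old code did, and which mangled non-system messages). A sketch of what a conforming hook body might look like — the hook logic here is invented, and the bare plugin_template import path matches how the patches themselves import it at this point in the series:

    from typing import List

    from plugin_template import Message  # the TypedDict added in patch 1


    def pre_instruction(messages: List[Message]) -> List[Message]:
        # Return complete Message dicts so the caller can append them as-is.
        return messages + [{"role": "system", "content": "Keep replies concise."}]
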
From d23ada30d72fe44467770d82389243cc3f7cb254 Mon Sep 17 00:00:00 2001
From: Taylor Beeston
Date: Mon, 17 Apr 2023 12:41:17 -0700
Subject: [PATCH 3/7] :bug: Fix on_planning

---
 autogpt/chat.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/autogpt/chat.py b/autogpt/chat.py
index 22fe636c..e7354fc1 100644
--- a/autogpt/chat.py
+++ b/autogpt/chat.py
@@ -7,10 +7,12 @@ from autogpt.config import Config
 from autogpt.llm_utils import create_chat_completion
 from autogpt.logs import logger
 
+from plugin_template import Message
+
 cfg = Config()
 
 
-def create_chat_message(role, content):
+def create_chat_message(role, content) -> Message:
     """
     Create a chat message with the given role and content.
 
@@ -145,7 +147,7 @@ def chat_with_ai(
                 if not plugin_response or plugin_response == "":
                     continue
                 tokens_to_add = token_counter.count_message_tokens(
-                    [plugin_response], model
+                    [create_chat_message("system", plugin_response)], model
                 )
                 if current_tokens_used + tokens_to_add > send_token_limit:
                     if cfg.debug_mode:
From f7840490793223b859311778e7993451a827803a Mon Sep 17 00:00:00 2001
From: Taylor Beeston
Date: Mon, 17 Apr 2023 12:41:34 -0700
Subject: [PATCH 4/7] :label: Type plugins field in config

---
 autogpt/config/config.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/autogpt/config/config.py b/autogpt/config/config.py
index c12eed2e..2aaf879a 100644
--- a/autogpt/config/config.py
+++ b/autogpt/config/config.py
@@ -1,7 +1,9 @@
 """Configuration class to store the state of bools for different scripts access."""
 import os
+from typing import List
 
 import openai
+from plugin_template import AutoGPTPluginTemplate
 import yaml
 from colorama import Fore
 from dotenv import load_dotenv
@@ -107,7 +109,7 @@ class Config(metaclass=Singleton):
         # Initialize the OpenAI API client
         openai.api_key = self.openai_api_key
 
-        self.plugins = []
+        self.plugins: List[AutoGPTPluginTemplate] = []
        self.plugins_whitelist = []
         self.plugins_blacklist = []
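Patch 3 matters because count_message_tokens expects Message dicts, not bare strings: passing [plugin_response] made the token accounting silently wrong for plugin output. Wrapping the string in a chat message first keeps the math honest. A sketch of the corrected call, using only functions that appear in these patches (the plugin_response value is a placeholder):

    from autogpt import token_counter
    from autogpt.chat import create_chat_message

    plugin_response = "text returned by a plugin's on_planning hook"
    tokens_to_add = token_counter.count_message_tokens(
        [create_chat_message("system", plugin_response)]
    )
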
From ea67b6772c461b1e1083b236d4a5668f0a0c3d50 Mon Sep 17 00:00:00 2001
From: Taylor Beeston
Date: Mon, 17 Apr 2023 12:42:17 -0700
Subject: [PATCH 5/7] :bug: Minor type fixes

---
 autogpt/llm_utils.py     | 14 ++++++++------
 autogpt/plugins.py       |  6 +++++-
 autogpt/token_counter.py |  5 ++++-
 3 files changed, 17 insertions(+), 8 deletions(-)

diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py
index 4fb0e1f5..bc68ba93 100644
--- a/autogpt/llm_utils.py
+++ b/autogpt/llm_utils.py
@@ -1,12 +1,14 @@
 from __future__ import annotations
 
 import time
+from typing import List, Optional
 
 import openai
 from colorama import Fore
 from openai.error import APIError, RateLimitError
 
 from autogpt.config import Config
+from plugin_template import Message
 
 CFG = Config()
 
@@ -35,8 +37,8 @@ def call_ai_function(
     # For each arg, if any are None, convert to "None":
     args = [str(arg) if arg is not None else "None" for arg in args]
     # parse args to comma separated string
-    args = ", ".join(args)
-    messages = [
+    args: str = ", ".join(args)
+    messages: List[Message] = [
         {
             "role": "system",
             "content": f"You are now the following python function: ```# {description}"
@@ -51,15 +53,15 @@
 # Overly simple abstraction until we create something better
 # simple retry mechanism when getting a rate error or a bad gateway
 def create_chat_completion(
-    messages: list,  # type: ignore
-    model: str | None = None,
+    messages: List[Message],  # type: ignore
+    model: Optional[str] = None,
     temperature: float = CFG.temperature,
-    max_tokens: int | None = None,
+    max_tokens: Optional[int] = None,
 ) -> str:
     """Create a chat completion using the OpenAI API
 
     Args:
-        messages (list[dict[str, str]]): The messages to send to the chat completion
+        messages (List[Message]): The messages to send to the chat completion
         model (str, optional): The model to use. Defaults to None.
         temperature (float, optional): The temperature to use. Defaults to 0.9.
         max_tokens (int, optional): The max tokens to use. Defaults to None.
diff --git a/autogpt/plugins.py b/autogpt/plugins.py
index a00b989e..b4b2ac78 100644
--- a/autogpt/plugins.py
+++ b/autogpt/plugins.py
@@ -6,6 +6,8 @@ from pathlib import Path
 from typing import List, Optional, Tuple
 from zipimport import zipimporter
 
+from plugin_template import AutoGPTPluginTemplate
+
 
 def inspect_zip_for_module(zip_path: str, debug: bool = False) -> Optional[str]:
     """
@@ -45,7 +47,9 @@ def scan_plugins(plugins_path: Path, debug: bool = False) -> List[Tuple[str, Pat
     return plugins
 
 
-def load_plugins(plugins_path: Path, debug: bool = False) -> List[Module]:
+def load_plugins(
+    plugins_path: Path, debug: bool = False
+) -> List[AutoGPTPluginTemplate]:
     """Load plugins from the plugins directory.
 
     Args:
diff --git a/autogpt/token_counter.py b/autogpt/token_counter.py
index 338fe6be..8cf4c369 100644
--- a/autogpt/token_counter.py
+++ b/autogpt/token_counter.py
@@ -1,13 +1,16 @@
 """Functions for counting the number of tokens in a message or string."""
 from __future__ import annotations
+from typing import List
 
 import tiktoken
 
 from autogpt.logs import logger
 
+from plugin_template import Message
+
 
 def count_message_tokens(
-    messages: list[dict[str, str]], model: str = "gpt-3.5-turbo-0301"
+    messages: List[Message], model: str = "gpt-3.5-turbo-0301"
 ) -> int:
     """
     Returns the number of tokens used by a list of messages.
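With patch 5 applied, create_chat_completion has a fully typed signature: a List[Message], an optional model name, a temperature, and optional max_tokens. A hedged usage sketch (this assumes a configured OpenAI API key and the default Config; the message contents are placeholders):

    from typing import List

    from autogpt.llm_utils import create_chat_completion
    from plugin_template import Message

    messages: List[Message] = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Say hello."},
    ]
    reply: str = create_chat_completion(messages=messages, model="gpt-3.5-turbo")
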
From 9705f60dd3ab3aa8abae64abf7ec68d77dabf4d4 Mon Sep 17 00:00:00 2001
From: Sourcery AI <>
Date: Mon, 17 Apr 2023 19:44:54 +0000
Subject: [PATCH 6/7] 'Refactored by Sourcery'

---
 autogpt/agent/agent_manager.py | 20 +++++++-------------
 autogpt/llm_utils.py           | 30 ++++++++++--------------------
 2 files changed, 17 insertions(+), 33 deletions(-)

diff --git a/autogpt/agent/agent_manager.py b/autogpt/agent/agent_manager.py
index 286b8ebd..dc57811e 100644
--- a/autogpt/agent/agent_manager.py
+++ b/autogpt/agent/agent_manager.py
@@ -36,11 +36,8 @@ class AgentManager(metaclass=Singleton):
         for plugin in self.cfg.plugins:
             if not plugin.can_handle_pre_instruction():
                 continue
-            plugin_messages = plugin.pre_instruction(messages)
-            if plugin_messages:
-                for plugin_message in plugin_messages:
-                    messages.append(plugin_message)
-
+            if plugin_messages := plugin.pre_instruction(messages):
+                messages.extend(iter(plugin_messages))
         # Start GPT instance
         agent_reply = create_chat_completion(
             model=model,
@@ -53,9 +50,8 @@ class AgentManager(metaclass=Singleton):
         for i, plugin in enumerate(self.cfg.plugins):
             if not plugin.can_handle_on_instruction():
                 continue
-            plugin_result = plugin.on_instruction(messages)
-            if plugin_result:
-                sep = "" if not i else "\n"
+            if plugin_result := plugin.on_instruction(messages):
+                sep = "\n" if i else ""
                 plugins_reply = f"{plugins_reply}{sep}{plugin_result}"
 
         if plugins_reply and plugins_reply != "":
@@ -92,8 +88,7 @@ class AgentManager(metaclass=Singleton):
         for plugin in self.cfg.plugins:
             if not plugin.can_handle_pre_instruction():
                 continue
-            plugin_messages = plugin.pre_instruction(messages)
-            if plugin_messages:
+            if plugin_messages := plugin.pre_instruction(messages):
                 for plugin_message in plugin_messages:
                     messages.append(plugin_message)
 
@@ -109,9 +104,8 @@ class AgentManager(metaclass=Singleton):
         for i, plugin in enumerate(self.cfg.plugins):
             if not plugin.can_handle_on_instruction():
                 continue
-            plugin_result = plugin.on_instruction(messages)
-            if plugin_result:
-                sep = "" if not i else "\n"
+            if plugin_result := plugin.on_instruction(messages):
+                sep = "\n" if i else ""
                 plugins_reply = f"{plugins_reply}{sep}{plugin_result}"
 
         # Update full message history
         if plugins_reply and plugins_reply != "":
diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py
index bc68ba93..85e0fbf7 100644
--- a/autogpt/llm_utils.py
+++ b/autogpt/llm_utils.py
@@ -69,13 +69,10 @@ def create_chat_completion(
     Returns:
         str: The response from the chat completion
     """
-    response = None
     num_retries = 10
     if CFG.debug_mode:
         print(
-            Fore.GREEN
-            + f"Creating chat completion with model {model}, temperature {temperature},"
-            f" max_tokens {max_tokens}"
+            Fore.RESET + f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}"
         )
     for plugin in CFG.plugins:
         if plugin.can_handle_chat_completion(
             messages=messages,
             model=model,
             temperature=temperature,
             max_tokens=max_tokens,
         ):
-            response = plugin.handle_chat_completion(
+            return plugin.handle_chat_completion(
                 messages=messages,
                 model=model,
                 temperature=temperature,
                 max_tokens=max_tokens,
             )
-            return response
+    response = None
     for attempt in range(num_retries):
         backoff = 2 ** (attempt + 2)
         try:
@@ -112,21 +109,16 @@
             break
         except RateLimitError:
             if CFG.debug_mode:
-                print(
-                    Fore.RED + "Error: ",
-                    "Reached rate limit, passing..." + Fore.RESET,
-                )
+                print(f"{Fore.RED}Error: ", f"Reached rate limit, passing...{Fore.RESET}")
         except APIError as e:
-            if e.http_status == 502:
-                pass
-            else:
+            if e.http_status != 502:
                 raise
             if attempt == num_retries - 1:
                 raise
             if CFG.debug_mode:
                 print(
-                    Fore.RED + "Error: ",
-                    f"API Bad gateway. Waiting {backoff} seconds..." + Fore.RESET,
+                    f"{Fore.RED}Error: ",
+                    f"API Bad gateway. Waiting {backoff} seconds...{Fore.RESET}",
                 )
             time.sleep(backoff)
     if response is None:
@@ -159,15 +151,13 @@
         except RateLimitError:
             pass
         except APIError as e:
-            if e.http_status == 502:
-                pass
-            else:
+            if e.http_status != 502:
                 raise
             if attempt == num_retries - 1:
                 raise
             if CFG.debug_mode:
                 print(
-                    Fore.RED + "Error: ",
-                    f"API Bad gateway. Waiting {backoff} seconds..." + Fore.RESET,
+                    f"{Fore.RED}Error: ",
+                    f"API Bad gateway. Waiting {backoff} seconds...{Fore.RESET}",
                 )
             time.sleep(backoff)
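The Sourcery rewrite in patch 6 is behavior-preserving: a walrus assignment inside an if is equivalent to assign-then-test, `sep = "\n" if i else ""` just flips the original conditional, and `extend(iter(x))` does the same thing as `extend(x)`. A self-contained sketch of the walrus equivalence (the fetch_items function is invented for illustration):

    def fetch_items() -> list[str]:
        return ["a", "b"]

    # Assign-then-test...
    items = fetch_items()
    if items:
        print(len(items))

    # ...is equivalent to the walrus form Sourcery prefers (Python 3.8+):
    if items := fetch_items():
        print(len(items))
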
From b84de4f7f89b95f176ebd0b390c60198acfa8bf9 Mon Sep 17 00:00:00 2001
From: Taylor Beeston
Date: Mon, 17 Apr 2023 22:10:40 -0700
Subject: [PATCH 7/7] :recycle: Use AutoGPT template package for the plugin
 type

---
 autogpt/agent/agent_manager.py |   3 +-
 autogpt/chat.py                |   3 +-
 autogpt/config/config.py       |   2 +-
 autogpt/llm_utils.py           |   6 +-
 autogpt/plugin_template.py     | 255 ---------------------------------
 autogpt/plugins.py             |   2 +-
 autogpt/token_counter.py       |   3 +-
 autogpt/types/openai.py        |   9 ++
 requirements.txt               |   1 +
 9 files changed, 19 insertions(+), 265 deletions(-)
 delete mode 100644 autogpt/plugin_template.py
 create mode 100644 autogpt/types/openai.py

diff --git a/autogpt/agent/agent_manager.py b/autogpt/agent/agent_manager.py
index dc57811e..9f123eaa 100644
--- a/autogpt/agent/agent_manager.py
+++ b/autogpt/agent/agent_manager.py
@@ -4,8 +4,7 @@ from typing import List
 
 from autogpt.config.config import Config, Singleton
 from autogpt.llm_utils import create_chat_completion
-
-from plugin_template import Message
+from autogpt.types.openai import Message
 
 
 class AgentManager(metaclass=Singleton):
diff --git a/autogpt/chat.py b/autogpt/chat.py
index e7354fc1..f9fc9471 100644
--- a/autogpt/chat.py
+++ b/autogpt/chat.py
@@ -6,8 +6,7 @@ from autogpt import token_counter
 from autogpt.config import Config
 from autogpt.llm_utils import create_chat_completion
 from autogpt.logs import logger
-
-from plugin_template import Message
+from autogpt.types.openai import Message
 
 cfg = Config()
diff --git a/autogpt/config/config.py b/autogpt/config/config.py
index 2aaf879a..f93bf17a 100644
--- a/autogpt/config/config.py
+++ b/autogpt/config/config.py
@@ -3,7 +3,7 @@ import os
 from typing import List
 
 import openai
-from plugin_template import AutoGPTPluginTemplate
+from auto_gpt_plugin_template import AutoGPTPluginTemplate
 import yaml
 from colorama import Fore
 from dotenv import load_dotenv
diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py
index 85e0fbf7..a6d87c30 100644
--- a/autogpt/llm_utils.py
+++ b/autogpt/llm_utils.py
@@ -8,7 +8,7 @@ from colorama import Fore
 from openai.error import APIError, RateLimitError
 
 from autogpt.config import Config
-from plugin_template import Message
+from autogpt.types.openai import Message
 
 CFG = Config()
 
@@ -109,7 +109,9 @@ def create_chat_completion(
             break
         except RateLimitError:
             if CFG.debug_mode:
-                print(f"{Fore.RED}Error: ", f"Reached rate limit, passing...{Fore.RESET}")
+                print(
+                    f"{Fore.RED}Error: ", f"Reached rate limit, passing...{Fore.RESET}"
+                )
         except APIError as e:
             if e.http_status != 502:
                 raise
diff --git a/autogpt/plugin_template.py b/autogpt/plugin_template.py
deleted file mode 100644
index 90e9fa32..00000000
--- a/autogpt/plugin_template.py
+++ /dev/null
@@ -1,255 +0,0 @@
-"""This is a template for Auto-GPT plugins."""
-
-# TODO: Move to shared package
-
-import abc
-from typing import Any, Dict, List, Optional, Tuple, TypedDict
-from abstract_singleton import AbstractSingleton, Singleton
-
-from prompts.generator import PromptGenerator
-
-
-class Message(TypedDict):
-    role: str
-    content: str
-
-
-class AutoGPTPluginTemplate(AbstractSingleton, metaclass=Singleton):
-    """
-    This is a template for Auto-GPT plugins.
-    """
-
-    def __init__(self):
-        super().__init__()
-        self._name = "Auto-GPT-Plugin-Template"
-        self._version = "0.1.0"
-        self._description = "This is a template for Auto-GPT plugins."
-
-    @abc.abstractmethod
-    def can_handle_on_response(self) -> bool:
-        """This method is called to check that the plugin can
-        handle the on_response method.
-
-        Returns:
-            bool: True if the plugin can handle the on_response method."""
-        return False
-
-    @abc.abstractmethod
-    def on_response(self, response: str, *args, **kwargs) -> str:
-        """This method is called when a response is received from the model."""
-        pass
-
-    @abc.abstractmethod
-    def can_handle_post_prompt(self) -> bool:
-        """This method is called to check that the plugin can
-        handle the post_prompt method.
-
-        Returns:
-            bool: True if the plugin can handle the post_prompt method."""
-        return False
-
-    @abc.abstractmethod
-    def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator:
-        """This method is called just after generate_prompt is called,
-        but before the prompt is actually generated.
-
-        Args:
-            prompt (PromptGenerator): The prompt generator.
-
-        Returns:
-            PromptGenerator: The prompt generator.
-        """
-        pass
-
-    @abc.abstractmethod
-    def can_handle_on_planning(self) -> bool:
-        """This method is called to check that the plugin can
-        handle the on_planning method.
-
-        Returns:
-            bool: True if the plugin can handle the on_planning method."""
-        return False
-
-    @abc.abstractmethod
-    def on_planning(
-        self, prompt: PromptGenerator, messages: List[Message]
-    ) -> Optional[str]:
-        """This method is called before the planning chat completion is done.
-
-        Args:
-            prompt (PromptGenerator): The prompt generator.
-            messages (List[Message]): The list of messages.
-        """
-        pass
-
-    @abc.abstractmethod
-    def can_handle_post_planning(self) -> bool:
-        """This method is called to check that the plugin can
-        handle the post_planning method.
-
-        Returns:
-            bool: True if the plugin can handle the post_planning method."""
-        return False
-
-    @abc.abstractmethod
-    def post_planning(self, response: str) -> str:
-        """This method is called after the planning chat completion is done.
-
-        Args:
-            response (str): The response.
-
-        Returns:
-            str: The resulting response.
-        """
-        pass
-
-    @abc.abstractmethod
-    def can_handle_pre_instruction(self) -> bool:
-        """This method is called to check that the plugin can
-        handle the pre_instruction method.
-
-        Returns:
-            bool: True if the plugin can handle the pre_instruction method."""
-        return False
-
-    @abc.abstractmethod
-    def pre_instruction(self, messages: List[Message]) -> List[Message]:
-        """This method is called before the instruction chat is done.
-
-        Args:
-            messages (List[Message]): The list of context messages.
-
-        Returns:
-            List[Message]: The resulting list of messages.
-        """
-        pass
-
-    @abc.abstractmethod
-    def can_handle_on_instruction(self) -> bool:
-        """This method is called to check that the plugin can
-        handle the on_instruction method.
-
-        Returns:
-            bool: True if the plugin can handle the on_instruction method."""
-        return False
-
-    @abc.abstractmethod
-    def on_instruction(self, messages: List[Message]) -> Optional[str]:
-        """This method is called when the instruction chat is done.
-
-        Args:
-            messages (List[Message]): The list of context messages.
-
-        Returns:
-            Optional[str]: The resulting message.
-        """
-        pass
-
-    @abc.abstractmethod
-    def can_handle_post_instruction(self) -> bool:
-        """This method is called to check that the plugin can
-        handle the post_instruction method.
-
-        Returns:
-            bool: True if the plugin can handle the post_instruction method."""
-        return False
-
-    @abc.abstractmethod
-    def post_instruction(self, response: str) -> str:
-        """This method is called after the instruction chat is done.
-
-        Args:
-            response (str): The response.
-
-        Returns:
-            str: The resulting response.
-        """
-        pass
-
-    @abc.abstractmethod
-    def can_handle_pre_command(self) -> bool:
-        """This method is called to check that the plugin can
-        handle the pre_command method.
-
-        Returns:
-            bool: True if the plugin can handle the pre_command method."""
-        return False
-
-    @abc.abstractmethod
-    def pre_command(
-        self, command_name: str, arguments: Dict[str, Any]
-    ) -> Tuple[str, Dict[str, Any]]:
-        """This method is called before the command is executed.
-
-        Args:
-            command_name (str): The command name.
-            arguments (Dict[str, Any]): The arguments.
-
-        Returns:
-            Tuple[str, Dict[str, Any]]: The command name and the arguments.
-        """
-        pass
-
-    @abc.abstractmethod
-    def can_handle_post_command(self) -> bool:
-        """This method is called to check that the plugin can
-        handle the post_command method.
-
-        Returns:
-            bool: True if the plugin can handle the post_command method."""
-        return False
-
-    @abc.abstractmethod
-    def post_command(self, command_name: str, response: str) -> str:
-        """This method is called after the command is executed.
-
-        Args:
-            command_name (str): The command name.
-            response (str): The response.
-
-        Returns:
-            str: The resulting response.
-        """
-        pass
-
-    @abc.abstractmethod
-    def can_handle_chat_completion(
-        self,
-        messages: List[Message],
-        model: Optional[str],
-        temperature: float,
-        max_tokens: Optional[int],
-    ) -> bool:
-        """This method is called to check that the plugin can
-        handle the chat_completion method.
-
-        Args:
-            messages (List[Message]): The messages.
-            model (str): The model name.
-            temperature (float): The temperature.
-            max_tokens (int): The max tokens.
-
-        Returns:
-            bool: True if the plugin can handle the chat_completion method."""
-        return False
-
-    @abc.abstractmethod
-    def handle_chat_completion(
-        self,
-        messages: List[Message],
-        model: Optional[str],
-        temperature: float,
-        max_tokens: Optional[int],
-    ) -> str:
-        """This method is called when the chat completion is done.
-
-        Args:
-            messages (List[Message]): The messages.
-            model (str): The model name.
-            temperature (float): The temperature.
-            max_tokens (int): The max tokens.
-
-        Returns:
-            str: The resulting response.
- """ - pass diff --git a/autogpt/plugins.py b/autogpt/plugins.py index b4b2ac78..a4d9c17c 100644 --- a/autogpt/plugins.py +++ b/autogpt/plugins.py @@ -6,7 +6,7 @@ from pathlib import Path from typing import List, Optional, Tuple from zipimport import zipimporter -from plugin_template import AutoGPTPluginTemplate +from auto_gpt_plugin_template import AutoGPTPluginTemplate def inspect_zip_for_module(zip_path: str, debug: bool = False) -> Optional[str]: diff --git a/autogpt/token_counter.py b/autogpt/token_counter.py index 8cf4c369..b1e59d86 100644 --- a/autogpt/token_counter.py +++ b/autogpt/token_counter.py @@ -5,8 +5,7 @@ from typing import List import tiktoken from autogpt.logs import logger - -from plugin_template import Message +from autogpt.types.openai import Message def count_message_tokens( diff --git a/autogpt/types/openai.py b/autogpt/types/openai.py new file mode 100644 index 00000000..2af85785 --- /dev/null +++ b/autogpt/types/openai.py @@ -0,0 +1,9 @@ +"""Type helpers for working with the OpenAI library""" +from typing import TypedDict + + +class Message(TypedDict): + """OpenAI Message object containing a role and the message content""" + + role: str + content: str diff --git a/requirements.txt b/requirements.txt index 5e8f1000..86d24b5a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -30,6 +30,7 @@ sourcery isort gitpython==3.1.31 abstract-singleton +auto-gpt-plugin-template # Testing dependencies pytest