diff --git a/autogpt/agent/agent_manager.py b/autogpt/agent/agent_manager.py
index dc57811e..9f123eaa 100644
--- a/autogpt/agent/agent_manager.py
+++ b/autogpt/agent/agent_manager.py
@@ -4,8 +4,7 @@ from typing import List
 
 from autogpt.config.config import Config, Singleton
 from autogpt.llm_utils import create_chat_completion
-
-from plugin_template import Message
+from autogpt.types.openai import Message
 
 
 class AgentManager(metaclass=Singleton):
diff --git a/autogpt/chat.py b/autogpt/chat.py
index e7354fc1..f9fc9471 100644
--- a/autogpt/chat.py
+++ b/autogpt/chat.py
@@ -6,8 +6,7 @@ from autogpt import token_counter
 from autogpt.config import Config
 from autogpt.llm_utils import create_chat_completion
 from autogpt.logs import logger
-
-from plugin_template import Message
+from autogpt.types.openai import Message
 
 cfg = Config()
 
diff --git a/autogpt/config/config.py b/autogpt/config/config.py
index 2aaf879a..f93bf17a 100644
--- a/autogpt/config/config.py
+++ b/autogpt/config/config.py
@@ -3,7 +3,7 @@ import os
 from typing import List
 
 import openai
-from plugin_template import AutoGPTPluginTemplate
+from auto_gpt_plugin_template import AutoGPTPluginTemplate
 import yaml
 from colorama import Fore
 from dotenv import load_dotenv
diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py
index 85e0fbf7..a6d87c30 100644
--- a/autogpt/llm_utils.py
+++ b/autogpt/llm_utils.py
@@ -8,7 +8,7 @@ from colorama import Fore
 from openai.error import APIError, RateLimitError
 
 from autogpt.config import Config
-from plugin_template import Message
+from autogpt.types.openai import Message
 
 CFG = Config()
 
@@ -109,7 +109,9 @@ def create_chat_completion(
             break
         except RateLimitError:
             if CFG.debug_mode:
-                print(f"{Fore.RED}Error: ", f"Reached rate limit, passing...{Fore.RESET}")
+                print(
+                    f"{Fore.RED}Error: ", f"Reached rate limit, passing...{Fore.RESET}"
+                )
         except APIError as e:
             if e.http_status != 502:
                 raise
diff --git a/autogpt/plugin_template.py b/autogpt/plugin_template.py
deleted file mode 100644
index 90e9fa32..00000000
--- a/autogpt/plugin_template.py
+++ /dev/null
@@ -1,255 +0,0 @@
-"""This is a template for Auto-GPT plugins."""
-
-# TODO: Move to shared package
-
-import abc
-from typing import Any, Dict, List, Optional, Tuple, TypedDict
-from abstract_singleton import AbstractSingleton, Singleton
-
-from prompts.generator import PromptGenerator
-
-
-class Message(TypedDict):
-    role: str
-    content: str
-
-
-class AutoGPTPluginTemplate(AbstractSingleton, metaclass=Singleton):
-    """
-    This is a template for Auto-GPT plugins.
-    """
-
-    def __init__(self):
-        super().__init__()
-        self._name = "Auto-GPT-Plugin-Template"
-        self._version = "0.1.0"
-        self._description = "This is a template for Auto-GPT plugins."
-
-    @abc.abstractmethod
-    def can_handle_on_response(self) -> bool:
-        """This method is called to check that the plugin can
-        handle the on_response method.
-
-        Returns:
-            bool: True if the plugin can handle the on_response method."""
-        return False
-
-    @abc.abstractmethod
-    def on_response(self, response: str, *args, **kwargs) -> str:
-        """This method is called when a response is received from the model."""
-        pass
-
-    @abc.abstractmethod
-    def can_handle_post_prompt(self) -> bool:
-        """This method is called to check that the plugin can
-        handle the post_prompt method.
-
-        Returns:
-            bool: True if the plugin can handle the post_prompt method."""
-        return False
-
-    @abc.abstractmethod
-    def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator:
-        """This method is called just after the generate_prompt is called,
-        but actually before the prompt is generated.
-
-        Args:
-            prompt (PromptGenerator): The prompt generator.
-
-        Returns:
-            PromptGenerator: The prompt generator.
-        """
-        pass
-
-    @abc.abstractmethod
-    def can_handle_on_planning(self) -> bool:
-        """This method is called to check that the plugin can
-        handle the on_planning method.
-
-        Returns:
-            bool: True if the plugin can handle the on_planning method."""
-        return False
-
-    @abc.abstractmethod
-    def on_planning(
-        self, prompt: PromptGenerator, messages: List[Message]
-    ) -> Optional[str]:
-        """This method is called before the planning chat completeion is done.
-
-        Args:
-            prompt (PromptGenerator): The prompt generator.
-            messages (List[str]): The list of messages.
-        """
-        pass
-
-    @abc.abstractmethod
-    def can_handle_post_planning(self) -> bool:
-        """This method is called to check that the plugin can
-        handle the post_planning method.
-
-        Returns:
-            bool: True if the plugin can handle the post_planning method."""
-        return False
-
-    @abc.abstractmethod
-    def post_planning(self, response: str) -> str:
-        """This method is called after the planning chat completeion is done.
-
-        Args:
-            response (str): The response.
-
-        Returns:
-            str: The resulting response.
-        """
-        pass
-
-    @abc.abstractmethod
-    def can_handle_pre_instruction(self) -> bool:
-        """This method is called to check that the plugin can
-        handle the pre_instruction method.
-
-        Returns:
-            bool: True if the plugin can handle the pre_instruction method."""
-        return False
-
-    @abc.abstractmethod
-    def pre_instruction(self, messages: List[Message]) -> List[Message]:
-        """This method is called before the instruction chat is done.
-
-        Args:
-            messages (List[Message]): The list of context messages.
-
-        Returns:
-            List[Message]: The resulting list of messages.
-        """
-        pass
-
-    @abc.abstractmethod
-    def can_handle_on_instruction(self) -> bool:
-        """This method is called to check that the plugin can
-        handle the on_instruction method.
-
-        Returns:
-            bool: True if the plugin can handle the on_instruction method."""
-        return False
-
-    @abc.abstractmethod
-    def on_instruction(self, messages: List[Message]) -> Optional[str]:
-        """This method is called when the instruction chat is done.
-
-        Args:
-            messages (List[Message]): The list of context messages.
-
-        Returns:
-            Optional[str]: The resulting message.
-        """
-        pass
-
-    @abc.abstractmethod
-    def can_handle_post_instruction(self) -> bool:
-        """This method is called to check that the plugin can
-        handle the post_instruction method.
-
-        Returns:
-            bool: True if the plugin can handle the post_instruction method."""
-        return False
-
-    @abc.abstractmethod
-    def post_instruction(self, response: str) -> str:
-        """This method is called after the instruction chat is done.
-
-        Args:
-            response (str): The response.
-
-        Returns:
-            str: The resulting response.
-        """
-        pass
-
-    @abc.abstractmethod
-    def can_handle_pre_command(self) -> bool:
-        """This method is called to check that the plugin can
-        handle the pre_command method.
-
-        Returns:
-            bool: True if the plugin can handle the pre_command method."""
-        return False
-
-    @abc.abstractmethod
-    def pre_command(
-        self, command_name: str, arguments: Dict[str, Any]
-    ) -> Tuple[str, Dict[str, Any]]:
-        """This method is called before the command is executed.
-
-        Args:
-            command_name (str): The command name.
-            arguments (Dict[str, Any]): The arguments.
-
-        Returns:
-            Tuple[str, Dict[str, Any]]: The command name and the arguments.
-        """
-        pass
-
-    @abc.abstractmethod
-    def can_handle_post_command(self) -> bool:
-        """This method is called to check that the plugin can
-        handle the post_command method.
-
-        Returns:
-            bool: True if the plugin can handle the post_command method."""
-        return False
-
-    @abc.abstractmethod
-    def post_command(self, command_name: str, response: str) -> str:
-        """This method is called after the command is executed.
-
-        Args:
-            command_name (str): The command name.
-            response (str): The response.
-
-        Returns:
-            str: The resulting response.
-        """
-        pass
-
-    @abc.abstractmethod
-    def can_handle_chat_completion(
-        self,
-        messages: List[Message],
-        model: Optional[str],
-        temperature: float,
-        max_tokens: Optional[int],
-    ) -> bool:
-        """This method is called to check that the plugin can
-        handle the chat_completion method.
-
-        Args:
-            messages (List[Message]): The messages.
-            model (str): The model name.
-            temperature (float): The temperature.
-            max_tokens (int): The max tokens.
-
-        Returns:
-            bool: True if the plugin can handle the chat_completion method."""
-        return False
-
-    @abc.abstractmethod
-    def handle_chat_completion(
-        self,
-        messages: List[Message],
-        model: Optional[str],
-        temperature: float,
-        max_tokens: Optional[int],
-    ) -> str:
-        """This method is called when the chat completion is done.
-
-        Args:
-            messages (List[Message]): The messages.
-            model (str): The model name.
-            temperature (float): The temperature.
-            max_tokens (int): The max tokens.
-
-        Returns:
-            str: The resulting response.
-        """
-        pass
diff --git a/autogpt/plugins.py b/autogpt/plugins.py
index b4b2ac78..a4d9c17c 100644
--- a/autogpt/plugins.py
+++ b/autogpt/plugins.py
@@ -6,7 +6,7 @@ from pathlib import Path
 from typing import List, Optional, Tuple
 from zipimport import zipimporter
 
-from plugin_template import AutoGPTPluginTemplate
+from auto_gpt_plugin_template import AutoGPTPluginTemplate
 
 
 def inspect_zip_for_module(zip_path: str, debug: bool = False) -> Optional[str]:
diff --git a/autogpt/token_counter.py b/autogpt/token_counter.py
index 8cf4c369..b1e59d86 100644
--- a/autogpt/token_counter.py
+++ b/autogpt/token_counter.py
@@ -5,8 +5,7 @@ from typing import List
 import tiktoken
 
 from autogpt.logs import logger
-
-from plugin_template import Message
+from autogpt.types.openai import Message
 
 
 def count_message_tokens(
diff --git a/autogpt/types/openai.py b/autogpt/types/openai.py
new file mode 100644
index 00000000..2af85785
--- /dev/null
+++ b/autogpt/types/openai.py
@@ -0,0 +1,9 @@
+"""Type helpers for working with the OpenAI library"""
+from typing import TypedDict
+
+
+class Message(TypedDict):
+    """OpenAI Message object containing a role and the message content"""
+
+    role: str
+    content: str
diff --git a/requirements.txt b/requirements.txt
index 5e8f1000..86d24b5a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -30,6 +30,7 @@ sourcery
 isort
 gitpython==3.1.31
 abstract-singleton
+auto-gpt-plugin-template
 
 # Testing dependencies
 pytest
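
For orientation, below is a minimal sketch of how downstream code consumes the relocated types once this patch is applied. The two import paths are the ones this diff introduces (autogpt.types.openai for Message, and the new auto-gpt-plugin-template package for AutoGPTPluginTemplate); the summarize_roles helper is hypothetical and exists only to exercise the Message TypedDict.

from typing import List

# Packaged location of the plugin base class (replaces autogpt/plugin_template.py);
# requires the new auto-gpt-plugin-template entry in requirements.txt.
from auto_gpt_plugin_template import AutoGPTPluginTemplate

# In-repo location of the Message TypedDict added by this diff.
from autogpt.types.openai import Message


def summarize_roles(messages: List[Message]) -> str:
    # Hypothetical helper: one "role: content" line per message, e.g. for debug logs.
    return "\n".join(f"{m['role']}: {m['content']}" for m in messages)


history: List[Message] = [
    {"role": "system", "content": "You are Auto-GPT."},
    {"role": "user", "content": "Summarize today's tasks."},
]
print(summarize_roles(history))
print(AutoGPTPluginTemplate.__name__)  # confirms the packaged template resolves

Because Message is a TypedDict, existing call sites keep passing plain dicts unchanged; only the import path moves, which is why every hunk above is a one-line import swap.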