clean(autogpt): Remove old plugin system (#7097)

### Background

Follow-up to https://github.com/Significant-Gravitas/AutoGPT/pull/7054: now that it is merged, the old plugins are no longer used.

### Changes 🏗️

- Removed all of the (now dead) code for loading and using plugins
- Removed `auto-gpt-plugin-template` dependency
- Removed `rev=` from `autogpt-forge` dependency (the set `rev` had incompatible `duckduckgo-search` versions)
- Kept the `--install-plugin-deps` CLI option and its associated dead code (may be needed for new plugins)
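
Most of the removal is mechanical: the `plugins` field and its validator disappear from the pydantic settings models, and nothing scans or loads plugins at startup anymore. As a rough, self-contained sketch of the resulting model shape (a simplified stand-in, not the real `BaseAgentConfiguration`; field values are dummies):

```python
from pydantic import BaseModel


# Simplified stand-in for the slimmed-down config model: no `plugins` field,
# no plugin validator, and no `arbitrary_types_allowed` escape hatch, so the
# settings object is plain, serializable data again.
class AgentConfigSketch(BaseModel):
    fast_llm: str = "gpt-3.5-turbo"
    smart_llm: str = "gpt-4-turbo"
    use_functions_api: bool = False


print(AgentConfigSketch().json())  # pydantic v1 API, as used in this codebase
```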
Author: Krzysztof Czerwinski
Committed: 2024-04-28 21:10:53 +02:00 (by GitHub)
Parent: 0014e2ac14
Commit: d38e8b8f6c

26 changed files with 11 additions and 1490 deletions

View File

@@ -44,9 +44,6 @@ OPENAI_API_KEY=your-openai-api-key
## AI_SETTINGS_FILE - Specifies which AI Settings file to use, relative to the AutoGPT root directory. (defaults to ai_settings.yaml)
# AI_SETTINGS_FILE=ai_settings.yaml
-## PLUGINS_CONFIG_FILE - The path to the plugins_config.yaml file, relative to the AutoGPT root directory. (Default plugins_config.yaml)
-# PLUGINS_CONFIG_FILE=plugins_config.yaml
## PROMPT_SETTINGS_FILE - Specifies which Prompt Settings file to use, relative to the AutoGPT root directory. (defaults to prompt_settings.yaml)
# PROMPT_SETTINGS_FILE=prompt_settings.yaml

View File

@@ -52,7 +52,6 @@ def bootstrap_agent(task: str, continuous_mode: bool) -> Agent:
            smart_llm=config.smart_llm,
            allow_fs_access=not config.restrict_to_workspace,
            use_functions_api=config.openai_functions,
-            plugins=config.plugins,
        ),
        prompt_config=agent_prompt_config,
        history=Agent.default_settings.history.copy(deep=True),

View File

@@ -4,8 +4,6 @@ from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
from autogpt.config import AIDirectives, AIProfile, Config
from autogpt.core.resource.model_providers import ChatModelProvider
from autogpt.file_storage.base import FileStorage
-from autogpt.logs.config import configure_chat_plugins
-from autogpt.plugins import scan_plugins


def create_agent(
@@ -65,9 +63,6 @@ def _configure_agent(
            " must be specified"
        )

-    app_config.plugins = scan_plugins(app_config)
-    configure_chat_plugins(app_config)

    agent_state = state or create_agent_state(
        agent_id=agent_id,
        task=task,
@@ -105,7 +100,6 @@ def create_agent_state(
            smart_llm=app_config.smart_llm,
            allow_fs_access=not app_config.restrict_to_workspace,
            use_functions_api=app_config.openai_functions,
-            plugins=app_config.plugins,
        ),
        history=Agent.default_settings.history.copy(deep=True),
    )

View File

@@ -15,7 +15,6 @@ from typing import (
    overload,
)

-from auto_gpt_plugin_template import AutoGPTPluginTemplate
from colorama import Fore
from pydantic import BaseModel, Field, validator
@@ -96,21 +95,6 @@ class BaseAgentConfiguration(SystemConfiguration):
    summary_max_tlength: Optional[int] = None
    # TODO: move to ActionHistoryConfiguration

-    plugins: list[AutoGPTPluginTemplate] = Field(default_factory=list, exclude=True)

-    class Config:
-        arbitrary_types_allowed = True  # Necessary for plugins

-    @validator("plugins", each_item=True)
-    def validate_plugins(cls, p: AutoGPTPluginTemplate | Any):
-        assert issubclass(
-            p.__class__, AutoGPTPluginTemplate
-        ), f"{p} does not subclass AutoGPTPluginTemplate"
-        assert (
-            p.__class__.__name__ != "AutoGPTPluginTemplate"
-        ), f"Plugins must subclass AutoGPTPluginTemplate; {p} is a template instance"
-        return p

    @validator("use_functions_api")
    def validate_openai_functions(cls, v: bool, values: dict[str, Any]):
        if v:

View File

@@ -135,7 +135,6 @@ class FileManagerComponent(DirectiveProvider, CommandProvider):
        Returns:
            str: A message indicating success or failure
        """
-        logger.info(f"self: {self}")
        if directory := os.path.dirname(filename):
            self.workspace.make_dir(directory)
        await self.workspace.write_file(filename, contents)

View File

@@ -37,13 +37,11 @@ from autogpt.config import (
from autogpt.core.resource.model_providers.openai import OpenAIProvider
from autogpt.core.runner.client_lib.utils import coroutine
from autogpt.file_storage import FileStorageBackendName, get_storage
-from autogpt.logs.config import LoggingConfig, configure_chat_plugins, configure_logging
+from autogpt.logs.config import configure_logging
from autogpt.logs.helpers import print_attribute, speak
from autogpt.models.action_history import ActionInterruptedByHuman
-from autogpt.plugins import scan_plugins
from autogpt.utils.exceptions import AgentTerminated, InvalidAgentResponseError
from autogpt.utils.utils import DEFAULT_FINISH_COMMAND
-from scripts.install_plugin_deps import install_plugin_dependencies

from .configurator import apply_overrides_to_config
from .setup import apply_overrides_to_ai_settings, interactively_revise_ai_settings
@@ -166,12 +164,6 @@ async def run_auto_gpt(
            title_color=Fore.YELLOW,
        )

-    if install_plugin_deps:
-        install_plugin_dependencies()

-    config.plugins = scan_plugins(config)
-    configure_chat_plugins(config)

    # Let user choose an existing agent to run
    agent_manager = AgentManager(file_storage)
    existing_agents = agent_manager.list_agents()
@@ -408,11 +400,6 @@ async def run_auto_gpt_server(
    llm_provider = _configure_openai_provider(config)

-    if install_plugin_deps:
-        install_plugin_dependencies()

-    config.plugins = scan_plugins(config)

    # Set up & start server
    database = AgentDB(
        database_string=os.getenv("AP_SERVER_DB_URL", "sqlite:///data/ap_server.db"),
@@ -726,12 +713,7 @@ async def get_user_feedback(
    while user_feedback is None:
        # Get input from user
-        if config.chat_messages_enabled:
-            console_input = clean_input(config, "Waiting for your response...")
-        else:
-            console_input = clean_input(
-                config, Fore.MAGENTA + "Input:" + Style.RESET_ALL
-            )
+        console_input = clean_input(config, Fore.MAGENTA + "Input:" + Style.RESET_ALL)

        # Parse user input
        if console_input.lower().strip() == config.authorise_key:

View File

@@ -20,34 +20,6 @@ logger = logging.getLogger(__name__)
def clean_input(config: "Config", prompt: str = ""):
    try:
-        if config.chat_messages_enabled:
-            for plugin in config.plugins:
-                if not hasattr(plugin, "can_handle_user_input"):
-                    continue
-                if not plugin.can_handle_user_input(user_input=prompt):
-                    continue
-                plugin_response = plugin.user_input(user_input=prompt)
-                if not plugin_response:
-                    continue
-                if plugin_response.lower() in [
-                    "yes",
-                    "yeah",
-                    "y",
-                    "ok",
-                    "okay",
-                    "sure",
-                    "alright",
-                ]:
-                    return config.authorise_key
-                elif plugin_response.lower() in [
-                    "no",
-                    "nope",
-                    "n",
-                    "negative",
-                ]:
-                    return config.exit_key
-                return plugin_response

        # ask for input, default when just pressing Enter is y
        logger.debug("Asking user via keyboard...")
@@ -215,7 +187,7 @@ def print_motd(config: "Config", logger: logging.Logger):
            },
            msg=motd_line,
        )
-    if is_new_motd and not config.chat_messages_enabled:
+    if is_new_motd:
        input(
            Fore.MAGENTA
            + Style.BRIGHT

View File

@@ -7,9 +7,8 @@ import re
from pathlib import Path
from typing import Any, Optional, Union

-from auto_gpt_plugin_template import AutoGPTPluginTemplate
from colorama import Fore
-from pydantic import Field, SecretStr, validator
+from pydantic import SecretStr, validator

import autogpt
from autogpt.app.utils import clean_input
@@ -25,7 +24,6 @@ from autogpt.core.resource.model_providers.openai import (
)
from autogpt.file_storage import FileStorageBackendName
from autogpt.logs.config import LoggingConfig
-from autogpt.plugins.plugins_config import PluginsConfig
from autogpt.speech import TTSConfig

logger = logging.getLogger(__name__)
@@ -33,7 +31,6 @@ logger = logging.getLogger(__name__)
PROJECT_ROOT = Path(autogpt.__file__).parent.parent
AI_SETTINGS_FILE = Path("ai_settings.yaml")
AZURE_CONFIG_FILE = Path("azure.yaml")
-PLUGINS_CONFIG_FILE = Path("plugins_config.yaml")
PROMPT_SETTINGS_FILE = Path("prompt_settings.yaml")

GPT_4_MODEL = OpenAIModelName.GPT4
@@ -54,9 +51,6 @@ class Config(SystemSettings, arbitrary_types_allowed=True):
    authorise_key: str = UserConfigurable(default="y", from_env="AUTHORISE_COMMAND_KEY")
    exit_key: str = UserConfigurable(default="n", from_env="EXIT_KEY")
    noninteractive_mode: bool = False
-    chat_messages_enabled: bool = UserConfigurable(
-        default=True, from_env=lambda: os.getenv("CHAT_MESSAGES_ENABLED") == "True"
-    )

    # TTS configuration
    logging: LoggingConfig = LoggingConfig()
@@ -181,29 +175,6 @@ class Config(SystemSettings, arbitrary_types_allowed=True):
from_env="USER_AGENT", from_env="USER_AGENT",
) )
###################
# Plugin Settings #
###################
plugins_dir: str = UserConfigurable("plugins", from_env="PLUGINS_DIR")
plugins_config_file: Path = UserConfigurable(
default=PLUGINS_CONFIG_FILE, from_env="PLUGINS_CONFIG_FILE"
)
plugins_config: PluginsConfig = Field(
default_factory=lambda: PluginsConfig(plugins={})
)
plugins: list[AutoGPTPluginTemplate] = Field(default_factory=list, exclude=True)
plugins_allowlist: list[str] = UserConfigurable(
default_factory=list,
from_env=lambda: _safe_split(os.getenv("ALLOWLISTED_PLUGINS")),
)
plugins_denylist: list[str] = UserConfigurable(
default_factory=list,
from_env=lambda: _safe_split(os.getenv("DENYLISTED_PLUGINS")),
)
plugins_openai: list[str] = UserConfigurable(
default_factory=list, from_env=lambda: _safe_split(os.getenv("OPENAI_PLUGINS"))
)
############### ###############
# Credentials # # Credentials #
############### ###############
@@ -231,16 +202,6 @@ class Config(SystemSettings, arbitrary_types_allowed=True):
    # Stable Diffusion
    sd_webui_auth: Optional[str] = UserConfigurable(from_env="SD_WEBUI_AUTH")

-    @validator("plugins", each_item=True)
-    def validate_plugins(cls, p: AutoGPTPluginTemplate | Any):
-        assert issubclass(
-            p.__class__, AutoGPTPluginTemplate
-        ), f"{p} does not subclass AutoGPTPluginTemplate"
-        assert (
-            p.__class__.__name__ != "AutoGPTPluginTemplate"
-        ), f"Plugins must subclass AutoGPTPluginTemplate; {p} is a template instance"
-        return p

    @validator("openai_functions")
    def validate_openai_functions(cls, v: bool, values: dict[str, Any]):
        if v:
@@ -266,7 +227,6 @@ class ConfigBuilder(Configurable[Config]):
        for k in {
            "ai_settings_file",  # TODO: deprecate or repurpose
            "prompt_settings_file",  # TODO: deprecate or repurpose
-            "plugins_config_file",  # TODO: move from project root
            "azure_config_file",  # TODO: move from project root
        }:
            setattr(config, k, project_root / getattr(config, k))
@@ -278,12 +238,6 @@ class ConfigBuilder(Configurable[Config]):
        ):
            config.openai_credentials.load_azure_config(config_file)

-        config.plugins_config = PluginsConfig.load_config(
-            config.plugins_config_file,
-            config.plugins_denylist,
-            config.plugins_allowlist,
-        )

        return config

View File

@@ -1,4 +1,4 @@
-from .config import configure_chat_plugins, configure_logging
+from .config import configure_logging
from .helpers import user_friendly_output
from .log_cycle import (
    CURRENT_CONTEXT_FILE_NAME,
@@ -13,7 +13,6 @@ from .log_cycle import (
__all__ = [
    "configure_logging",
-    "configure_chat_plugins",
    "user_friendly_output",
    "CURRENT_CONTEXT_FILE_NAME",
    "NEXT_ACTION_FILE_NAME",

View File

@@ -8,11 +8,9 @@ import sys
from pathlib import Path
from typing import TYPE_CHECKING, Optional

-from auto_gpt_plugin_template import AutoGPTPluginTemplate
from openai._base_client import log as openai_logger

if TYPE_CHECKING:
-    from autogpt.config import Config
    from autogpt.speech import TTSConfig

from autogpt.core.configuration import SystemConfiguration, UserConfigurable
@@ -34,8 +32,6 @@ DEBUG_LOG_FORMAT = (
SPEECH_OUTPUT_LOGGER = "VOICE"
USER_FRIENDLY_OUTPUT_LOGGER = "USER_FRIENDLY_OUTPUT"

-_chat_plugins: list[AutoGPTPluginTemplate] = []


class LogFormatName(str, enum.Enum):
    SIMPLE = "simple"
@@ -222,19 +218,3 @@ def configure_logging(
    # Disable debug logging from OpenAI library
    openai_logger.setLevel(logging.WARNING)

-
-def configure_chat_plugins(config: Config) -> None:
-    """Configure chat plugins for use by the logging module"""
-    logger = logging.getLogger(__name__)
-
-    # Add chat plugins capable of report to logger
-    if config.chat_messages_enabled:
-        if _chat_plugins:
-            _chat_plugins.clear()
-
-        for plugin in config.plugins:
-            if hasattr(plugin, "can_handle_report") and plugin.can_handle_report():
-                logger.debug(f"Loaded plugin into logger: {plugin.__class__.__name__}")
-                _chat_plugins.append(plugin)

View File

@@ -3,7 +3,7 @@ from typing import Any, Optional
from colorama import Fore

-from .config import SPEECH_OUTPUT_LOGGER, USER_FRIENDLY_OUTPUT_LOGGER, _chat_plugins
+from .config import SPEECH_OUTPUT_LOGGER, USER_FRIENDLY_OUTPUT_LOGGER


def user_friendly_output(
@@ -21,10 +21,6 @@ def user_friendly_output(
""" """
logger = logging.getLogger(USER_FRIENDLY_OUTPUT_LOGGER) logger = logging.getLogger(USER_FRIENDLY_OUTPUT_LOGGER)
if _chat_plugins:
for plugin in _chat_plugins:
plugin.report(f"{title}: {message}")
logger.log( logger.log(
level, level,
message, message,

View File

@@ -1,5 +1,4 @@
import logging
-from contextlib import suppress
from typing import Any, Sequence, overload

import numpy as np
@@ -51,16 +50,9 @@ async def get_embedding(
    if isinstance(input, str):
        input = input.replace("\n", " ")

-        with suppress(NotImplementedError):
-            return _get_embedding_with_plugin(input, config)

    elif multiple and isinstance(input[0], str):
        input = [text.replace("\n", " ") for text in input]

-        with suppress(NotImplementedError):
-            return [_get_embedding_with_plugin(i, config) for i in input]

    model = config.embedding_model
    logger.debug(
@@ -86,13 +78,3 @@ async def get_embedding(
        )
        embeddings.append(result.embedding)
    return embeddings

-
-def _get_embedding_with_plugin(text: str, config: Config) -> Embedding:
-    for plugin in config.plugins:
-        if plugin.can_handle_text_embedding(text):
-            embedding = plugin.handle_text_embedding(text)
-            if embedding is not None:
-                return embedding
-
-    raise NotImplementedError

View File

@@ -1,251 +0,0 @@
"""Handles loading of plugins."""
from typing import Any, Dict, List, Optional, Tuple, TypedDict, TypeVar
from auto_gpt_plugin_template import AutoGPTPluginTemplate
PromptGenerator = TypeVar("PromptGenerator")
class Message(TypedDict):
role: str
content: str
class BaseOpenAIPlugin(AutoGPTPluginTemplate):
"""
This is a BaseOpenAIPlugin class for generating AutoGPT plugins.
"""
def __init__(self, manifests_specs_clients: dict):
# super().__init__()
self._name = manifests_specs_clients["manifest"]["name_for_model"]
self._version = manifests_specs_clients["manifest"]["schema_version"]
self._description = manifests_specs_clients["manifest"]["description_for_model"]
self._client = manifests_specs_clients["client"]
self._manifest = manifests_specs_clients["manifest"]
self._openapi_spec = manifests_specs_clients["openapi_spec"]
def can_handle_on_response(self) -> bool:
"""This method is called to check that the plugin can
handle the on_response method.
Returns:
bool: True if the plugin can handle the on_response method."""
return False
def on_response(self, response: str, *args, **kwargs) -> str:
"""This method is called when a response is received from the model."""
return response
def can_handle_post_prompt(self) -> bool:
"""This method is called to check that the plugin can
handle the post_prompt method.
Returns:
bool: True if the plugin can handle the post_prompt method."""
return False
def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator:
"""This method is called just after the generate_prompt is called,
but actually before the prompt is generated.
Args:
prompt (PromptGenerator): The prompt generator.
Returns:
PromptGenerator: The prompt generator.
"""
return prompt
def can_handle_on_planning(self) -> bool:
"""This method is called to check that the plugin can
handle the on_planning method.
Returns:
bool: True if the plugin can handle the on_planning method."""
return False
def on_planning(
self, prompt: PromptGenerator, messages: List[Message]
) -> Optional[str]:
"""This method is called before the planning chat completion is done.
Args:
prompt (PromptGenerator): The prompt generator.
messages (List[str]): The list of messages.
"""
def can_handle_post_planning(self) -> bool:
"""This method is called to check that the plugin can
handle the post_planning method.
Returns:
bool: True if the plugin can handle the post_planning method."""
return False
def post_planning(self, response: str) -> str:
"""This method is called after the planning chat completion is done.
Args:
response (str): The response.
Returns:
str: The resulting response.
"""
return response
def can_handle_pre_instruction(self) -> bool:
"""This method is called to check that the plugin can
handle the pre_instruction method.
Returns:
bool: True if the plugin can handle the pre_instruction method."""
return False
def pre_instruction(self, messages: List[Message]) -> List[Message]:
"""This method is called before the instruction chat is done.
Args:
messages (List[Message]): The list of context messages.
Returns:
List[Message]: The resulting list of messages.
"""
return messages
def can_handle_on_instruction(self) -> bool:
"""This method is called to check that the plugin can
handle the on_instruction method.
Returns:
bool: True if the plugin can handle the on_instruction method."""
return False
def on_instruction(self, messages: List[Message]) -> Optional[str]:
"""This method is called when the instruction chat is done.
Args:
messages (List[Message]): The list of context messages.
Returns:
Optional[str]: The resulting message.
"""
def can_handle_post_instruction(self) -> bool:
"""This method is called to check that the plugin can
handle the post_instruction method.
Returns:
bool: True if the plugin can handle the post_instruction method."""
return False
def post_instruction(self, response: str) -> str:
"""This method is called after the instruction chat is done.
Args:
response (str): The response.
Returns:
str: The resulting response.
"""
return response
def can_handle_pre_command(self) -> bool:
"""This method is called to check that the plugin can
handle the pre_command method.
Returns:
bool: True if the plugin can handle the pre_command method."""
return False
def pre_command(
self, command_name: str, arguments: Dict[str, Any]
) -> Tuple[str, Dict[str, Any]]:
"""This method is called before the command is executed.
Args:
command_name (str): The command name.
arguments (Dict[str, Any]): The arguments.
Returns:
Tuple[str, Dict[str, Any]]: The command name and the arguments.
"""
return command_name, arguments
def can_handle_post_command(self) -> bool:
"""This method is called to check that the plugin can
handle the post_command method.
Returns:
bool: True if the plugin can handle the post_command method."""
return False
def post_command(self, command_name: str, response: str) -> str:
"""This method is called after the command is executed.
Args:
command_name (str): The command name.
response (str): The response.
Returns:
str: The resulting response.
"""
return response
def can_handle_chat_completion(
self, messages: Dict[Any, Any], model: str, temperature: float, max_tokens: int
) -> bool:
"""This method is called to check that the plugin can
handle the chat_completion method.
Args:
messages (List[Message]): The messages.
model (str): The model name.
temperature (float): The temperature.
max_tokens (int): The max tokens.
Returns:
bool: True if the plugin can handle the chat_completion method."""
return False
def handle_chat_completion(
self, messages: List[Message], model: str, temperature: float, max_tokens: int
) -> str:
"""This method is called when the chat completion is done.
Args:
messages (List[Message]): The messages.
model (str): The model name.
temperature (float): The temperature.
max_tokens (int): The max tokens.
Returns:
str: The resulting response.
"""
def can_handle_text_embedding(self, text: str) -> bool:
"""This method is called to check that the plugin can
handle the text_embedding method.
Args:
text (str): The text to be convert to embedding.
Returns:
bool: True if the plugin can handle the text_embedding method."""
return False
def handle_text_embedding(self, text: str) -> list[float]:
"""This method is called to create a text embedding.
Args:
text (str): The text to be convert to embedding.
Returns:
list[float]: The created embedding vector.
"""
def can_handle_user_input(self, user_input: str) -> bool:
"""This method is called to check that the plugin can
handle the user_input method.
Args:
user_input (str): The user input.
Returns:
bool: True if the plugin can handle the user_input method."""
return False
def user_input(self, user_input: str) -> str:
"""This method is called to request user input to the user.
Args:
user_input (str): The question or prompt to ask the user.
Returns:
str: The user input.
"""
def can_handle_report(self) -> bool:
"""This method is called to check that the plugin can
handle the report method.
Returns:
bool: True if the plugin can handle the report method."""
return False
def report(self, message: str) -> None:
"""This method is called to report a message to the user.
Args:
message (str): The message to report.
"""

View File

@@ -1,330 +0,0 @@
"""Handles loading of plugins."""
from __future__ import annotations
import importlib.util
import inspect
import json
import logging
import os
import zipfile
from pathlib import Path
from typing import TYPE_CHECKING, List
from urllib.parse import urlparse
from zipimport import ZipImportError, zipimporter
import openapi_python_client
import requests
from auto_gpt_plugin_template import AutoGPTPluginTemplate
from openapi_python_client.config import Config as OpenAPIConfig
if TYPE_CHECKING:
from autogpt.config import Config
from autogpt.models.base_open_ai_plugin import BaseOpenAIPlugin
logger = logging.getLogger(__name__)
def inspect_zip_for_modules(zip_path: str) -> list[str]:
"""
Inspect a zipfile for a modules.
Args:
zip_path (str): Path to the zipfile.
debug (bool, optional): Enable debug logging. Defaults to False.
Returns:
list[str]: The list of module names found or empty list if none were found.
"""
result = []
with zipfile.ZipFile(zip_path, "r") as zfile:
for name in zfile.namelist():
if name.endswith("__init__.py") and not name.startswith("__MACOSX"):
logger.debug(f"Found module '{name}' in the zipfile at: {name}")
result.append(name)
if len(result) == 0:
logger.debug(f"Module '__init__.py' not found in the zipfile @ {zip_path}.")
return result
def write_dict_to_json_file(data: dict, file_path: str) -> None:
"""
Write a dictionary to a JSON file.
Args:
data (dict): Dictionary to write.
file_path (str): Path to the file.
"""
with open(file_path, "w") as file:
json.dump(data, file, indent=4)
def fetch_openai_plugins_manifest_and_spec(config: Config) -> dict:
"""
Fetch the manifest for a list of OpenAI plugins.
Args:
urls (List): List of URLs to fetch.
Returns:
dict: per url dictionary of manifest and spec.
"""
# TODO add directory scan
manifests = {}
for url in config.plugins_openai:
openai_plugin_client_dir = f"{config.plugins_dir}/openai/{urlparse(url).netloc}"
create_directory_if_not_exists(openai_plugin_client_dir)
if not os.path.exists(f"{openai_plugin_client_dir}/ai-plugin.json"):
try:
response = requests.get(f"{url}/.well-known/ai-plugin.json")
if response.status_code == 200:
manifest = response.json()
if manifest["schema_version"] != "v1":
logger.warning(
"Unsupported manifest version: "
f"{manifest['schem_version']} for {url}"
)
continue
if manifest["api"]["type"] != "openapi":
logger.warning(
f"Unsupported API type: {manifest['api']['type']} for {url}"
)
continue
write_dict_to_json_file(
manifest, f"{openai_plugin_client_dir}/ai-plugin.json"
)
else:
logger.warning(
f"Failed to fetch manifest for {url}: {response.status_code}"
)
except requests.exceptions.RequestException as e:
logger.warning(f"Error while requesting manifest from {url}: {e}")
else:
logger.info(f"Manifest for {url} already exists")
manifest = json.load(open(f"{openai_plugin_client_dir}/ai-plugin.json"))
if not os.path.exists(f"{openai_plugin_client_dir}/openapi.json"):
openapi_spec = openapi_python_client._get_document(
url=manifest["api"]["url"], path=None, timeout=5
)
write_dict_to_json_file(
openapi_spec, f"{openai_plugin_client_dir}/openapi.json"
)
else:
logger.info(f"OpenAPI spec for {url} already exists")
openapi_spec = json.load(open(f"{openai_plugin_client_dir}/openapi.json"))
manifests[url] = {"manifest": manifest, "openapi_spec": openapi_spec}
return manifests
def create_directory_if_not_exists(directory_path: str) -> bool:
"""
Create a directory if it does not exist.
Args:
directory_path (str): Path to the directory.
Returns:
bool: True if the directory was created, else False.
"""
if not os.path.exists(directory_path):
try:
os.makedirs(directory_path)
logger.debug(f"Created directory: {directory_path}")
return True
except OSError as e:
logger.warning(f"Error creating directory {directory_path}: {e}")
return False
else:
logger.info(f"Directory {directory_path} already exists")
return True
def initialize_openai_plugins(manifests_specs: dict, config: Config) -> dict:
"""
Initialize OpenAI plugins.
Args:
manifests_specs (dict): per url dictionary of manifest and spec.
config (Config): Config instance including plugins config
debug (bool, optional): Enable debug logging. Defaults to False.
Returns:
dict: per url dictionary of manifest, spec and client.
"""
openai_plugins_dir = f"{config.plugins_dir}/openai"
if create_directory_if_not_exists(openai_plugins_dir):
for url, manifest_spec in manifests_specs.items():
openai_plugin_client_dir = f"{openai_plugins_dir}/{urlparse(url).hostname}"
_meta_option = (openapi_python_client.MetaType.SETUP,)
_config = OpenAPIConfig(
**{
"project_name_override": "client",
"package_name_override": "client",
}
)
prev_cwd = Path.cwd()
os.chdir(openai_plugin_client_dir)
if not os.path.exists("client"):
client_results = openapi_python_client.create_new_client(
url=manifest_spec["manifest"]["api"]["url"],
path=None,
meta=_meta_option,
config=_config,
)
if client_results:
logger.warning(
f"Error creating OpenAPI client: {client_results[0].header} \n"
f" details: {client_results[0].detail}"
)
continue
spec = importlib.util.spec_from_file_location(
"client", "client/client/client.py"
)
module = importlib.util.module_from_spec(spec)
try:
spec.loader.exec_module(module)
finally:
os.chdir(prev_cwd)
client = module.Client(base_url=url)
manifest_spec["client"] = client
return manifests_specs
def instantiate_openai_plugin_clients(manifests_specs_clients: dict) -> dict:
"""
Instantiates BaseOpenAIPlugin instances for each OpenAI plugin.
Args:
manifests_specs_clients (dict): per url dictionary of manifest, spec and client.
config (Config): Config instance including plugins config
debug (bool, optional): Enable debug logging. Defaults to False.
Returns:
plugins (dict): per url dictionary of BaseOpenAIPlugin instances.
"""
plugins = {}
for url, manifest_spec_client in manifests_specs_clients.items():
plugins[url] = BaseOpenAIPlugin(manifest_spec_client)
return plugins
def scan_plugins(config: Config) -> List[AutoGPTPluginTemplate]:
"""Scan the plugins directory for plugins and loads them.
Args:
config (Config): Config instance including plugins config
debug (bool, optional): Enable debug logging. Defaults to False.
Returns:
List[Tuple[str, Path]]: List of plugins.
"""
loaded_plugins = []
# Generic plugins
plugins_path = Path(config.plugins_dir)
plugins_config = config.plugins_config
# Directory-based plugins
for plugin_path in [f for f in Path(config.plugins_dir).iterdir() if f.is_dir()]:
# Avoid going into __pycache__ or other hidden directories
if plugin_path.name.startswith("__"):
continue
plugin_module_name = plugin_path.name
qualified_module_name = ".".join(plugin_path.parts)
try:
plugin = importlib.import_module(qualified_module_name)
except ImportError as e:
logger.error(
f"Failed to load {qualified_module_name} from {plugin_path}: {e}"
)
continue
if not plugins_config.is_enabled(plugin_module_name):
logger.warning(
f"Plugin folder {plugin_module_name} found but not configured. "
"If this is a legitimate plugin, please add it to plugins_config.yaml "
f"(key: {plugin_module_name})."
)
continue
for _, class_obj in inspect.getmembers(plugin):
if (
hasattr(class_obj, "_abc_impl")
and AutoGPTPluginTemplate in class_obj.__bases__
):
loaded_plugins.append(class_obj())
# Zip-based plugins
for plugin in plugins_path.glob("*.zip"):
if moduleList := inspect_zip_for_modules(str(plugin)):
for module in moduleList:
plugin = Path(plugin)
module = Path(module)
logger.debug(f"Zipped Plugin: {plugin}, Module: {module}")
zipped_package = zipimporter(str(plugin))
try:
zipped_module = zipped_package.load_module(str(module.parent))
except ZipImportError as e:
logger.error(f"Failed to load {module.parent} from {plugin}: {e}")
continue
for key in dir(zipped_module):
if key.startswith("__"):
continue
a_module = getattr(zipped_module, key)
if not inspect.isclass(a_module):
continue
if (
issubclass(a_module, AutoGPTPluginTemplate)
and a_module.__name__ != "AutoGPTPluginTemplate"
):
plugin_name = a_module.__name__
plugin_configured = plugins_config.get(plugin_name) is not None
plugin_enabled = plugins_config.is_enabled(plugin_name)
if plugin_configured and plugin_enabled:
logger.debug(
f"Loading plugin {plugin_name}. "
"Enabled in plugins_config.yaml."
)
loaded_plugins.append(a_module())
elif plugin_configured and not plugin_enabled:
logger.debug(
f"Not loading plugin {plugin_name}. "
"Disabled in plugins_config.yaml."
)
elif not plugin_configured:
logger.warning(
f"Not loading plugin {plugin_name}. "
f"No entry for '{plugin_name}' in plugins_config.yaml. "
"Note: Zipped plugins should use the class name "
f"({plugin_name}) as the key."
)
else:
if (
module_name := getattr(a_module, "__name__", str(a_module))
) != "AutoGPTPluginTemplate":
logger.debug(
f"Skipping '{module_name}' because it doesn't subclass "
"AutoGPTPluginTemplate."
)
# OpenAI plugins
if config.plugins_openai:
manifests_specs = fetch_openai_plugins_manifest_and_spec(config)
if manifests_specs.keys():
manifests_specs_clients = initialize_openai_plugins(manifests_specs, config)
for url, openai_plugin_meta in manifests_specs_clients.items():
if not plugins_config.is_enabled(url):
plugin_name = openai_plugin_meta["manifest"]["name_for_model"]
logger.warning(
f"OpenAI Plugin {plugin_name} found but not configured"
)
continue
plugin = BaseOpenAIPlugin(openai_plugin_meta)
loaded_plugins.append(plugin)
if loaded_plugins:
logger.info(f"\nPlugins found: {len(loaded_plugins)}\n" "--------------------")
for plugin in loaded_plugins:
logger.info(f"{plugin._name}: {plugin._version} - {plugin._description}")
return loaded_plugins

View File

@@ -1,11 +0,0 @@
from typing import Any

from pydantic import BaseModel


class PluginConfig(BaseModel):
    """Class for holding configuration of a single plugin"""

    name: str
    enabled: bool = False
    config: dict[str, Any] = None

View File

@@ -1,118 +0,0 @@
from __future__ import annotations

import logging
from pathlib import Path
from typing import Union

import yaml
from pydantic import BaseModel

from autogpt.plugins.plugin_config import PluginConfig

logger = logging.getLogger(__name__)


class PluginsConfig(BaseModel):
    """Class for holding configuration of all plugins"""

    plugins: dict[str, PluginConfig]

    def __repr__(self):
        return f"PluginsConfig({self.plugins})"

    def get(self, name: str) -> Union[PluginConfig, None]:
        return self.plugins.get(name)

    def is_enabled(self, name) -> bool:
        plugin_config = self.plugins.get(name)
        return plugin_config is not None and plugin_config.enabled

    @classmethod
    def load_config(
        cls,
        plugins_config_file: Path,
        plugins_denylist: list[str],
        plugins_allowlist: list[str],
    ) -> "PluginsConfig":
        empty_config = cls(plugins={})

        try:
            config_data = cls.deserialize_config_file(
                plugins_config_file,
                plugins_denylist,
                plugins_allowlist,
            )
            if type(config_data) is not dict:
                logger.error(
                    f"Expected plugins config to be a dict, got {type(config_data)}."
                    " Continuing without plugins."
                )
                return empty_config
            return cls(plugins=config_data)

        except BaseException as e:
            logger.error(
                f"Plugin config is invalid. Continuing without plugins. Error: {e}"
            )
            return empty_config

    @classmethod
    def deserialize_config_file(
        cls,
        plugins_config_file: Path,
        plugins_denylist: list[str],
        plugins_allowlist: list[str],
    ) -> dict[str, PluginConfig]:
        if not plugins_config_file.is_file():
            logger.warning("plugins_config.yaml does not exist, creating base config.")
            cls.create_empty_plugins_config(
                plugins_config_file,
                plugins_denylist,
                plugins_allowlist,
            )

        with open(plugins_config_file, "r") as f:
            plugins_config = yaml.load(f, Loader=yaml.SafeLoader)

        plugins = {}
        for name, plugin in plugins_config.items():
            if type(plugin) is dict:
                plugins[name] = PluginConfig(
                    name=name,
                    enabled=plugin.get("enabled", False),
                    config=plugin.get("config", {}),
                )
            elif isinstance(plugin, PluginConfig):
                plugins[name] = plugin
            else:
                raise ValueError(f"Invalid plugin config data type: {type(plugin)}")
        return plugins

    @staticmethod
    def create_empty_plugins_config(
        plugins_config_file: Path,
        plugins_denylist: list[str],
        plugins_allowlist: list[str],
    ):
        """
        Create an empty plugins_config.yaml file.
        Fill it with values from old env variables.
        """
        base_config = {}

        logger.debug(f"Legacy plugin denylist: {plugins_denylist}")
        logger.debug(f"Legacy plugin allowlist: {plugins_allowlist}")

        # Backwards-compatibility shim
        for plugin_name in plugins_denylist:
            base_config[plugin_name] = {"enabled": False, "config": {}}

        for plugin_name in plugins_allowlist:
            base_config[plugin_name] = {"enabled": True, "config": {}}

        logger.debug(f"Constructed base plugins config: {base_config}")

        logger.debug(f"Creating plugin config file {plugins_config_file}")
        with open(plugins_config_file, "w+") as f:
            f.write(yaml.dump(base_config))
            return base_config

View File

@@ -1,16 +1,5 @@
# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand.

-[[package]]
-name = "abstract-singleton"
-version = "1.0.1"
-description = "An abstract singleton class."
-optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "abstract_singleton-1.0.1-py3-none-any.whl", hash = "sha256:1f5e2359a609360bc08d975f578cce75a752df06db561efb679e69646199ec1d"},
-    {file = "abstract_singleton-1.0.1.tar.gz", hash = "sha256:d97d26ecbcb7422f78df1b0bca48a03df5ba04cf58844c6da033a7840beaae82"},
-]

[[package]]
name = "agbenchmark"
version = "0.0.10"
@@ -276,24 +265,6 @@ tests = ["attrs[tests-no-zope]", "zope-interface"]
tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"]
tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"]

-[[package]]
-name = "auto_gpt_plugin_template"
-version = "0.0.2"
-description = "The template plugin for Auto-GPT."
-optional = false
-python-versions = ">=3.8"
-files = []
-develop = false

-[package.dependencies]
-abstract-singleton = "*"

-[package.source]
-type = "git"
-url = "https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template"
-reference = "0.1.0"
-resolved_reference = "7612a14c629dc64ad870eee4d05850d60e1dd9ce"

[[package]]
name = "autoflake"
version = "2.2.1"
@@ -338,13 +309,13 @@ uvicorn = "^0.23.2"
webdriver-manager = "^4.0.1"

[package.extras]
-benchmark = ["agbenchmark @ git+https://github.com/Significant-Gravitas/AutoGPT.git#subdirectory=benchmark"]
+benchmark = ["agbenchmark @ file:///Users/czerwinski/Library/Caches/pypoetry/virtualenvs/agpt-JtDOdZb2-py3.11/src/AutoGPT/benchmark"]

[package.source]
type = "git"
url = "https://github.com/Significant-Gravitas/AutoGPT.git"
-reference = "ab05b7ae70754c063909"
+reference = "HEAD"
-resolved_reference = "ab05b7ae70754c06390982d237d86dc7290cd1aa"
+resolved_reference = "fd3f8fa5fc86271e4e319258fefdb3065d1aa0d4"
subdirectory = "autogpts/forge"

[[package]]
@@ -7263,4 +7234,4 @@ benchmark = ["agbenchmark"]
[metadata]
lock-version = "2.0"
python-versions = "^3.10"
-content-hash = "afa9674e032573e483e977e818766efe18ea4f52efa1ce6dfc71686772371b5b"
+content-hash = "e6eab5c079d53f075ce701e86a2007e7ebeb635ac067d25f555bfea363bcc630"

View File

@@ -22,9 +22,8 @@ serve = "autogpt.app.cli:serve"
[tool.poetry.dependencies]
python = "^3.10"
-auto-gpt-plugin-template = {git = "https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template", rev = "0.1.0"}
# autogpt-forge = { path = "../forge" }
-autogpt-forge = {git = "https://github.com/Significant-Gravitas/AutoGPT.git", rev = "ab05b7ae70754c063909", subdirectory = "autogpts/forge"}
+autogpt-forge = {git = "https://github.com/Significant-Gravitas/AutoGPT.git", subdirectory = "autogpts/forge"}
beautifulsoup4 = "^4.12.2"
boto3 = "^1.33.6"
charset-normalizer = "^3.1.0"

View File

@@ -1,66 +0,0 @@
import logging
import os
import subprocess
import sys
import zipfile
from glob import glob
from pathlib import Path

logger = logging.getLogger(__name__)


def install_plugin_dependencies():
    """
    Installs dependencies for all plugins in the plugins dir.

    Args:
        None

    Returns:
        None
    """
    plugins_dir = Path(os.getenv("PLUGINS_DIR", "plugins"))

    logger.debug("Checking for dependencies in zipped plugins...")

    # Install zip-based plugins
    for plugin_archive in plugins_dir.glob("*.zip"):
        logger.debug(f"Checking for requirements in '{plugin_archive}'...")
        with zipfile.ZipFile(str(plugin_archive), "r") as zfile:
            if not zfile.namelist():
                continue

            # Assume the first entry in the list will be (in) the lowest common dir
            first_entry = zfile.namelist()[0]
            basedir = first_entry.rsplit("/", 1)[0] if "/" in first_entry else ""
            logger.debug(f"Looking for requirements.txt in '{basedir}'")

            basereqs = os.path.join(basedir, "requirements.txt")
            try:
                extracted = zfile.extract(basereqs, path=plugins_dir)
            except KeyError as e:
                logger.debug(e.args[0])
                continue

            logger.debug(f"Installing dependencies from '{basereqs}'...")
            subprocess.check_call(
                [sys.executable, "-m", "pip", "install", "-r", extracted]
            )
            os.remove(extracted)
            os.rmdir(os.path.join(plugins_dir, basedir))

    logger.debug("Checking for dependencies in other plugin folders...")

    # Install directory-based plugins
    for requirements_file in glob(f"{plugins_dir}/*/requirements.txt"):
        logger.debug(f"Installing dependencies from '{requirements_file}'...")
        subprocess.check_call(
            [sys.executable, "-m", "pip", "install", "-r", requirements_file],
            stdout=subprocess.DEVNULL,
        )

    logger.debug("Finished installing plugin dependencies")


if __name__ == "__main__":
    install_plugin_dependencies()

View File

@@ -3,10 +3,8 @@ from __future__ import annotations
import os
import uuid
from pathlib import Path
-from tempfile import TemporaryDirectory

import pytest
-import yaml
from pytest_mock import MockerFixture

from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
@@ -48,23 +46,8 @@ def storage(app_data_dir: Path) -> FileStorage:
    return storage

-@pytest.fixture
-def temp_plugins_config_file():
-    """
-    Create a plugins_config.yaml file in a temp directory
-    so that it doesn't mess with existing ones.
-    """
-    config_directory = TemporaryDirectory()
-    config_file = Path(config_directory.name) / "plugins_config.yaml"
-    with open(config_file, "w+") as f:
-        f.write(yaml.dump({}))
-
-    yield config_file

@pytest.fixture(scope="function")
def config(
-    temp_plugins_config_file: Path,
    tmp_project_root: Path,
    app_data_dir: Path,
    mocker: MockerFixture,
@@ -75,19 +58,8 @@ def config(
    config.app_data_dir = app_data_dir

-    config.plugins_dir = "tests/unit/data/test_plugins"
-    config.plugins_config_file = temp_plugins_config_file

    config.noninteractive_mode = True

-    # avoid circular dependency
-    from autogpt.plugins.plugins_config import PluginsConfig
-
-    config.plugins_config = PluginsConfig.load_config(
-        plugins_config_file=config.plugins_config_file,
-        plugins_denylist=config.plugins_denylist,
-        plugins_allowlist=config.plugins_allowlist,
-    )

    yield config
@@ -125,7 +97,6 @@ def agent(
            smart_llm=config.smart_llm,
            allow_fs_access=not config.restrict_to_workspace,
            use_functions_api=config.openai_functions,
-            plugins=config.plugins,
        ),
        history=Agent.default_settings.history.copy(deep=True),
    )

View File

@@ -41,7 +41,6 @@ def dummy_agent(config: Config, llm_provider, memory_json_file):
            fast_llm=config.fast_llm,
            smart_llm=config.smart_llm,
            use_functions_api=config.openai_functions,
-            plugins=config.plugins,
        ),
        prompt_config=agent_prompt_config,
        history=Agent.default_settings.history.copy(deep=True),

View File

@@ -1,274 +0,0 @@
"""This is the Test plugin for AutoGPT."""
from typing import Any, Dict, List, Optional, Tuple, TypeVar
from auto_gpt_plugin_template import AutoGPTPluginTemplate
PromptGenerator = TypeVar("PromptGenerator")
class AutoGPTGuanaco(AutoGPTPluginTemplate):
"""
This is plugin for AutoGPT.
"""
def __init__(self):
super().__init__()
self._name = "AutoGPT-Guanaco"
self._version = "0.1.0"
self._description = "This is a Guanaco local model plugin."
def can_handle_on_response(self) -> bool:
"""This method is called to check that the plugin can
handle the on_response method.
Returns:
bool: True if the plugin can handle the on_response method."""
return False
def on_response(self, response: str, *args, **kwargs) -> str:
"""This method is called when a response is received from the model."""
if len(response):
print("OMG OMG It's Alive!")
else:
print("Is it alive?")
def can_handle_post_prompt(self) -> bool:
"""This method is called to check that the plugin can
handle the post_prompt method.
Returns:
bool: True if the plugin can handle the post_prompt method."""
return False
def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator:
"""This method is called just after the generate_prompt is called,
but actually before the prompt is generated.
Args:
prompt (PromptGenerator): The prompt generator.
Returns:
PromptGenerator: The prompt generator.
"""
def can_handle_on_planning(self) -> bool:
"""This method is called to check that the plugin can
handle the on_planning method.
Returns:
bool: True if the plugin can handle the on_planning method."""
return False
def on_planning(
self, prompt: PromptGenerator, messages: List[str]
) -> Optional[str]:
"""This method is called before the planning chat completeion is done.
Args:
prompt (PromptGenerator): The prompt generator.
messages (List[str]): The list of messages.
"""
def can_handle_post_planning(self) -> bool:
"""This method is called to check that the plugin can
handle the post_planning method.
Returns:
bool: True if the plugin can handle the post_planning method."""
return False
def post_planning(self, response: str) -> str:
"""This method is called after the planning chat completeion is done.
Args:
response (str): The response.
Returns:
str: The resulting response.
"""
def can_handle_pre_instruction(self) -> bool:
"""This method is called to check that the plugin can
handle the pre_instruction method.
Returns:
bool: True if the plugin can handle the pre_instruction method."""
return False
def pre_instruction(self, messages: List[str]) -> List[str]:
"""This method is called before the instruction chat is done.
Args:
messages (List[str]): The list of context messages.
Returns:
List[str]: The resulting list of messages.
"""
def can_handle_on_instruction(self) -> bool:
"""This method is called to check that the plugin can
handle the on_instruction method.
Returns:
bool: True if the plugin can handle the on_instruction method."""
return False
def on_instruction(self, messages: List[str]) -> Optional[str]:
"""This method is called when the instruction chat is done.
Args:
messages (List[str]): The list of context messages.
Returns:
Optional[str]: The resulting message.
"""
def can_handle_post_instruction(self) -> bool:
"""This method is called to check that the plugin can
handle the post_instruction method.
Returns:
bool: True if the plugin can handle the post_instruction method."""
return False
def post_instruction(self, response: str) -> str:
"""This method is called after the instruction chat is done.
Args:
response (str): The response.
Returns:
str: The resulting response.
"""
def can_handle_pre_command(self) -> bool:
"""This method is called to check that the plugin can
handle the pre_command method.
Returns:
bool: True if the plugin can handle the pre_command method."""
return False
def pre_command(
self, command_name: str, arguments: Dict[str, Any]
) -> Tuple[str, Dict[str, Any]]:
"""This method is called before the command is executed.
Args:
command_name (str): The command name.
arguments (Dict[str, Any]): The arguments.
Returns:
Tuple[str, Dict[str, Any]]: The command name and the arguments.
"""
def can_handle_post_command(self) -> bool:
"""This method is called to check that the plugin can
handle the post_command method.
Returns:
bool: True if the plugin can handle the post_command method."""
return False
def post_command(self, command_name: str, response: str) -> str:
"""This method is called after the command is executed.
Args:
command_name (str): The command name.
response (str): The response.
Returns:
str: The resulting response.
"""
def can_handle_chat_completion(
self,
messages: list[Dict[Any, Any]],
model: str,
temperature: float,
max_tokens: int,
) -> bool:
"""This method is called to check that the plugin can
handle the chat_completion method.
Args:
messages (Dict[Any, Any]): The messages.
model (str): The model name.
temperature (float): The temperature.
max_tokens (int): The max tokens.
Returns:
bool: True if the plugin can handle the chat_completion method."""
return False
def handle_chat_completion(
self,
messages: list[Dict[Any, Any]],
model: str,
temperature: float,
max_tokens: int,
) -> str:
"""This method is called when the chat completion is done.
Args:
messages (Dict[Any, Any]): The messages.
model (str): The model name.
temperature (float): The temperature.
max_tokens (int): The max tokens.
Returns:
str: The resulting response.
"""
def can_handle_text_embedding(self, text: str) -> bool:
"""This method is called to check that the plugin can
handle the text_embedding method.
Args:
text (str): The text to be convert to embedding.
Returns:
bool: True if the plugin can handle the text_embedding method."""
return False
def handle_text_embedding(self, text: str) -> list:
"""This method is called when the chat completion is done.
Args:
text (str): The text to be convert to embedding.
Returns:
list: The text embedding.
"""
def can_handle_user_input(self, user_input: str) -> bool:
"""This method is called to check that the plugin can
handle the user_input method.
Args:
user_input (str): The user input.
Returns:
bool: True if the plugin can handle the user_input method."""
return False
def user_input(self, user_input: str) -> str:
"""This method is called to request user input to the user.
Args:
user_input (str): The question or prompt to ask the user.
Returns:
str: The user input.
"""
def can_handle_report(self) -> bool:
"""This method is called to check that the plugin can
handle the report method.
Returns:
bool: True if the plugin can handle the report method."""
return False
def report(self, message: str) -> None:
"""This method is called to report a message to the user.
Args:
message (str): The message to report.
"""

View File

@@ -1,81 +0,0 @@
import pytest

from autogpt.models.base_open_ai_plugin import BaseOpenAIPlugin


class DummyPlugin(BaseOpenAIPlugin):
    """A dummy plugin for testing purposes."""


@pytest.fixture
def dummy_plugin():
    """A dummy plugin for testing purposes."""
    manifests_specs_clients = {
        "manifest": {
            "name_for_model": "Dummy",
            "schema_version": "1.0",
            "description_for_model": "A dummy plugin for testing purposes",
        },
        "client": None,
        "openapi_spec": None,
    }
    return DummyPlugin(manifests_specs_clients)


def test_dummy_plugin_inheritance(dummy_plugin):
    """Test that the DummyPlugin class inherits from the BaseOpenAIPlugin class."""
    assert isinstance(dummy_plugin, BaseOpenAIPlugin)


def test_dummy_plugin_name(dummy_plugin):
    """Test that the DummyPlugin class has the correct name."""
    assert dummy_plugin._name == "Dummy"


def test_dummy_plugin_version(dummy_plugin):
    """Test that the DummyPlugin class has the correct version."""
    assert dummy_plugin._version == "1.0"


def test_dummy_plugin_description(dummy_plugin):
    """Test that the DummyPlugin class has the correct description."""
    assert dummy_plugin._description == "A dummy plugin for testing purposes"


def test_dummy_plugin_default_methods(dummy_plugin):
    """Test that the DummyPlugin class has the correct default methods."""
    assert not dummy_plugin.can_handle_on_response()
    assert not dummy_plugin.can_handle_post_prompt()
    assert not dummy_plugin.can_handle_on_planning()
    assert not dummy_plugin.can_handle_post_planning()
    assert not dummy_plugin.can_handle_pre_instruction()
    assert not dummy_plugin.can_handle_on_instruction()
    assert not dummy_plugin.can_handle_post_instruction()
    assert not dummy_plugin.can_handle_pre_command()
    assert not dummy_plugin.can_handle_post_command()
    assert not dummy_plugin.can_handle_chat_completion(None, None, None, None)
    assert not dummy_plugin.can_handle_text_embedding(None)

    assert dummy_plugin.on_response("hello") == "hello"
    assert dummy_plugin.post_prompt(None) is None
    assert dummy_plugin.on_planning(None, None) is None
    assert dummy_plugin.post_planning("world") == "world"

    pre_instruction = dummy_plugin.pre_instruction(
        [{"role": "system", "content": "Beep, bop, boop"}]
    )
    assert isinstance(pre_instruction, list)
    assert len(pre_instruction) == 1
    assert pre_instruction[0]["role"] == "system"
    assert pre_instruction[0]["content"] == "Beep, bop, boop"

    assert dummy_plugin.on_instruction(None) is None
    assert dummy_plugin.post_instruction("I'm a robot") == "I'm a robot"

    pre_command = dummy_plugin.pre_command("evolve", {"continuously": True})
    assert isinstance(pre_command, tuple)
    assert len(pre_command) == 2
    assert pre_command[0] == "evolve"
    assert pre_command[1]["continuously"] is True

    post_command = dummy_plugin.post_command("evolve", "upgraded successfully!")
    assert isinstance(post_command, str)
    assert post_command == "upgraded successfully!"

    assert dummy_plugin.handle_chat_completion(None, None, None, None) is None
    assert dummy_plugin.handle_text_embedding(None) is None

View File

@@ -1,125 +0,0 @@
import os

import yaml

from autogpt.config.config import Config
from autogpt.plugins import inspect_zip_for_modules, scan_plugins
from autogpt.plugins.plugin_config import PluginConfig
from autogpt.plugins.plugins_config import PluginsConfig

PLUGINS_TEST_DIR = "tests/unit/data/test_plugins"
PLUGIN_TEST_ZIP_FILE = "Auto-GPT-Plugin-Test-master.zip"
PLUGIN_TEST_INIT_PY = "Auto-GPT-Plugin-Test-master/src/auto_gpt_vicuna/__init__.py"
PLUGIN_TEST_OPENAI = "https://weathergpt.vercel.app/"


def test_scan_plugins_openai(config: Config):
    config.plugins_openai = [PLUGIN_TEST_OPENAI]
    plugins_config = config.plugins_config
    plugins_config.plugins[PLUGIN_TEST_OPENAI] = PluginConfig(
        name=PLUGIN_TEST_OPENAI, enabled=True
    )

    # Test that the function returns the correct number of plugins
    result = scan_plugins(config)
    assert len(result) == 1


def test_scan_plugins_generic(config: Config):
    # Test that the function returns the correct number of plugins
    plugins_config = config.plugins_config
    plugins_config.plugins["auto_gpt_guanaco"] = PluginConfig(
        name="auto_gpt_guanaco", enabled=True
    )
    plugins_config.plugins["AutoGPTPVicuna"] = PluginConfig(
        name="AutoGPTPVicuna", enabled=True
    )
    result = scan_plugins(config)
    plugin_class_names = [plugin.__class__.__name__ for plugin in result]

    assert len(result) == 2
    assert "AutoGPTGuanaco" in plugin_class_names
    assert "AutoGPTPVicuna" in plugin_class_names


def test_scan_plugins_not_enabled(config: Config):
    # Test that the function returns the correct number of plugins
    plugins_config = config.plugins_config
    plugins_config.plugins["auto_gpt_guanaco"] = PluginConfig(
        name="auto_gpt_guanaco", enabled=True
    )
    plugins_config.plugins["auto_gpt_vicuna"] = PluginConfig(
        name="auto_gptp_vicuna", enabled=False
    )
    result = scan_plugins(config)
    plugin_class_names = [plugin.__class__.__name__ for plugin in result]

    assert len(result) == 1
    assert "AutoGPTGuanaco" in plugin_class_names
    assert "AutoGPTPVicuna" not in plugin_class_names


def test_inspect_zip_for_modules():
    result = inspect_zip_for_modules(str(f"{PLUGINS_TEST_DIR}/{PLUGIN_TEST_ZIP_FILE}"))
    assert result == [PLUGIN_TEST_INIT_PY]


def test_create_base_config(config: Config):
    """
    Test the backwards-compatibility shim to convert old plugin allow/deny list
    to a config file.
    """
    config.plugins_allowlist = ["a", "b"]
    config.plugins_denylist = ["c", "d"]

    os.remove(config.plugins_config_file)
    plugins_config = PluginsConfig.load_config(
        plugins_config_file=config.plugins_config_file,
        plugins_denylist=config.plugins_denylist,
        plugins_allowlist=config.plugins_allowlist,
    )

    # Check the structure of the plugins config data
    assert len(plugins_config.plugins) == 4
    assert plugins_config.get("a").enabled
    assert plugins_config.get("b").enabled
    assert not plugins_config.get("c").enabled
    assert not plugins_config.get("d").enabled

    # Check the saved config file
    with open(config.plugins_config_file, "r") as saved_config_file:
        saved_config = yaml.load(saved_config_file, Loader=yaml.SafeLoader)

    assert saved_config == {
        "a": {"enabled": True, "config": {}},
        "b": {"enabled": True, "config": {}},
        "c": {"enabled": False, "config": {}},
        "d": {"enabled": False, "config": {}},
    }


def test_load_config(config: Config):
    """
    Test that the plugin config is loaded correctly from the plugins_config.yaml file.
    """
    # Create a test config and write it to disk
    test_config = {
        "a": {"enabled": True, "config": {"api_key": "1234"}},
        "b": {"enabled": False, "config": {}},
    }
    with open(config.plugins_config_file, "w+") as f:
        f.write(yaml.dump(test_config))

    # Load the config from disk
    plugins_config = PluginsConfig.load_config(
        plugins_config_file=config.plugins_config_file,
        plugins_denylist=config.plugins_denylist,
        plugins_allowlist=config.plugins_allowlist,
    )

    # Check that the loaded config is equal to the test config
    assert len(plugins_config.plugins) == 2
    assert plugins_config.get("a").enabled
    assert plugins_config.get("a").config == {"api_key": "1234"}
    assert not plugins_config.get("b").enabled
    assert plugins_config.get("b").config == {}

View File

@@ -33,7 +33,6 @@ Configuration is controlled through the `Config` object. You can set configurati
- `OPENAI_API_KEY`: *REQUIRED*- Your [OpenAI API Key](https://platform.openai.com/account/api-keys).
- `OPENAI_ORGANIZATION`: Organization ID in OpenAI. Optional.
- `PLAIN_OUTPUT`: Plain output, which disables the spinner. Default: False
-- `PLUGINS_CONFIG_FILE`: Path of the Plugins Config file relative to the AutoGPT root directory. Default: plugins_config.yaml
- `PROMPT_SETTINGS_FILE`: Location of the Prompt Settings file relative to the AutoGPT root directory. Default: prompt_settings.yaml
- `REDIS_HOST`: Redis Host. Default: localhost
- `REDIS_PASSWORD`: Redis Password. Optional. Default: