From b1570543c872c342f23d15f574c75b3fbcdaed59 Mon Sep 17 00:00:00 2001
From: uta <122957026+uta0x89@users.noreply.github.com>
Date: Tue, 27 Jun 2023 06:38:16 +0900
Subject: [PATCH 01/34] Retry ServiceUnavailableError (#4789)

Co-authored-by: merwanehamadi
---
 autogpt/llm/providers/openai.py          | 16 ++++++------
 tests/unit/test_retry_provider_openai.py | 31 +++++++++++++++++++++---
 2 files changed, 37 insertions(+), 10 deletions(-)

diff --git a/autogpt/llm/providers/openai.py b/autogpt/llm/providers/openai.py
index 3c16f5cf..397b4791 100644
--- a/autogpt/llm/providers/openai.py
+++ b/autogpt/llm/providers/openai.py
@@ -9,7 +9,7 @@ from unittest.mock import patch
 import openai
 import openai.api_resources.abstract.engine_api_resource as engine_api_resource
 from colorama import Fore, Style
-from openai.error import APIError, RateLimitError, Timeout
+from openai.error import APIError, RateLimitError, ServiceUnavailableError, Timeout
 from openai.openai_object import OpenAIObject
 
 if TYPE_CHECKING:
@@ -163,7 +163,10 @@ def retry_api(
         backoff_base float: Base for exponential backoff. Defaults to 2.
         warn_user bool: Whether to warn the user. Defaults to True.
     """
-    retry_limit_msg = f"{Fore.RED}Error: " f"Reached rate limit, passing...{Fore.RESET}"
+    error_messages = {
+        ServiceUnavailableError: f"{Fore.RED}Error: The OpenAI API engine is currently overloaded, passing...{Fore.RESET}",
+        RateLimitError: f"{Fore.RED}Error: Reached rate limit, passing...{Fore.RESET}",
+    }
     api_key_error_msg = (
        f"Please double check that you have setup a "
        f"{Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. You can "
@@ -182,19 +185,18 @@
             try:
                 return func(*args, **kwargs)
 
-            except RateLimitError:
+            except (RateLimitError, ServiceUnavailableError) as e:
                 if attempt == num_attempts:
                     raise
 
-                logger.debug(retry_limit_msg)
+                error_msg = error_messages[type(e)]
+                logger.debug(error_msg)
                 if not user_warned:
                     logger.double_check(api_key_error_msg)
                     user_warned = True
 
             except (APIError, Timeout) as e:
-                if (e.http_status not in [429, 502, 503]) or (
-                    attempt == num_attempts
-                ):
+                if (e.http_status not in [429, 502]) or (attempt == num_attempts):
                     raise
 
             backoff = backoff_base ** (attempt + 2)
diff --git a/tests/unit/test_retry_provider_openai.py b/tests/unit/test_retry_provider_openai.py
index f8162eb8..b2c2d04a 100644
--- a/tests/unit/test_retry_provider_openai.py
+++ b/tests/unit/test_retry_provider_openai.py
@@ -1,10 +1,10 @@
 import pytest
-from openai.error import APIError, RateLimitError
+from openai.error import APIError, RateLimitError, ServiceUnavailableError
 
 from autogpt.llm.providers import openai
 
 
-@pytest.fixture(params=[RateLimitError, APIError])
+@pytest.fixture(params=[RateLimitError, ServiceUnavailableError, APIError])
 def error(request):
     if request.param == APIError:
         return request.param("Error", http_status=502)
@@ -52,7 +52,7 @@ def test_retry_open_api_no_error(capsys):
     ids=["passing", "passing_edge", "failing", "failing_edge", "failing_no_retries"],
 )
 def test_retry_open_api_passing(capsys, error, error_count, retry_count, failure):
-    """Tests the retry with simulated errors [RateLimitError, APIError], but should ulimately pass"""
+    """Tests the retry with simulated errors [RateLimitError, ServiceUnavailableError, APIError], but should ultimately pass"""
     call_count = min(error_count, retry_count) + 1
 
     raises = error_factory(error, error_count, retry_count)
@@ -71,6 +71,12 @@ def test_retry_open_api_passing(capsys, error, error_count, retry_count, failure):
     if type(error) == RateLimitError:
         assert "Reached rate limit, passing..." in output.out
         assert "Please double check" in output.out
+    if type(error) == ServiceUnavailableError:
+        assert (
+            "The OpenAI API engine is currently overloaded, passing..."
+            in output.out
+        )
+        assert "Please double check" in output.out
     if type(error) == APIError:
         assert "API Bad gateway" in output.out
     else:
@@ -94,6 +100,25 @@ def test_retry_open_api_rate_limit_no_warn(capsys):
     assert "Please double check" not in output.out
 
 
+def test_retry_open_api_service_unavailable_no_warn(capsys):
+    """Tests the retry logic with a service unavailable error"""
+    error_count = 2
+    retry_count = 10
+
+    raises = error_factory(
+        ServiceUnavailableError, error_count, retry_count, warn_user=False
+    )
+    result = raises()
+    call_count = min(error_count, retry_count) + 1
+    assert result == call_count
+    assert raises.count == call_count
+
+    output = capsys.readouterr()
+
+    assert "The OpenAI API engine is currently overloaded, passing..." in output.out
+    assert "Please double check" not in output.out
+
+
 def test_retry_openapi_other_api_error(capsys):
     """Tests the Retry logic with a non rate limit error such as HTTP500"""
     error_count = 2

From 9f353f41c45940c90bd3a0082e4395616368d9b5 Mon Sep 17 00:00:00 2001
From: merwanehamadi
Date: Mon, 26 Jun 2023 17:01:36 -0700
Subject: [PATCH 02/34] Use Configuration of the rearch branch (#4803)

---
 autogpt/config/config.py               | 486 ++++++++++++-------------
 autogpt/configurator.py                |  28 +-
 autogpt/core/configuration/__init__.py |   0
 autogpt/core/configuration/schema.py   |  98 +++++
 autogpt/main.py                        |   7 +-
 autogpt/plugins/__init__.py            |   2 +-
 benchmarks.py                          |   6 +-
 data_ingestion.py                      |   2 +-
 tests/conftest.py                      |  11 +-
 tests/integration/agent_factory.py     |   4 +-
 tests/integration/memory/utils.py      |   4 +-
 tests/unit/test_config.py              |  44 +--
 tests/unit/test_plugins.py             |   5 +-
 13 files changed, 389 insertions(+), 308 deletions(-)
 create mode 100644 autogpt/core/configuration/__init__.py
 create mode 100644 autogpt/core/configuration/schema.py

diff --git a/autogpt/config/config.py b/autogpt/config/config.py
index f4dff28d..03c7179f 100644
--- a/autogpt/config/config.py
+++ b/autogpt/config/config.py
@@ -1,223 +1,263 @@
 """Configuration class to store the state of bools for different scripts access."""
+import contextlib
 import os
 import re
-from typing import List
+from typing import Dict
 
-import openai
 import yaml
-from auto_gpt_plugin_template import AutoGPTPluginTemplate
 from colorama import Fore
 
-import autogpt
+from autogpt.core.configuration.schema import Configurable, SystemSettings
+
+AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "../..", "azure.yaml")
+from typing import Optional
 
 
-class Config:
-    """
-    Configuration class to store the state of bools for different scripts access.
- """ +class ConfigSettings(SystemSettings): + fast_llm_model: str + smart_llm_model: str + continuous_mode: bool + skip_news: bool + workspace_path: Optional[str] + file_logger_path: Optional[str] + debug_mode: bool + plugins_dir: str + plugins_config: dict[str, str] + continuous_limit: int + speak_mode: bool + skip_reprompt: bool + allow_downloads: bool + exit_key: str + plain_output: bool + disabled_command_categories: list[str] + shell_command_control: str + shell_denylist: list[str] + shell_allowlist: list[str] + ai_settings_file: str + prompt_settings_file: str + embedding_model: str + browse_spacy_language_model: str + openai_api_key: Optional[str] + openai_organization: Optional[str] + temperature: float + use_azure: bool + execute_local_commands: bool + restrict_to_workspace: bool + openai_api_type: Optional[str] + openai_api_base: Optional[str] + openai_api_version: Optional[str] + openai_functions: bool + elevenlabs_api_key: Optional[str] + streamelements_voice: str + text_to_speech_provider: str + github_api_key: Optional[str] + github_username: Optional[str] + google_api_key: Optional[str] + google_custom_search_engine_id: Optional[str] + image_provider: Optional[str] + image_size: int + huggingface_api_token: Optional[str] + huggingface_image_model: str + audio_to_text_provider: str + huggingface_audio_to_text_model: Optional[str] + sd_webui_url: Optional[str] + sd_webui_auth: Optional[str] + selenium_web_browser: str + selenium_headless: bool + user_agent: str + memory_backend: str + memory_index: str + redis_host: str + redis_port: int + redis_password: str + wipe_redis_on_start: bool + plugins_allowlist: list[str] + plugins_denylist: list[str] + plugins_openai: list[str] + plugins_config_file: str + chat_messages_enabled: bool + elevenlabs_voice_id: Optional[str] + plugins: list[str] + authorise_key: str - def __init__(self) -> None: + +class Config(Configurable): + default_plugins_config_file = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "..", "..", "plugins_config.yaml" + ) + + elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY") + if os.getenv("USE_MAC_OS_TTS"): + default_tts_provider = "macos" + elif elevenlabs_api_key: + default_tts_provider = "elevenlabs" + elif os.getenv("USE_BRIAN_TTS"): + default_tts_provider = "streamelements" + else: + default_tts_provider = "gtts" + + defaults_settings = ConfigSettings( + name="Default Server Config", + description="This is a default server configuration", + smart_llm_model="gpt-3.5-turbo", + fast_llm_model="gpt-3.5-turbo", + continuous_mode=False, + continuous_limit=0, + skip_news=False, + debug_mode=False, + plugins_dir="plugins", + plugins_config={}, + speak_mode=False, + skip_reprompt=False, + allow_downloads=False, + exit_key="n", + plain_output=False, + disabled_command_categories=[], + shell_command_control="denylist", + shell_denylist=["sudo", "su"], + shell_allowlist=[], + ai_settings_file="ai_settings.yaml", + prompt_settings_file="prompt_settings.yaml", + embedding_model="text-embedding-ada-002", + browse_spacy_language_model="en_core_web_sm", + temperature=0, + use_azure=False, + execute_local_commands=False, + restrict_to_workspace=True, + openai_functions=False, + streamelements_voice="Brian", + text_to_speech_provider=default_tts_provider, + image_size=256, + huggingface_image_model="CompVis/stable-diffusion-v1-4", + audio_to_text_provider="huggingface", + sd_webui_url="http://localhost:7860", + selenium_web_browser="chrome", + selenium_headless=True, + user_agent="Mozilla/5.0 (Macintosh; Intel 
Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36", + memory_backend="json_file", + memory_index="auto-gpt-memory", + redis_host="localhost", + redis_port=6379, + wipe_redis_on_start=True, + plugins_allowlist=[], + plugins_denylist=[], + plugins_openai=[], + plugins_config_file=default_plugins_config_file, + chat_messages_enabled=True, + plugins=[], + authorise_key="y", + redis_password="", + ) + + @classmethod + def build_config_from_env(cls): """Initialize the Config class""" - self.workspace_path: str = None - self.file_logger_path: str = None - - self.debug_mode = False - self.continuous_mode = False - self.continuous_limit = 0 - self.speak_mode = False - self.skip_reprompt = False - self.allow_downloads = False - self.skip_news = False - - self.authorise_key = os.getenv("AUTHORISE_COMMAND_KEY", "y") - self.exit_key = os.getenv("EXIT_KEY", "n") - self.plain_output = os.getenv("PLAIN_OUTPUT", "False") == "True" + config_dict = { + "authorise_key": os.getenv("AUTHORISE_COMMAND_KEY"), + "exit_key": os.getenv("EXIT_KEY"), + "plain_output": os.getenv("PLAIN_OUTPUT", "False") == "True", + "shell_command_control": os.getenv("SHELL_COMMAND_CONTROL"), + "ai_settings_file": os.getenv("AI_SETTINGS_FILE"), + "prompt_settings_file": os.getenv("PROMPT_SETTINGS_FILE"), + "fast_llm_model": os.getenv("FAST_LLM_MODEL"), + "smart_llm_model": os.getenv("SMART_LLM_MODEL"), + "embedding_model": os.getenv("EMBEDDING_MODEL"), + "browse_spacy_language_model": os.getenv("BROWSE_SPACY_LANGUAGE_MODEL"), + "openai_api_key": os.getenv("OPENAI_API_KEY"), + "use_azure": os.getenv("USE_AZURE") == "True", + "execute_local_commands": os.getenv("EXECUTE_LOCAL_COMMANDS", "False") + == "True", + "restrict_to_workspace": os.getenv("RESTRICT_TO_WORKSPACE", "True") + == "True", + "openai_functions": os.getenv("OPENAI_FUNCTIONS", "False") == "True", + "elevenlabs_api_key": os.getenv("ELEVENLABS_API_KEY"), + "streamelements_voice": os.getenv("STREAMELEMENTS_VOICE"), + "text_to_speech_provider": os.getenv("TEXT_TO_SPEECH_PROVIDER"), + "github_api_key": os.getenv("GITHUB_API_KEY"), + "github_username": os.getenv("GITHUB_USERNAME"), + "google_api_key": os.getenv("GOOGLE_API_KEY"), + "image_provider": os.getenv("IMAGE_PROVIDER"), + "huggingface_api_token": os.getenv("HUGGINGFACE_API_TOKEN"), + "huggingface_image_model": os.getenv("HUGGINGFACE_IMAGE_MODEL"), + "audio_to_text_provider": os.getenv("AUDIO_TO_TEXT_PROVIDER"), + "huggingface_audio_to_text_model": os.getenv( + "HUGGINGFACE_AUDIO_TO_TEXT_MODEL" + ), + "sd_webui_url": os.getenv("SD_WEBUI_URL"), + "sd_webui_auth": os.getenv("SD_WEBUI_AUTH"), + "selenium_web_browser": os.getenv("USE_WEB_BROWSER"), + "selenium_headless": os.getenv("HEADLESS_BROWSER", "True") == "True", + "user_agent": os.getenv("USER_AGENT"), + "memory_backend": os.getenv("MEMORY_BACKEND"), + "memory_index": os.getenv("MEMORY_INDEX"), + "redis_host": os.getenv("REDIS_HOST"), + "redis_password": os.getenv("REDIS_PASSWORD"), + "wipe_redis_on_start": os.getenv("WIPE_REDIS_ON_START", "True") == "True", + "plugins_dir": os.getenv("PLUGINS_DIR"), + "plugins_config_file": os.getenv("PLUGINS_CONFIG_FILE"), + "chat_messages_enabled": os.getenv("CHAT_MESSAGES_ENABLED") == "True", + } + # Converting to a list from comma-separated string disabled_command_categories = os.getenv("DISABLED_COMMAND_CATEGORIES") if disabled_command_categories: - self.disabled_command_categories = disabled_command_categories.split(",") - else: - self.disabled_command_categories = [] + config_dict[ + 
"disabled_command_categories" + ] = disabled_command_categories.split(",") - self.shell_command_control = os.getenv("SHELL_COMMAND_CONTROL", "denylist") - - # DENY_COMMANDS is deprecated and included for backwards-compatibility + # Converting to a list from comma-separated string shell_denylist = os.getenv("SHELL_DENYLIST", os.getenv("DENY_COMMANDS")) if shell_denylist: - self.shell_denylist = shell_denylist.split(",") - else: - self.shell_denylist = ["sudo", "su"] + config_dict["shell_denylist"] = shell_denylist.split(",") - # ALLOW_COMMANDS is deprecated and included for backwards-compatibility shell_allowlist = os.getenv("SHELL_ALLOWLIST", os.getenv("ALLOW_COMMANDS")) if shell_allowlist: - self.shell_allowlist = shell_allowlist.split(",") - else: - self.shell_allowlist = [] + config_dict["shell_allowlist"] = shell_allowlist.split(",") - self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml") - self.prompt_settings_file = os.getenv( - "PROMPT_SETTINGS_FILE", "prompt_settings.yaml" - ) - self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo") - self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-3.5-turbo") - self.embedding_model = os.getenv("EMBEDDING_MODEL", "text-embedding-ada-002") - - self.browse_spacy_language_model = os.getenv( - "BROWSE_SPACY_LANGUAGE_MODEL", "en_core_web_sm" - ) - - self.openai_api_key = os.getenv("OPENAI_API_KEY") - self.openai_organization = os.getenv("OPENAI_ORGANIZATION") - self.temperature = float(os.getenv("TEMPERATURE", "0")) - self.use_azure = os.getenv("USE_AZURE") == "True" - self.execute_local_commands = ( - os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True" - ) - self.restrict_to_workspace = ( - os.getenv("RESTRICT_TO_WORKSPACE", "True") == "True" - ) - - if self.use_azure: - self.load_azure_config() - openai.api_type = self.openai_api_type - openai.api_base = self.openai_api_base - openai.api_version = self.openai_api_version - elif os.getenv("OPENAI_API_BASE_URL", None): - openai.api_base = os.getenv("OPENAI_API_BASE_URL") - - if self.openai_organization is not None: - openai.organization = self.openai_organization - - self.openai_functions = os.getenv("OPENAI_FUNCTIONS", "False") == "True" - - self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY") - # ELEVENLABS_VOICE_1_ID is deprecated and included for backwards-compatibility - self.elevenlabs_voice_id = os.getenv( - "ELEVENLABS_VOICE_ID", os.getenv("ELEVENLABS_VOICE_1_ID") - ) - self.streamelements_voice = os.getenv("STREAMELEMENTS_VOICE", "Brian") - - # Backwards-compatibility shim for deprecated env variables - if os.getenv("USE_MAC_OS_TTS"): - default_tts_provider = "macos" - elif self.elevenlabs_api_key: - default_tts_provider = "elevenlabs" - elif os.getenv("USE_BRIAN_TTS"): - default_tts_provider = "streamelements" - else: - default_tts_provider = "gtts" - - self.text_to_speech_provider = os.getenv( - "TEXT_TO_SPEECH_PROVIDER", default_tts_provider - ) - - self.github_api_key = os.getenv("GITHUB_API_KEY") - self.github_username = os.getenv("GITHUB_USERNAME") - - self.google_api_key = os.getenv("GOOGLE_API_KEY") - # CUSTOM_SEARCH_ENGINE_ID is deprecated and included for backwards-compatibility - self.google_custom_search_engine_id = os.getenv( + config_dict["google_custom_search_engine_id"] = os.getenv( "GOOGLE_CUSTOM_SEARCH_ENGINE_ID", os.getenv("CUSTOM_SEARCH_ENGINE_ID") ) - self.image_provider = os.getenv("IMAGE_PROVIDER") - self.image_size = int(os.getenv("IMAGE_SIZE", 256)) - self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN") - 
self.huggingface_image_model = os.getenv( - "HUGGINGFACE_IMAGE_MODEL", "CompVis/stable-diffusion-v1-4" - ) - self.audio_to_text_provider = os.getenv("AUDIO_TO_TEXT_PROVIDER", "huggingface") - self.huggingface_audio_to_text_model = os.getenv( - "HUGGINGFACE_AUDIO_TO_TEXT_MODEL" - ) - self.sd_webui_url = os.getenv("SD_WEBUI_URL", "http://localhost:7860") - self.sd_webui_auth = os.getenv("SD_WEBUI_AUTH") - - # Selenium browser settings - self.selenium_web_browser = os.getenv("USE_WEB_BROWSER", "chrome") - self.selenium_headless = os.getenv("HEADLESS_BROWSER", "True") == "True" - - # User agent header to use when making HTTP requests - # Some websites might just completely deny request with an error code if - # no user agent was found. - self.user_agent = os.getenv( - "USER_AGENT", - "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36" - " (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36", + config_dict["elevenlabs_voice_id"] = os.getenv( + "ELEVENLABS_VOICE_ID", os.getenv("ELEVENLABS_VOICE_1_ID") ) - self.memory_backend = os.getenv("MEMORY_BACKEND", "json_file") - self.memory_index = os.getenv("MEMORY_INDEX", "auto-gpt-memory") - - self.redis_host = os.getenv("REDIS_HOST", "localhost") - self.redis_port = int(os.getenv("REDIS_PORT", "6379")) - self.redis_password = os.getenv("REDIS_PASSWORD", "") - self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == "True" - - self.plugins_dir = os.getenv("PLUGINS_DIR", "plugins") - self.plugins: List[AutoGPTPluginTemplate] = [] - self.plugins_openai = [] - - # Deprecated. Kept for backwards-compatibility. Will remove in a future version. plugins_allowlist = os.getenv("ALLOWLISTED_PLUGINS") if plugins_allowlist: - self.plugins_allowlist = plugins_allowlist.split(",") - else: - self.plugins_allowlist = [] + config_dict["plugins_allowlist"] = plugins_allowlist.split(",") - # Deprecated. Kept for backwards-compatibility. Will remove in a future version. 
         plugins_denylist = os.getenv("DENYLISTED_PLUGINS")
         if plugins_denylist:
-            self.plugins_denylist = plugins_denylist.split(",")
-        else:
-            self.plugins_denylist = []
+            config_dict["plugins_denylist"] = plugins_denylist.split(",")
 
-        # Avoid circular imports
-        from autogpt.plugins import DEFAULT_PLUGINS_CONFIG_FILE
+        with contextlib.suppress(TypeError):
+            config_dict["image_size"] = int(os.getenv("IMAGE_SIZE"))
+        with contextlib.suppress(TypeError):
+            config_dict["redis_port"] = int(os.getenv("REDIS_PORT"))
+        with contextlib.suppress(TypeError):
+            config_dict["temperature"] = float(os.getenv("TEMPERATURE"))
 
-        self.plugins_config_file = os.getenv(
-            "PLUGINS_CONFIG_FILE", DEFAULT_PLUGINS_CONFIG_FILE
-        )
-        self.load_plugins_config()
+        if config_dict["use_azure"]:
+            azure_config = cls.load_azure_config()
+            config_dict["openai_api_type"] = azure_config["openai_api_type"]
+            config_dict["openai_api_base"] = azure_config["openai_api_base"]
+            config_dict["openai_api_version"] = azure_config["openai_api_version"]
 
-        self.chat_messages_enabled = os.getenv("CHAT_MESSAGES_ENABLED") == "True"
+        if os.getenv("OPENAI_API_BASE_URL"):
+            config_dict["openai_api_base"] = os.getenv("OPENAI_API_BASE_URL")
 
-    def load_plugins_config(self) -> "autogpt.plugins.PluginsConfig":
-        # Avoid circular import
-        from autogpt.plugins.plugins_config import PluginsConfig
+        openai_organization = os.getenv("OPENAI_ORGANIZATION")
+        if openai_organization is not None:
+            config_dict["openai_organization"] = openai_organization
 
-        self.plugins_config = PluginsConfig.load_config(global_config=self)
-        return self.plugins_config
+        config_dict_without_none_values = {
+            k: v for k, v in config_dict.items() if v is not None
+        }
 
-    def get_azure_deployment_id_for_model(self, model: str) -> str:
-        """
-        Returns the relevant deployment id for the model specified.
+        return cls.build_agent_configuration(config_dict_without_none_values)
 
-        Parameters:
-            model(str): The model to map to the deployment id.
-
-        Returns:
-            The matching deployment id if found, otherwise an empty string.
-        """
-        if model == self.fast_llm_model:
-            return self.azure_model_to_deployment_id_map[
-                "fast_llm_model_deployment_id"
-            ]  # type: ignore
-        elif model == self.smart_llm_model:
-            return self.azure_model_to_deployment_id_map[
-                "smart_llm_model_deployment_id"
-            ]  # type: ignore
-        elif model == "text-embedding-ada-002":
-            return self.azure_model_to_deployment_id_map[
-                "embedding_model_deployment_id"
-            ]  # type: ignore
-        else:
-            return ""
-
-    AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "../..", "azure.yaml")
-
-    def load_azure_config(self, config_file: str = AZURE_CONFIG_FILE) -> None:
+    @classmethod
+    def load_azure_config(cls, config_file: str = AZURE_CONFIG_FILE) -> Dict[str, str]:
         """
         Loads the configuration parameters for Azure hosting from the specified file
         path as a yaml file.
@@ -226,80 +266,20 @@ class Config:
             config_file(str): The path to the config yaml file.
DEFAULT: "../azure.yaml" Returns: - None + Dict """ with open(config_file) as file: config_params = yaml.load(file, Loader=yaml.FullLoader) or {} - self.openai_api_type = config_params.get("azure_api_type") or "azure" - self.openai_api_base = config_params.get("azure_api_base") or "" - self.openai_api_version = ( - config_params.get("azure_api_version") or "2023-03-15-preview" - ) - self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", {}) - def set_continuous_mode(self, value: bool) -> None: - """Set the continuous mode value.""" - self.continuous_mode = value - - def set_continuous_limit(self, value: int) -> None: - """Set the continuous limit value.""" - self.continuous_limit = value - - def set_speak_mode(self, value: bool) -> None: - """Set the speak mode value.""" - self.speak_mode = value - - def set_fast_llm_model(self, value: str) -> None: - """Set the fast LLM model value.""" - self.fast_llm_model = value - - def set_smart_llm_model(self, value: str) -> None: - """Set the smart LLM model value.""" - self.smart_llm_model = value - - def set_embedding_model(self, value: str) -> None: - """Set the model to use for creating embeddings.""" - self.embedding_model = value - - def set_openai_api_key(self, value: str) -> None: - """Set the OpenAI API key value.""" - self.openai_api_key = value - - def set_elevenlabs_api_key(self, value: str) -> None: - """Set the ElevenLabs API key value.""" - self.elevenlabs_api_key = value - - def set_elevenlabs_voice_1_id(self, value: str) -> None: - """Set the ElevenLabs Voice 1 ID value.""" - self.elevenlabs_voice_id = value - - def set_elevenlabs_voice_2_id(self, value: str) -> None: - """Set the ElevenLabs Voice 2 ID value.""" - self.elevenlabs_voice_2_id = value - - def set_google_api_key(self, value: str) -> None: - """Set the Google API key value.""" - self.google_api_key = value - - def set_custom_search_engine_id(self, value: str) -> None: - """Set the custom search engine id value.""" - self.google_custom_search_engine_id = value - - def set_debug_mode(self, value: bool) -> None: - """Set the debug mode value.""" - self.debug_mode = value - - def set_plugins(self, value: list) -> None: - """Set the plugins value.""" - self.plugins = value - - def set_temperature(self, value: int) -> None: - """Set the temperature value.""" - self.temperature = value - - def set_memory_backend(self, name: str) -> None: - """Set the memory backend name.""" - self.memory_backend = name + return { + "openai_api_type": config_params.get("azure_api_type") or "azure", + "openai_api_base": config_params.get("azure_api_base") or "", + "openai_api_version": config_params.get("azure_api_version") + or "2023-03-15-preview", + "azure_model_to_deployment_id_map": config_params.get( + "azure_model_map", {} + ), + } def check_openai_api_key(config: Config) -> None: @@ -318,7 +298,7 @@ def check_openai_api_key(config: Config) -> None: openai_api_key = openai_api_key.strip() if re.search(key_pattern, openai_api_key): os.environ["OPENAI_API_KEY"] = openai_api_key - cfg.set_openai_api_key(openai_api_key) + config.openai_api_key = openai_api_key print( Fore.GREEN + "OpenAI API key successfully set!\n" diff --git a/autogpt/configurator.py b/autogpt/configurator.py index 324f3084..cc21414c 100644 --- a/autogpt/configurator.py +++ b/autogpt/configurator.py @@ -51,13 +51,13 @@ def create_config( allow_downloads (bool): Whether to allow Auto-GPT to download files natively skips_news (bool): Whether to suppress the output of latest news on startup """ - 
config.set_debug_mode(False) - config.set_continuous_mode(False) - config.set_speak_mode(False) + config.debug_mode = False + config.continuous_mode = False + config.speak_mode = False if debug: logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED") - config.set_debug_mode(True) + config.debug_mode = True if continuous: logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED") @@ -68,13 +68,13 @@ def create_config( " cause your AI to run forever or carry out actions you would not usually" " authorise. Use at your own risk.", ) - config.set_continuous_mode(True) + config.continuous_mode = True if continuous_limit: logger.typewriter_log( "Continuous Limit: ", Fore.GREEN, f"{continuous_limit}" ) - config.set_continuous_limit(continuous_limit) + config.continuous_limit = continuous_limit # Check if continuous limit is used without continuous mode if continuous_limit and not continuous: @@ -82,14 +82,14 @@ def create_config( if speak: logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED") - config.set_speak_mode(True) + config.speak_mode = True # Set the default LLM models if gpt3only: logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED") # --gpt3only should always use gpt-3.5-turbo, despite user's FAST_LLM_MODEL config - config.set_fast_llm_model(GPT_3_MODEL) - config.set_smart_llm_model(GPT_3_MODEL) + config.fast_llm_model = GPT_3_MODEL + config.smart_llm_model = GPT_3_MODEL elif ( gpt4only @@ -97,13 +97,11 @@ def create_config( ): logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED") # --gpt4only should always use gpt-4, despite user's SMART_LLM_MODEL config - config.set_fast_llm_model(GPT_4_MODEL) - config.set_smart_llm_model(GPT_4_MODEL) + config.fast_llm_model = GPT_4_MODEL + config.smart_llm_model = GPT_4_MODEL else: - config.set_fast_llm_model(check_model(config.fast_llm_model, "fast_llm_model")) - config.set_smart_llm_model( - check_model(config.smart_llm_model, "smart_llm_model") - ) + config.fast_llm_model = check_model(config.fast_llm_model, "fast_llm_model") + config.smart_llm_model = check_model(config.smart_llm_model, "smart_llm_model") if memory_type: supported_memory = get_supported_memory_backends() diff --git a/autogpt/core/configuration/__init__.py b/autogpt/core/configuration/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/autogpt/core/configuration/schema.py b/autogpt/core/configuration/schema.py new file mode 100644 index 00000000..cff4dfe8 --- /dev/null +++ b/autogpt/core/configuration/schema.py @@ -0,0 +1,98 @@ +import abc +import copy +import typing +from typing import Any + +from pydantic import BaseModel + + +class SystemConfiguration(BaseModel): + def get_user_config(self) -> dict[str, Any]: + return _get_user_config_fields(self) + + class Config: + extra = "forbid" + use_enum_values = True + + +class SystemSettings(BaseModel, abc.ABC): + """A base class for all system settings.""" + + name: str + description: typing.Optional[str] + + class Config: + extra = "forbid" + use_enum_values = True + + +class Configurable(abc.ABC): + """A base class for all configurable objects.""" + + prefix: str = "" + defaults_settings: typing.ClassVar[SystemSettings] + + @classmethod + def get_user_config(cls) -> dict[str, Any]: + return _get_user_config_fields(cls.defaults_settings) + + @classmethod + def build_agent_configuration(cls, configuration: dict = {}) -> SystemSettings: + """Process the configuration for this object.""" + + defaults_settings = cls.defaults_settings.dict() + final_configuration = 
deep_update(defaults_settings, configuration) + + return cls.defaults_settings.__class__.parse_obj(final_configuration) + + +def _get_user_config_fields(instance: BaseModel) -> dict[str, Any]: + """ + Get the user config fields of a Pydantic model instance. + Args: + instance: The Pydantic model instance. + Returns: + The user config fields of the instance. + """ + user_config_fields = {} + + for name, value in instance.__dict__.items(): + field_info = instance.__fields__[name] + if "user_configurable" in field_info.field_info.extra: + user_config_fields[name] = value + elif isinstance(value, SystemConfiguration): + user_config_fields[name] = value.get_user_config() + elif isinstance(value, list) and all( + isinstance(i, SystemConfiguration) for i in value + ): + user_config_fields[name] = [i.get_user_config() for i in value] + elif isinstance(value, dict) and all( + isinstance(i, SystemConfiguration) for i in value.values() + ): + user_config_fields[name] = { + k: v.get_user_config() for k, v in value.items() + } + + return user_config_fields + + +def deep_update(original_dict: dict, update_dict: dict) -> dict: + """ + Recursively update a dictionary. + Args: + original_dict (dict): The dictionary to be updated. + update_dict (dict): The dictionary to update with. + Returns: + dict: The updated dictionary. + """ + original_dict = copy.deepcopy(original_dict) + for key, value in update_dict.items(): + if ( + key in original_dict + and isinstance(original_dict[key], dict) + and isinstance(value, dict) + ): + original_dict[key] = deep_update(original_dict[key], value) + else: + original_dict[key] = value + return original_dict diff --git a/autogpt/main.py b/autogpt/main.py index 2d2d6d97..3c1f722d 100644 --- a/autogpt/main.py +++ b/autogpt/main.py @@ -5,7 +5,7 @@ import sys from colorama import Fore, Style from autogpt.agent import Agent -from autogpt.config import Config, check_openai_api_key +from autogpt.config.config import Config, check_openai_api_key from autogpt.configurator import create_config from autogpt.logs import logger from autogpt.memory.vector import get_memory @@ -52,7 +52,8 @@ def run_auto_gpt( logger.set_level(logging.DEBUG if debug else logging.INFO) logger.speak_mode = speak - config = Config() + config = Config.build_config_from_env() + # TODO: fill in llm values here check_openai_api_key(config) @@ -120,7 +121,7 @@ def run_auto_gpt( # HACK: doing this here to collect some globals that depend on the workspace. 
     Workspace.build_file_logger_path(config, workspace_directory)
 
-    config.set_plugins(scan_plugins(config, config.debug_mode))
+    config.plugins = scan_plugins(config, config.debug_mode)
 
     # Create a CommandRegistry instance and scan default folder
     command_registry = CommandRegistry()
diff --git a/autogpt/plugins/__init__.py b/autogpt/plugins/__init__.py
index 30a43d9f..600d6b4f 100644
--- a/autogpt/plugins/__init__.py
+++ b/autogpt/plugins/__init__.py
@@ -219,8 +219,8 @@ def scan_plugins(config: Config, debug: bool = False) -> List[AutoGPTPluginTemplate]:
     loaded_plugins = []
     # Generic plugins
     plugins_path_path = Path(config.plugins_dir)
-    plugins_config = config.plugins_config
 
+    plugins_config = config.plugins_config
     # Directory-based plugins
     for plugin_path in [f.path for f in os.scandir(config.plugins_dir) if f.is_dir()]:
         # Avoid going into __pycache__ or other hidden directories
diff --git a/benchmarks.py b/benchmarks.py
index 1cbe6f1e..fe4d3207 100644
--- a/benchmarks.py
+++ b/benchmarks.py
@@ -13,9 +13,9 @@ def run_task(task) -> None:
 
 
 def bootstrap_agent(task):
-    config = Config()
-    config.set_continuous_mode(False)
-    config.set_temperature(0)
+    config = Config.build_config_from_env()
+    config.continuous_mode = False
+    config.temperature = 0
     config.plain_output = True
     command_registry = get_command_registry(config)
     config.memory_backend = "no_memory"
diff --git a/data_ingestion.py b/data_ingestion.py
index ae32b47c..5149ddcc 100644
--- a/data_ingestion.py
+++ b/data_ingestion.py
@@ -5,7 +5,7 @@ from autogpt.commands.file_operations import ingest_file, list_files
 from autogpt.config import Config
 from autogpt.memory.vector import VectorMemory, get_memory
 
-config = Config()
+config = Config.build_config_from_env()
 
 
 def configure_logging():
diff --git a/tests/conftest.py b/tests/conftest.py
index e375decb..5d2c000a 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -49,13 +49,17 @@ def temp_plugins_config_file():
 def config(
     temp_plugins_config_file: str, mocker: MockerFixture, workspace: Workspace
 ) -> Config:
-    config = Config()
+    config = Config.build_config_from_env()
     if not os.environ.get("OPENAI_API_KEY"):
         os.environ["OPENAI_API_KEY"] = "sk-dummy"
 
     config.plugins_dir = "tests/unit/data/test_plugins"
     config.plugins_config_file = temp_plugins_config_file
-    config.load_plugins_config()
+
+    # avoid circular dependency
+    from autogpt.plugins.plugins_config import PluginsConfig
+
+    config.plugins_config = PluginsConfig.load_config(global_config=config)
 
     # Do a little setup and teardown since the config object is a singleton
     mocker.patch.multiple(
@@ -95,8 +99,7 @@ def agent(config: Config, workspace: Workspace) -> Agent:
 
     command_registry = CommandRegistry()
     ai_config.command_registry = command_registry
-
-    config.set_memory_backend("json_file")
+    config.memory_backend = "json_file"
     memory_json_file = get_memory(config)
     memory_json_file.clear()
diff --git a/tests/integration/agent_factory.py b/tests/integration/agent_factory.py
index 29b958ee..664c6cbb 100644
--- a/tests/integration/agent_factory.py
+++ b/tests/integration/agent_factory.py
@@ -11,12 +11,12 @@ from autogpt.workspace import Workspace
 def memory_json_file(config: Config):
     was_memory_backend = config.memory_backend
 
-    config.set_memory_backend("json_file")
+    config.memory_backend = "json_file"
     memory = get_memory(config)
     memory.clear()
     yield memory
 
-    config.set_memory_backend(was_memory_backend)
+    config.memory_backend = was_memory_backend
 
 
 @pytest.fixture
diff --git a/tests/integration/memory/utils.py b/tests/integration/memory/utils.py
index 374eab30..1f791160 100644
--- a/tests/integration/memory/utils.py
+++ b/tests/integration/memory/utils.py
@@ -38,7 +38,7 @@ def mock_get_embedding(mocker: MockerFixture, embedding_dimension: int):
 def memory_none(agent_test_config: Config, mock_get_embedding):
     was_memory_backend = agent_test_config.memory_backend
 
-    agent_test_config.set_memory_backend("no_memory")
+    agent_test_config.memory_backend = "no_memory"
     yield get_memory(agent_test_config)
 
-    agent_test_config.set_memory_backend(was_memory_backend)
+    agent_test_config.memory_backend = was_memory_backend
diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py
index 19f474e2..1903fd16 100644
--- a/tests/unit/test_config.py
+++ b/tests/unit/test_config.py
@@ -30,11 +30,11 @@ def test_set_continuous_mode(config: Config):
     # Store continuous mode to reset it after the test
     continuous_mode = config.continuous_mode
 
-    config.set_continuous_mode(True)
+    config.continuous_mode = True
     assert config.continuous_mode == True
 
     # Reset continuous mode
-    config.set_continuous_mode(continuous_mode)
+    config.continuous_mode = continuous_mode
 
 
 def test_set_speak_mode(config: Config):
@@ -44,11 +44,11 @@ def test_set_speak_mode(config: Config):
     # Store speak mode to reset it after the test
     speak_mode = config.speak_mode
 
-    config.set_speak_mode(True)
+    config.speak_mode = True
     assert config.speak_mode == True
 
     # Reset speak mode
-    config.set_speak_mode(speak_mode)
+    config.speak_mode = speak_mode
 
 
 def test_set_fast_llm_model(config: Config):
@@ -58,11 +58,11 @@ def test_set_fast_llm_model(config: Config):
     # Store model name to reset it after the test
     fast_llm_model = config.fast_llm_model
 
-    config.set_fast_llm_model("gpt-3.5-turbo-test")
+    config.fast_llm_model = "gpt-3.5-turbo-test"
     assert config.fast_llm_model == "gpt-3.5-turbo-test"
 
     # Reset model name
-    config.set_fast_llm_model(fast_llm_model)
+    config.fast_llm_model = fast_llm_model
 
 
 def test_set_smart_llm_model(config: Config):
@@ -72,11 +72,11 @@ def test_set_smart_llm_model(config: Config):
     # Store model name to reset it after the test
     smart_llm_model = config.smart_llm_model
 
-    config.set_smart_llm_model("gpt-4-test")
+    config.smart_llm_model = "gpt-4-test"
     assert config.smart_llm_model == "gpt-4-test"
 
     # Reset model name
-    config.set_smart_llm_model(smart_llm_model)
+    config.smart_llm_model = smart_llm_model
 
 
 def test_set_debug_mode(config: Config):
@@ -86,11 +86,11 @@ def test_set_debug_mode(config: Config):
     # Store debug mode to reset it after the test
     debug_mode = config.debug_mode
 
-    config.set_debug_mode(True)
+    config.debug_mode = True
     assert config.debug_mode == True
 
     # Reset debug mode
-    config.set_debug_mode(debug_mode)
+    config.debug_mode = debug_mode
 
 
 @patch("openai.Model.list")
@@ -127,22 +127,22 @@ def test_smart_and_fast_llm_models_set_to_gpt4(mock_list_models, config: Config):
     assert config.smart_llm_model == "gpt-3.5-turbo"
 
     # Reset config
-    config.set_fast_llm_model(fast_llm_model)
-    config.set_smart_llm_model(smart_llm_model)
+    config.fast_llm_model = fast_llm_model
+    config.smart_llm_model = smart_llm_model
 
 
 def test_missing_azure_config(config: Config, workspace: Workspace):
     config_file = workspace.get_path("azure_config.yaml")
     with pytest.raises(FileNotFoundError):
-        config.load_azure_config(str(config_file))
+        Config.load_azure_config(str(config_file))
 
     config_file.write_text("")
-    config.load_azure_config(str(config_file))
+    azure_config = Config.load_azure_config(str(config_file))
 
-    assert config.openai_api_type == "azure"
-    assert config.openai_api_base == ""
-    assert config.openai_api_version == "2023-03-15-preview"
-    assert config.azure_model_to_deployment_id_map == {}
+    assert azure_config["openai_api_type"] == "azure"
+    assert azure_config["openai_api_base"] == ""
+    assert azure_config["openai_api_version"] == "2023-03-15-preview"
+    assert azure_config["azure_model_to_deployment_id_map"] == {}
 
 
 def test_create_config_gpt4only(config: Config) -> None:
@@ -170,8 +170,8 @@ def test_create_config_gpt4only(config: Config) -> None:
     assert config.smart_llm_model == GPT_4_MODEL
 
     # Reset config
-    config.set_fast_llm_model(fast_llm_model)
-    config.set_smart_llm_model(smart_llm_model)
+    config.fast_llm_model = fast_llm_model
+    config.smart_llm_model = smart_llm_model
 
 
 def test_create_config_gpt3only(config: Config) -> None:
@@ -199,5 +199,5 @@ def test_create_config_gpt3only(config: Config) -> None:
     assert config.smart_llm_model == GPT_3_MODEL
 
     # Reset config
-    config.set_fast_llm_model(fast_llm_model)
-    config.set_smart_llm_model(smart_llm_model)
+    config.fast_llm_model = fast_llm_model
+    config.smart_llm_model = smart_llm_model
diff --git a/tests/unit/test_plugins.py b/tests/unit/test_plugins.py
index 80aa1b9d..24b7d1e9 100644
--- a/tests/unit/test_plugins.py
+++ b/tests/unit/test_plugins.py
@@ -5,6 +5,7 @@ import yaml
 from autogpt.config.config import Config
 from autogpt.plugins import inspect_zip_for_modules, scan_plugins
 from autogpt.plugins.plugin_config import PluginConfig
+from autogpt.plugins.plugins_config import PluginsConfig
 
 PLUGINS_TEST_DIR = "tests/unit/data/test_plugins"
 PLUGIN_TEST_ZIP_FILE = "Auto-GPT-Plugin-Test-master.zip"
@@ -69,7 +70,7 @@ def test_create_base_config(config: Config):
     config.plugins_denylist = ["c", "d"]
 
     os.remove(config.plugins_config_file)
-    plugins_config = config.load_plugins_config()
+    plugins_config = PluginsConfig.load_config(global_config=config)
 
     # Check the structure of the plugins config data
     assert len(plugins_config.plugins) == 4
@@ -101,7 +102,7 @@ def test_load_config(config: Config):
         f.write(yaml.dump(test_config))
 
     # Load the config from disk
-    plugins_config = config.load_plugins_config()
+    plugins_config = PluginsConfig.load_config(global_config=config)
 
     # Check that the loaded config is equal to the test config
     assert len(plugins_config.plugins) == 2

From bafcdcea7c89ffa1cdac037673b9110a6313e086 Mon Sep 17 00:00:00 2001
From: lukas-eu <62448426+lukas-eu@users.noreply.github.com>
Date: Tue, 27 Jun 2023 18:16:08 +0200
Subject: [PATCH 03/34] Filtering out ANSI escape codes in printed assistant
 thoughts (#4810)

Co-authored-by: merwanehamadi
Co-authored-by: Luke K (pr-0f3t) <2609441+lc0rp@users.noreply.github.com>
---
 autogpt/agent/agent.py |  4 ++--
 autogpt/logs.py        | 18 +++++++++++++-----
 2 files changed, 15 insertions(+), 7 deletions(-)

diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py
index fca03a5f..c578152a 100644
--- a/autogpt/agent/agent.py
+++ b/autogpt/agent/agent.py
@@ -17,7 +17,7 @@ from autogpt.log_cycle.log_cycle import (
     USER_INPUT_FILE_NAME,
     LogCycleHandler,
 )
-from autogpt.logs import logger, print_assistant_thoughts
+from autogpt.logs import logger, print_assistant_thoughts, remove_ansi_escape
 from autogpt.memory.message_history import MessageHistory
 from autogpt.memory.vector import VectorMemory
 from autogpt.models.command_registry import CommandRegistry
@@ -185,7 +185,7 @@ class Agent:
                 logger.typewriter_log(
                     "NEXT ACTION: ",
                     Fore.CYAN,
-                    f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL}  "
+                    f"COMMAND = {Fore.CYAN}{remove_ansi_escape(command_name)}{Style.RESET_ALL}  "
                     f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                 )
diff --git a/autogpt/logs.py b/autogpt/logs.py
index 90d006bc..8eb4c94a 100644
--- a/autogpt/logs.py
+++ b/autogpt/logs.py
@@ -249,6 +249,10 @@ def remove_color_codes(s: str) -> str:
     return ansi_escape.sub("", s)
 
 
+def remove_ansi_escape(s: str) -> str:
+    return s.replace("\x1B", "")
+
+
 logger = Logger()
 
 
@@ -263,12 +267,16 @@ def print_assistant_thoughts(
     assistant_thoughts_criticism = None
 
     assistant_thoughts = assistant_reply_json_valid.get("thoughts", {})
-    assistant_thoughts_text = assistant_thoughts.get("text")
+    assistant_thoughts_text = remove_ansi_escape(assistant_thoughts.get("text"))
     if assistant_thoughts:
-        assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
-        assistant_thoughts_plan = assistant_thoughts.get("plan")
-        assistant_thoughts_criticism = assistant_thoughts.get("criticism")
-        assistant_thoughts_speak = assistant_thoughts.get("speak")
+        assistant_thoughts_reasoning = remove_ansi_escape(
+            assistant_thoughts.get("reasoning")
+        )
+        assistant_thoughts_plan = remove_ansi_escape(assistant_thoughts.get("plan"))
+        assistant_thoughts_criticism = remove_ansi_escape(
+            assistant_thoughts.get("criticism")
+        )
+        assistant_thoughts_speak = remove_ansi_escape(assistant_thoughts.get("speak"))
     logger.typewriter_log(
         f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}"
     )

From 975094fcdd138c176e371f76e9a9a561c4a31939 Mon Sep 17 00:00:00 2001
From: Luke <2609441+lc0rp@users.noreply.github.com>
Date: Thu, 29 Jun 2023 09:26:49 -0400
Subject: [PATCH 04/34] Add fallback token limit in
 llm.utils.create_chat_completion (#4839)

Co-authored-by: Reinier van der Leer
---
 autogpt/llm/utils/__init__.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/autogpt/llm/utils/__init__.py b/autogpt/llm/utils/__init__.py
index 41765314..1d4f4f97 100644
--- a/autogpt/llm/utils/__init__.py
+++ b/autogpt/llm/utils/__init__.py
@@ -115,6 +115,8 @@ def create_chat_completion(
         model = prompt.model.name
     if temperature is None:
         temperature = config.temperature
+    if max_tokens is None:
+        max_tokens = OPEN_AI_CHAT_MODELS[model].max_tokens - prompt.token_length
 
     logger.debug(
         f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}"

From 5070cc32ac3f5e18bd8a2078ed1f4020fc00afb0 Mon Sep 17 00:00:00 2001
From: Reinier van der Leer
Date: Fri, 30 Jun 2023 14:15:00 +0200
Subject: [PATCH 05/34] Fix Config type hint problems caused by #4803 (#4840)

Co-authored-by: Luke <2609441+lc0rp@users.noreply.github.com>
---
 autogpt/agent/agent.py               |  3 +-
 autogpt/config/__init__.py           |  5 +--
 autogpt/config/config.py             | 54 ++++++++++++++++------------
 autogpt/core/configuration/schema.py | 12 ++++---
 autogpt/logs.py                      |  8 +++--
 autogpt/main.py                      |  7 ++--
 autogpt/plugins/__init__.py          |  7 ++--
 autogpt/plugins/plugins_config.py    | 12 +++++--
 autogpt/speech/base.py               |  7 +++-
 autogpt/speech/eleven_labs.py        |  8 +++--
 autogpt/speech/say.py                | 17 +++++----
 autogpt/workspace/workspace.py       | 11 +++---
 benchmarks.py                        |  6 ++--
 data_ingestion.py                    |  4 +--
 tests/conftest.py                    |  4 +--
 tests/unit/test_config.py            |  8 ++---
 16 files changed, 109 insertions(+), 64 deletions(-)

diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py
index c578152a..a21197cc 100644
--- a/autogpt/agent/agent.py
+++ b/autogpt/agent/agent.py
@@ -2,6 +2,7 @@ import json
 import signal
 import sys
 from datetime import datetime
+from pathlib import Path
 
 from colorama import Fore, Style
@@ -64,7 +65,7 @@
         ai_config: AIConfig,
         system_prompt: str,
         triggering_prompt: str,
-        workspace_directory: str,
+        workspace_directory: str | Path,
         config: Config,
     ):
         self.ai_name = ai_name
diff --git a/autogpt/config/__init__.py b/autogpt/config/__init__.py
index 9bdd98e2..1b98989b 100644
--- a/autogpt/config/__init__.py
+++ b/autogpt/config/__init__.py
@@ -1,11 +1,12 @@
 """
 This module contains the configuration classes for AutoGPT.
 """
-from autogpt.config.ai_config import AIConfig
-from autogpt.config.config import Config, check_openai_api_key
+from .ai_config import AIConfig
+from .config import Config, ConfigBuilder, check_openai_api_key
 
 __all__ = [
     "check_openai_api_key",
     "AIConfig",
     "Config",
+    "ConfigBuilder",
 ]
diff --git a/autogpt/config/config.py b/autogpt/config/config.py
index 03c7179f..a84a8595 100644
--- a/autogpt/config/config.py
+++ b/autogpt/config/config.py
@@ -1,4 +1,6 @@
 """Configuration class to store the state of bools for different scripts access."""
+from __future__ import annotations
+
 import contextlib
 import os
 import re
@@ -8,21 +10,22 @@
 import yaml
 from colorama import Fore
 
 from autogpt.core.configuration.schema import Configurable, SystemSettings
+from autogpt.plugins.plugins_config import PluginsConfig
 
 AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "../..", "azure.yaml")
 from typing import Optional
 
 
-class ConfigSettings(SystemSettings):
+class Config(SystemSettings):
     fast_llm_model: str
     smart_llm_model: str
     continuous_mode: bool
     skip_news: bool
-    workspace_path: Optional[str]
-    file_logger_path: Optional[str]
+    workspace_path: Optional[str] = None
+    file_logger_path: Optional[str] = None
     debug_mode: bool
     plugins_dir: str
-    plugins_config: dict[str, str]
+    plugins_config: PluginsConfig
     continuous_limit: int
     speak_mode: bool
     skip_reprompt: bool
@@ -37,31 +40,31 @@
     prompt_settings_file: str
     embedding_model: str
     browse_spacy_language_model: str
-    openai_api_key: Optional[str]
-    openai_organization: Optional[str]
+    openai_api_key: Optional[str] = None
+    openai_organization: Optional[str] = None
     temperature: float
     use_azure: bool
     execute_local_commands: bool
     restrict_to_workspace: bool
-    openai_api_type: Optional[str]
-    openai_api_base: Optional[str]
-    openai_api_version: Optional[str]
+    openai_api_type: Optional[str] = None
+    openai_api_base: Optional[str] = None
+    openai_api_version: Optional[str] = None
     openai_functions: bool
-    elevenlabs_api_key: Optional[str]
+    elevenlabs_api_key: Optional[str] = None
     streamelements_voice: str
     text_to_speech_provider: str
-    github_api_key: Optional[str]
-    github_username: Optional[str]
-    google_api_key: Optional[str]
-    google_custom_search_engine_id: Optional[str]
-    image_provider: Optional[str]
+    github_api_key: Optional[str] = None
+    github_username: Optional[str] = None
+    google_api_key: Optional[str] = None
+    google_custom_search_engine_id: Optional[str] = None
+    image_provider: Optional[str] = None
     image_size: int
-    huggingface_api_token: Optional[str]
+    huggingface_api_token: Optional[str] = None
     huggingface_image_model: str
     audio_to_text_provider: str
-    huggingface_audio_to_text_model: Optional[str]
-    sd_webui_url: Optional[str]
-    sd_webui_auth: Optional[str]
+    huggingface_audio_to_text_model: Optional[str] = None
+    sd_webui_url: Optional[str] = None
+    sd_webui_auth: Optional[str] = None
     selenium_web_browser: str
     selenium_headless: bool
     user_agent: str
@@ -76,12 +79,17 @@
     plugins_openai: list[str]
     plugins_config_file: str
     chat_messages_enabled: bool
-    elevenlabs_voice_id: Optional[str]
+    elevenlabs_voice_id: Optional[str] = None
     plugins: list[str]
     authorise_key: str
 
+    # Executed immediately after init by Pydantic
+    def model_post_init(self, **kwargs) -> None:
+        if not self.plugins_config.plugins:
+            self.plugins_config = PluginsConfig.load_config(self)
 
-class Config(Configurable):
+
+class ConfigBuilder(Configurable[Config]):
     default_plugins_config_file = os.path.join(
         os.path.dirname(os.path.abspath(__file__)), "..", "..", "plugins_config.yaml"
     )
@@ -96,7 +104,7 @@
     else:
         default_tts_provider = "gtts"
 
-    defaults_settings = ConfigSettings(
+    defaults_settings = Config(
         name="Default Server Config",
         description="This is a default server configuration",
         smart_llm_model="gpt-3.5-turbo",
@@ -106,7 +114,7 @@
         skip_news=False,
         debug_mode=False,
         plugins_dir="plugins",
-        plugins_config={},
+        plugins_config=PluginsConfig({}),
         speak_mode=False,
         skip_reprompt=False,
         allow_downloads=False,
diff --git a/autogpt/core/configuration/schema.py b/autogpt/core/configuration/schema.py
index cff4dfe8..aed484ef 100644
--- a/autogpt/core/configuration/schema.py
+++ b/autogpt/core/configuration/schema.py
@@ -1,7 +1,7 @@
 import abc
 import copy
 import typing
-from typing import Any
+from typing import Any, Generic, TypeVar
 
 from pydantic import BaseModel
 
@@ -22,22 +22,26 @@ class SystemSettings(BaseModel, abc.ABC):
     description: typing.Optional[str]
 
     class Config:
+        arbitrary_types_allowed = True
         extra = "forbid"
         use_enum_values = True
 
 
-class Configurable(abc.ABC):
+S = TypeVar("S", bound=SystemSettings)
+
+
+class Configurable(abc.ABC, Generic[S]):
     """A base class for all configurable objects."""
 
     prefix: str = ""
-    defaults_settings: typing.ClassVar[SystemSettings]
+    defaults_settings: typing.ClassVar[S]
 
     @classmethod
     def get_user_config(cls) -> dict[str, Any]:
         return _get_user_config_fields(cls.defaults_settings)
 
     @classmethod
-    def build_agent_configuration(cls, configuration: dict = {}) -> SystemSettings:
+    def build_agent_configuration(cls, configuration: dict = {}) -> S:
         """Process the configuration for this object."""
 
         defaults_settings = cls.defaults_settings.dict()
diff --git a/autogpt/logs.py b/autogpt/logs.py
index 8eb4c94a..329afb8b 100644
--- a/autogpt/logs.py
+++ b/autogpt/logs.py
@@ -1,15 +1,19 @@
 """Logging module for Auto-GPT."""
+from __future__ import annotations
+
 import logging
 import os
 import random
 import re
 import time
 from logging import LogRecord
-from typing import Any
+from typing import TYPE_CHECKING, Any
 
 from colorama import Fore, Style
 
-from autogpt.config import Config
+if TYPE_CHECKING:
+    from autogpt.config import Config
+
 from autogpt.log_cycle.json_handler import JsonFileHandler, JsonFormatter
 from autogpt.singleton import Singleton
 from autogpt.speech import say_text
diff --git a/autogpt/main.py b/autogpt/main.py
index 0217507a..30587029 100644
--- a/autogpt/main.py
+++ b/autogpt/main.py
@@ -1,11 +1,12 @@
 """The application entry point. Can be invoked by a CLI or any other front end application."""
 import logging
 import sys
+from pathlib import Path
 
 from colorama import Fore, Style
 
 from autogpt.agent import Agent
-from autogpt.config.config import Config, check_openai_api_key
+from autogpt.config.config import ConfigBuilder, check_openai_api_key
 from autogpt.configurator import create_config
 from autogpt.logs import logger
 from autogpt.memory.vector import get_memory
@@ -45,14 +46,14 @@ def run_auto_gpt(
     browser_name: str,
     allow_downloads: bool,
     skip_news: bool,
-    workspace_directory: str,
+    workspace_directory: str | Path,
     install_plugin_deps: bool,
 ):
     # Configure logging before we do anything else.
     logger.set_level(logging.DEBUG if debug else logging.INFO)
     logger.speak_mode = speak
 
-    config = Config.build_config_from_env()
+    config = ConfigBuilder.build_config_from_env()
 
     # TODO: fill in llm values here
     check_openai_api_key(config)
diff --git a/autogpt/plugins/__init__.py b/autogpt/plugins/__init__.py
index 600d6b4f..5b65ee42 100644
--- a/autogpt/plugins/__init__.py
+++ b/autogpt/plugins/__init__.py
@@ -1,4 +1,5 @@
 """Handles loading of plugins."""
+from __future__ import annotations
 
 import importlib.util
 import inspect
@@ -7,7 +8,7 @@ import os
 import sys
 import zipfile
 from pathlib import Path
-from typing import List
+from typing import TYPE_CHECKING, List
 from urllib.parse import urlparse
 from zipimport import zipimporter
 
@@ -16,7 +17,9 @@ import requests
 from auto_gpt_plugin_template import AutoGPTPluginTemplate
 from openapi_python_client.config import Config as OpenAPIConfig
 
-from autogpt.config.config import Config
+if TYPE_CHECKING:
+    from autogpt.config import Config
+
 from autogpt.logs import logger
 from autogpt.models.base_open_ai_plugin import BaseOpenAIPlugin
diff --git a/autogpt/plugins/plugins_config.py b/autogpt/plugins/plugins_config.py
index 7e04e795..cbf6f232 100644
--- a/autogpt/plugins/plugins_config.py
+++ b/autogpt/plugins/plugins_config.py
@@ -1,9 +1,13 @@
+from __future__ import annotations
+
 import os
-from typing import Any, Union
+from typing import TYPE_CHECKING, Any, Union
 
 import yaml
 
-from autogpt.config.config import Config
+if TYPE_CHECKING:
+    from autogpt.config import Config
+
 from autogpt.logs import logger
 from autogpt.plugins.plugin_config import PluginConfig
 
@@ -11,6 +15,8 @@ from autogpt.plugins.plugin_config import PluginConfig
 class PluginsConfig:
     """Class for holding configuration of all plugins"""
 
+    plugins: dict[str, PluginConfig]
+
     def __init__(self, plugins_config: dict[str, Any]):
         self.plugins = {}
         for name, plugin in plugins_config.items():
@@ -33,7 +39,7 @@ class PluginsConfig:
     def is_enabled(self, name) -> bool:
         plugin_config = self.plugins.get(name)
-        return plugin_config and plugin_config.enabled
+        return plugin_config is not None and plugin_config.enabled
 
     @classmethod
     def load_config(cls, global_config: Config) -> "PluginsConfig":
diff --git a/autogpt/speech/base.py b/autogpt/speech/base.py
index 7b8e7146..44e90eaa 100644
--- a/autogpt/speech/base.py
+++ b/autogpt/speech/base.py
@@ -1,9 +1,14 @@
 """Base class for all voice classes."""
+from __future__ import annotations
+
 import abc
 import re
 from threading import Lock
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from autogpt.config import Config
 
-from autogpt.config import Config
 from autogpt.singleton import AbstractSingleton
diff --git a/autogpt/speech/eleven_labs.py b/autogpt/speech/eleven_labs.py
index 3f3baf33..815f698c 100644
--- a/autogpt/speech/eleven_labs.py
+++ b/autogpt/speech/eleven_labs.py
@@ -1,11 +1,15 @@
 """ElevenLabs speech module"""
+from __future__ import annotations
+
 import os
+from typing import TYPE_CHECKING
 
 import requests
 from playsound import playsound
 
-from autogpt.config.config import Config
-from autogpt.speech.base import VoiceBase
+if TYPE_CHECKING:
+    from autogpt.config import Config
 
+from .base import VoiceBase
 
 PLACEHOLDERS = {"your-voice-id"}
diff --git a/autogpt/speech/say.py b/autogpt/speech/say.py
index 1847c631..3d71a392 100644
--- a/autogpt/speech/say.py
+++ b/autogpt/speech/say.py
@@ -1,13 +1,18 @@
 """ Text to speech module """
+from __future__ import annotations
+
 import threading
 from threading import Semaphore
+from typing import TYPE_CHECKING
 
-from autogpt.config.config import Config
-from autogpt.speech.base import VoiceBase
-from autogpt.speech.eleven_labs import ElevenLabsSpeech
-from autogpt.speech.gtts import GTTSVoice
-from autogpt.speech.macos_tts import MacOSTTS
-from autogpt.speech.stream_elements_speech import StreamElementsSpeech
+if TYPE_CHECKING:
+    from autogpt.config import Config
+
+from .base import VoiceBase
+from .eleven_labs import ElevenLabsSpeech
+from .gtts import GTTSVoice
+from .macos_tts import MacOSTTS
+from .stream_elements_speech import StreamElementsSpeech
 
 _QUEUE_SEMAPHORE = Semaphore(
     1
diff --git a/autogpt/workspace/workspace.py b/autogpt/workspace/workspace.py
index a0520c17..6d90f854 100644
--- a/autogpt/workspace/workspace.py
+++ b/autogpt/workspace/workspace.py
@@ -10,6 +10,7 @@ agent.
 from __future__ import annotations
 
 from pathlib import Path
+from typing import Optional
 
 from autogpt.config import Config
 from autogpt.logs import logger
@@ -77,7 +78,7 @@ class Workspace:
     @staticmethod
     def _sanitize_path(
         relative_path: str | Path,
-        root: str | Path = None,
+        root: Optional[str | Path] = None,
         restrict_to_root: bool = True,
     ) -> Path:
         """Resolve the relative path within the given root if possible.
@@ -139,7 +140,7 @@ class Workspace:
         return full_path
 
     @staticmethod
-    def build_file_logger_path(config, workspace_directory):
+    def build_file_logger_path(config: Config, workspace_directory: Path):
         file_logger_path = workspace_directory / "file_logger.txt"
         if not file_logger_path.exists():
             with file_logger_path.open(mode="w", encoding="utf-8") as f:
@@ -147,10 +148,12 @@ class Workspace:
         config.file_logger_path = str(file_logger_path)
 
     @staticmethod
-    def get_workspace_directory(config: Config, workspace_directory: str = None):
+    def get_workspace_directory(
+        config: Config, workspace_directory: Optional[str | Path] = None
+    ):
         if workspace_directory is None:
             workspace_directory = Path(__file__).parent / "auto_gpt_workspace"
-        else:
+        elif type(workspace_directory) == str:
             workspace_directory = Path(workspace_directory)
         # TODO: pass in the ai_settings file and the env file and have them cloned into
         # the workspace directory so we can bind them to the agent.
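[Aside: a minimal, self-contained sketch of the Configurable/SystemSettings pattern introduced in PATCH 02 and generalized in PATCH 05. The WidgetSettings and WidgetService names are hypothetical and appear nowhere in these patches; build_agent_configuration() and deep_update() behave as defined in autogpt/core/configuration/schema.py above.]

    from autogpt.core.configuration.schema import Configurable, SystemSettings

    class WidgetSettings(SystemSettings):  # hypothetical settings model
        retries: int
        backoff_base: int

    class WidgetService(Configurable[WidgetSettings]):  # hypothetical service
        defaults_settings = WidgetSettings(
            name="widget",
            description="example settings",
            retries=3,
            backoff_base=2,
        )

    # Overrides are deep-merged over the class defaults via deep_update(),
    # then re-validated through parse_obj() on the settings model:
    settings = WidgetService.build_agent_configuration({"retries": 5})
    assert settings.retries == 5       # overridden
    assert settings.backoff_base == 2  # default preserved

Because deep_update() merges recursively, nested dict fields can be overridden key-by-key without clobbering sibling defaults, which is what lets ConfigBuilder.build_config_from_env() pass only the environment-derived values.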
diff --git a/benchmarks.py b/benchmarks.py index fe4d3207..cb592be8 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -1,5 +1,5 @@ from autogpt.agent import Agent -from autogpt.config import AIConfig, Config +from autogpt.config import AIConfig, Config, ConfigBuilder from autogpt.main import COMMAND_CATEGORIES from autogpt.memory.vector import get_memory from autogpt.models.command_registry import CommandRegistry @@ -13,7 +13,7 @@ def run_task(task) -> None: def bootstrap_agent(task): - config = Config.build_config_from_env() + config = ConfigBuilder.build_config_from_env() config.continuous_mode = False config.temperature = 0 config.plain_output = True @@ -42,7 +42,7 @@ def bootstrap_agent(task): ) -def get_command_registry(config): +def get_command_registry(config: Config): command_registry = CommandRegistry() enabled_command_categories = [ x for x in COMMAND_CATEGORIES if x not in config.disabled_command_categories diff --git a/data_ingestion.py b/data_ingestion.py index 5149ddcc..7d596f52 100644 --- a/data_ingestion.py +++ b/data_ingestion.py @@ -2,10 +2,10 @@ import argparse import logging from autogpt.commands.file_operations import ingest_file, list_files -from autogpt.config import Config +from autogpt.config import ConfigBuilder from autogpt.memory.vector import VectorMemory, get_memory -config = Config.build_config_from_env() +config = ConfigBuilder.build_config_from_env() def configure_logging(): diff --git a/tests/conftest.py b/tests/conftest.py index 5d2c000a..920fc4e4 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -7,8 +7,8 @@ import yaml from pytest_mock import MockerFixture from autogpt.agent.agent import Agent +from autogpt.config import AIConfig, Config, ConfigBuilder from autogpt.config.ai_config import AIConfig -from autogpt.config.config import Config from autogpt.llm.api_manager import ApiManager from autogpt.logs import TypingConsoleHandler from autogpt.memory.vector import get_memory @@ -49,7 +49,7 @@ def temp_plugins_config_file(): def config( temp_plugins_config_file: str, mocker: MockerFixture, workspace: Workspace ) -> Config: - config = Config.build_config_from_env() + config = ConfigBuilder.build_config_from_env() if not os.environ.get("OPENAI_API_KEY"): os.environ["OPENAI_API_KEY"] = "sk-dummy" diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py index 1903fd16..66334253 100644 --- a/tests/unit/test_config.py +++ b/tests/unit/test_config.py @@ -7,7 +7,7 @@ from unittest.mock import patch import pytest -from autogpt.config import Config +from autogpt.config import Config, ConfigBuilder from autogpt.configurator import GPT_3_MODEL, GPT_4_MODEL, create_config from autogpt.workspace.workspace import Workspace @@ -131,13 +131,13 @@ def test_smart_and_fast_llm_models_set_to_gpt4(mock_list_models, config: Config) config.smart_llm_model = smart_llm_model -def test_missing_azure_config(config: Config, workspace: Workspace): +def test_missing_azure_config(workspace: Workspace): config_file = workspace.get_path("azure_config.yaml") with pytest.raises(FileNotFoundError): - Config.load_azure_config(str(config_file)) + ConfigBuilder.load_azure_config(str(config_file)) config_file.write_text("") - azure_config = Config.load_azure_config(str(config_file)) + azure_config = ConfigBuilder.load_azure_config(str(config_file)) assert azure_config["openai_api_type"] == "azure" assert azure_config["openai_api_base"] == "" From c67940edec24dff6f8816f071356cf0cbbe9c280 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Tue, 4 Jul 2023 15:34:46 -0500 Subject: 
[PATCH 06/34] Update CODEOWNERS (#4884) --- .github/CODEOWNERS | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index bc32f85b..d4c5c191 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1 +1,2 @@ -.github/workflows/ @Significant-Gravitas/Auto-GPT-Source +.github/workflows/ @Significant-Gravitas/maintainers +autogpt/core @collijk From 7e45b6a9758c6a145b1dd3ef7e6d1026b9e33754 Mon Sep 17 00:00:00 2001 From: Luke <2609441+lc0rp@users.noreply.github.com> Date: Tue, 4 Jul 2023 17:55:00 -0400 Subject: [PATCH 07/34] Fix Config.plugins_config - call model_post_init explicitly until pydantic 2.0 (#4858) As per https://github.com/pydantic/pydantic/issues/1729#issuecomment-1300576214, the implementation of `model_post_init()` is postponed until Pydantic v2. As a result, the initialization of PluginConfig is being skipped. This fix calls `plugin.model_post_init()` explicitly. The recency of the Pydantic v2 release means that some of the other extensions we use do not support it yet. Specifically, extensions such as spacy and openapi-python-client are currently limited to Pydantic versions that are less than 2.0. There may be other extensions that have the same limitation as well. --------- Co-authored-by: Reinier van der Leer --- autogpt/config/config.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/autogpt/config/config.py b/autogpt/config/config.py index a84a8595..bec3e921 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -83,6 +83,13 @@ class Config(SystemSettings): plugins: list[str] authorise_key: str + def __init__(self, **kwargs): + super().__init__(**kwargs) + + # Hotfix: Call model_post_init explicitly as it doesn't seem to be called for pydantic<2.0.0 + # https://github.com/pydantic/pydantic/issues/1729#issuecomment-1300576214 + self.model_post_init(**kwargs) + # Executed immediately after init by Pydantic def model_post_init(self, **kwargs) -> None: if not self.plugins_config.plugins: From 744275b932d55b61476bee0641ae65f320d5224f Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Wed, 5 Jul 2023 00:30:00 +0200 Subject: [PATCH 08/34] Run pytest with only unit tests in pre-commit hook --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0aaad257..cb7180d4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -36,7 +36,7 @@ repos: types: [ python ] - id: pytest-check name: pytest-check - entry: pytest --cov=autogpt --without-integration --without-slow-integration + entry: pytest --cov=autogpt tests/unit language: system pass_filenames: false always_run: true From 66635f3ae6740fbb62279a4db74c5878be61cb17 Mon Sep 17 00:00:00 2001 From: Luke <2609441+lc0rp@users.noreply.github.com> Date: Wed, 5 Jul 2023 14:12:28 -0400 Subject: [PATCH 09/34] Simplified plugin-loading log messages (#4870) Simplified plugin log messages to make plugin debugging easier --------- Co-authored-by: Reinier van der Leer --- autogpt/plugins/__init__.py | 37 ++++++++++++++++++------------- autogpt/plugins/plugins_config.py | 6 +++++ 2 files changed, 27 insertions(+), 16 deletions(-) diff --git a/autogpt/plugins/__init__.py b/autogpt/plugins/__init__.py index 5b65ee42..e9b864c6 100644 --- a/autogpt/plugins/__init__.py +++ b/autogpt/plugins/__init__.py @@ -221,7 +221,7 @@ def scan_plugins(config: Config, debug: bool = False) -> List[AutoGPTPluginTempl """ loaded_plugins = [] # Generic plugins -
plugins_path_path = Path(config.plugins_dir) + plugins_path = Path(config.plugins_dir) plugins_config = config.plugins_config # Directory-based plugins @@ -238,7 +238,9 @@ def scan_plugins(config: Config, debug: bool = False) -> List[AutoGPTPluginTempl plugin = sys.modules[qualified_module_name] if not plugins_config.is_enabled(plugin_module_name): - logger.warn(f"Plugin {plugin_module_name} found but not configured") + logger.warn( + f"Plugin folder {plugin_module_name} found but not configured. If this is a legitimate plugin, please add it to plugins_config.yaml (key: {plugin_module_name})." + ) continue for _, class_obj in inspect.getmembers(plugin): @@ -249,23 +251,25 @@ def scan_plugins(config: Config, debug: bool = False) -> List[AutoGPTPluginTempl loaded_plugins.append(class_obj()) # Zip-based plugins - for plugin in plugins_path_path.glob("*.zip"): + for plugin in plugins_path.glob("*.zip"): if moduleList := inspect_zip_for_modules(str(plugin), debug): for module in moduleList: plugin = Path(plugin) module = Path(module) - logger.debug(f"Plugin: {plugin} Module: {module}") + logger.debug(f"Zipped Plugin: {plugin}, Module: {module}") zipped_package = zipimporter(str(plugin)) zipped_module = zipped_package.load_module(str(module.parent)) for key in dir(zipped_module): if key.startswith("__"): continue + a_module = getattr(zipped_module, key) + if not inspect.isclass(a_module): + continue if ( - inspect.isclass(a_module) - and issubclass(a_module, AutoGPTPluginTemplate) + issubclass(a_module, AutoGPTPluginTemplate) and a_module.__name__ != "AutoGPTPluginTemplate" ): plugin_name = a_module.__name__ @@ -274,24 +278,23 @@ def scan_plugins(config: Config, debug: bool = False) -> List[AutoGPTPluginTempl if plugin_configured and plugin_enabled: logger.debug( - f"Loading plugin {plugin_name} as it was enabled in config." + f"Loading plugin {plugin_name}. Enabled in plugins_config.yaml." ) loaded_plugins.append(a_module()) elif plugin_configured and not plugin_enabled: logger.debug( - f"Not loading plugin {plugin_name} as it was disabled in config." + f"Not loading plugin {plugin_name}. Disabled in plugins_config.yaml." ) elif not plugin_configured: logger.warn( - f"Not loading plugin {plugin_name} as it was not found in config. " - f"Please check your config. Starting with 0.4.1, plugins will not be loaded unless " - f"they are enabled in plugins_config.yaml. Zipped plugins should use the class " - f"name ({plugin_name}) as the key." + f"Not loading plugin {plugin_name}. Key '{plugin_name}' was not found in plugins_config.yaml. " + f"Zipped plugins should use the class name ({plugin_name}) as the key." ) else: - logger.debug( - f"Skipping {key}: {a_module.__name__} because it doesn't subclass AutoGPTPluginTemplate." - ) + if a_module.__name__ != "AutoGPTPluginTemplate": + logger.debug( + f"Skipping '{key}' because it doesn't subclass AutoGPTPluginTemplate." 
+ ) # OpenAI plugins if config.plugins_openai: manifests_specs_clients = ( @@ -302,7 +305,9 @@ def scan_plugins(config: Config, debug: bool = False) -> List[AutoGPTPluginTempl ) for url, openai_plugin_meta in manifests_specs_clients.items(): if not plugins_config.is_enabled(url): - logger.warn(f"Plugin {plugin_module_name} found but not configured") + logger.warn( + f"OpenAI Plugin {url} found but not configured" + ) continue plugin = BaseOpenAIPlugin(openai_plugin_meta) diff --git a/autogpt/plugins/plugins_config.py b/autogpt/plugins/plugins_config.py index cbf6f232..dedffd21 100644 --- a/autogpt/plugins/plugins_config.py +++ b/autogpt/plugins/plugins_config.py @@ -75,6 +75,9 @@ class PluginsConfig: """Create an empty plugins_config.yaml file. Fill it with values from old env variables.""" base_config = {} + logger.debug(f"Legacy plugin denylist: {global_config.plugins_denylist}") + logger.debug(f"Legacy plugin allowlist: {global_config.plugins_allowlist}") + # Backwards-compatibility shim for plugin_name in global_config.plugins_denylist: base_config[plugin_name] = {"enabled": False, "config": {}} @@ -82,6 +85,9 @@ for plugin_name in global_config.plugins_allowlist: base_config[plugin_name] = {"enabled": True, "config": {}} + logger.debug(f"Constructed base plugins config: {base_config}") + + logger.debug(f"Creating plugin config file {global_config.plugins_config_file}") with open(global_config.plugins_config_file, "w+") as f: f.write(yaml.dump(base_config)) return base_config From b9f01330db2003cb65e1735fa9360131e6956d6f Mon Sep 17 00:00:00 2001 From: James Collins Date: Wed, 5 Jul 2023 12:12:05 -0700 Subject: [PATCH 10/34] Re-arch WIP (#3969) Rough sketch of a hello world using our refactored autogpt library. See the tracking issue here: #4770. # Run instructions There are two client applications for Auto-GPT included. ## CLI Application :star2: **This is the reference application I'm working with for now** :star2: The first app is a straight CLI application. I have not done anything yet to port all the friendly display stuff from the `logger.typewriter_log` logic. - [Entry Point](https://github.com/Significant-Gravitas/Auto-GPT/blob/re-arch/hello-world/autogpt/core/runner/cli_app/cli.py) - [Client Application](https://github.com/Significant-Gravitas/Auto-GPT/blob/re-arch/hello-world/autogpt/core/runner/cli_app/main.py) To run, you first need a settings file. Run ``` python REPOSITORY_ROOT/autogpt/core/runner/cli_app/cli.py make-settings ``` where `REPOSITORY_ROOT` is the root of the Auto-GPT repository on your machine. This will write a file called `default_agent_settings.yaml` with all the user-modifiable configuration keys to `~/auto-gpt/default_agent_settings.yml` (and make the `auto-gpt` directory in your user directory if it doesn't exist). At a bare minimum, you'll need to set `openai.credentials.api_key` to your OpenAI API Key to run the model. You can then run Auto-GPT with ``` python REPOSITORY_ROOT/autogpt/core/runner/cli_app/cli.py run ``` to launch the interaction loop. ## CLI Web App The second app is still a CLI, but it sets up a local webserver that the client application talks to rather than invoking calls to the Agent library code directly. This application is essentially a sketch at this point as the folks who were driving it have had less time (and likely not enough clarity) to proceed.
- [Entry Point](https://github.com/Significant-Gravitas/Auto-GPT/blob/re-arch/hello-world/autogpt/core/runner/cli_web_app/cli.py) - [Client Application](https://github.com/Significant-Gravitas/Auto-GPT/blob/re-arch/hello-world/autogpt/core/runner/cli_web_app/client/client.py) - [Server API](https://github.com/Significant-Gravitas/Auto-GPT/blob/re-arch/hello-world/autogpt/core/runner/cli_web_app/server/api.py) To run, you still need to generate a default configuration. You can do ``` python REPOSITORY_ROOT/autogpt/core/runner/cli_web_app/cli.py make-settings ``` It invokes the same command as the bare CLI app, so follow the instructions above about setting your API key. To run, do ``` python REPOSITORY_ROOT/autogpt/core/runner/cli_web_app/cli.py client ``` This will launch a webserver and then start the client cli application to communicate with it. :warning: I am not actively developing this application. It is a very good place to get involved if you have web application design experience and are looking to get involved in the re-arch. --------- Co-authored-by: David Wurtz Co-authored-by: Media <12145726+rihp@users.noreply.github.com> Co-authored-by: Richard Beales Co-authored-by: Daryl Rodrigo Co-authored-by: Daryl Rodrigo Co-authored-by: Swifty Co-authored-by: Nicholas Tindle Co-authored-by: Merwane Hamadi --- autogpt/config/config.py | 6 +- autogpt/core/README.md | 54 +++ autogpt/core/__init__.py | 0 autogpt/core/ability/__init__.py | 4 + autogpt/core/ability/base.py | 92 ++++ autogpt/core/ability/builtins/__init__.py | 6 + .../ability/builtins/create_new_ability.py | 102 +++++ .../core/ability/builtins/file_operations.py | 167 ++++++++ .../ability/builtins/query_language_model.py | 78 ++++ autogpt/core/ability/schema.py | 26 ++ autogpt/core/ability/simple.py | 96 +++++ autogpt/core/agent/__init__.py | 3 + autogpt/core/agent/base.py | 26 ++ autogpt/core/agent/simple.py | 398 ++++++++++++++++++ autogpt/core/configuration/__init__.py | 7 + autogpt/core/configuration/schema.py | 29 +- autogpt/core/memory/__init__.py | 3 + autogpt/core/memory/base.py | 13 + autogpt/core/memory/simple.py | 47 +++ autogpt/core/planning/__init__.py | 10 + autogpt/core/planning/base.py | 76 ++++ autogpt/core/planning/schema.py | 76 ++++ autogpt/core/planning/simple.py | 182 ++++++++ autogpt/core/planning/strategies/__init__.py | 12 + .../core/planning/strategies/initial_plan.py | 190 +++++++++ .../planning/strategies/name_and_goals.py | 139 ++++++ .../core/planning/strategies/next_ability.py | 183 ++++++++ autogpt/core/planning/strategies/utils.py | 27 ++ autogpt/core/planning/templates.py | 102 +++++ autogpt/core/plugin/__init__.py | 2 + autogpt/core/plugin/base.py | 155 +++++++ autogpt/core/plugin/simple.py | 74 ++++ autogpt/core/resource/__init__.py | 7 + .../core/resource/model_providers/__init__.py | 44 ++ .../core/resource/model_providers/openai.py | 373 ++++++++++++++++ .../core/resource/model_providers/schema.py | 219 ++++++++++ autogpt/core/resource/schema.py | 57 +++ autogpt/core/runner/__init__.py | 3 + autogpt/core/runner/cli_app/__init__.py | 0 autogpt/core/runner/cli_app/cli.py | 49 +++ autogpt/core/runner/cli_app/main.py | 108 +++++ autogpt/core/runner/cli_web_app/__init__.py | 0 autogpt/core/runner/cli_web_app/cli.py | 101 +++++ .../runner/cli_web_app/client/__init__.py | 0 .../core/runner/cli_web_app/client/client.py | 16 + .../runner/cli_web_app/server/__init__.py | 0 autogpt/core/runner/cli_web_app/server/api.py | 48 +++ .../core/runner/cli_web_app/server/schema.py | 36 ++ 
.../cli_web_app/server/services/__init__.py | 0 .../cli_web_app/server/services/users.py | 20 + autogpt/core/runner/client_lib/__init__.py | 0 autogpt/core/runner/client_lib/logging.py | 20 + autogpt/core/runner/client_lib/settings.py | 14 + .../client_lib/shared_click_commands.py | 19 + autogpt/core/runner/client_lib/utils.py | 61 +++ autogpt/core/workspace/__init__.py | 3 + autogpt/core/workspace/base.py | 70 +++ autogpt/core/workspace/simple.py | 193 +++++++++ autogpt/plugins/plugin_config.py | 15 +- autogpt/plugins/plugins_config.py | 42 +- requirements.txt | 4 + 61 files changed, 3863 insertions(+), 44 deletions(-) create mode 100644 autogpt/core/README.md create mode 100644 autogpt/core/__init__.py create mode 100644 autogpt/core/ability/__init__.py create mode 100644 autogpt/core/ability/base.py create mode 100644 autogpt/core/ability/builtins/__init__.py create mode 100644 autogpt/core/ability/builtins/create_new_ability.py create mode 100644 autogpt/core/ability/builtins/file_operations.py create mode 100644 autogpt/core/ability/builtins/query_language_model.py create mode 100644 autogpt/core/ability/schema.py create mode 100644 autogpt/core/ability/simple.py create mode 100644 autogpt/core/agent/__init__.py create mode 100644 autogpt/core/agent/base.py create mode 100644 autogpt/core/agent/simple.py create mode 100644 autogpt/core/memory/__init__.py create mode 100644 autogpt/core/memory/base.py create mode 100644 autogpt/core/memory/simple.py create mode 100644 autogpt/core/planning/__init__.py create mode 100644 autogpt/core/planning/base.py create mode 100644 autogpt/core/planning/schema.py create mode 100644 autogpt/core/planning/simple.py create mode 100644 autogpt/core/planning/strategies/__init__.py create mode 100644 autogpt/core/planning/strategies/initial_plan.py create mode 100644 autogpt/core/planning/strategies/name_and_goals.py create mode 100644 autogpt/core/planning/strategies/next_ability.py create mode 100644 autogpt/core/planning/strategies/utils.py create mode 100644 autogpt/core/planning/templates.py create mode 100644 autogpt/core/plugin/__init__.py create mode 100644 autogpt/core/plugin/base.py create mode 100644 autogpt/core/plugin/simple.py create mode 100644 autogpt/core/resource/__init__.py create mode 100644 autogpt/core/resource/model_providers/__init__.py create mode 100644 autogpt/core/resource/model_providers/openai.py create mode 100644 autogpt/core/resource/model_providers/schema.py create mode 100644 autogpt/core/resource/schema.py create mode 100644 autogpt/core/runner/__init__.py create mode 100644 autogpt/core/runner/cli_app/__init__.py create mode 100644 autogpt/core/runner/cli_app/cli.py create mode 100644 autogpt/core/runner/cli_app/main.py create mode 100644 autogpt/core/runner/cli_web_app/__init__.py create mode 100644 autogpt/core/runner/cli_web_app/cli.py create mode 100644 autogpt/core/runner/cli_web_app/client/__init__.py create mode 100644 autogpt/core/runner/cli_web_app/client/client.py create mode 100644 autogpt/core/runner/cli_web_app/server/__init__.py create mode 100644 autogpt/core/runner/cli_web_app/server/api.py create mode 100644 autogpt/core/runner/cli_web_app/server/schema.py create mode 100644 autogpt/core/runner/cli_web_app/server/services/__init__.py create mode 100644 autogpt/core/runner/cli_web_app/server/services/users.py create mode 100644 autogpt/core/runner/client_lib/__init__.py create mode 100644 autogpt/core/runner/client_lib/logging.py create mode 100644 autogpt/core/runner/client_lib/settings.py create mode 
100644 autogpt/core/runner/client_lib/shared_click_commands.py create mode 100644 autogpt/core/runner/client_lib/utils.py create mode 100644 autogpt/core/workspace/__init__.py create mode 100644 autogpt/core/workspace/base.py create mode 100644 autogpt/core/workspace/simple.py diff --git a/autogpt/config/config.py b/autogpt/config/config.py index bec3e921..1c2084f7 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -111,7 +111,7 @@ class ConfigBuilder(Configurable[Config]): else: default_tts_provider = "gtts" - defaults_settings = Config( + default_settings = Config( name="Default Server Config", description="This is a default server configuration", smart_llm_model="gpt-3.5-turbo", @@ -121,7 +121,7 @@ class ConfigBuilder(Configurable[Config]): skip_news=False, debug_mode=False, plugins_dir="plugins", - plugins_config=PluginsConfig({}), + plugins_config=PluginsConfig(plugins={}), speak_mode=False, skip_reprompt=False, allow_downloads=False, @@ -165,7 +165,7 @@ class ConfigBuilder(Configurable[Config]): ) @classmethod - def build_config_from_env(cls): + def build_config_from_env(cls) -> Config: """Initialize the Config class""" config_dict = { "authorise_key": os.getenv("AUTHORISE_COMMAND_KEY"), diff --git a/autogpt/core/README.md b/autogpt/core/README.md new file mode 100644 index 00000000..49a87a09 --- /dev/null +++ b/autogpt/core/README.md @@ -0,0 +1,54 @@ +# Run instructions + +There are two client applications for Auto-GPT included. + +## CLI Application + +:star2: **This is the reference application I'm working with for now** :star2: + +The first app is a straight CLI application. I have not done anything yet to port all the friendly display stuff from the `logger.typewriter_log` logic. + +- [Entry Point](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_app/cli.py) +- [Client Application](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_app/main.py) + +To run, you first need a settings file. Run + +``` + python REPOSITORY_ROOT/autogpt/core/runner/cli_app/cli.py make-settings + ``` + +where `REPOSITORY_ROOT` is the root of the Auto-GPT repository on your machine. This will write a file called `default_agent_settings.yaml` with all the user-modifiable configuration keys to `~/auto-gpt/default_agent_settings.yml` (and make the `auto-gpt` directory in your user directory if it doesn't exist). At a bare minimum, you'll need to set `openai.credentials.api_key` to your OpenAI API Key to run the model. + +You can then run Auto-GPT with + +``` +python REPOSITORY_ROOT/autogpt/core/runner/cli_app/cli.py run +``` + +to launch the interaction loop. + +## CLI Web App + +The second app is still a CLI, but it sets up a local webserver that the client application talks to rather than invoking calls to the Agent library code directly. This application is essentially a sketch at this point as the folks who were driving it have had less time (and likely not enough clarity) to proceed. + +- [Entry Point](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_web_app/cli.py) +- [Client Application](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_web_app/client/client.py) +- [Server API](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_web_app/server/api.py) + +To run, you still need to generate a default configuration.
You can do + +``` +python REPOSITORY_ROOT/autogpt/core/runner/cli_web_app/cli.py make-settings +``` + +It invokes the same command as the bare CLI app, so follow the instructions above about setting your API key. + +To run, do + +``` +python REPOSITORY_ROOT/autogpt/core/runner/cli_web_app/cli.py client +``` + +This will launch a webserver and then start the client cli application to communicate with it. + +:warning: I am not actively developing this application. It is a very good place to get involved if you have web application design experience and are looking to get involved in the re-arch. \ No newline at end of file diff --git a/autogpt/core/__init__.py b/autogpt/core/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/autogpt/core/ability/__init__.py b/autogpt/core/ability/__init__.py new file mode 100644 index 00000000..3cf310de --- /dev/null +++ b/autogpt/core/ability/__init__.py @@ -0,0 +1,4 @@ +"""The command system provides a way to extend the functionality of the AI agent.""" +from autogpt.core.ability.base import Ability, AbilityRegistry +from autogpt.core.ability.schema import AbilityResult +from autogpt.core.ability.simple import AbilityRegistrySettings, SimpleAbilityRegistry diff --git a/autogpt/core/ability/base.py b/autogpt/core/ability/base.py new file mode 100644 index 00000000..ac26f026 --- /dev/null +++ b/autogpt/core/ability/base.py @@ -0,0 +1,92 @@ +import abc +from pprint import pformat +from typing import ClassVar + +import inflection +from pydantic import Field + +from autogpt.core.ability.schema import AbilityResult +from autogpt.core.configuration import SystemConfiguration +from autogpt.core.planning.simple import LanguageModelConfiguration + + +class AbilityConfiguration(SystemConfiguration): + """Struct for model configuration.""" + + from autogpt.core.plugin.base import PluginLocation + + location: PluginLocation + packages_required: list[str] = Field(default_factory=list) + language_model_required: LanguageModelConfiguration = None + memory_provider_required: bool = False + workspace_required: bool = False + + +class Ability(abc.ABC): + """A class representing an agent ability.""" + + default_configuration: ClassVar[AbilityConfiguration] + + @classmethod + def name(cls) -> str: + """The name of the ability.""" + return inflection.underscore(cls.__name__) + + @classmethod + @abc.abstractmethod + def description(cls) -> str: + """A detailed description of what the ability does.""" + ... + + @classmethod + @abc.abstractmethod + def arguments(cls) -> dict: + """A dict of arguments in standard json schema format.""" + ... + + @classmethod + def required_arguments(cls) -> list[str]: + """A list of required arguments.""" + return [] + + @abc.abstractmethod + async def __call__(self, *args, **kwargs) -> AbilityResult: + ... + + def __str__(self) -> str: + return pformat(self.dump) + + def dump(self) -> dict: + return { + "name": self.name(), + "description": self.description(), + "parameters": { + "type": "object", + "properties": self.arguments(), + "required": self.required_arguments(), + }, + } + + +class AbilityRegistry(abc.ABC): + @abc.abstractmethod + def register_ability( + self, ability_name: str, ability_configuration: AbilityConfiguration + ) -> None: + ... + + @abc.abstractmethod + def list_abilities(self) -> list[str]: + ... + + @abc.abstractmethod + def dump_abilities(self) -> list[dict]: + ... + + @abc.abstractmethod + def get_ability(self, ability_name: str) -> Ability: + ... 
+ + @abc.abstractmethod + def perform(self, ability_name: str, **kwargs) -> AbilityResult: + ... diff --git a/autogpt/core/ability/builtins/__init__.py b/autogpt/core/ability/builtins/__init__.py new file mode 100644 index 00000000..0572605a --- /dev/null +++ b/autogpt/core/ability/builtins/__init__.py @@ -0,0 +1,6 @@ +from autogpt.core.ability.builtins.create_new_ability import CreateNewAbility +from autogpt.core.ability.builtins.query_language_model import QueryLanguageModel + +BUILTIN_ABILITIES = { + QueryLanguageModel.name(): QueryLanguageModel, +} diff --git a/autogpt/core/ability/builtins/create_new_ability.py b/autogpt/core/ability/builtins/create_new_ability.py new file mode 100644 index 00000000..8c53efb3 --- /dev/null +++ b/autogpt/core/ability/builtins/create_new_ability.py @@ -0,0 +1,102 @@ +import logging + +from autogpt.core.ability.base import Ability, AbilityConfiguration +from autogpt.core.ability.schema import AbilityResult +from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat + + +class CreateNewAbility(Ability): + default_configuration = AbilityConfiguration( + location=PluginLocation( + storage_format=PluginStorageFormat.INSTALLED_PACKAGE, + storage_route="autogpt.core.ability.builtins.CreateNewAbility", + ), + ) + + def __init__( + self, + logger: logging.Logger, + configuration: AbilityConfiguration, + ): + self._logger = logger + self._configuration = configuration + + @classmethod + def description(cls) -> str: + return "Create a new ability by writing python code." + + @classmethod + def arguments(cls) -> dict: + return { + "ability_name": { + "type": "string", + "description": "A meaningful and concise name for the new ability.", + }, + "description": { + "type": "string", + "description": "A detailed description of the ability and its uses, including any limitations.", + }, + "arguments": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "The name of the argument.", + }, + "type": { + "type": "string", + "description": "The type of the argument. 
Must be a standard json schema type.", + }, + "description": { + "type": "string", + "description": "A detailed description of the argument and its uses.", + }, + }, + }, + "description": "A list of arguments that the ability will accept.", + }, + "required_arguments": { + "type": "array", + "items": { + "type": "string", + "description": "The names of the arguments that are required.", + }, + "description": "A list of the names of the arguments that are required.", + }, + "package_requirements": { + "type": "array", + "items": { + "type": "string", + "description": "The name of the Python package that is required to execute the ability.", + }, + "description": "A list of the names of the Python packages that are required to execute the ability.", + }, + "code": { + "type": "string", + "description": "The Python code that will be executed when the ability is called.", + }, + } + + @classmethod + def required_arguments(cls) -> list[str]: + return [ + "ability_name", + "description", + "arguments", + "required_arguments", + "package_requirements", + "code", + ] + + async def __call__( + self, + ability_name: str, + description: str, + arguments: list[dict], + required_arguments: list[str], + package_requirements: list[str], + code: str, + ) -> AbilityResult: + raise NotImplementedError diff --git a/autogpt/core/ability/builtins/file_operations.py b/autogpt/core/ability/builtins/file_operations.py new file mode 100644 index 00000000..43cd0d0c --- /dev/null +++ b/autogpt/core/ability/builtins/file_operations.py @@ -0,0 +1,167 @@ +import logging +import os + +from autogpt.core.ability.base import Ability, AbilityConfiguration +from autogpt.core.ability.schema import AbilityResult, ContentType, Knowledge +from autogpt.core.workspace import Workspace + + +class ReadFile(Ability): + default_configuration = AbilityConfiguration( + packages_required=["unstructured"], + workspace_required=True, + ) + + def __init__( + self, + logger: logging.Logger, + workspace: Workspace, + ): + self._logger = logger + self._workspace = workspace + + @property + def description(self) -> str: + return "Read and parse all text from a file." + + @property + def arguments(self) -> dict: + return { + "filename": { + "type": "string", + "description": "The name of the file to read.", + }, + } + + def _check_preconditions(self, filename: str) -> AbilityResult | None: + message = "" + try: + import charset_normalizer  # noqa: F401 + except ImportError: + message = "Package charset_normalizer is not installed." + + try: + file_path = self._workspace.get_path(filename) + if not file_path.exists(): + message = f"File {filename} does not exist." + if not file_path.is_file(): + message = f"{filename} is not a file." + except ValueError as e: + message = str(e) + + if message: + return AbilityResult( + ability_name=self.name(), + ability_args={"filename": filename}, + success=False, + message=message, + data=None, + ) + + def __call__(self, filename: str) -> AbilityResult: + if result := self._check_preconditions(filename): + return result + + from unstructured.partition.auto import partition + + file_path = self._workspace.get_path(filename) + try: + elements = partition(str(file_path)) + # TODO: Lots of other potentially useful information is available + # in the partitioned file. Consider returning more of it. + new_knowledge = Knowledge( + content="\n\n".join([element.text for element in elements]), + content_type=ContentType.TEXT, + content_metadata={"filename": filename}, + ) + success = True + message = f"File {file_path} read successfully."
+ except IOError as e: + new_knowledge = None + success = False + message = str(e) + + return AbilityResult( + ability_name=self.name(), + ability_args={"filename": filename}, + success=success, + message=message, + new_knowledge=new_knowledge, + ) + + +class WriteFile(Ability): + default_configuration = AbilityConfiguration( + packages_required=["unstructured"], + workspace_required=True, + ) + + def __init__( + self, + logger: logging.Logger, + workspace: Workspace, + ): + self._logger = logger + self._workspace = workspace + + @property + def description(self) -> str: + return "Write text to a file." + + @property + def arguments(self) -> dict: + return { + "filename": { + "type": "string", + "description": "The name of the file to write.", + }, + "contents": { + "type": "string", + "description": "The contents of the file to write.", + }, + } + + def _check_preconditions( + self, filename: str, contents: str + ) -> AbilityResult | None: + message = "" + try: + file_path = self._workspace.get_path(filename) + if file_path.exists(): + message = f"File {filename} already exists." + if not contents: + message = f"File {filename} was not given any content." + except ValueError as e: + message = str(e) + + if message: + return AbilityResult( + ability_name=self.name(), + ability_args={"filename": filename, "contents": contents}, + success=False, + message=message, + data=None, + ) + + def __call__(self, filename: str, contents: str) -> AbilityResult: + if result := self._check_preconditions(filename, contents): + return result + + file_path = self._workspace.get_path(filename) + try: + directory = os.path.dirname(file_path) + os.makedirs(directory, exist_ok=True) + with open(file_path, "w", encoding="utf-8") as f: + f.write(contents) + success = True + message = f"File {file_path} written successfully." + except IOError as e: + success = False + message = str(e) + + return AbilityResult( + ability_name=self.name(), + ability_args={"filename": filename}, + success=success, + message=message, + ) diff --git a/autogpt/core/ability/builtins/query_language_model.py b/autogpt/core/ability/builtins/query_language_model.py new file mode 100644 index 00000000..95a5e094 --- /dev/null +++ b/autogpt/core/ability/builtins/query_language_model.py @@ -0,0 +1,78 @@ +import logging + +from autogpt.core.ability.base import Ability, AbilityConfiguration +from autogpt.core.ability.schema import AbilityResult +from autogpt.core.planning.simple import LanguageModelConfiguration +from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat +from autogpt.core.resource.model_providers import ( + LanguageModelMessage, + LanguageModelProvider, + MessageRole, + ModelProviderName, + OpenAIModelName, +) + + +class QueryLanguageModel(Ability): + default_configuration = AbilityConfiguration( + location=PluginLocation( + storage_format=PluginStorageFormat.INSTALLED_PACKAGE, + storage_route="autogpt.core.ability.builtins.QueryLanguageModel", + ), + language_model_required=LanguageModelConfiguration( + model_name=OpenAIModelName.GPT3, + provider_name=ModelProviderName.OPENAI, + temperature=0.9, + ), + ) + + def __init__( + self, + logger: logging.Logger, + configuration: AbilityConfiguration, + language_model_provider: LanguageModelProvider, + ): + self._logger = logger + self._configuration = configuration + self._language_model_provider = language_model_provider + + @classmethod + def description(cls) -> str: + return "Query a language model. A query should be a question and any relevant context."
+ + @classmethod + def arguments(cls) -> dict: + return { + "query": { + "type": "string", + "description": "A query for a language model. A query should contain a question and any relevant context.", + }, + } + + @classmethod + def required_arguments(cls) -> list[str]: + return ["query"] + + async def __call__(self, query: str) -> AbilityResult: + messages = [ + LanguageModelMessage( + content=query, + role=MessageRole.USER, + ), + ] + model_response = await self._language_model_provider.create_language_completion( + model_prompt=messages, + functions=[], + model_name=self._configuration.language_model_required.model_name, + completion_parser=self._parse_response, + ) + return AbilityResult( + ability_name=self.name(), + ability_args={"query": query}, + success=True, + message=model_response.content["content"], + ) + + @staticmethod + def _parse_response(response_content: dict) -> dict: + return {"content": response_content["content"]} diff --git a/autogpt/core/ability/schema.py b/autogpt/core/ability/schema.py new file mode 100644 index 00000000..5bba5b7f --- /dev/null +++ b/autogpt/core/ability/schema.py @@ -0,0 +1,26 @@ +import enum +from typing import Any + +from pydantic import BaseModel + + +class ContentType(str, enum.Enum): + # TBD what these actually are. + TEXT = "text" + CODE = "code" + + +class Knowledge(BaseModel): + content: str + content_type: ContentType + content_metadata: dict[str, Any] + + +class AbilityResult(BaseModel): + """The AbilityResult is a standard response struct for an ability.""" + + ability_name: str + ability_args: dict[str, str] + success: bool + message: str + new_knowledge: Knowledge = None diff --git a/autogpt/core/ability/simple.py b/autogpt/core/ability/simple.py new file mode 100644 index 00000000..459a8f13 --- /dev/null +++ b/autogpt/core/ability/simple.py @@ -0,0 +1,96 @@ +import logging + +from autogpt.core.ability.base import Ability, AbilityConfiguration, AbilityRegistry +from autogpt.core.ability.builtins import BUILTIN_ABILITIES +from autogpt.core.ability.schema import AbilityResult +from autogpt.core.configuration import Configurable, SystemConfiguration, SystemSettings +from autogpt.core.memory.base import Memory +from autogpt.core.plugin.simple import SimplePluginService +from autogpt.core.resource.model_providers import ( + LanguageModelProvider, + ModelProviderName, +) +from autogpt.core.workspace.base import Workspace + + +class AbilityRegistryConfiguration(SystemConfiguration): + """Configuration for the AbilityRegistry subsystem.""" + + abilities: dict[str, AbilityConfiguration] + + +class AbilityRegistrySettings(SystemSettings): + configuration: AbilityRegistryConfiguration + + +class SimpleAbilityRegistry(AbilityRegistry, Configurable): + default_settings = AbilityRegistrySettings( + name="simple_ability_registry", + description="A simple ability registry.", + configuration=AbilityRegistryConfiguration( + abilities={ + ability_name: ability.default_configuration + for ability_name, ability in BUILTIN_ABILITIES.items() + }, + ), + ) + + def __init__( + self, + settings: AbilityRegistrySettings, + logger: logging.Logger, + memory: Memory, + workspace: Workspace, + model_providers: dict[ModelProviderName, LanguageModelProvider], + ): + self._configuration = settings.configuration + self._logger = logger + self._memory = memory + self._workspace = workspace + self._model_providers = model_providers + self._abilities = [] + for ( + ability_name, + ability_configuration, + ) in self._configuration.abilities.items(): + 
self.register_ability(ability_name, ability_configuration) + + def register_ability( + self, ability_name: str, ability_configuration: AbilityConfiguration + ) -> None: + ability_class = SimplePluginService.get_plugin(ability_configuration.location) + ability_args = { + "logger": self._logger.getChild(ability_name), + "configuration": ability_configuration, + } + if ability_configuration.packages_required: + # TODO: Check packages are installed and maybe install them. + pass + if ability_configuration.memory_provider_required: + ability_args["memory"] = self._memory + if ability_configuration.workspace_required: + ability_args["workspace"] = self._workspace + if ability_configuration.language_model_required: + ability_args["language_model_provider"] = self._model_providers[ + ability_configuration.language_model_required.provider_name + ] + ability = ability_class(**ability_args) + self._abilities.append(ability) + + def list_abilities(self) -> list[str]: + return [ + f"{ability.name()}: {ability.description()}" for ability in self._abilities + ] + + def dump_abilities(self) -> list[dict]: + return [ability.dump() for ability in self._abilities] + + def get_ability(self, ability_name: str) -> Ability: + for ability in self._abilities: + if ability.name() == ability_name: + return ability + raise ValueError(f"Ability '{ability_name}' not found.") + + async def perform(self, ability_name: str, **kwargs) -> AbilityResult: + ability = self.get_ability(ability_name) + return await ability(**kwargs) diff --git a/autogpt/core/agent/__init__.py b/autogpt/core/agent/__init__.py new file mode 100644 index 00000000..9324042b --- /dev/null +++ b/autogpt/core/agent/__init__.py @@ -0,0 +1,3 @@ +"""The Agent is an autonomous entity guided by an LLM provider.""" +from autogpt.core.agent.base import Agent +from autogpt.core.agent.simple import AgentSettings, SimpleAgent diff --git a/autogpt/core/agent/base.py b/autogpt/core/agent/base.py new file mode 100644 index 00000000..c574dcea --- /dev/null +++ b/autogpt/core/agent/base.py @@ -0,0 +1,26 @@ +import abc +import logging +from pathlib import Path + + +class Agent(abc.ABC): + @abc.abstractmethod + def __init__(self, *args, **kwargs): + ... + + @classmethod + @abc.abstractmethod + def from_workspace( + cls, + workspace_path: Path, + logger: logging.Logger, + ) -> "Agent": + ... + + @abc.abstractmethod + async def determine_next_ability(self, *args, **kwargs): + ... + + @abc.abstractmethod + def __repr__(self): + ...
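The `Agent` ABC above pins down only the construction and planning surface: build an agent from a workspace, then repeatedly ask it for its next ability. A minimal hypothetical subclass sketching that contract (`EchoAgent` is illustrative and not part of this patch):

```
import asyncio
import logging
from pathlib import Path

from autogpt.core.agent.base import Agent


class EchoAgent(Agent):
    # Toy Agent implementation; for illustration only.

    def __init__(self, logger: logging.Logger):
        self._logger = logger

    @classmethod
    def from_workspace(
        cls, workspace_path: Path, logger: logging.Logger
    ) -> "EchoAgent":
        # A real agent would load persisted AgentSettings from workspace_path here.
        return cls(logger)

    async def determine_next_ability(self, *args, **kwargs):
        # A real agent would consult its planner; this stub returns a fixed answer.
        return {"next_ability": "noop", "reason": "illustration"}

    def __repr__(self) -> str:
        return "EchoAgent()"


if __name__ == "__main__":
    agent = EchoAgent.from_workspace(Path("."), logging.getLogger("echo"))
    print(asyncio.run(agent.determine_next_ability()))
```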
diff --git a/autogpt/core/agent/simple.py b/autogpt/core/agent/simple.py new file mode 100644 index 00000000..bb986b9f --- /dev/null +++ b/autogpt/core/agent/simple.py @@ -0,0 +1,398 @@ +import logging +from datetime import datetime +from pathlib import Path +from typing import Any + +from pydantic import BaseModel + +from autogpt.core.ability import ( + AbilityRegistrySettings, + AbilityResult, + SimpleAbilityRegistry, +) +from autogpt.core.agent.base import Agent +from autogpt.core.configuration import Configurable, SystemConfiguration, SystemSettings +from autogpt.core.memory import MemorySettings, SimpleMemory +from autogpt.core.planning import PlannerSettings, SimplePlanner, Task, TaskStatus +from autogpt.core.plugin.simple import ( + PluginLocation, + PluginStorageFormat, + SimplePluginService, +) +from autogpt.core.resource.model_providers import OpenAIProvider, OpenAISettings +from autogpt.core.workspace.simple import SimpleWorkspace, WorkspaceSettings + + +class AgentSystems(SystemConfiguration): + ability_registry: PluginLocation + memory: PluginLocation + embedding_model: PluginLocation + openai_provider: PluginLocation + planning: PluginLocation + workspace: PluginLocation + + +class AgentConfiguration(SystemConfiguration): + cycle_count: int + max_task_cycle_count: int + creation_time: str + name: str + role: str + goals: list[str] + systems: AgentSystems + + +class AgentSystemSettings(SystemSettings): + configuration: AgentConfiguration + + +class AgentSettings(BaseModel): + agent: AgentSystemSettings + ability_registry: AbilityRegistrySettings + memory: MemorySettings + openai_provider: OpenAISettings + planning: PlannerSettings + workspace: WorkspaceSettings + + def update_agent_name_and_goals(self, agent_goals: dict) -> None: + self.agent.configuration.name = agent_goals["agent_name"] + self.agent.configuration.role = agent_goals["agent_role"] + self.agent.configuration.goals = agent_goals["agent_goals"] + + +class SimpleAgent(Agent, Configurable): + default_settings = AgentSystemSettings( + name="simple_agent", + description="A simple agent.", + configuration=AgentConfiguration( + name="Entrepreneur-GPT", + role=( + "An AI designed to autonomously develop and run businesses with " + "the sole goal of increasing your net worth." 
+ ), + goals=[ + "Increase net worth", + "Grow Twitter Account", + "Develop and manage multiple businesses autonomously", + ], + cycle_count=0, + max_task_cycle_count=3, + creation_time="", + systems=AgentSystems( + ability_registry=PluginLocation( + storage_format=PluginStorageFormat.INSTALLED_PACKAGE, + storage_route="autogpt.core.ability.SimpleAbilityRegistry", + ), + memory=PluginLocation( + storage_format=PluginStorageFormat.INSTALLED_PACKAGE, + storage_route="autogpt.core.memory.SimpleMemory", + ), + openai_provider=PluginLocation( + storage_format=PluginStorageFormat.INSTALLED_PACKAGE, + storage_route="autogpt.core.resource.model_providers.OpenAIProvider", + ), + planning=PluginLocation( + storage_format=PluginStorageFormat.INSTALLED_PACKAGE, + storage_route="autogpt.core.planning.SimplePlanner", + ), + workspace=PluginLocation( + storage_format=PluginStorageFormat.INSTALLED_PACKAGE, + storage_route="autogpt.core.workspace.SimpleWorkspace", + ), + ), + ), + ) + + def __init__( + self, + settings: AgentSystemSettings, + logger: logging.Logger, + ability_registry: SimpleAbilityRegistry, + memory: SimpleMemory, + openai_provider: OpenAIProvider, + planning: SimplePlanner, + workspace: SimpleWorkspace, + ): + self._configuration = settings.configuration + self._logger = logger + self._ability_registry = ability_registry + self._memory = memory + # FIXME: Need some work to make this work as a dict of providers + # Getting the construction of the config to work is a bit tricky + self._openai_provider = openai_provider + self._planning = planning + self._workspace = workspace + self._task_queue = [] + self._completed_tasks = [] + self._current_task = None + self._next_ability = None + + @classmethod + def from_workspace( + cls, + workspace_path: Path, + logger: logging.Logger, + ) -> "SimpleAgent": + agent_settings = SimpleWorkspace.load_agent_settings(workspace_path) + agent_args = {} + + agent_args["settings"] = agent_settings.agent + agent_args["logger"] = logger + agent_args["workspace"] = cls._get_system_instance( + "workspace", + agent_settings, + logger, + ) + agent_args["openai_provider"] = cls._get_system_instance( + "openai_provider", + agent_settings, + logger, + ) + agent_args["embedding_model"] = cls._get_system_instance( + "embedding_model", + agent_settings, + logger, + model_providers={"openai": agent_args["openai_provider"]}, + ) + agent_args["planning"] = cls._get_system_instance( + "planning", + agent_settings, + logger, + model_providers={"openai": agent_args["openai_provider"]}, + ) + agent_args["memory"] = cls._get_system_instance( + "memory", + agent_settings, + logger, + workspace=agent_args["workspace"], + ) + + agent_args["ability_registry"] = cls._get_system_instance( + "ability_registry", + agent_settings, + logger, + workspace=agent_args["workspace"], + memory=agent_args["memory"], + model_providers={"openai": agent_args["openai_provider"]}, + ) + + return cls(**agent_args) + + async def build_initial_plan(self) -> dict: + plan = await self._planning.make_initial_plan( + agent_name=self._configuration.name, + agent_role=self._configuration.role, + agent_goals=self._configuration.goals, + abilities=self._ability_registry.list_abilities(), + ) + tasks = [Task.parse_obj(task) for task in plan.content["task_list"]] + + # TODO: Should probably do a step to evaluate the quality of the generated tasks, + # and ensure that they have actionable ready and acceptance criteria + + self._task_queue.extend(tasks) + self._task_queue.sort(key=lambda t: t.priority, 
reverse=True) + self._task_queue[-1].context.status = TaskStatus.READY + return plan.content + + async def determine_next_ability(self, *args, **kwargs): + if not self._task_queue: + return {"response": "I don't have any tasks to work on right now."} + + self._configuration.cycle_count += 1 + task = self._task_queue.pop() + self._logger.info(f"Working on task: {task}") + + task = await self._evaluate_task_and_add_context(task) + next_ability = await self._choose_next_ability( + task, + self._ability_registry.dump_abilities(), + ) + self._current_task = task + self._next_ability = next_ability.content + return self._current_task, self._next_ability + + async def execute_next_ability(self, user_input: str, *args, **kwargs): + if user_input == "y": + ability = self._ability_registry.get_ability( + self._next_ability["next_ability"] + ) + ability_response = await ability(**self._next_ability["ability_arguments"]) + await self._update_tasks_and_memory(ability_response) + if self._current_task.context.status == TaskStatus.DONE: + self._completed_tasks.append(self._current_task) + else: + self._task_queue.append(self._current_task) + self._current_task = None + self._next_ability = None + + return ability_response + else: + raise NotImplementedError + + async def _evaluate_task_and_add_context(self, task: Task) -> Task: + """Evaluate the task and add context to it.""" + if task.context.status == TaskStatus.IN_PROGRESS: + # Nothing to do here + return task + else: + self._logger.debug(f"Evaluating task {task} and adding relevant context.") + # TODO: Look up relevant memories (need working memory system) + # TODO: Evaluate whether there is enough information to start the task (language model call). + task.context.enough_info = True + task.context.status = TaskStatus.IN_PROGRESS + return task + + async def _choose_next_ability(self, task: Task, ability_schema: list[dict]): + """Choose the next ability to use for the task.""" + self._logger.debug(f"Choosing next ability for task {task}.") + if task.context.cycle_count > self._configuration.max_task_cycle_count: + # Don't hit the LLM, just set the next action as "breakdown_task" with an appropriate reason + raise NotImplementedError + elif not task.context.enough_info: + # Don't ask the LLM, just set the next action as "breakdown_task" with an appropriate reason + raise NotImplementedError + else: + next_ability = await self._planning.determine_next_ability( + task, ability_schema + ) + return next_ability + + async def _update_tasks_and_memory(self, ability_result: AbilityResult): + self._current_task.context.cycle_count += 1 + self._current_task.context.prior_actions.append(ability_result) + # TODO: Summarize new knowledge + # TODO: store knowledge and summaries in memory and in relevant tasks + # TODO: evaluate whether the task is complete + + def __repr__(self): + return "SimpleAgent()" + + ################################################################ + # Factory interface for agent bootstrapping and initialization # + ################################################################ + + @classmethod + def build_user_configuration(cls) -> dict[str, Any]: + """Build the user's configuration.""" + configuration_dict = { + "agent": cls.get_user_config(), + } + + system_locations = configuration_dict["agent"]["configuration"]["systems"] + for system_name, system_location in system_locations.items(): + system_class = SimplePluginService.get_plugin(system_location) + configuration_dict[system_name] = system_class.get_user_config() + configuration_dict 
= _prune_empty_dicts(configuration_dict) + return configuration_dict + + @classmethod + def compile_settings( + cls, logger: logging.Logger, user_configuration: dict + ) -> AgentSettings: + """Compile the user's configuration with the defaults.""" + logger.debug("Processing agent system configuration.") + configuration_dict = { + "agent": cls.build_agent_configuration( + user_configuration.get("agent", {}) + ).dict(), + } + + system_locations = configuration_dict["agent"]["configuration"]["systems"] + + # Build up default configuration + for system_name, system_location in system_locations.items(): + logger.debug(f"Compiling configuration for system {system_name}") + system_class = SimplePluginService.get_plugin(system_location) + configuration_dict[system_name] = system_class.build_agent_configuration( + user_configuration.get(system_name, {}) + ).dict() + + return AgentSettings.parse_obj(configuration_dict) + + @classmethod + async def determine_agent_name_and_goals( + cls, + user_objective: str, + agent_settings: AgentSettings, + logger: logging.Logger, + ) -> dict: + logger.debug("Loading OpenAI provider.") + provider: OpenAIProvider = cls._get_system_instance( + "openai_provider", + agent_settings, + logger=logger, + ) + logger.debug("Loading agent planner.") + agent_planner: SimplePlanner = cls._get_system_instance( + "planning", + agent_settings, + logger=logger, + model_providers={"openai": provider}, + ) + logger.debug("determining agent name and goals.") + model_response = await agent_planner.decide_name_and_goals( + user_objective, + ) + + return model_response.content + + @classmethod + def provision_agent( + cls, + agent_settings: AgentSettings, + logger: logging.Logger, + ): + agent_settings.agent.configuration.creation_time = datetime.now().strftime( + "%Y%m%d_%H%M%S" + ) + workspace: SimpleWorkspace = cls._get_system_instance( + "workspace", + agent_settings, + logger=logger, + ) + return workspace.setup_workspace(agent_settings, logger) + + @classmethod + def _get_system_instance( + cls, + system_name: str, + agent_settings: AgentSettings, + logger: logging.Logger, + *args, + **kwargs, + ): + system_locations = agent_settings.agent.configuration.systems.dict() + + system_settings = getattr(agent_settings, system_name) + system_class = SimplePluginService.get_plugin(system_locations[system_name]) + system_instance = system_class( + system_settings, + *args, + logger=logger.getChild(system_name), + **kwargs, + ) + return system_instance + + +def _prune_empty_dicts(d: dict) -> dict: + """ + Prune branches from a nested dictionary if the branch only contains empty dictionaries at the leaves. + + Args: + d: The dictionary to prune. + + Returns: + The pruned dictionary. 
+ """ + pruned = {} + for key, value in d.items(): + if isinstance(value, dict): + pruned_value = _prune_empty_dicts(value) + if ( + pruned_value + ): # if the pruned dictionary is not empty, add it to the result + pruned[key] = pruned_value + else: + pruned[key] = value + return pruned diff --git a/autogpt/core/configuration/__init__.py b/autogpt/core/configuration/__init__.py index e69de29b..b6fee749 100644 --- a/autogpt/core/configuration/__init__.py +++ b/autogpt/core/configuration/__init__.py @@ -0,0 +1,7 @@ +"""The configuration encapsulates settings for all Agent subsystems.""" +from autogpt.core.configuration.schema import ( + Configurable, + SystemConfiguration, + SystemSettings, + UserConfigurable, +) diff --git a/autogpt/core/configuration/schema.py b/autogpt/core/configuration/schema.py index aed484ef..4a1a4d49 100644 --- a/autogpt/core/configuration/schema.py +++ b/autogpt/core/configuration/schema.py @@ -1,9 +1,12 @@ import abc -import copy import typing from typing import Any, Generic, TypeVar -from pydantic import BaseModel +from pydantic import BaseModel, Field + + +def UserConfigurable(*args, **kwargs): + return Field(*args, **kwargs, user_configurable=True) class SystemConfiguration(BaseModel): @@ -15,14 +18,13 @@ class SystemConfiguration(BaseModel): use_enum_values = True -class SystemSettings(BaseModel, abc.ABC): +class SystemSettings(BaseModel): """A base class for all system settings.""" name: str - description: typing.Optional[str] + description: str class Config: - arbitrary_types_allowed = True extra = "forbid" use_enum_values = True @@ -34,27 +36,29 @@ class Configurable(abc.ABC, Generic[S]): """A base class for all configurable objects.""" prefix: str = "" - defaults_settings: typing.ClassVar[S] + default_settings: typing.ClassVar[S] @classmethod def get_user_config(cls) -> dict[str, Any]: - return _get_user_config_fields(cls.defaults_settings) + return _get_user_config_fields(cls.default_settings) @classmethod - def build_agent_configuration(cls, configuration: dict = {}) -> S: + def build_agent_configuration(cls, configuration: dict) -> S: """Process the configuration for this object.""" - defaults_settings = cls.defaults_settings.dict() - final_configuration = deep_update(defaults_settings, configuration) + defaults = cls.default_settings.dict() + final_configuration = deep_update(defaults, configuration) - return cls.defaults_settings.__class__.parse_obj(final_configuration) + return cls.default_settings.__class__.parse_obj(final_configuration) def _get_user_config_fields(instance: BaseModel) -> dict[str, Any]: """ Get the user config fields of a Pydantic model instance. + Args: instance: The Pydantic model instance. + Returns: The user config fields of the instance. """ @@ -83,13 +87,14 @@ def _get_user_config_fields(instance: BaseModel) -> dict[str, Any]: def deep_update(original_dict: dict, update_dict: dict) -> dict: """ Recursively update a dictionary. + Args: original_dict (dict): The dictionary to be updated. update_dict (dict): The dictionary to update with. + Returns: dict: The updated dictionary. 
""" - original_dict = copy.deepcopy(original_dict) for key, value in update_dict.items(): if ( key in original_dict diff --git a/autogpt/core/memory/__init__.py b/autogpt/core/memory/__init__.py new file mode 100644 index 00000000..eeba203a --- /dev/null +++ b/autogpt/core/memory/__init__.py @@ -0,0 +1,3 @@ +"""The memory subsystem manages the Agent's long-term memory.""" +from autogpt.core.memory.base import Memory +from autogpt.core.memory.simple import MemorySettings, SimpleMemory diff --git a/autogpt/core/memory/base.py b/autogpt/core/memory/base.py new file mode 100644 index 00000000..74a42840 --- /dev/null +++ b/autogpt/core/memory/base.py @@ -0,0 +1,13 @@ +import abc + + +class Memory(abc.ABC): + pass + + +class MemoryItem(abc.ABC): + pass + + +class MessageHistory(abc.ABC): + pass diff --git a/autogpt/core/memory/simple.py b/autogpt/core/memory/simple.py new file mode 100644 index 00000000..2433f48b --- /dev/null +++ b/autogpt/core/memory/simple.py @@ -0,0 +1,47 @@ +import json +import logging + +from autogpt.core.configuration import Configurable, SystemConfiguration, SystemSettings +from autogpt.core.memory.base import Memory +from autogpt.core.workspace import Workspace + + +class MemoryConfiguration(SystemConfiguration): + pass + + +class MemorySettings(SystemSettings): + configuration: MemoryConfiguration + + +class MessageHistory: + def __init__(self, previous_message_history: list[str]): + self._message_history = previous_message_history + + +class SimpleMemory(Memory, Configurable): + default_settings = MemorySettings( + name="simple_memory", + description="A simple memory.", + configuration=MemoryConfiguration(), + ) + + def __init__( + self, + settings: MemorySettings, + logger: logging.Logger, + workspace: Workspace, + ): + self._configuration = settings.configuration + self._logger = logger + self._message_history = self._load_message_history(workspace) + + @staticmethod + def _load_message_history(workspace: Workspace): + message_history_path = workspace.get_path("message_history.json") + if message_history_path.exists(): + with message_history_path.open("r") as f: + message_history = json.load(f) + else: + message_history = [] + return MessageHistory(message_history) diff --git a/autogpt/core/planning/__init__.py b/autogpt/core/planning/__init__.py new file mode 100644 index 00000000..9125feb1 --- /dev/null +++ b/autogpt/core/planning/__init__.py @@ -0,0 +1,10 @@ +"""The planning system organizes the Agent's activities.""" +from autogpt.core.planning.schema import ( + LanguageModelClassification, + LanguageModelPrompt, + LanguageModelResponse, + Task, + TaskStatus, + TaskType, +) +from autogpt.core.planning.simple import PlannerSettings, SimplePlanner diff --git a/autogpt/core/planning/base.py b/autogpt/core/planning/base.py new file mode 100644 index 00000000..cfda45a3 --- /dev/null +++ b/autogpt/core/planning/base.py @@ -0,0 +1,76 @@ +import abc + +from autogpt.core.configuration import SystemConfiguration +from autogpt.core.planning.schema import ( + LanguageModelClassification, + LanguageModelPrompt, +) + +# class Planner(abc.ABC): +# """Manages the agent's planning and goal-setting by constructing language model prompts.""" +# +# @staticmethod +# @abc.abstractmethod +# async def decide_name_and_goals( +# user_objective: str, +# ) -> LanguageModelResponse: +# """Decide the name and goals of an Agent from a user-defined objective. +# +# Args: +# user_objective: The user-defined objective for the agent. 
+# +# Returns: +# The agent name and goals as a response from the language model. +# +# """ +# ... +# +# @abc.abstractmethod +# async def plan(self, context: PlanningContext) -> LanguageModelResponse: +# """Plan the next ability for the Agent. +# +# Args: +# context: A context object containing information about the agent's +# progress, result, memories, and feedback. +# +# +# Returns: +# The next ability the agent should take along with thoughts and reasoning. +# +# """ +# ... +# +# @abc.abstractmethod +# def reflect( +# self, +# context: ReflectionContext, +# ) -> LanguageModelResponse: +# """Reflect on a planned ability and provide self-criticism. +# +# +# Args: +# context: A context object containing information about the agent's +# reasoning, plan, thoughts, and criticism. +# +# Returns: +# Self-criticism about the agent's plan. +# +# """ +# ... + + +class PromptStrategy(abc.ABC): + default_configuration: SystemConfiguration + + @property + @abc.abstractmethod + def model_classification(self) -> LanguageModelClassification: + ... + + @abc.abstractmethod + def build_prompt(self, *_, **kwargs) -> LanguageModelPrompt: + ... + + @abc.abstractmethod + def parse_response_content(self, response_content: dict) -> dict: + ... diff --git a/autogpt/core/planning/schema.py b/autogpt/core/planning/schema.py new file mode 100644 index 00000000..4c19ea4b --- /dev/null +++ b/autogpt/core/planning/schema.py @@ -0,0 +1,76 @@ +import enum + +from pydantic import BaseModel, Field + +from autogpt.core.ability.schema import AbilityResult +from autogpt.core.resource.model_providers.schema import ( + LanguageModelFunction, + LanguageModelMessage, + LanguageModelProviderModelResponse, +) + + +class LanguageModelClassification(str, enum.Enum): + """The LanguageModelClassification is a functional description of the model. + + This is used to determine what kind of model to use for a given prompt. + Sometimes we prefer a faster or cheaper model to accomplish a task when + possible. 
+ + """ + + FAST_MODEL: str = "fast_model" + SMART_MODEL: str = "smart_model" + + +class LanguageModelPrompt(BaseModel): + messages: list[LanguageModelMessage] + functions: list[LanguageModelFunction] = Field(default_factory=list) + + def __str__(self): + return "\n\n".join([f"{m.role.value}: {m.content}" for m in self.messages]) + + +class LanguageModelResponse(LanguageModelProviderModelResponse): + """Standard response struct for a response from a language model.""" + + +class TaskType(str, enum.Enum): + RESEARCH: str = "research" + WRITE: str = "write" + EDIT: str = "edit" + CODE: str = "code" + DESIGN: str = "design" + TEST: str = "test" + PLAN: str = "plan" + + +class TaskStatus(str, enum.Enum): + BACKLOG: str = "backlog" + READY: str = "ready" + IN_PROGRESS: str = "in_progress" + DONE: str = "done" + + +class TaskContext(BaseModel): + cycle_count: int = 0 + status: TaskStatus = TaskStatus.BACKLOG + parent: "Task" = None + prior_actions: list[AbilityResult] = Field(default_factory=list) + memories: list = Field(default_factory=list) + user_input: list[str] = Field(default_factory=list) + supplementary_info: list[str] = Field(default_factory=list) + enough_info: bool = False + + +class Task(BaseModel): + objective: str + type: str # TaskType FIXME: gpt does not obey the enum parameter in its schema + priority: int + ready_criteria: list[str] + acceptance_criteria: list[str] + context: TaskContext = Field(default_factory=TaskContext) + + +# Need to resolve the circular dependency between Task and TaskContext once both models are defined. +TaskContext.update_forward_refs() diff --git a/autogpt/core/planning/simple.py b/autogpt/core/planning/simple.py new file mode 100644 index 00000000..633511ec --- /dev/null +++ b/autogpt/core/planning/simple.py @@ -0,0 +1,182 @@ +import logging +import platform +import time + +import distro + +from autogpt.core.configuration import ( + Configurable, + SystemConfiguration, + SystemSettings, + UserConfigurable, +) +from autogpt.core.planning import strategies +from autogpt.core.planning.base import PromptStrategy +from autogpt.core.planning.schema import ( + LanguageModelClassification, + LanguageModelResponse, + Task, +) +from autogpt.core.resource.model_providers import ( + LanguageModelProvider, + ModelProviderName, + OpenAIModelName, +) +from autogpt.core.workspace import Workspace + + +class LanguageModelConfiguration(SystemConfiguration): + """Struct for model configuration.""" + + model_name: str = UserConfigurable() + provider_name: ModelProviderName = UserConfigurable() + temperature: float = UserConfigurable() + + +class PromptStrategiesConfiguration(SystemConfiguration): + name_and_goals: strategies.NameAndGoalsConfiguration + initial_plan: strategies.InitialPlanConfiguration + next_ability: strategies.NextAbilityConfiguration + + +class PlannerConfiguration(SystemConfiguration): + """Configuration for the Planner subsystem.""" + + models: dict[LanguageModelClassification, LanguageModelConfiguration] + prompt_strategies: PromptStrategiesConfiguration + + +class PlannerSettings(SystemSettings): + """Settings for the Planner subsystem.""" + + configuration: PlannerConfiguration + + +class SimplePlanner(Configurable): + """Manages the agent's planning and goal-setting by constructing language model prompts.""" + + default_settings = PlannerSettings( + name="planner", + description="Manages the agent's planning and goal-setting by constructing language model prompts.", + configuration=PlannerConfiguration( + models={ + 
LanguageModelClassification.FAST_MODEL: LanguageModelConfiguration( + model_name=OpenAIModelName.GPT3, + provider_name=ModelProviderName.OPENAI, + temperature=0.9, + ), + LanguageModelClassification.SMART_MODEL: LanguageModelConfiguration( + model_name=OpenAIModelName.GPT4, + provider_name=ModelProviderName.OPENAI, + temperature=0.9, + ), + }, + prompt_strategies=PromptStrategiesConfiguration( + name_and_goals=strategies.NameAndGoals.default_configuration, + initial_plan=strategies.InitialPlan.default_configuration, + next_ability=strategies.NextAbility.default_configuration, + ), + ), + ) + + def __init__( + self, + settings: PlannerSettings, + logger: logging.Logger, + model_providers: dict[ModelProviderName, LanguageModelProvider], + workspace: Workspace = None, # Workspace is not available during bootstrapping. + ) -> None: + self._configuration = settings.configuration + self._logger = logger + self._workspace = workspace + + self._providers: dict[LanguageModelClassification, LanguageModelProvider] = {} + for model, model_config in self._configuration.models.items(): + self._providers[model] = model_providers[model_config.provider_name] + + self._prompt_strategies = { + "name_and_goals": strategies.NameAndGoals( + **self._configuration.prompt_strategies.name_and_goals.dict() + ), + "initial_plan": strategies.InitialPlan( + **self._configuration.prompt_strategies.initial_plan.dict() + ), + "next_ability": strategies.NextAbility( + **self._configuration.prompt_strategies.next_ability.dict() + ), + } + + async def decide_name_and_goals(self, user_objective: str) -> LanguageModelResponse: + return await self.chat_with_model( + self._prompt_strategies["name_and_goals"], + user_objective=user_objective, + ) + + async def make_initial_plan( + self, + agent_name: str, + agent_role: str, + agent_goals: list[str], + abilities: list[str], + ) -> LanguageModelResponse: + return await self.chat_with_model( + self._prompt_strategies["initial_plan"], + agent_name=agent_name, + agent_role=agent_role, + agent_goals=agent_goals, + abilities=abilities, + ) + + async def determine_next_ability( + self, + task: Task, + ability_schema: list[dict], + ): + return await self.chat_with_model( + self._prompt_strategies["next_ability"], + task=task, + ability_schema=ability_schema, + ) + + async def chat_with_model( + self, + prompt_strategy: PromptStrategy, + **kwargs, + ) -> LanguageModelResponse: + model_classification = prompt_strategy.model_classification + model_configuration = self._configuration.models[model_classification].dict() + self._logger.debug(f"Using model configuration: {model_configuration}") + del model_configuration["provider_name"] + provider = self._providers[model_classification] + + template_kwargs = self._make_template_kwargs_for_strategy(prompt_strategy) + template_kwargs.update(kwargs) + prompt = prompt_strategy.build_prompt(**template_kwargs) + + self._logger.debug(f"Using prompt:\n{prompt}\n\n") + response = await provider.create_language_completion( + model_prompt=prompt.messages, + functions=prompt.functions, + **model_configuration, + completion_parser=prompt_strategy.parse_response_content, + ) + return LanguageModelResponse.parse_obj(response.dict()) + + def _make_template_kwargs_for_strategy(self, strategy: PromptStrategy): + provider = self._providers[strategy.model_classification] + template_kwargs = { + "os_info": get_os_info(), + "api_budget": provider.get_remaining_budget(), + "current_time": time.strftime("%c"), + } + return template_kwargs + + +def get_os_info() -> str: 
+ os_name = platform.system() + os_info = ( + platform.platform(terse=True) + if os_name != "Linux" + else distro.name(pretty=True) + ) + return os_info diff --git a/autogpt/core/planning/strategies/__init__.py b/autogpt/core/planning/strategies/__init__.py new file mode 100644 index 00000000..856c000e --- /dev/null +++ b/autogpt/core/planning/strategies/__init__.py @@ -0,0 +1,12 @@ +from autogpt.core.planning.strategies.initial_plan import ( + InitialPlan, + InitialPlanConfiguration, +) +from autogpt.core.planning.strategies.name_and_goals import ( + NameAndGoals, + NameAndGoalsConfiguration, +) +from autogpt.core.planning.strategies.next_ability import ( + NextAbility, + NextAbilityConfiguration, +) diff --git a/autogpt/core/planning/strategies/initial_plan.py b/autogpt/core/planning/strategies/initial_plan.py new file mode 100644 index 00000000..2f8d99b8 --- /dev/null +++ b/autogpt/core/planning/strategies/initial_plan.py @@ -0,0 +1,190 @@ +from autogpt.core.configuration import SystemConfiguration, UserConfigurable +from autogpt.core.planning.base import PromptStrategy +from autogpt.core.planning.schema import ( + LanguageModelClassification, + LanguageModelPrompt, + Task, + TaskType, +) +from autogpt.core.planning.strategies.utils import json_loads, to_numbered_list +from autogpt.core.resource.model_providers import ( + LanguageModelFunction, + LanguageModelMessage, + MessageRole, +) + + +class InitialPlanConfiguration(SystemConfiguration): + model_classification: LanguageModelClassification = UserConfigurable() + system_prompt_template: str = UserConfigurable() + system_info: list[str] = UserConfigurable() + user_prompt_template: str = UserConfigurable() + create_plan_function: dict = UserConfigurable() + + +class InitialPlan(PromptStrategy): + DEFAULT_SYSTEM_PROMPT_TEMPLATE = ( + "You are an expert project planner. Your responsibility is to create work plans for autonomous agents. " + "You will be given a name, a role, and a set of goals for the agent to accomplish. Your job is to " + "break down those goals into a set of tasks that the agent can accomplish to achieve those goals. " + "Agents are resourceful, but require clear instructions. Each task you create should have clearly defined " + "`ready_criteria` that the agent can check to see if the task is ready to be started. Each task should " + "also have clearly defined `acceptance_criteria` that the agent can check to evaluate if the task is complete. " + "You should create as many tasks as you think are necessary to accomplish the goals.\n\n" + "System Info:\n{system_info}" + ) + + DEFAULT_SYSTEM_INFO = [ + "The OS you are running on is: {os_info}", + "It takes money to let you run. Your API budget is ${api_budget:.3f}", + "The current time and date is {current_time}", + ] + + DEFAULT_USER_PROMPT_TEMPLATE = ( + "You are {agent_name}, {agent_role}\n" "Your goals are:\n" "{agent_goals}" + ) + + DEFAULT_CREATE_PLAN_FUNCTION = { + "name": "create_initial_agent_plan", + "description": "Creates a set of tasks that forms the initial plan for an autonomous agent.", + "parameters": { + "type": "object", + "properties": { + "task_list": { + "type": "array", + "items": { + "type": "object", + "properties": { + "objective": { + "type": "string", + "description": "An imperative verb phrase that succinctly describes the task.", + }, + "type": { + "type": "string", + "description": "A categorization for the task. 
", + "enum": [t.value for t in TaskType], + }, + "acceptance_criteria": { + "type": "array", + "items": { + "type": "string", + "description": "A list of measurable and testable criteria that must be met for the task to be considered complete.", + }, + }, + "priority": { + "type": "integer", + "description": "A number between 1 and 10 indicating the priority of the task relative to other generated tasks.", + "minimum": 1, + "maximum": 10, + }, + "ready_criteria": { + "type": "array", + "items": { + "type": "string", + "description": "A list of measurable and testable criteria that must be met before the task can be started.", + }, + }, + }, + "required": [ + "objective", + "type", + "acceptance_criteria", + "priority", + "ready_criteria", + ], + }, + }, + }, + }, + } + + default_configuration = InitialPlanConfiguration( + model_classification=LanguageModelClassification.SMART_MODEL, + system_prompt_template=DEFAULT_SYSTEM_PROMPT_TEMPLATE, + system_info=DEFAULT_SYSTEM_INFO, + user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE, + create_plan_function=DEFAULT_CREATE_PLAN_FUNCTION, + ) + + def __init__( + self, + model_classification: LanguageModelClassification, + system_prompt_template: str, + system_info: list[str], + user_prompt_template: str, + create_plan_function: dict, + ): + self._model_classification = model_classification + self._system_prompt_template = system_prompt_template + self._system_info = system_info + self._user_prompt_template = user_prompt_template + self._create_plan_function = create_plan_function + + @property + def model_classification(self) -> LanguageModelClassification: + return self._model_classification + + def build_prompt( + self, + agent_name: str, + agent_role: str, + agent_goals: list[str], + abilities: list[str], + os_info: str, + api_budget: float, + current_time: str, + **kwargs, + ) -> LanguageModelPrompt: + template_kwargs = { + "agent_name": agent_name, + "agent_role": agent_role, + "os_info": os_info, + "api_budget": api_budget, + "current_time": current_time, + **kwargs, + } + template_kwargs["agent_goals"] = to_numbered_list( + agent_goals, **template_kwargs + ) + template_kwargs["abilities"] = to_numbered_list(abilities, **template_kwargs) + template_kwargs["system_info"] = to_numbered_list( + self._system_info, **template_kwargs + ) + + system_prompt = LanguageModelMessage( + role=MessageRole.SYSTEM, + content=self._system_prompt_template.format(**template_kwargs), + ) + user_prompt = LanguageModelMessage( + role=MessageRole.USER, + content=self._user_prompt_template.format(**template_kwargs), + ) + create_plan_function = LanguageModelFunction( + json_schema=self._create_plan_function, + ) + + return LanguageModelPrompt( + messages=[system_prompt, user_prompt], + functions=[create_plan_function], + # TODO: + tokens_used=0, + ) + + def parse_response_content( + self, + response_content: dict, + ) -> dict: + """Parse the actual text response from the objective model. + + Args: + response_content: The raw response content from the objective model. + + Returns: + The parsed response. 
+ + """ + parsed_response = json_loads(response_content["function_call"]["arguments"]) + parsed_response["task_list"] = [ + Task.parse_obj(task) for task in parsed_response["task_list"] + ] + return parsed_response diff --git a/autogpt/core/planning/strategies/name_and_goals.py b/autogpt/core/planning/strategies/name_and_goals.py new file mode 100644 index 00000000..c4f1e764 --- /dev/null +++ b/autogpt/core/planning/strategies/name_and_goals.py @@ -0,0 +1,139 @@ +from autogpt.core.configuration import SystemConfiguration, UserConfigurable +from autogpt.core.planning.base import PromptStrategy +from autogpt.core.planning.schema import ( + LanguageModelClassification, + LanguageModelPrompt, +) +from autogpt.core.planning.strategies.utils import json_loads +from autogpt.core.resource.model_providers import ( + LanguageModelFunction, + LanguageModelMessage, + MessageRole, +) + + +class NameAndGoalsConfiguration(SystemConfiguration): + model_classification: LanguageModelClassification = UserConfigurable() + system_prompt: str = UserConfigurable() + user_prompt_template: str = UserConfigurable() + create_agent_function: dict = UserConfigurable() + + +class NameAndGoals(PromptStrategy): + DEFAULT_SYSTEM_PROMPT = ( + "Your job is to respond to a user-defined task by invoking the `create_agent` function " + "to generate an autonomous agent to complete the task. You should supply a role-based " + "name for the agent, an informative description for what the agent does, and 1 to 5 " + "goals that are optimally aligned with the successful completion of its assigned task.\n\n" + "Example Input:\n" + "Help me with marketing my business\n\n" + "Example Function Call:\n" + "create_agent(name='CMOGPT', " + "description='A professional digital marketer AI that assists Solopreneurs in " + "growing their businesses by providing world-class expertise in solving " + "marketing problems for SaaS, content products, agencies, and more.', " + "goals=['Engage in effective problem-solving, prioritization, planning, and " + "supporting execution to address your marketing needs as your virtual Chief " + "Marketing Officer.', 'Provide specific, actionable, and concise advice to " + "help you make informed decisions without the use of platitudes or overly " + "wordy explanations.', 'Identify and prioritize quick wins and cost-effective " + "campaigns that maximize results with minimal time and budget investment.', " + "'Proactively take the lead in guiding you and offering suggestions when faced " + "with unclear information or uncertainty to ensure your marketing strategy " + "remains on track.'])" + ) + + DEFAULT_USER_PROMPT_TEMPLATE = "'{user_objective}'" + + DEFAULT_CREATE_AGENT_FUNCTION = { + "name": "create_agent", + "description": ("Create a new autonomous AI agent to complete a given task."), + "parameters": { + "type": "object", + "properties": { + "agent_name": { + "type": "string", + "description": "A short role-based name for an autonomous agent.", + }, + "agent_role": { + "type": "string", + "description": "An informative one sentence description of what the AI agent does", + }, + "agent_goals": { + "type": "array", + "minItems": 1, + "maxItems": 5, + "items": { + "type": "string", + }, + "description": ( + "One to five highly effective goals that are optimally aligned with the completion of a " + "specific task. The number and complexity of the goals should correspond to the " + "complexity of the agent's primary objective." 
+ ), + }, + }, + "required": ["agent_name", "agent_role", "agent_goals"], + }, + } + + default_configuration = NameAndGoalsConfiguration( + model_classification=LanguageModelClassification.SMART_MODEL, + system_prompt=DEFAULT_SYSTEM_PROMPT, + user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE, + create_agent_function=DEFAULT_CREATE_AGENT_FUNCTION, + ) + + def __init__( + self, + model_classification: LanguageModelClassification, + system_prompt: str, + user_prompt_template: str, + create_agent_function: dict, + ): + self._model_classification = model_classification + self._system_prompt_message = system_prompt + self._user_prompt_template = user_prompt_template + self._create_agent_function = create_agent_function + + @property + def model_classification(self) -> LanguageModelClassification: + return self._model_classification + + def build_prompt(self, user_objective: str = "", **kwargs) -> LanguageModelPrompt: + system_message = LanguageModelMessage( + role=MessageRole.SYSTEM, + content=self._system_prompt_message, + ) + user_message = LanguageModelMessage( + role=MessageRole.USER, + content=self._user_prompt_template.format( + user_objective=user_objective, + ), + ) + create_agent_function = LanguageModelFunction( + json_schema=self._create_agent_function, + ) + prompt = LanguageModelPrompt( + messages=[system_message, user_message], + functions=[create_agent_function], + # TODO + tokens_used=0, + ) + return prompt + + def parse_response_content( + self, + response_content: dict, + ) -> dict: + """Parse the actual text response from the objective model. + + Args: + response_content: The raw response content from the objective model. + + Returns: + The parsed response. + + """ + parsed_response = json_loads(response_content["function_call"]["arguments"]) + return parsed_response diff --git a/autogpt/core/planning/strategies/next_ability.py b/autogpt/core/planning/strategies/next_ability.py new file mode 100644 index 00000000..70ea458a --- /dev/null +++ b/autogpt/core/planning/strategies/next_ability.py @@ -0,0 +1,183 @@ +from autogpt.core.configuration import SystemConfiguration, UserConfigurable +from autogpt.core.planning.base import PromptStrategy +from autogpt.core.planning.schema import ( + LanguageModelClassification, + LanguageModelPrompt, + Task, +) +from autogpt.core.planning.strategies.utils import json_loads, to_numbered_list +from autogpt.core.resource.model_providers import ( + LanguageModelFunction, + LanguageModelMessage, + MessageRole, +) + + +class NextAbilityConfiguration(SystemConfiguration): + model_classification: LanguageModelClassification = UserConfigurable() + system_prompt_template: str = UserConfigurable() + system_info: list[str] = UserConfigurable() + user_prompt_template: str = UserConfigurable() + additional_ability_arguments: dict = UserConfigurable() + + +class NextAbility(PromptStrategy): + DEFAULT_SYSTEM_PROMPT_TEMPLATE = "System Info:\n{system_info}" + + DEFAULT_SYSTEM_INFO = [ + "The OS you are running on is: {os_info}", + "It takes money to let you run. Your API budget is ${api_budget:.3f}", + "The current time and date is {current_time}", + ] + + DEFAULT_USER_PROMPT_TEMPLATE = ( + "Your current task is {task_objective}.\n" + "You have taken {cycle_count} actions on this task already. 
" + "Here is the actions you have taken and their results:\n" + "{action_history}\n\n" + "Here is additional information that may be useful to you:\n" + "{additional_info}\n\n" + "Additionally, you should consider the following:\n" + "{user_input}\n\n" + "Your task of {task_objective} is complete when the following acceptance criteria have been met:\n" + "{acceptance_criteria}\n\n" + "Please choose one of the provided functions to accomplish this task. " + "Some tasks may require multiple functions to accomplish. If that is the case, choose the function that " + "you think is most appropriate for the current situation given your progress so far." + ) + + DEFAULT_ADDITIONAL_ABILITY_ARGUMENTS = { + "motivation": { + "type": "string", + "description": "Your justification for choosing choosing this function instead of a different one.", + }, + "self_criticism": { + "type": "string", + "description": "Thoughtful self-criticism that explains why this function may not be the best choice.", + }, + "reasoning": { + "type": "string", + "description": "Your reasoning for choosing this function taking into account the `motivation` and weighing the `self_criticism`.", + }, + } + + default_configuration = NextAbilityConfiguration( + model_classification=LanguageModelClassification.SMART_MODEL, + system_prompt_template=DEFAULT_SYSTEM_PROMPT_TEMPLATE, + system_info=DEFAULT_SYSTEM_INFO, + user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE, + additional_ability_arguments=DEFAULT_ADDITIONAL_ABILITY_ARGUMENTS, + ) + + def __init__( + self, + model_classification: LanguageModelClassification, + system_prompt_template: str, + system_info: list[str], + user_prompt_template: str, + additional_ability_arguments: dict, + ): + self._model_classification = model_classification + self._system_prompt_template = system_prompt_template + self._system_info = system_info + self._user_prompt_template = user_prompt_template + self._additional_ability_arguments = additional_ability_arguments + + @property + def model_classification(self) -> LanguageModelClassification: + return self._model_classification + + def build_prompt( + self, + task: Task, + ability_schema: list[dict], + os_info: str, + api_budget: float, + current_time: str, + **kwargs, + ) -> LanguageModelPrompt: + template_kwargs = { + "os_info": os_info, + "api_budget": api_budget, + "current_time": current_time, + **kwargs, + } + + for ability in ability_schema: + ability["parameters"]["properties"].update( + self._additional_ability_arguments + ) + ability["parameters"]["required"] += list( + self._additional_ability_arguments.keys() + ) + + template_kwargs["task_objective"] = task.objective + template_kwargs["cycle_count"] = task.context.cycle_count + template_kwargs["action_history"] = to_numbered_list( + [action.summary() for action in task.context.prior_actions], + no_items_response="You have not taken any actions yet.", + **template_kwargs, + ) + template_kwargs["additional_info"] = to_numbered_list( + [memory.summary() for memory in task.context.memories] + + [info.summary() for info in task.context.supplementary_info], + no_items_response="There is no additional information available at this time.", + **template_kwargs, + ) + template_kwargs["user_input"] = to_numbered_list( + [user_input.summary() for user_input in task.context.user_input], + no_items_response="There are no additional considerations at this time.", + **template_kwargs, + ) + template_kwargs["acceptance_criteria"] = to_numbered_list( + [acceptance_criteria for acceptance_criteria in 
task.acceptance_criteria], + **template_kwargs, + ) + + template_kwargs["system_info"] = to_numbered_list( + self._system_info, + **template_kwargs, + ) + + system_prompt = LanguageModelMessage( + role=MessageRole.SYSTEM, + content=self._system_prompt_template.format(**template_kwargs), + ) + user_prompt = LanguageModelMessage( + role=MessageRole.USER, + content=self._user_prompt_template.format(**template_kwargs), + ) + functions = [ + LanguageModelFunction(json_schema=ability) for ability in ability_schema + ] + + return LanguageModelPrompt( + messages=[system_prompt, user_prompt], + functions=functions, + # TODO: + tokens_used=0, + ) + + def parse_response_content( + self, + response_content: dict, + ) -> dict: + """Parse the actual text response from the objective model. + + Args: + response_content: The raw response content from the objective model. + + Returns: + The parsed response. + + """ + function_name = response_content["function_call"]["name"] + function_arguments = json_loads(response_content["function_call"]["arguments"]) + parsed_response = { + "motivation": function_arguments.pop("motivation"), + "self_criticism": function_arguments.pop("self_criticism"), + "reasoning": function_arguments.pop("reasoning"), + "next_ability": function_name, + "ability_arguments": function_arguments, + } + return parsed_response diff --git a/autogpt/core/planning/strategies/utils.py b/autogpt/core/planning/strategies/utils.py new file mode 100644 index 00000000..5a725903 --- /dev/null +++ b/autogpt/core/planning/strategies/utils.py @@ -0,0 +1,27 @@ +import ast +import json + + +def to_numbered_list( + items: list[str], no_items_response: str = "", **template_args +) -> str: + if items: + return "\n".join( + f"{i+1}. {item.format(**template_args)}" for i, item in enumerate(items) + ) + else: + return no_items_response + + +def json_loads(json_str: str): + # TODO: this is a hack function for now. Trying to see what errors show up in testing. + # Can hopefully just replace with a call to ast.literal_eval (the function api still + # sometimes returns json strings with minor issues like trailing commas). + try: + return json.loads(json_str) + except json.decoder.JSONDecodeError as e: + try: + print(f"json decode error {e}. trying literal eval") + return ast.literal_eval(json_str) + except Exception: + breakpoint() diff --git a/autogpt/core/planning/templates.py b/autogpt/core/planning/templates.py new file mode 100644 index 00000000..e28f2ed7 --- /dev/null +++ b/autogpt/core/planning/templates.py @@ -0,0 +1,102 @@ +# Rules of thumb: +# - Templates don't add new lines at the end of the string. This is the +# responsibility of the consumer or a consuming template. 
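For orientation, here is a minimal sketch (not part of the patch) of how the `to_numbered_list` helper from `strategies/utils.py` above behaves; the goal strings are illustrative only:

```python
from autogpt.core.planning.strategies.utils import to_numbered_list

# Items are numbered and formatted with the supplied template kwargs.
goals = ["Research the project", "Write notes to {filename}"]
print(to_numbered_list(goals, filename="notes.md"))
# 1. Research the project
# 2. Write notes to notes.md

# An empty list falls back to the no-items response.
print(to_numbered_list([], no_items_response="You have not taken any actions yet."))
# You have not taken any actions yet.
```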
+ +#################### +# Planner defaults # +#################### + + +USER_OBJECTIVE = ( + "Write a wikipedia style article about the project: " + "https://github.com/significant-gravitas/Auto-GPT" +) + + +ABILITIES = ( + 'analyze_code: Analyze Code, args: "code": ""', + 'execute_python_file: Execute Python File, args: "filename": ""', + 'append_to_file: Append to file, args: "filename": "", "text": ""', + 'delete_file: Delete file, args: "filename": ""', + 'list_files: List Files in Directory, args: "directory": ""', + 'read_file: Read a file, args: "filename": ""', + 'write_to_file: Write to file, args: "filename": "", "text": ""', + 'google: Google Search, args: "query": ""', + 'improve_code: Get Improved Code, args: "suggestions": "", "code": ""', + 'browse_website: Browse Website, args: "url": "", "question": ""', + 'write_tests: Write Tests, args: "code": "", "focus": ""', + 'get_hyperlinks: Get hyperlinks, args: "url": ""', + 'get_text_summary: Get text summary, args: "url": "", "question": ""', + 'task_complete: Task Complete (Shutdown), args: "reason": ""', +) + + +# Plan Prompt +# ----------- + + +PLAN_PROMPT_CONSTRAINTS = ( + "~4000 word limit for short term memory. Your short term memory is short, so " + "immediately save important information to files.", + "If you are unsure how you previously did something or want to recall past " + "events, thinking about similar events will help you remember.", + "No user assistance", + "Exclusively use the commands listed below e.g. command_name", +) + +PLAN_PROMPT_RESOURCES = ( + "Internet access for searches and information gathering.", + "Long-term memory management.", + "File output.", +) + +PLAN_PROMPT_PERFORMANCE_EVALUATIONS = ( + "Continuously review and analyze your actions to ensure you are performing to" + " the best of your abilities.", + "Constructively self-criticize your big-picture behavior constantly.", + "Reflect on past decisions and strategies to refine your approach.", + "Every command has a cost, so be smart and efficient. 
Aim to complete tasks in" + " the least number of steps.", + "Write all code to a file", +) + + +PLAN_PROMPT_RESPONSE_DICT = { + "thoughts": { + "text": "thought", + "reasoning": "reasoning", + "plan": "- short bulleted\n- list that conveys\n- long-term plan", + "criticism": "constructive self-criticism", + "speak": "thoughts summary to say to user", + }, + "command": {"name": "command name", "args": {"arg name": "value"}}, +} + +PLAN_PROMPT_RESPONSE_FORMAT = ( + "You should only respond in JSON format as described below\n" + "Response Format:\n" + "{response_json_structure}\n" + "Ensure the response can be parsed by Python json.loads" +) + +PLAN_TRIGGERING_PROMPT = ( + "Determine which next command to use, and respond using the format specified above:" +) + +PLAN_PROMPT_MAIN = ( + "{header}\n\n" + "GOALS:\n\n{goals}\n\n" + "Info:\n{info}\n\n" + "Constraints:\n{constraints}\n\n" + "Commands:\n{commands}\n\n" + "Resources:\n{resources}\n\n" + "Performance Evaluations:\n{performance_evaluations}\n\n" + "You should only respond in JSON format as described below\n" + "Response Format:\n{response_json_structure}\n" + "Ensure the response can be parsed by Python json.loads" +) + + +########################### +# Parameterized templates # +########################### diff --git a/autogpt/core/plugin/__init__.py b/autogpt/core/plugin/__init__.py new file mode 100644 index 00000000..dfa19259 --- /dev/null +++ b/autogpt/core/plugin/__init__.py @@ -0,0 +1,2 @@ +"""The plugin system allows the Agent to be extended with new functionality.""" +from autogpt.core.plugin.base import PluginService diff --git a/autogpt/core/plugin/base.py b/autogpt/core/plugin/base.py new file mode 100644 index 00000000..bbd99ad8 --- /dev/null +++ b/autogpt/core/plugin/base.py @@ -0,0 +1,155 @@ +import abc +import enum +from typing import TYPE_CHECKING, Type + +from pydantic import BaseModel + +from autogpt.core.configuration import SystemConfiguration, UserConfigurable + +if TYPE_CHECKING: + from autogpt.core.ability import Ability, AbilityRegistry + from autogpt.core.memory import Memory + from autogpt.core.resource.model_providers import ( + EmbeddingModelProvider, + LanguageModelProvider, + ) + + # Expand to other types as needed + PluginType = ( + Type[Ability] # Swappable now + | Type[AbilityRegistry] # Swappable maybe never + | Type[LanguageModelProvider] # Swappable soon + | Type[EmbeddingModelProvider] # Swappable soon + | Type[Memory] # Swappable now + # | Type[Planner] # Swappable soon + ) + + +class PluginStorageFormat(str, enum.Enum): + """Supported plugin storage formats. + + Plugins can be stored at one of these supported locations. + + """ + + INSTALLED_PACKAGE = "installed_package" # Required now, loads system defaults + WORKSPACE = "workspace" # Required now + # OPENAPI_URL = "open_api_url" # Soon (requires some tooling we don't have yet). + # OTHER_FILE_PATH = "other_file_path" # Maybe later (maybe now) + # GIT = "git" # Maybe later (or soon) + # PYPI = "pypi" # Maybe later + # AUTOGPT_PLUGIN_SERVICE = "autogpt_plugin_service" # Long term solution, requires design + # AUTO = "auto" # Feature for later maybe, automatically find plugin. 
+ + +# Installed package example +# PluginLocation( +# storage_format='installed_package', +# storage_route='autogpt_plugins.twitter.SendTwitterMessage' +# ) +# Workspace example +# PluginLocation( +# storage_format='workspace', +# storage_route='relative/path/to/plugin.pkl' +# OR +# storage_route='relative/path/to/plugin.py' +# ) +# Git +# PluginLocation( +# storage_format='git', +# Exact format TBD. +# storage_route='https://github.com/gravelBridge/AutoGPT-WolframAlpha/blob/main/autogpt-wolframalpha/wolfram_alpha.py' +# ) +# PyPI +# PluginLocation( +# storage_format='pypi', +# storage_route='package_name' +# ) + + +# PluginLocation( +# storage_format='installed_package', +# storage_route='autogpt_plugins.twitter.SendTwitterMessage' +# ) + + +# A plugin storage route. +# +# This is a string that specifies where to load a plugin from +# (e.g. an import path or file path). +PluginStorageRoute = str + + +class PluginLocation(SystemConfiguration): + """A plugin location. + + This is a combination of a plugin storage format and a plugin storage route. + It is used by the PluginService to load plugins. + + """ + + storage_format: PluginStorageFormat = UserConfigurable() + storage_route: PluginStorageRoute = UserConfigurable() + + +class PluginMetadata(BaseModel): + """Metadata about a plugin.""" + + name: str + description: str + location: PluginLocation + + +class PluginService(abc.ABC): + """Base class for plugin service. + + The plugin service should be stateless. This defines the interface for + loading plugins from various storage formats. + + """ + + @staticmethod + @abc.abstractmethod + def get_plugin(plugin_location: PluginLocation) -> "PluginType": + """Get a plugin from a plugin location.""" + ... + + #################################### + # Low-level storage format loaders # + #################################### + @staticmethod + @abc.abstractmethod + def load_from_file_path(plugin_route: PluginStorageRoute) -> "PluginType": + """Load a plugin from a file path.""" + + ... + + @staticmethod + @abc.abstractmethod + def load_from_import_path(plugin_route: PluginStorageRoute) -> "PluginType": + """Load a plugin from an import path.""" + ... + + @staticmethod + @abc.abstractmethod + def resolve_name_to_path( + plugin_route: PluginStorageRoute, path_type: str + ) -> PluginStorageRoute: + """Resolve a plugin name to a plugin path.""" + ... + + ##################################### + # High-level storage format loaders # + ##################################### + + @staticmethod + @abc.abstractmethod + def load_from_workspace(plugin_route: PluginStorageRoute) -> "PluginType": + """Load a plugin from the workspace.""" + ... + + @staticmethod + @abc.abstractmethod + def load_from_installed_package(plugin_route: PluginStorageRoute) -> "PluginType": + """Load a plugin from an installed package.""" + ... 
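As a rough usage sketch (not part of the patch), a `PluginLocation` resolves to a class through the `SimplePluginService` implemented below; the `SimpleMemory` route is just one example of an importable class introduced by this patch:

```python
from autogpt.core.plugin.base import PluginLocation, PluginStorageFormat
from autogpt.core.plugin.simple import SimplePluginService

location = PluginLocation(
    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
    storage_route="autogpt.core.memory.simple.SimpleMemory",
)
# get_plugin dispatches on storage_format; INSTALLED_PACKAGE routes to
# load_from_import_path, which imports the module and returns the class.
memory_class = SimplePluginService.get_plugin(location)
```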
diff --git a/autogpt/core/plugin/simple.py b/autogpt/core/plugin/simple.py new file mode 100644 index 00000000..aacf84af --- /dev/null +++ b/autogpt/core/plugin/simple.py @@ -0,0 +1,74 @@ +from importlib import import_module +from typing import TYPE_CHECKING + +from autogpt.core.plugin.base import ( + PluginLocation, + PluginService, + PluginStorageFormat, + PluginStorageRoute, +) + +if TYPE_CHECKING: + from autogpt.core.plugin.base import PluginType + + +class SimplePluginService(PluginService): + @staticmethod + def get_plugin(plugin_location: dict | PluginLocation) -> "PluginType": + """Get a plugin from a plugin location.""" + if isinstance(plugin_location, dict): + plugin_location = PluginLocation.parse_obj(plugin_location) + if plugin_location.storage_format == PluginStorageFormat.WORKSPACE: + return SimplePluginService.load_from_workspace( + plugin_location.storage_route + ) + elif plugin_location.storage_format == PluginStorageFormat.INSTALLED_PACKAGE: + return SimplePluginService.load_from_installed_package( + plugin_location.storage_route + ) + else: + raise NotImplementedError( + f"Plugin storage format {plugin_location.storage_format} is not implemented." + ) + + #################################### + # Low-level storage format loaders # + #################################### + @staticmethod + def load_from_file_path(plugin_route: PluginStorageRoute) -> "PluginType": + """Load a plugin from a file path.""" + # TODO: Define an on disk storage format and implement this. + # Can pull from existing zip file loading implementation + raise NotImplementedError("Loading from file path is not implemented.") + + @staticmethod + def load_from_import_path(plugin_route: PluginStorageRoute) -> "PluginType": + """Load a plugin from an import path.""" + module_path, _, class_name = plugin_route.rpartition(".") + return getattr(import_module(module_path), class_name) + + @staticmethod + def resolve_name_to_path( + plugin_route: PluginStorageRoute, path_type: str + ) -> PluginStorageRoute: + """Resolve a plugin name to a plugin path.""" + # TODO: Implement a discovery system for finding plugins by name from known + # storage locations. E.g. if we know that path_type is a file path, we can + # search the workspace for it. If it's an import path, we can check the core + # system and the auto_gpt_plugins package. 
+ raise NotImplemented("Resolving plugin name to path is not implemented.") + + ##################################### + # High-level storage format loaders # + ##################################### + + @staticmethod + def load_from_workspace(plugin_route: PluginStorageRoute) -> "PluginType": + """Load a plugin from the workspace.""" + plugin = SimplePluginService.load_from_file_path(plugin_route) + return plugin + + @staticmethod + def load_from_installed_package(plugin_route: PluginStorageRoute) -> "PluginType": + plugin = SimplePluginService.load_from_import_path(plugin_route) + return plugin diff --git a/autogpt/core/resource/__init__.py b/autogpt/core/resource/__init__.py new file mode 100644 index 00000000..12576c20 --- /dev/null +++ b/autogpt/core/resource/__init__.py @@ -0,0 +1,7 @@ +from autogpt.core.resource.schema import ( + ProviderBudget, + ProviderCredentials, + ProviderSettings, + ProviderUsage, + ResourceType, +) diff --git a/autogpt/core/resource/model_providers/__init__.py b/autogpt/core/resource/model_providers/__init__.py new file mode 100644 index 00000000..ac9545d6 --- /dev/null +++ b/autogpt/core/resource/model_providers/__init__.py @@ -0,0 +1,44 @@ +from autogpt.core.resource.model_providers.openai import ( + OPEN_AI_MODELS, + OpenAIModelName, + OpenAIProvider, + OpenAISettings, +) +from autogpt.core.resource.model_providers.schema import ( + Embedding, + EmbeddingModelProvider, + EmbeddingModelProviderModelInfo, + EmbeddingModelProviderModelResponse, + LanguageModelFunction, + LanguageModelMessage, + LanguageModelProvider, + LanguageModelProviderModelInfo, + LanguageModelProviderModelResponse, + MessageRole, + ModelProvider, + ModelProviderBudget, + ModelProviderCredentials, + ModelProviderModelInfo, + ModelProviderModelResponse, + ModelProviderName, + ModelProviderService, + ModelProviderSettings, + ModelProviderUsage, +) + +__all__ = [ + "ModelProvider", + "ModelProviderName", + "ModelProviderSettings", + "EmbeddingModelProvider", + "EmbeddingModelProviderModelResponse", + "LanguageModelProvider", + "LanguageModelProviderModelResponse", + "LanguageModelFunction", + "LanguageModelMessage", + "MessageRole", + "OpenAIModelName", + "OPEN_AI_MODELS", + "OpenAIProvider", + "OpenAISettings", +] diff --git a/autogpt/core/resource/model_providers/openai.py b/autogpt/core/resource/model_providers/openai.py new file mode 100644 index 00000000..3707796a --- /dev/null +++ b/autogpt/core/resource/model_providers/openai.py @@ -0,0 +1,373 @@ +import enum +import functools +import logging +import math +import time +from typing import Callable, ParamSpec, TypeVar + +import openai +from openai.error import APIError, RateLimitError + +from autogpt.core.configuration import ( + Configurable, + SystemConfiguration, + UserConfigurable, +) +from autogpt.core.resource.model_providers.schema import ( + Embedding, + EmbeddingModelProvider, + EmbeddingModelProviderModelInfo, + EmbeddingModelProviderModelResponse, + LanguageModelFunction, + LanguageModelMessage, + LanguageModelProvider, + LanguageModelProviderModelInfo, + LanguageModelProviderModelResponse, + ModelProviderBudget, + ModelProviderCredentials, + ModelProviderName, + ModelProviderService, + ModelProviderSettings, + ModelProviderUsage, +) + +OpenAIEmbeddingParser = Callable[[Embedding], Embedding] +OpenAIChatParser = Callable[[str], dict] + + +class OpenAIModelName(str, enum.Enum): + ADA = "text-embedding-ada-002" + GPT3 = "gpt-3.5-turbo-0613" + GPT3_16K = "gpt-3.5-turbo-16k-0613" + GPT4 = "gpt-4-0613" + GPT4_32K = 
"gpt-4-32k-0613" + + +OPEN_AI_EMBEDDING_MODELS = { + OpenAIModelName.ADA: EmbeddingModelProviderModelInfo( + name=OpenAIModelName.ADA, + service=ModelProviderService.EMBEDDING, + provider_name=ModelProviderName.OPENAI, + prompt_token_cost=0.0004, + completion_token_cost=0.0, + max_tokens=8191, + embedding_dimensions=1536, + ), +} + + +OPEN_AI_LANGUAGE_MODELS = { + OpenAIModelName.GPT3: LanguageModelProviderModelInfo( + name=OpenAIModelName.GPT3, + service=ModelProviderService.LANGUAGE, + provider_name=ModelProviderName.OPENAI, + prompt_token_cost=0.0015, + completion_token_cost=0.002, + max_tokens=4096, + ), + OpenAIModelName.GPT3_16K: LanguageModelProviderModelInfo( + name=OpenAIModelName.GPT3, + service=ModelProviderService.LANGUAGE, + provider_name=ModelProviderName.OPENAI, + prompt_token_cost=0.003, + completion_token_cost=0.002, + max_tokens=16384, + ), + OpenAIModelName.GPT4: LanguageModelProviderModelInfo( + name=OpenAIModelName.GPT4, + service=ModelProviderService.LANGUAGE, + provider_name=ModelProviderName.OPENAI, + prompt_token_cost=0.03, + completion_token_cost=0.06, + max_tokens=8192, + ), + OpenAIModelName.GPT4_32K: LanguageModelProviderModelInfo( + name=OpenAIModelName.GPT4_32K, + service=ModelProviderService.LANGUAGE, + provider_name=ModelProviderName.OPENAI, + prompt_token_cost=0.06, + completion_token_cost=0.12, + max_tokens=32768, + ), +} + + +OPEN_AI_MODELS = { + **OPEN_AI_LANGUAGE_MODELS, + **OPEN_AI_EMBEDDING_MODELS, +} + + +class OpenAIConfiguration(SystemConfiguration): + retries_per_request: int = UserConfigurable() + + +class OpenAIModelProviderBudget(ModelProviderBudget): + graceful_shutdown_threshold: float = UserConfigurable() + warning_threshold: float = UserConfigurable() + + +class OpenAISettings(ModelProviderSettings): + configuration: OpenAIConfiguration + credentials: ModelProviderCredentials() + budget: OpenAIModelProviderBudget + + +class OpenAIProvider( + Configurable, + LanguageModelProvider, + EmbeddingModelProvider, +): + default_settings = OpenAISettings( + name="openai_provider", + description="Provides access to OpenAI's API.", + configuration=OpenAIConfiguration( + retries_per_request=10, + ), + credentials=ModelProviderCredentials(), + budget=OpenAIModelProviderBudget( + total_budget=math.inf, + total_cost=0.0, + remaining_budget=math.inf, + usage=ModelProviderUsage( + prompt_tokens=0, + completion_tokens=0, + total_tokens=0, + ), + graceful_shutdown_threshold=0.005, + warning_threshold=0.01, + ), + ) + + def __init__( + self, + settings: OpenAISettings, + logger: logging.Logger, + ): + self._configuration = settings.configuration + self._credentials = settings.credentials + self._budget = settings.budget + + self._logger = logger + + retry_handler = _OpenAIRetryHandler( + logger=self._logger, + num_retries=self._configuration.retries_per_request, + ) + + self._create_completion = retry_handler(_create_completion) + self._create_embedding = retry_handler(_create_embedding) + + def get_token_limit(self, model_name: str) -> int: + """Get the token limit for a given model.""" + return OPEN_AI_MODELS[model_name].max_tokens + + def get_remaining_budget(self) -> float: + """Get the remaining budget.""" + return self._budget.remaining_budget + + async def create_language_completion( + self, + model_prompt: list[LanguageModelMessage], + functions: list[LanguageModelFunction], + model_name: OpenAIModelName, + completion_parser: Callable[[dict], dict], + **kwargs, + ) -> LanguageModelProviderModelResponse: + """Create a completion using the OpenAI API.""" 
+ completion_kwargs = self._get_completion_kwargs(model_name, functions, **kwargs) + response = await self._create_completion( + messages=model_prompt, + **completion_kwargs, + ) + response_args = { + "model_info": OPEN_AI_LANGUAGE_MODELS[model_name], + "prompt_tokens_used": response.usage.prompt_tokens, + "completion_tokens_used": response.usage.completion_tokens, + } + + parsed_response = completion_parser( + response.choices[0].message.to_dict_recursive() + ) + response = LanguageModelProviderModelResponse( + content=parsed_response, **response_args + ) + self._budget.update_usage_and_cost(response) + return response + + async def create_embedding( + self, + text: str, + model_name: OpenAIModelName, + embedding_parser: Callable[[Embedding], Embedding], + **kwargs, + ) -> EmbeddingModelProviderModelResponse: + """Create an embedding using the OpenAI API.""" + embedding_kwargs = self._get_embedding_kwargs(model_name, **kwargs) + response = await self._create_embedding(text=text, **embedding_kwargs) + + response_args = { + "model_info": OPEN_AI_EMBEDDING_MODELS[model_name], + "prompt_tokens_used": response.usage.prompt_tokens, + "completion_tokens_used": 0, # embedding responses report no completion tokens + } + response = EmbeddingModelProviderModelResponse( + **response_args, + embedding=embedding_parser(response.data[0].embedding), + ) + self._budget.update_usage_and_cost(response) + return response + + def _get_completion_kwargs( + self, + model_name: OpenAIModelName, + functions: list[LanguageModelFunction], + **kwargs, + ) -> dict: + """Get kwargs for completion API call. + + Args: + model_name: The model to use. + kwargs: Keyword arguments to override the default values. + + Returns: + The kwargs for the chat API call. + + """ + completion_kwargs = { + "model": model_name, + **kwargs, + **self._credentials.unmasked(), + } + if functions: + completion_kwargs["functions"] = functions + + return completion_kwargs + + def _get_embedding_kwargs( + self, + model_name: OpenAIModelName, + **kwargs, + ) -> dict: + """Get kwargs for embedding API call. + + Args: + model_name: The model to use. + kwargs: Keyword arguments to override the default values. + + Returns: + The kwargs for the embedding API call. + + """ + embedding_kwargs = { + "model": model_name, + **kwargs, + **self._credentials.unmasked(), + } + + return embedding_kwargs + + def __repr__(self): + return "OpenAIProvider()" + + +async def _create_embedding(text: str, *_, **kwargs) -> openai.Embedding: + """Embed text using the OpenAI API. + + Args: + text str: The text to embed. + model_name str: The name of the model to use. + + Returns: + The embedding response. + """ + return await openai.Embedding.acreate( + input=[text], + **kwargs, + ) + + +async def _create_completion( + messages: list[LanguageModelMessage], *_, **kwargs +) -> openai.Completion: + """Create a chat completion using the OpenAI API. + + Args: + messages: The prompt to use. + + Returns: + The completion. + + """ + messages = [message.dict() for message in messages] + if "functions" in kwargs: + kwargs["functions"] = [function.json_schema for function in kwargs["functions"]] + return await openai.ChatCompletion.acreate( + messages=messages, + **kwargs, + ) + + +_T = TypeVar("_T") +_P = ParamSpec("_P") + + +class _OpenAIRetryHandler: + """Retry Handler for OpenAI API call. + + Args: + num_retries int: Number of retries. Defaults to 10. + backoff_base float: Base for exponential backoff. Defaults to 2. + warn_user bool: Whether to warn the user. Defaults to True. 
+ """ + + _retry_limit_msg = "Error: Reached rate limit, passing..." + _api_key_error_msg = ( + "Please double check that you have setup a PAID OpenAI API Account. You can " + "read more here: https://docs.agpt.co/setup/#getting-an-api-key" + ) + _backoff_msg = "Error: API Bad gateway. Waiting {backoff} seconds..." + + def __init__( + self, + logger: logging.Logger, + num_retries: int = 10, + backoff_base: float = 2.0, + warn_user: bool = True, + ): + self._logger = logger + self._num_retries = num_retries + self._backoff_base = backoff_base + self._warn_user = warn_user + + def _log_rate_limit_error(self) -> None: + self._logger.debug(self._retry_limit_msg) + if self._warn_user: + self._logger.warning(self._api_key_error_msg) + self._warn_user = False + + def _backoff(self, attempt: int) -> None: + backoff = self._backoff_base ** (attempt + 2) + self._logger.debug(self._backoff_msg.format(backoff=backoff)) + time.sleep(backoff) + + def __call__(self, func: Callable[_P, _T]) -> Callable[_P, _T]: + @functools.wraps(func) + async def _wrapped(*args: _P.args, **kwargs: _P.kwargs) -> _T: + num_attempts = self._num_retries + 1 # +1 for the first attempt + for attempt in range(1, num_attempts + 1): + try: + return await func(*args, **kwargs) + + except RateLimitError: + if attempt == num_attempts: + raise + self._log_rate_limit_error() + + except APIError as e: + if (e.http_status != 502) or (attempt == num_attempts): + raise + + self._backoff(attempt) + + return _wrapped diff --git a/autogpt/core/resource/model_providers/schema.py b/autogpt/core/resource/model_providers/schema.py new file mode 100644 index 00000000..266b4c81 --- /dev/null +++ b/autogpt/core/resource/model_providers/schema.py @@ -0,0 +1,219 @@ +import abc +import enum +from typing import Callable, ClassVar + +from pydantic import BaseModel, Field, SecretStr, validator + +from autogpt.core.configuration import UserConfigurable +from autogpt.core.resource.schema import ( + Embedding, + ProviderBudget, + ProviderCredentials, + ProviderSettings, + ProviderUsage, + ResourceType, +) + + +class ModelProviderService(str, enum.Enum): + """A ModelService describes what kind of service the model provides.""" + + EMBEDDING: str = "embedding" + LANGUAGE: str = "language" + TEXT: str = "text" + + +class ModelProviderName(str, enum.Enum): + OPENAI: str = "openai" + + +class MessageRole(str, enum.Enum): + USER = "user" + SYSTEM = "system" + ASSISTANT = "assistant" + + +class LanguageModelMessage(BaseModel): + role: MessageRole + content: str + + +class LanguageModelFunction(BaseModel): + json_schema: dict + + +class ModelProviderModelInfo(BaseModel): + """Struct for model information. + + Would be lovely to eventually get this directly from APIs, but needs to be + scraped from websites for now. 
+ + """ + + name: str + service: ModelProviderService + provider_name: ModelProviderName + prompt_token_cost: float = 0.0 + completion_token_cost: float = 0.0 + + +class ModelProviderModelResponse(BaseModel): + """Standard response struct for a response from a model.""" + + prompt_tokens_used: int + completion_tokens_used: int + model_info: ModelProviderModelInfo + + +class ModelProviderCredentials(ProviderCredentials): + """Credentials for a model provider.""" + + api_key: SecretStr | None = UserConfigurable(default=None) + api_type: SecretStr | None = UserConfigurable(default=None) + api_base: SecretStr | None = UserConfigurable(default=None) + api_version: SecretStr | None = UserConfigurable(default=None) + deployment_id: SecretStr | None = UserConfigurable(default=None) + + def unmasked(self) -> dict: + return unmask(self) + + class Config: + extra = "ignore" + + +def unmask(model: BaseModel): + unmasked_fields = {} + for field_name, field in model.__fields__.items(): + value = getattr(model, field_name) + if isinstance(value, SecretStr): + unmasked_fields[field_name] = value.get_secret_value() + else: + unmasked_fields[field_name] = value + return unmasked_fields + + +class ModelProviderUsage(ProviderUsage): + """Usage for a particular model from a model provider.""" + + completion_tokens: int = 0 + prompt_tokens: int = 0 + total_tokens: int = 0 + + def update_usage( + self, + model_response: ModelProviderModelResponse, + ) -> None: + self.completion_tokens += model_response.completion_tokens_used + self.prompt_tokens += model_response.prompt_tokens_used + self.total_tokens += ( + model_response.completion_tokens_used + model_response.prompt_tokens_used + ) + + +class ModelProviderBudget(ProviderBudget): + total_budget: float = UserConfigurable() + total_cost: float + remaining_budget: float + usage: ModelProviderUsage + + def update_usage_and_cost( + self, + model_response: ModelProviderModelResponse, + ) -> None: + """Update the usage and cost of the provider.""" + model_info = model_response.model_info + self.usage.update_usage(model_response) + incremental_cost = ( + model_response.completion_tokens_used * model_info.completion_token_cost + + model_response.prompt_tokens_used * model_info.prompt_token_cost + ) / 1000.0 + self.total_cost += incremental_cost + self.remaining_budget -= incremental_cost + + +class ModelProviderSettings(ProviderSettings): + resource_type = ResourceType.MODEL + credentials: ModelProviderCredentials + budget: ModelProviderBudget + + +class ModelProvider(abc.ABC): + """A ModelProvider abstracts the details of a particular provider of models.""" + + defaults: ClassVar[ModelProviderSettings] + + @abc.abstractmethod + def get_token_limit(self, model_name: str) -> int: + ... + + @abc.abstractmethod + def get_remaining_budget(self) -> float: + ... 
+
+
+####################
+# Embedding Models #
+####################
+
+
+class EmbeddingModelProviderModelInfo(ModelProviderModelInfo):
+    """Struct for embedding model information."""
+
+    service: ModelProviderService = ModelProviderService.EMBEDDING
+    embedding_dimensions: int
+
+
+class EmbeddingModelProviderModelResponse(ModelProviderModelResponse):
+    """Standard response struct for a response from an embedding model."""
+
+    embedding: Embedding = Field(default_factory=list)
+
+    @validator("completion_tokens_used")
+    @classmethod
+    def _verify_no_completion_tokens_used(cls, v):
+        if v > 0:
+            raise ValueError("Embeddings should not have completion tokens used.")
+        return v
+
+
+class EmbeddingModelProvider(ModelProvider):
+    @abc.abstractmethod
+    async def create_embedding(
+        self,
+        text: str,
+        model_name: str,
+        embedding_parser: Callable[[Embedding], Embedding],
+        **kwargs,
+    ) -> EmbeddingModelProviderModelResponse:
+        ...
+
+
+###################
+# Language Models #
+###################
+
+
+class LanguageModelProviderModelInfo(ModelProviderModelInfo):
+    """Struct for language model information."""
+
+    service: ModelProviderService = ModelProviderService.LANGUAGE
+    max_tokens: int
+
+
+class LanguageModelProviderModelResponse(ModelProviderModelResponse):
+    """Standard response struct for a response from a language model."""
+
+    content: dict = None
+
+
+class LanguageModelProvider(ModelProvider):
+    @abc.abstractmethod
+    async def create_language_completion(
+        self,
+        model_prompt: list[LanguageModelMessage],
+        functions: list[LanguageModelFunction],
+        model_name: str,
+        completion_parser: Callable[[dict], dict],
+        **kwargs,
+    ) -> LanguageModelProviderModelResponse:
+        ...
diff --git a/autogpt/core/resource/schema.py b/autogpt/core/resource/schema.py
new file mode 100644
index 00000000..0f97aedb
--- /dev/null
+++ b/autogpt/core/resource/schema.py
@@ -0,0 +1,57 @@
+import abc
+import enum
+
+from pydantic import SecretBytes, SecretField, SecretStr
+
+from autogpt.core.configuration import (
+    SystemConfiguration,
+    SystemSettings,
+    UserConfigurable,
+)
+
+
+class ResourceType(str, enum.Enum):
+    """An enumeration of resource types."""
+
+    MODEL = "model"
+    MEMORY = "memory"
+
+
+class ProviderUsage(SystemConfiguration, abc.ABC):
+    @abc.abstractmethod
+    def update_usage(self, *args, **kwargs) -> None:
+        """Update the usage of the resource."""
+        ...
+
+
+class ProviderBudget(SystemConfiguration):
+    total_budget: float = UserConfigurable()
+    total_cost: float
+    remaining_budget: float
+    usage: ProviderUsage
+
+    @abc.abstractmethod
+    def update_usage_and_cost(self, *args, **kwargs) -> None:
+        """Update the usage and cost of the resource."""
+        ...
+
+
+class ProviderCredentials(SystemConfiguration):
+    """Struct for credentials."""
+
+    class Config:
+        json_encoders = {
+            SecretStr: lambda v: v.get_secret_value() if v else None,
+            SecretBytes: lambda v: v.get_secret_value() if v else None,
+            SecretField: lambda v: v.get_secret_value() if v else None,
+        }
+
+
+class ProviderSettings(SystemSettings):
+    resource_type: ResourceType
+    credentials: ProviderCredentials | None = None
+    budget: ProviderBudget | None = None
+
+
+# Used both by model providers and memory providers
+Embedding = list[float]
diff --git a/autogpt/core/runner/__init__.py b/autogpt/core/runner/__init__.py
new file mode 100644
index 00000000..25c7b650
--- /dev/null
+++ b/autogpt/core/runner/__init__.py
@@ -0,0 +1,3 @@
+"""
+This module contains the runner for the v2 agent server and client.
+""" diff --git a/autogpt/core/runner/cli_app/__init__.py b/autogpt/core/runner/cli_app/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/autogpt/core/runner/cli_app/cli.py b/autogpt/core/runner/cli_app/cli.py new file mode 100644 index 00000000..8d33c560 --- /dev/null +++ b/autogpt/core/runner/cli_app/cli.py @@ -0,0 +1,49 @@ +from pathlib import Path + +import click +import yaml + +from autogpt.core.runner.cli_app.main import run_auto_gpt +from autogpt.core.runner.client_lib.shared_click_commands import ( + DEFAULT_SETTINGS_FILE, + make_settings, + status, +) +from autogpt.core.runner.client_lib.utils import coroutine, handle_exceptions + + +@click.group() +def autogpt(): + """Temporary command group for v2 commands.""" + pass + + +autogpt.add_command(make_settings) +autogpt.add_command(status) + + +@autogpt.command() +@click.option( + "--settings-file", + type=click.Path(), + default=DEFAULT_SETTINGS_FILE, +) +@click.option( + "--pdb", + is_flag=True, + help="Drop into a debugger if an error is raised.", +) +@coroutine +async def run(settings_file: str, pdb: bool) -> None: + """Run the Auto-GPT agent.""" + click.echo("Running Auto-GPT agent...") + settings_file = Path(settings_file) + settings = {} + if settings_file.exists(): + settings = yaml.safe_load(settings_file.read_text()) + main = handle_exceptions(run_auto_gpt, with_debugger=pdb) + await main(settings) + + +if __name__ == "__main__": + autogpt() diff --git a/autogpt/core/runner/cli_app/main.py b/autogpt/core/runner/cli_app/main.py new file mode 100644 index 00000000..a8ce6d7f --- /dev/null +++ b/autogpt/core/runner/cli_app/main.py @@ -0,0 +1,108 @@ +import click + +from autogpt.core.agent import AgentSettings, SimpleAgent +from autogpt.core.runner.client_lib.logging import get_client_logger + + +async def run_auto_gpt(user_configuration: dict): + """Run the Auto-GPT CLI client.""" + + client_logger = get_client_logger() + client_logger.debug("Getting agent settings") + + agent_workspace = ( + user_configuration.get("workspace", {}).get("configuration", {}).get("root", "") + ) + + if not agent_workspace: # We don't have an agent yet. + ################# + # Bootstrapping # + ################# + # Step 1. Collate the user's settings with the default system settings. + agent_settings: AgentSettings = SimpleAgent.compile_settings( + client_logger, + user_configuration, + ) + + # Step 2. Get a name and goals for the agent. + # First we need to figure out what the user wants to do with the agent. + # We'll do this by asking the user for a prompt. + user_objective = click.prompt("What do you want Auto-GPT to do?") + # Ask a language model to determine a name and goals for a suitable agent. + name_and_goals = await SimpleAgent.determine_agent_name_and_goals( + user_objective, + agent_settings, + client_logger, + ) + print(parse_agent_name_and_goals(name_and_goals)) + # Finally, update the agent settings with the name and goals. + agent_settings.update_agent_name_and_goals(name_and_goals) + + # Step 3. Provision the agent. 
+ agent_workspace = SimpleAgent.provision_agent(agent_settings, client_logger) + print("agent is provisioned") + + # launch agent interaction loop + agent = SimpleAgent.from_workspace( + agent_workspace, + client_logger, + ) + print("agent is loaded") + + plan = await agent.build_initial_plan() + print(parse_agent_plan(plan)) + + while True: + current_task, next_ability = await agent.determine_next_ability(plan) + print(parse_next_ability(current_task, next_ability)) + user_input = click.prompt( + "Should the agent proceed with this ability?", + default="y", + ) + ability_result = await agent.execute_next_ability(user_input) + print(parse_ability_result(ability_result)) + + +def parse_agent_name_and_goals(name_and_goals: dict) -> str: + parsed_response = f"Agent Name: {name_and_goals['agent_name']}\n" + parsed_response += f"Agent Role: {name_and_goals['agent_role']}\n" + parsed_response += "Agent Goals:\n" + for i, goal in enumerate(name_and_goals["agent_goals"]): + parsed_response += f"{i+1}. {goal}\n" + return parsed_response + + +def parse_agent_plan(plan: dict) -> str: + parsed_response = f"Agent Plan:\n" + for i, task in enumerate(plan["task_list"]): + parsed_response += f"{i+1}. {task['objective']}\n" + parsed_response += f"Task type: {task['type']} " + parsed_response += f"Priority: {task['priority']}\n" + parsed_response += f"Ready Criteria:\n" + for j, criteria in enumerate(task["ready_criteria"]): + parsed_response += f" {j+1}. {criteria}\n" + parsed_response += f"Acceptance Criteria:\n" + for j, criteria in enumerate(task["acceptance_criteria"]): + parsed_response += f" {j+1}. {criteria}\n" + parsed_response += "\n" + + return parsed_response + + +def parse_next_ability(current_task, next_ability: dict) -> str: + parsed_response = f"Current Task: {current_task.objective}\n" + ability_args = ", ".join( + f"{k}={v}" for k, v in next_ability["ability_arguments"].items() + ) + parsed_response += f"Next Ability: {next_ability['next_ability']}({ability_args})\n" + parsed_response += f"Motivation: {next_ability['motivation']}\n" + parsed_response += f"Self-criticism: {next_ability['self_criticism']}\n" + parsed_response += f"Reasoning: {next_ability['reasoning']}\n" + return parsed_response + + +def parse_ability_result(ability_result) -> str: + parsed_response = f"Ability Result: {ability_result['success']}\n" + parsed_response += f"Message: {ability_result['message']}\n" + parsed_response += f"Data: {ability_result['data']}\n" + return parsed_response diff --git a/autogpt/core/runner/cli_web_app/__init__.py b/autogpt/core/runner/cli_web_app/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/autogpt/core/runner/cli_web_app/cli.py b/autogpt/core/runner/cli_web_app/cli.py new file mode 100644 index 00000000..6600b8e1 --- /dev/null +++ b/autogpt/core/runner/cli_web_app/cli.py @@ -0,0 +1,101 @@ +import contextlib +import pathlib +import shlex +import subprocess +import sys +import time + +import click +import requests +import uvicorn +import yaml + +from autogpt.core.runner.client_lib.shared_click_commands import ( + DEFAULT_SETTINGS_FILE, + make_settings, + status, +) +from autogpt.core.runner.client_lib.utils import coroutine + + +@click.group() +def autogpt(): + """Temporary command group for v2 commands.""" + pass + + +autogpt.add_command(make_settings) +autogpt.add_command(status) + + +@autogpt.command() +@click.option( + "host", + "--host", + default="localhost", + help="The host for the webserver.", + type=click.STRING, +) +@click.option( + "port", + "--port", + 
default=8080, + help="The port of the webserver.", + type=click.INT, +) +def server(host: str, port: int) -> None: + """Run the Auto-GPT runner httpserver.""" + click.echo("Running Auto-GPT runner httpserver...") + uvicorn.run( + "autogpt.core.runner.cli_web_app.server.api:app", + workers=1, + host=host, + port=port, + reload=True, + ) + + +@autogpt.command() +@click.option( + "--settings-file", + type=click.Path(), + default=DEFAULT_SETTINGS_FILE, +) +@coroutine +async def client(settings_file) -> None: + """Run the Auto-GPT runner client.""" + settings_file = pathlib.Path(settings_file) + settings = {} + if settings_file.exists(): + settings = yaml.safe_load(settings_file.read_text()) + + from autogpt.core.runner.cli_web_app.client.client import run + + with autogpt_server(): + run() + + +@contextlib.contextmanager +def autogpt_server(): + host = "localhost" + port = 8080 + cmd = shlex.split( + f"{sys.executable} autogpt/core/runner/cli_web_app/cli.py server --host {host} --port {port}" + ) + server_process = subprocess.Popen( + args=cmd, + ) + started = False + + while not started: + try: + requests.get(f"http://{host}:{port}") + started = True + except requests.exceptions.ConnectionError: + time.sleep(0.2) + yield server_process + server_process.terminate() + + +if __name__ == "__main__": + autogpt() diff --git a/autogpt/core/runner/cli_web_app/client/__init__.py b/autogpt/core/runner/cli_web_app/client/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/autogpt/core/runner/cli_web_app/client/client.py b/autogpt/core/runner/cli_web_app/client/client.py new file mode 100644 index 00000000..346203f7 --- /dev/null +++ b/autogpt/core/runner/cli_web_app/client/client.py @@ -0,0 +1,16 @@ +import json + +import requests + + +def run(): + body = json.dumps( + {"ai_name": "HelloBot", "ai_role": "test", "ai_goals": ["goal1", "goal2"]} + ) + + header = {"Content-Type": "application/json", "openai_api_key": "asdf"} + print("Sending: ", header, body) + response = requests.post( + "http://localhost:8080/api/v1/agents", data=body, headers=header + ) + print(response.content.decode("utf-8")) diff --git a/autogpt/core/runner/cli_web_app/server/__init__.py b/autogpt/core/runner/cli_web_app/server/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/autogpt/core/runner/cli_web_app/server/api.py b/autogpt/core/runner/cli_web_app/server/api.py new file mode 100644 index 00000000..01c50b06 --- /dev/null +++ b/autogpt/core/runner/cli_web_app/server/api.py @@ -0,0 +1,48 @@ +import uuid + +from fastapi import APIRouter, FastAPI, Request + +from autogpt.core.runner.cli_web_app.server.schema import InteractRequestBody + +router = APIRouter() + + +@router.post("/agents") +async def create_agent(request: Request): + """Create a new agent.""" + agent_id = uuid.uuid4().hex + return {"agent_id": agent_id} + + +@router.post("/agents/{agent_id}") +async def interact(request: Request, agent_id: str, body: InteractRequestBody): + """Interact with an agent.""" + + # check headers + + # check if agent_id exists + + # get agent object from somewhere, e.g. 
a database/disk/global dict + + # continue agent interaction with user input + + return { + "thoughts": { + "thoughts": { + "text": "text", + "reasoning": "reasoning", + "plan": "plan", + "criticism": "criticism", + "speak": "speak", + }, + "commands": { + "name": "name", + "args": {"arg_1": "value_1", "arg_2": "value_2"}, + }, + }, + "messages": ["message1", agent_id], + } + + +app = FastAPI() +app.include_router(router, prefix="/api/v1") diff --git a/autogpt/core/runner/cli_web_app/server/schema.py b/autogpt/core/runner/cli_web_app/server/schema.py new file mode 100644 index 00000000..272fbc78 --- /dev/null +++ b/autogpt/core/runner/cli_web_app/server/schema.py @@ -0,0 +1,36 @@ +from uuid import UUID + +from pydantic import BaseModel, validator + + +class AgentInfo(BaseModel): + id: UUID = None + objective: str = "" + name: str = "" + role: str = "" + goals: list[str] = [] + + +class AgentConfiguration(BaseModel): + """Configuration for creation of a new agent.""" + + # We'll want to get this schema from the configuration, so it needs to be dynamic. + user_configuration: dict + agent_goals: AgentInfo + + @validator("agent_goals") + def only_objective_or_name_role_goals(cls, agent_goals): + goals_specification = [agent_goals.name, agent_goals.role, agent_goals.goals] + if agent_goals.objective and any(goals_specification): + raise ValueError("Cannot specify both objective and name, role, or goals") + if not agent_goals.objective and not all(goals_specification): + raise ValueError("Must specify either objective or name, role, and goals") + + +class InteractRequestBody(BaseModel): + user_input: str = "" + + +class InteractResponseBody(BaseModel): + thoughts: dict[str, str] # TBD + messages: list[str] # for example diff --git a/autogpt/core/runner/cli_web_app/server/services/__init__.py b/autogpt/core/runner/cli_web_app/server/services/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/autogpt/core/runner/cli_web_app/server/services/users.py b/autogpt/core/runner/cli_web_app/server/services/users.py new file mode 100644 index 00000000..5192dcdb --- /dev/null +++ b/autogpt/core/runner/cli_web_app/server/services/users.py @@ -0,0 +1,20 @@ +import uuid + +from fastapi import Request + + +class UserService: + def __init__(self): + self.users = {} + + def get_user_id(self, request: Request) -> uuid.UUID: + # TODO: something real. I don't know how this works. + hostname = request.client.host + port = request.client.port + user = f"{hostname}:{port}" + if user not in self.users: + self.users[user] = uuid.uuid4() + return self.users[user] + + +USER_SERVICE = UserService() diff --git a/autogpt/core/runner/client_lib/__init__.py b/autogpt/core/runner/client_lib/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/autogpt/core/runner/client_lib/logging.py b/autogpt/core/runner/client_lib/logging.py new file mode 100644 index 00000000..04f0a9c4 --- /dev/null +++ b/autogpt/core/runner/client_lib/logging.py @@ -0,0 +1,20 @@ +import logging + + +def get_client_logger(): + # Configure logging before we do anything else. + # Application logs need a place to live. 
+    client_logger = logging.getLogger("autogpt_client_application")
+    client_logger.setLevel(logging.DEBUG)
+
+    formatter = logging.Formatter(
+        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+    )
+
+    ch = logging.StreamHandler()
+    ch.setLevel(logging.DEBUG)
+    ch.setFormatter(formatter)
+
+    client_logger.addHandler(ch)
+
+    return client_logger
diff --git a/autogpt/core/runner/client_lib/settings.py b/autogpt/core/runner/client_lib/settings.py
new file mode 100644
index 00000000..9c998302
--- /dev/null
+++ b/autogpt/core/runner/client_lib/settings.py
@@ -0,0 +1,14 @@
+from pathlib import Path
+
+import yaml
+
+from autogpt.core.agent import SimpleAgent
+
+
+def make_user_configuration(settings_file_path: Path):
+    user_configuration = SimpleAgent.build_user_configuration()
+
+    settings_file_path.parent.mkdir(parents=True, exist_ok=True)
+    print("Writing settings to", settings_file_path)
+    with settings_file_path.open("w") as f:
+        yaml.safe_dump(user_configuration, f)
diff --git a/autogpt/core/runner/client_lib/shared_click_commands.py b/autogpt/core/runner/client_lib/shared_click_commands.py
new file mode 100644
index 00000000..5be52acb
--- /dev/null
+++ b/autogpt/core/runner/client_lib/shared_click_commands.py
@@ -0,0 +1,19 @@
+import pathlib
+
+import click
+
+DEFAULT_SETTINGS_FILE = str(
+    pathlib.Path("~/auto-gpt/default_agent_settings.yml").expanduser()
+)
+
+
+@click.command()
+@click.option(
+    "--settings-file",
+    type=click.Path(),
+    default=DEFAULT_SETTINGS_FILE,
+)
+def make_settings(settings_file: str) -> None:
+    from autogpt.core.runner.client_lib.settings import make_user_configuration
+
+    make_user_configuration(pathlib.Path(settings_file))
diff --git a/autogpt/core/runner/client_lib/utils.py b/autogpt/core/runner/client_lib/utils.py
new file mode 100644
index 00000000..39b5135f
--- /dev/null
+++ b/autogpt/core/runner/client_lib/utils.py
@@ -0,0 +1,61 @@
+import asyncio
+import functools
+from bdb import BdbQuit
+from typing import Callable, ParamSpec, TypeVar
+
+import click
+
+P = ParamSpec("P")
+T = TypeVar("T")
+
+
+def handle_exceptions(
+    application_main: Callable[P, T],
+    with_debugger: bool,
+) -> Callable[P, T]:
+    """Wraps a function so that it drops a user into a debugger if it raises an error.
+
+    This is intended to be used as a wrapper for the main function of a CLI
+    application. BdbQuit, KeyboardInterrupt, and click.Abort are always
+    re-raised rather than handled. Any other error is also re-raised if
+    with_debugger is False; if with_debugger is True, the error is printed
+    and a pdb post-mortem debugging session is started instead.
+
+    Parameters
+    ----------
+    application_main
+        The function to wrap.
+    with_debugger
+        Whether to drop a user into a debugger if an error is raised.
+
+    Returns
+    -------
+    Callable
+        The wrapped function.
+ + """ + + @functools.wraps(application_main) + async def wrapped(*args: P.args, **kwargs: P.kwargs) -> T: + try: + return await application_main(*args, **kwargs) + except (BdbQuit, KeyboardInterrupt, click.Abort): + raise + except Exception as e: + if with_debugger: + print(f"Uncaught exception {e}") + import pdb + + pdb.post_mortem() + else: + raise + + return wrapped + + +def coroutine(f): + @functools.wraps(f) + def wrapper(*args, **kwargs): + return asyncio.run(f(*args, **kwargs)) + + return wrapper diff --git a/autogpt/core/workspace/__init__.py b/autogpt/core/workspace/__init__.py new file mode 100644 index 00000000..f474d4fa --- /dev/null +++ b/autogpt/core/workspace/__init__.py @@ -0,0 +1,3 @@ +"""The workspace is the central hub for the Agent's on disk resources.""" +from autogpt.core.workspace.base import Workspace +from autogpt.core.workspace.simple import SimpleWorkspace, WorkspaceSettings diff --git a/autogpt/core/workspace/base.py b/autogpt/core/workspace/base.py new file mode 100644 index 00000000..b011056c --- /dev/null +++ b/autogpt/core/workspace/base.py @@ -0,0 +1,70 @@ +from __future__ import annotations + +import abc +import logging +import typing +from pathlib import Path + +if typing.TYPE_CHECKING: + from autogpt.core.configuration import AgentConfiguration + + +class Workspace(abc.ABC): + """The workspace is the root directory for all generated files. + + The workspace is responsible for creating the root directory and + providing a method for getting the full path to an item in the + workspace. + + """ + + @property + @abc.abstractmethod + def root(self) -> Path: + """The root directory of the workspace.""" + ... + + @property + @abc.abstractmethod + def restrict_to_workspace(self) -> bool: + """Whether to restrict generated paths to the workspace.""" + ... + + @staticmethod + @abc.abstractmethod + def setup_workspace( + configuration: AgentConfiguration, logger: logging.Logger + ) -> Path: + """Create the workspace root directory and set up all initial content. + + Parameters + ---------- + configuration + The Agent's configuration. + logger + The Agent's logger. + + Returns + ------- + Path + The path to the workspace root directory. + + """ + ... + + @abc.abstractmethod + def get_path(self, relative_path: str | Path) -> Path: + """Get the full path for an item in the workspace. + + Parameters + ---------- + relative_path + The path to the item relative to the workspace root. + + Returns + ------- + Path + The full path to the item. + + """ + ... 
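
The SimpleWorkspace class in the next file implements this contract. A rough usage sketch follows; the paths are invented for illustration, and it assumes the settings base class accepts the name/description fields that SimpleWorkspace.default_settings below also passes.

```python
import logging

from autogpt.core.workspace.simple import (
    SimpleWorkspace,
    WorkspaceConfiguration,
    WorkspaceSettings,
)

# Illustrative settings; "/tmp/demo-agent" is a made-up location.
settings = WorkspaceSettings(
    name="workspace",
    description="demo workspace",
    configuration=WorkspaceConfiguration(
        root="/tmp/demo-agent",
        parent="/tmp",
        restrict_to_workspace=True,
    ),
)
workspace = SimpleWorkspace(settings, logging.getLogger("demo"))

# Relative paths resolve inside the workspace root
# (on a typical Linux layout: /tmp/demo-agent/logs/debug.log).
print(workspace.get_path("logs/debug.log"))

# Traversal outside the root is rejected when restrict_to_workspace is set.
try:
    workspace.get_path("../escape.txt")
except ValueError as e:
    print(f"blocked: {e}")
```
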
diff --git a/autogpt/core/workspace/simple.py b/autogpt/core/workspace/simple.py new file mode 100644 index 00000000..45e1f9dd --- /dev/null +++ b/autogpt/core/workspace/simple.py @@ -0,0 +1,193 @@ +import json +import logging +import typing +from pathlib import Path + +from pydantic import SecretField + +from autogpt.core.configuration import ( + Configurable, + SystemConfiguration, + SystemSettings, + UserConfigurable, +) +from autogpt.core.workspace.base import Workspace + +if typing.TYPE_CHECKING: + # Cyclic import + from autogpt.core.agent.simple import AgentSettings + + +class WorkspaceConfiguration(SystemConfiguration): + root: str + parent: str = UserConfigurable() + restrict_to_workspace: bool = UserConfigurable() + + +class WorkspaceSettings(SystemSettings): + configuration: WorkspaceConfiguration + + +class SimpleWorkspace(Configurable, Workspace): + default_settings = WorkspaceSettings( + name="workspace", + description="The workspace is the root directory for all agent activity.", + configuration=WorkspaceConfiguration( + root="", + parent="~/auto-gpt/agents", + restrict_to_workspace=True, + ), + ) + + NULL_BYTES = ["\0", "\000", "\x00", "\u0000", "%00"] + + def __init__( + self, + settings: WorkspaceSettings, + logger: logging.Logger, + ): + self._configuration = settings.configuration + self._logger = logger.getChild("workspace") + + @property + def root(self) -> Path: + return Path(self._configuration.root) + + @property + def debug_log_path(self) -> Path: + return self.root / "logs" / "debug.log" + + @property + def cycle_log_path(self) -> Path: + return self.root / "logs" / "cycle.log" + + @property + def configuration_path(self) -> Path: + return self.root / "configuration.yml" + + @property + def restrict_to_workspace(self) -> bool: + return self._configuration.restrict_to_workspace + + def get_path(self, relative_path: str | Path) -> Path: + """Get the full path for an item in the workspace. + + Parameters + ---------- + relative_path + The relative path to resolve in the workspace. + + Returns + ------- + Path + The resolved path relative to the workspace. + + """ + return self._sanitize_path( + relative_path, + root=self.root, + restrict_to_root=self.restrict_to_workspace, + ) + + def _sanitize_path( + self, + relative_path: str | Path, + root: str | Path = None, + restrict_to_root: bool = True, + ) -> Path: + """Resolve the relative path within the given root if possible. + + Parameters + ---------- + relative_path + The relative path to resolve. + root + The root path to resolve the relative path within. + restrict_to_root + Whether to restrict the path to the root. + + Returns + ------- + Path + The resolved path. + + Raises + ------ + ValueError + If the path is absolute and a root is provided. + ValueError + If the path is outside the root and the root is restricted. + + """ + + # Posix systems disallow null bytes in paths. Windows is agnostic about it. + # Do an explicit check here for all sorts of null byte representations. + + for null_byte in self.NULL_BYTES: + if null_byte in str(relative_path) or null_byte in str(root): + raise ValueError("embedded null byte") + + if root is None: + return Path(relative_path).resolve() + + self._logger.debug(f"Resolving path '{relative_path}' in workspace '{root}'") + root, relative_path = Path(root).resolve(), Path(relative_path) + self._logger.debug(f"Resolved root as '{root}'") + + if relative_path.is_absolute(): + raise ValueError( + f"Attempted to access absolute path '{relative_path}' in workspace '{root}'." 
+ ) + full_path = root.joinpath(relative_path).resolve() + + self._logger.debug(f"Joined paths as '{full_path}'") + + if restrict_to_root and not full_path.is_relative_to(root): + raise ValueError( + f"Attempted to access path '{full_path}' outside of workspace '{root}'." + ) + + return full_path + + ################################### + # Factory methods for agent setup # + ################################### + + @staticmethod + def setup_workspace(settings: "AgentSettings", logger: logging.Logger) -> Path: + workspace_parent = settings.workspace.configuration.parent + workspace_parent = Path(workspace_parent).expanduser().resolve() + workspace_parent.mkdir(parents=True, exist_ok=True) + + agent_name = settings.agent.name + + workspace_root = workspace_parent / agent_name + workspace_root.mkdir(parents=True, exist_ok=True) + + settings.workspace.configuration.root = str(workspace_root) + + with (workspace_root / "agent_settings.json").open("w") as f: + settings_json = settings.json( + encoder=lambda x: x.get_secret_value() + if isinstance(x, SecretField) + else x, + ) + f.write(settings_json) + + # TODO: What are all the kinds of logs we want here? + log_path = workspace_root / "logs" + log_path.mkdir(parents=True, exist_ok=True) + (log_path / "debug.log").touch() + (log_path / "cycle.log").touch() + + return workspace_root + + @staticmethod + def load_agent_settings(workspace_root: Path) -> "AgentSettings": + # Cyclic import + from autogpt.core.agent.simple import AgentSettings + + with (workspace_root / "agent_settings.json").open("r") as f: + agent_settings = json.load(f) + + return AgentSettings.parse_obj(agent_settings) diff --git a/autogpt/plugins/plugin_config.py b/autogpt/plugins/plugin_config.py index 53a83b16..bdf77d83 100644 --- a/autogpt/plugins/plugin_config.py +++ b/autogpt/plugins/plugin_config.py @@ -1,14 +1,11 @@ from typing import Any +from pydantic import BaseModel -class PluginConfig: + +class PluginConfig(BaseModel): """Class for holding configuration of a single plugin""" - def __init__(self, name: str, enabled: bool = False, config: dict[str, Any] = None): - self.name = name - self.enabled = enabled - # Arbitray config options for this plugin. API keys or plugin-specific options live here. 
- self.config = config or {} - - def __repr__(self): - return f"PluginConfig('{self.name}', {self.enabled}, {str(self.config)}" + name: str + enabled: bool = False + config: dict[str, Any] = None diff --git a/autogpt/plugins/plugins_config.py b/autogpt/plugins/plugins_config.py index dedffd21..7fcb5197 100644 --- a/autogpt/plugins/plugins_config.py +++ b/autogpt/plugins/plugins_config.py @@ -1,36 +1,24 @@ from __future__ import annotations import os -from typing import TYPE_CHECKING, Any, Union +from typing import TYPE_CHECKING, Union import yaml if TYPE_CHECKING: from autogpt.config import Config +from pydantic import BaseModel + from autogpt.logs import logger from autogpt.plugins.plugin_config import PluginConfig -class PluginsConfig: +class PluginsConfig(BaseModel): """Class for holding configuration of all plugins""" plugins: dict[str, PluginConfig] - def __init__(self, plugins_config: dict[str, Any]): - self.plugins = {} - for name, plugin in plugins_config.items(): - if type(plugin) == dict: - self.plugins[name] = PluginConfig( - name, - plugin.get("enabled", False), - plugin.get("config", {}), - ) - elif type(plugin) == PluginConfig: - self.plugins[name] = plugin - else: - raise ValueError(f"Invalid plugin config data type: {type(plugin)}") - def __repr__(self): return f"PluginsConfig({self.plugins})" @@ -43,7 +31,7 @@ class PluginsConfig: @classmethod def load_config(cls, global_config: Config) -> "PluginsConfig": - empty_config = cls({}) + empty_config = cls(plugins={}) try: config_data = cls.deserialize_config_file(global_config=global_config) @@ -52,7 +40,7 @@ class PluginsConfig: f"Expected plugins config to be a dict, got {type(config_data)}, continuing without plugins" ) return empty_config - return cls(config_data) + return cls(plugins=config_data) except BaseException as e: logger.error( @@ -61,14 +49,28 @@ class PluginsConfig: return empty_config @classmethod - def deserialize_config_file(cls, global_config: Config) -> dict[str, Any]: + def deserialize_config_file(cls, global_config: Config) -> dict[str, PluginConfig]: plugins_config_path = global_config.plugins_config_file if not os.path.exists(plugins_config_path): logger.warn("plugins_config.yaml does not exist, creating base config.") cls.create_empty_plugins_config(global_config=global_config) with open(plugins_config_path, "r") as f: - return yaml.load(f, Loader=yaml.FullLoader) + plugins_config = yaml.load(f, Loader=yaml.FullLoader) + + plugins = {} + for name, plugin in plugins_config.items(): + if type(plugin) == dict: + plugins[name] = PluginConfig( + name=name, + enabled=plugin.get("enabled", False), + config=plugin.get("config", {}), + ) + elif type(plugin) == PluginConfig: + plugins[name] = plugin + else: + raise ValueError(f"Invalid plugin config data type: {type(plugin)}") + return plugins @staticmethod def create_empty_plugins_config(global_config: Config): diff --git a/requirements.txt b/requirements.txt index 28aaf32f..30ae8399 100644 --- a/requirements.txt +++ b/requirements.txt @@ -30,6 +30,10 @@ en-core-web-sm @ https://github.com/explosion/spacy-models/releases/download/en_ prompt_toolkit>=3.0.38 pydantic +# web server +fastapi +uvicorn + ##Dev coverage flake8 From 0c8288b5e141a4aca1059312868a627230443159 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Wed, 5 Jul 2023 22:36:17 +0200 Subject: [PATCH 11/34] Update OpenAI model ID mappings to `-0613` (#4889) --- autogpt/llm/providers/openai.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/autogpt/llm/providers/openai.py b/autogpt/llm/providers/openai.py index 397b4791..baf7ab87 100644 --- a/autogpt/llm/providers/openai.py +++ b/autogpt/llm/providers/openai.py @@ -73,10 +73,10 @@ OPEN_AI_CHAT_MODELS = { } # Set aliases for rolling model IDs chat_model_mapping = { - "gpt-3.5-turbo": "gpt-3.5-turbo-0301", + "gpt-3.5-turbo": "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k": "gpt-3.5-turbo-16k-0613", - "gpt-4": "gpt-4-0314", - "gpt-4-32k": "gpt-4-32k-0314", + "gpt-4": "gpt-4-0613", + "gpt-4-32k": "gpt-4-32k-0613", } for alias, target in chat_model_mapping.items(): alias_info = ChatModelInfo(**OPEN_AI_CHAT_MODELS[target].__dict__) From 9cf35010c6bae725f23b6cd608768d679f5ae4ad Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Thu, 6 Jul 2023 01:05:07 +0200 Subject: [PATCH 12/34] Fix plugin loading issues (#4888) * Fix Config model initialization * Fix basedir determination in install_plugin_dependencies * Add logging to install_plugin_dependencies() --------- Co-authored-by: collijk --- autogpt/config/config.py | 58 +++++++++++++------------------ autogpt/plugins/plugins_config.py | 55 +++++++++++++++++++---------- scripts/install_plugin_deps.py | 44 ++++++++++++++++------- tests/conftest.py | 6 +++- tests/unit/test_plugins.py | 12 +++++-- 5 files changed, 107 insertions(+), 68 deletions(-) diff --git a/autogpt/config/config.py b/autogpt/config/config.py index 1c2084f7..5711764c 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -4,7 +4,7 @@ from __future__ import annotations import contextlib import os import re -from typing import Dict +from typing import Dict, Union import yaml from colorama import Fore @@ -83,18 +83,6 @@ class Config(SystemSettings): plugins: list[str] authorise_key: str - def __init__(self, **kwargs): - super().__init__(**kwargs) - - # Hotfix: Call model_post_init explictly as it doesn't seem to be called for pydantic<2.0.0 - # https://github.com/pydantic/pydantic/issues/1729#issuecomment-1300576214 - self.model_post_init(**kwargs) - - # Executed immediately after init by Pydantic - def model_post_init(self, **kwargs) -> None: - if not self.plugins_config.plugins: - self.plugins_config = PluginsConfig.load_config(self) - class ConfigBuilder(Configurable[Config]): default_plugins_config_file = os.path.join( @@ -213,21 +201,16 @@ class ConfigBuilder(Configurable[Config]): "chat_messages_enabled": os.getenv("CHAT_MESSAGES_ENABLED") == "True", } - # Converting to a list from comma-separated string - disabled_command_categories = os.getenv("DISABLED_COMMAND_CATEGORIES") - if disabled_command_categories: - config_dict[ - "disabled_command_categories" - ] = disabled_command_categories.split(",") + config_dict["disabled_command_categories"] = _safe_split( + os.getenv("DISABLED_COMMAND_CATEGORIES") + ) - # Converting to a list from comma-separated string - shell_denylist = os.getenv("SHELL_DENYLIST", os.getenv("DENY_COMMANDS")) - if shell_denylist: - config_dict["shell_denylist"] = shell_denylist.split(",") - - shell_allowlist = os.getenv("SHELL_ALLOWLIST", os.getenv("ALLOW_COMMANDS")) - if shell_allowlist: - config_dict["shell_allowlist"] = shell_allowlist.split(",") + config_dict["shell_denylist"] = _safe_split( + os.getenv("SHELL_DENYLIST", os.getenv("DENY_COMMANDS")) + ) + config_dict["shell_allowlist"] = _safe_split( + os.getenv("SHELL_ALLOWLIST", os.getenv("ALLOW_COMMANDS")) + ) config_dict["google_custom_search_engine_id"] = os.getenv( "GOOGLE_CUSTOM_SEARCH_ENGINE_ID", os.getenv("CUSTOM_SEARCH_ENGINE_ID") @@ -237,13 +220,13 @@ class 
ConfigBuilder(Configurable[Config]): "ELEVENLABS_VOICE_ID", os.getenv("ELEVENLABS_VOICE_1_ID") ) - plugins_allowlist = os.getenv("ALLOWLISTED_PLUGINS") - if plugins_allowlist: - config_dict["plugins_allowlist"] = plugins_allowlist.split(",") - - plugins_denylist = os.getenv("DENYLISTED_PLUGINS") - if plugins_denylist: - config_dict["plugins_denylist"] = plugins_denylist.split(",") + config_dict["plugins_allowlist"] = _safe_split(os.getenv("ALLOWLISTED_PLUGINS")) + config_dict["plugins_denylist"] = _safe_split(os.getenv("DENYLISTED_PLUGINS")) + config_dict["plugins_config"] = PluginsConfig.load_config( + config_dict["plugins_config_file"], + config_dict["plugins_denylist"], + config_dict["plugins_allowlist"], + ) with contextlib.suppress(TypeError): config_dict["image_size"] = int(os.getenv("IMAGE_SIZE")) @@ -325,3 +308,10 @@ def check_openai_api_key(config: Config) -> None: else: print("Invalid OpenAI API key!") exit(1) + + +def _safe_split(s: Union[str, None], sep: str = ",") -> list[str]: + """Split a string by a separator. Return an empty list if the string is None.""" + if s is None: + return [] + return s.split(sep) diff --git a/autogpt/plugins/plugins_config.py b/autogpt/plugins/plugins_config.py index 7fcb5197..13b87130 100644 --- a/autogpt/plugins/plugins_config.py +++ b/autogpt/plugins/plugins_config.py @@ -1,13 +1,9 @@ from __future__ import annotations import os -from typing import TYPE_CHECKING, Union +from typing import Union import yaml - -if TYPE_CHECKING: - from autogpt.config import Config - from pydantic import BaseModel from autogpt.logs import logger @@ -30,11 +26,20 @@ class PluginsConfig(BaseModel): return plugin_config is not None and plugin_config.enabled @classmethod - def load_config(cls, global_config: Config) -> "PluginsConfig": + def load_config( + cls, + plugins_config_file: str, + plugins_denylist: list[str], + plugins_allowlist: list[str], + ) -> "PluginsConfig": empty_config = cls(plugins={}) try: - config_data = cls.deserialize_config_file(global_config=global_config) + config_data = cls.deserialize_config_file( + plugins_config_file, + plugins_denylist, + plugins_allowlist, + ) if type(config_data) != dict: logger.error( f"Expected plugins config to be a dict, got {type(config_data)}, continuing without plugins" @@ -49,13 +54,21 @@ class PluginsConfig(BaseModel): return empty_config @classmethod - def deserialize_config_file(cls, global_config: Config) -> dict[str, PluginConfig]: - plugins_config_path = global_config.plugins_config_file - if not os.path.exists(plugins_config_path): + def deserialize_config_file( + cls, + plugins_config_file: str, + plugins_denylist: list[str], + plugins_allowlist: list[str], + ) -> dict[str, PluginConfig]: + if not os.path.exists(plugins_config_file): logger.warn("plugins_config.yaml does not exist, creating base config.") - cls.create_empty_plugins_config(global_config=global_config) + cls.create_empty_plugins_config( + plugins_config_file, + plugins_denylist, + plugins_allowlist, + ) - with open(plugins_config_path, "r") as f: + with open(plugins_config_file, "r") as f: plugins_config = yaml.load(f, Loader=yaml.FullLoader) plugins = {} @@ -73,23 +86,27 @@ class PluginsConfig(BaseModel): return plugins @staticmethod - def create_empty_plugins_config(global_config: Config): + def create_empty_plugins_config( + plugins_config_file: str, + plugins_denylist: list[str], + plugins_allowlist: list[str], + ): """Create an empty plugins_config.yaml file. 
Fill it with values from old env variables.""" base_config = {} - logger.debug(f"Legacy plugin denylist: {global_config.plugins_denylist}") - logger.debug(f"Legacy plugin allowlist: {global_config.plugins_allowlist}") + logger.debug(f"Legacy plugin denylist: {plugins_denylist}") + logger.debug(f"Legacy plugin allowlist: {plugins_allowlist}") # Backwards-compatibility shim - for plugin_name in global_config.plugins_denylist: + for plugin_name in plugins_denylist: base_config[plugin_name] = {"enabled": False, "config": {}} - for plugin_name in global_config.plugins_allowlist: + for plugin_name in plugins_allowlist: base_config[plugin_name] = {"enabled": True, "config": {}} logger.debug(f"Constructed base plugins config: {base_config}") - logger.debug(f"Creating plugin config file {global_config.plugins_config_file}") - with open(global_config.plugins_config_file, "w+") as f: + logger.debug(f"Creating plugin config file {plugins_config_file}") + with open(plugins_config_file, "w+") as f: f.write(yaml.dump(base_config)) return base_config diff --git a/scripts/install_plugin_deps.py b/scripts/install_plugin_deps.py index 00d9f8a3..1cd0bd1a 100644 --- a/scripts/install_plugin_deps.py +++ b/scripts/install_plugin_deps.py @@ -5,6 +5,8 @@ import zipfile from glob import glob from pathlib import Path +from autogpt.logs import logger + def install_plugin_dependencies(): """ @@ -18,28 +20,46 @@ def install_plugin_dependencies(): """ plugins_dir = Path(os.getenv("PLUGINS_DIR", "plugins")) + logger.debug(f"Checking for dependencies in zipped plugins...") + # Install zip-based plugins - for plugin in plugins_dir.glob("*.zip"): - with zipfile.ZipFile(str(plugin), "r") as zfile: - try: - basedir = zfile.namelist()[0] - basereqs = os.path.join(basedir, "requirements.txt") - extracted = zfile.extract(basereqs, path=plugins_dir) - subprocess.check_call( - [sys.executable, "-m", "pip", "install", "-r", extracted] - ) - os.remove(extracted) - os.rmdir(os.path.join(plugins_dir, basedir)) - except KeyError: + for plugin_archive in plugins_dir.glob("*.zip"): + logger.debug(f"Checking for requirements in '{plugin_archive}'...") + with zipfile.ZipFile(str(plugin_archive), "r") as zfile: + if not zfile.namelist(): continue + # Assume the first entry in the list will be (in) the lowest common dir + first_entry = zfile.namelist()[0] + basedir = first_entry.rsplit("/", 1)[0] if "/" in first_entry else "" + logger.debug(f"Looking for requirements.txt in '{basedir}'") + + basereqs = os.path.join(basedir, "requirements.txt") + try: + extracted = zfile.extract(basereqs, path=plugins_dir) + except KeyError as e: + logger.debug(e.args[0]) + continue + + logger.debug(f"Installing dependencies from '{basereqs}'...") + subprocess.check_call( + [sys.executable, "-m", "pip", "install", "-r", extracted] + ) + os.remove(extracted) + os.rmdir(os.path.join(plugins_dir, basedir)) + + logger.debug(f"Checking for dependencies in other plugin folders...") + # Install directory-based plugins for requirements_file in glob(f"{plugins_dir}/*/requirements.txt"): + logger.debug(f"Installing dependencies from '{requirements_file}'...") subprocess.check_call( [sys.executable, "-m", "pip", "install", "-r", requirements_file], stdout=subprocess.DEVNULL, ) + logger.debug("Finished installing plugin dependencies") + if __name__ == "__main__": install_plugin_dependencies() diff --git a/tests/conftest.py b/tests/conftest.py index 920fc4e4..f2ca5904 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -59,7 +59,11 @@ def config( # avoid circular 
dependency from autogpt.plugins.plugins_config import PluginsConfig - config.plugins_config = PluginsConfig.load_config(global_config=config) + config.plugins_config = PluginsConfig.load_config( + plugins_config_file=config.plugins_config_file, + plugins_denylist=config.plugins_denylist, + plugins_allowlist=config.plugins_allowlist, + ) # Do a little setup and teardown since the config object is a singleton mocker.patch.multiple( diff --git a/tests/unit/test_plugins.py b/tests/unit/test_plugins.py index 24b7d1e9..981715ac 100644 --- a/tests/unit/test_plugins.py +++ b/tests/unit/test_plugins.py @@ -70,7 +70,11 @@ def test_create_base_config(config: Config): config.plugins_denylist = ["c", "d"] os.remove(config.plugins_config_file) - plugins_config = PluginsConfig.load_config(global_config=config) + plugins_config = PluginsConfig.load_config( + plugins_config_file=config.plugins_config_file, + plugins_denylist=config.plugins_denylist, + plugins_allowlist=config.plugins_allowlist, + ) # Check the structure of the plugins config data assert len(plugins_config.plugins) == 4 @@ -102,7 +106,11 @@ def test_load_config(config: Config): f.write(yaml.dump(test_config)) # Load the config from disk - plugins_config = PluginsConfig.load_config(global_config=config) + plugins_config = PluginsConfig.load_config( + plugins_config_file=config.plugins_config_file, + plugins_denylist=config.plugins_denylist, + plugins_allowlist=config.plugins_allowlist, + ) # Check that the loaded config is equal to the test config assert len(plugins_config.plugins) == 2 From bfb45f2cbd2a2a4581057e8e4079aa01c1a7ecdf Mon Sep 17 00:00:00 2001 From: uta <122957026+uta0x89@users.noreply.github.com> Date: Thu, 6 Jul 2023 08:37:01 +0900 Subject: [PATCH 13/34] Fix errors in Mandatory Tasks of Benchmarks (#4893) Co-authored-by: merwanehamadi --- tests/challenges/basic_abilities/test_write_file.py | 2 ++ tests/challenges/memory/test_memory_challenge_a.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/tests/challenges/basic_abilities/test_write_file.py b/tests/challenges/basic_abilities/test_write_file.py index be1f0a93..2a202ee3 100644 --- a/tests/challenges/basic_abilities/test_write_file.py +++ b/tests/challenges/basic_abilities/test_write_file.py @@ -1,5 +1,6 @@ import pytest +from autogpt.config import Config from autogpt.workspace import Workspace from tests.challenges.challenge_decorator.challenge_decorator import challenge from tests.challenges.utils import get_workspace_path, run_challenge @@ -17,6 +18,7 @@ USER_INPUTS = [ @challenge() def test_write_file( + config: Config, patched_api_requestor: None, monkeypatch: pytest.MonkeyPatch, level_to_run: int, diff --git a/tests/challenges/memory/test_memory_challenge_a.py b/tests/challenges/memory/test_memory_challenge_a.py index 1130079b..bbd221f4 100644 --- a/tests/challenges/memory/test_memory_challenge_a.py +++ b/tests/challenges/memory/test_memory_challenge_a.py @@ -1,6 +1,7 @@ import pytest from pytest_mock import MockerFixture +from autogpt.config import Config from autogpt.workspace import Workspace from tests.challenges.challenge_decorator.challenge_decorator import challenge from tests.challenges.utils import get_workspace_path, run_challenge @@ -12,6 +13,7 @@ USER_INPUT = "Use the command read_file to read the instructions_1.txt file\nFol @challenge() def test_memory_challenge_a( + config: Config, patched_api_requestor: MockerFixture, monkeypatch: pytest.MonkeyPatch, level_to_run: int, From 2fcd91b765e88e2112a5e68d356a23ef6aed92b1 Mon Sep 17 00:00:00 2001 From: 
NeonN3mesis <129052650+NeonN3mesis@users.noreply.github.com>
Date: Thu, 6 Jul 2023 13:05:48 -0400
Subject: [PATCH 14/34] New Challenge test_information_retrieval_challenge_c
 (#4855)

* New Challenge test_information_retrieval_challenge_c

I created a new challenge; it still needs a bit of work.

* Update current_score.json

Changed max level beaten to null

* reformatted test_information_retrieval_challenge_c with black

---------

Co-authored-by: merwanehamadi
---
 tests/challenges/current_score.json           |  4 ++
 .../test_information_retrieval_challenge_c.py | 47 +++++++++++++++++++
 2 files changed, 51 insertions(+)
 create mode 100644 tests/challenges/information_retrieval/test_information_retrieval_challenge_c.py

diff --git a/tests/challenges/current_score.json b/tests/challenges/current_score.json
index 732e7391..524f4fd9 100644
--- a/tests/challenges/current_score.json
+++ b/tests/challenges/current_score.json
@@ -23,6 +23,10 @@
     "information_retrieval_challenge_b": {
       "max_level": 1,
       "max_level_beaten": null
+    },
+    "information_retrieval_challenge_c": {
+      "max_level": 3,
+      "max_level_beaten": null
     }
   },
   "kubernetes": {
diff --git a/tests/challenges/information_retrieval/test_information_retrieval_challenge_c.py b/tests/challenges/information_retrieval/test_information_retrieval_challenge_c.py
new file mode 100644
index 00000000..e827967d
--- /dev/null
+++ b/tests/challenges/information_retrieval/test_information_retrieval_challenge_c.py
@@ -0,0 +1,47 @@
+import pytest
+from pytest_mock import MockerFixture
+
+from autogpt.workspace import Workspace
+from tests.challenges.challenge_decorator.challenge_decorator import challenge
+from tests.challenges.utils import get_workspace_path, run_challenge
+
+CYCLE_COUNT = 3
+COO = [["Luke Lafreniere"], ["Luke Lafreniere"], ["Luke Lafreniere 2017"]]
+
+OUTPUT_LOCATION = "output.txt"
+USER_INPUTS = [
+    "Write to a file called output.txt containing the name and title of the current Chief Operating Officer of Floatplane Media.",
+    "Write to a file called output.txt containing the name and title of the current Chief Operating Officer of https://www.floatplane.com.",
+    "Write to a file called output.txt containing the name and title of the current Chief Operating Officer of https://www.floatplane.com and the year it was formed.",
+]
+
+
+@challenge()
+def test_information_retrieval_challenge_c(
+    monkeypatch: pytest.MonkeyPatch,
+    patched_api_requestor: MockerFixture,
+    level_to_run: int,
+    challenge_name: str,
+    workspace: Workspace,
+    patched_make_workspace: pytest.fixture,
+) -> None:
+    """
+    Test the challenge_c function in a given agent by mocking user inputs and checking the output file content.
+
+    :param level_to_run: The level of the challenge to run.
+    :param monkeypatch: pytest's monkeypatch utility for modifying builtins.
+ """ + run_challenge( + challenge_name, + level_to_run, + monkeypatch, + USER_INPUTS[level_to_run - 1], + CYCLE_COUNT, + ) + + file_path = get_workspace_path(workspace, OUTPUT_LOCATION) + with open(file_path, "r") as file: + content = file.read() + coo_name = COO[level_to_run - 1] + for chief in coo_name: + assert chief in content, f"Expected the file to contain {chief}" From e0882955e3db1d23e2e2fa4b2c688dcf46d2d866 Mon Sep 17 00:00:00 2001 From: kerta1n <36344851+kerta1n@users.noreply.github.com> Date: Thu, 6 Jul 2023 13:11:43 -0700 Subject: [PATCH 15/34] Update docs to use `docker compose` v2 (#4471) * Update setup.md Change "docker-compose" command to "docker compose" to avoid future issues with running the Docker method (`docker-compose` is v1 and is outdated, is not a recognized command with newer versions of Docker engine) * Update usage.md * Update comment in docker-compose.yml --------- Co-authored-by: Reinier van der Leer --- docker-compose.yml | 5 +++-- docs/setup.md | 14 +++++++------- docs/usage.md | 6 +++--- 3 files changed, 13 insertions(+), 12 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 7afa224a..945f969b 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,6 +1,7 @@ # To boot the app run the following: -# docker-compose run auto-gpt -# NOTE: Version 3.9 requires at least docker-compose version 1.29.0 ! +# docker compose run auto-gpt +# NOTE: Version 3.9 requires at least Docker Compose version 2 and Docker Engine version 20.10.13! + version: "3.9" services: diff --git a/docs/setup.md b/docs/setup.md index e894ebe2..43b9d9c8 100644 --- a/docs/setup.md +++ b/docs/setup.md @@ -159,28 +159,28 @@ Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](htt ### Run with Docker -Easiest is to use `docker-compose`. +Easiest is to use `docker compose`. Important: Docker Compose version 1.29.0 or later is required to use version 3.9 of the Compose file format. You can check the version of Docker Compose installed on your system by running the following command: - docker-compose version + docker compose version This will display the version of Docker Compose that is currently installed on your system. If you need to upgrade Docker Compose to a newer version, you can follow the installation instructions in the Docker documentation: https://docs.docker.com/compose/install/ -Once you have a recent version of docker-compose, run the commands below in your Auto-GPT folder. +Once you have a recent version of Docker Compose, run the commands below in your Auto-GPT folder. 1. Build the image. If you have pulled the image from Docker Hub, skip this step (NOTE: You *will* need to do this if you are modifying requirements.txt to add/remove dependencies like Python libs/frameworks) :::shell - docker-compose build auto-gpt + docker compose build auto-gpt 2. Run Auto-GPT :::shell - docker-compose run --rm auto-gpt + docker compose run --rm auto-gpt By default, this will also start and attach a Redis memory backend. If you do not want this, comment or remove the `depends: - redis` and `redis:` sections from @@ -190,7 +190,7 @@ Once you have a recent version of docker-compose, run the commands below in your You can pass extra arguments, e.g. 
running with `--gpt3only` and `--continuous`: ``` shell -docker-compose run --rm auto-gpt --gpt3only --continuous +docker compose run --rm auto-gpt --gpt3only --continuous ``` If you dare, you can also build and run it with "vanilla" docker commands: @@ -200,7 +200,7 @@ docker run -it --env-file=.env -v $PWD:/app auto-gpt docker run -it --env-file=.env -v $PWD:/app --rm auto-gpt --gpt3only --continuous ``` -[docker-compose file]: https://github.com/Significant-Gravitas/Auto-GPT/blob/stable/docker-compose.yml +[Docker Compose file]: https://github.com/Significant-Gravitas/Auto-GPT/blob/stable/docker-compose.yml ### Run with Dev Container diff --git a/docs/usage.md b/docs/usage.md index 2e88298c..42b86eae 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -11,11 +11,11 @@ Running with `--help` lists all the possible command line arguments you can pass !!! info For use with Docker, replace the script in the examples with - `docker-compose run --rm auto-gpt`: + `docker compose run --rm auto-gpt`: :::shell - docker-compose run --rm auto-gpt --help - docker-compose run --rm auto-gpt --ai-settings + docker compose run --rm auto-gpt --help + docker compose run --rm auto-gpt --ai-settings !!! note Replace anything in angled brackets (<>) to a value you want to specify From 70e8b07428e50cadc8d601441f1cfe4b09bd1710 Mon Sep 17 00:00:00 2001 From: Scott Schluer Date: Thu, 6 Jul 2023 13:44:53 -0700 Subject: [PATCH 16/34] Utilize environment variables for all agent key bindings (#3774) Co-authored-by: Reinier van der Leer --- autogpt/agent/agent.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index a21197cc..3975c0b5 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -196,8 +196,9 @@ class Agent: # to exit self.user_input = "" logger.info( - "Enter 'y' to authorise command, 'y -N' to run N continuous commands, 's' to run self-feedback commands, " - "'n' to exit program, or enter feedback for " + f"Enter '{self.config.authorise_key}' to authorise command, " + f"'{self.config.authorise_key} -N' to run N continuous commands, " + f"'{self.config.exit_key}' to exit program, or enter feedback for " f"{self.ai_name}..." ) while True: @@ -225,8 +226,8 @@ class Agent: user_input = "GENERATE NEXT COMMAND JSON" except ValueError: logger.warn( - "Invalid input format. Please enter 'y -n' where n is" - " the number of continuous tasks." + f"Invalid input format. Please enter '{self.config.authorise_key} -n' " + "where n is the number of continuous tasks." ) continue break From 6893b7e5e7563facaad1e4e93da4066480c5bb89 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Fri, 7 Jul 2023 01:10:11 +0200 Subject: [PATCH 17/34] Fix log textareas in bug issue template --- .github/ISSUE_TEMPLATE/1.bug.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/1.bug.yml b/.github/ISSUE_TEMPLATE/1.bug.yml index 3c660a8e..53c7f9da 100644 --- a/.github/ISSUE_TEMPLATE/1.bug.yml +++ b/.github/ISSUE_TEMPLATE/1.bug.yml @@ -140,8 +140,8 @@ body: ⚠️The following is OPTIONAL, please keep in mind that the log files may contain personal information such as credentials.⚠️ "The log files are located in the folder 'logs' inside the main auto-gpt folder." 
- - - type: input + + - type: textarea attributes: label: Upload Activity Log Content description: | @@ -152,7 +152,7 @@ body: validations: required: false - - type: input + - type: textarea attributes: label: Upload Error Log Content description: | From e4a337f1a5d6210a7417899f1844a685908eaf58 Mon Sep 17 00:00:00 2001 From: James Date: Fri, 7 Jul 2023 08:00:58 +0800 Subject: [PATCH 18/34] Fix potential passing of NoneType to remove_ansi_escape (#4882) Co-authored-by: Reinier van der Leer Co-authored-by: Luke <2609441+lc0rp@users.noreply.github.com> --- autogpt/logs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/logs.py b/autogpt/logs.py index 329afb8b..9d99f274 100644 --- a/autogpt/logs.py +++ b/autogpt/logs.py @@ -271,7 +271,7 @@ def print_assistant_thoughts( assistant_thoughts_criticism = None assistant_thoughts = assistant_reply_json_valid.get("thoughts", {}) - assistant_thoughts_text = remove_ansi_escape(assistant_thoughts.get("text")) + assistant_thoughts_text = remove_ansi_escape(assistant_thoughts.get("text", "")) if assistant_thoughts: assistant_thoughts_reasoning = remove_ansi_escape( assistant_thoughts.get("reasoning") From ac1751866396a66a050cdfbff7aca14359ed60fc Mon Sep 17 00:00:00 2001 From: Jayden <47970983+jayden5744@users.noreply.github.com> Date: Fri, 7 Jul 2023 09:51:59 +0900 Subject: [PATCH 19/34] Fix Azure OpenAI setup problems (#4875) * [Fix] Recover the azure config load function * [Style] Apply black, isort, mypy, autoflake * [Fix] Rename the return parameter from 'azure_model_map' to 'azure_model_to_deployment_id_map' * [Feat] Change the azure config file path to be dynamically configurable * [Test] Add azure_config and azure deployment_id_for_model * [Style] Apply black, isort, mypy, autoflake * [Style] Apply black, isort, mypy, autoflake * Refactor Azure configuration - Refactor the `azure_config_file` attribute in the `Config` class to be optional. - Refactor the `azure_model_to_deployment_id_map` attribute in the `Config` class to be optional and provide default values. - Update the `get_azure_deployment_id_for_model` function to accept additional parameters. - Update references to `get_azure_deployment_id_for_model` in `create_text_completion`, `create_chat_completion`, and `get_embedding` functions to pass the required parameters. 
* Clean up process for azure

* Docstring

* revert some unnecessary fiddling

* Avoid altering args to models

* Retry on 404s

* Don't permanently change the environment

* Formatting

---------

Co-authored-by: Luke <2609441+lc0rp@users.noreply.github.com>
Co-authored-by: lc0rp <2609411+lc0rp@users.noreply.github.com>
Co-authored-by: collijk
---
 .env.template                  |  4 +++
 autogpt/config/config.py       | 45 +++++++++++++++++----
 autogpt/llm/utils/__init__.py  |  9 ++---
 autogpt/memory/vector/utils.py |  4 ++-
 tests/unit/test_config.py      | 65 ++++++++++++++++++++++++++++++++++
 5 files changed, 114 insertions(+), 13 deletions(-)

diff --git a/.env.template b/.env.template
index c3fcb761..49b999a1 100644
--- a/.env.template
+++ b/.env.template
@@ -58,6 +58,10 @@ OPENAI_API_KEY=your-openai-api-key
 ## USE_AZURE - Use Azure OpenAI or not (Default: False)
 # USE_AZURE=False
 
+## AZURE_CONFIG_FILE - The path to the azure.yaml file (Default: azure.yaml)
+# AZURE_CONFIG_FILE=azure.yaml
+
+
 ################################################################################
 ### LLM MODELS
 ################################################################################
diff --git a/autogpt/config/config.py b/autogpt/config/config.py
index 5711764c..0de4ccdc 100644
--- a/autogpt/config/config.py
+++ b/autogpt/config/config.py
@@ -44,6 +44,8 @@ class Config(SystemSettings):
     openai_organization: Optional[str] = None
     temperature: float
     use_azure: bool
+    azure_config_file: Optional[str] = None
+    azure_model_to_deployment_id_map: Optional[Dict[str, str]] = None
     execute_local_commands: bool
     restrict_to_workspace: bool
     openai_api_type: Optional[str] = None
@@ -83,6 +85,31 @@ class Config(SystemSettings):
     plugins: list[str]
     authorise_key: str
 
+    def get_azure_kwargs(self, model: str) -> dict[str, str]:
+        """Get the kwargs for the Azure API."""
+        deployment_id = {
+            self.fast_llm_model: self.azure_model_to_deployment_id_map.get(
+                "fast_llm_model_deployment_id"
+            ),
+            self.smart_llm_model: self.azure_model_to_deployment_id_map.get(
+                "smart_llm_model_deployment_id"
+            ),
+            "text-embedding-ada-002": self.azure_model_to_deployment_id_map.get(
+                "embedding_model_deployment_id"
+            ),
+        }.get(model, None)
+
+        kwargs = {
+            "api_type": self.openai_api_type,
+            "api_base": self.openai_api_base,
+            "api_version": self.openai_api_version,
+        }
+        if model == "text-embedding-ada-002":
+            kwargs["engine"] = deployment_id
+        else:
+            kwargs["deployment_id"] = deployment_id
+        return kwargs
+
 
 class ConfigBuilder(Configurable[Config]):
     default_plugins_config_file = os.path.join(
@@ -125,6 +152,7 @@ class ConfigBuilder(Configurable[Config]):
         browse_spacy_language_model="en_core_web_sm",
         temperature=0,
         use_azure=False,
+        azure_config_file=AZURE_CONFIG_FILE,
        execute_local_commands=False,
         restrict_to_workspace=True,
         openai_functions=False,
@@ -168,6 +196,7 @@ class ConfigBuilder(Configurable[Config]):
            "browse_spacy_language_model": os.getenv("BROWSE_SPACY_LANGUAGE_MODEL"),
            "openai_api_key": os.getenv("OPENAI_API_KEY"),
            "use_azure": os.getenv("USE_AZURE") == "True",
+           "azure_config_file": os.getenv("AZURE_CONFIG_FILE", AZURE_CONFIG_FILE),
            "execute_local_commands": os.getenv("EXECUTE_LOCAL_COMMANDS", "False")
            == "True",
            "restrict_to_workspace": os.getenv("RESTRICT_TO_WORKSPACE", "True")
@@ -236,12 +265,15 @@ class ConfigBuilder(Configurable[Config]):
            config_dict["temperature"] = float(os.getenv("TEMPERATURE"))
 
        if config_dict["use_azure"]:
-           azure_config = cls.load_azure_config()
+           azure_config = cls.load_azure_config(config_dict["azure_config_file"])
config_dict["openai_api_type"] = azure_config["openai_api_type"] config_dict["openai_api_base"] = azure_config["openai_api_base"] config_dict["openai_api_version"] = azure_config["openai_api_version"] + config_dict["azure_model_to_deployment_id_map"] = azure_config[ + "azure_model_to_deployment_id_map" + ] - if os.getenv("OPENAI_API_BASE_URL"): + elif os.getenv("OPENAI_API_BASE_URL"): config_dict["openai_api_base"] = os.getenv("OPENAI_API_BASE_URL") openai_organization = os.getenv("OPENAI_ORGANIZATION") @@ -270,10 +302,11 @@ class ConfigBuilder(Configurable[Config]): config_params = yaml.load(file, Loader=yaml.FullLoader) or {} return { - "openai_api_type": config_params.get("azure_api_type") or "azure", - "openai_api_base": config_params.get("azure_api_base") or "", - "openai_api_version": config_params.get("azure_api_version") - or "2023-03-15-preview", + "openai_api_type": config_params.get("azure_api_type", "azure"), + "openai_api_base": config_params.get("azure_api_base", ""), + "openai_api_version": config_params.get( + "azure_api_version", "2023-03-15-preview" + ), "azure_model_to_deployment_id_map": config_params.get( "azure_model_map", {} ), diff --git a/autogpt/llm/utils/__init__.py b/autogpt/llm/utils/__init__.py index 1d4f4f97..f1b69c7b 100644 --- a/autogpt/llm/utils/__init__.py +++ b/autogpt/llm/utils/__init__.py @@ -1,12 +1,10 @@ from __future__ import annotations -from dataclasses import asdict from typing import List, Literal, Optional from colorama import Fore from autogpt.config import Config -from autogpt.logs import logger from ..api_manager import ApiManager from ..base import ChatModelResponse, ChatSequence, Message @@ -74,7 +72,7 @@ def create_text_completion( temperature = config.temperature if config.use_azure: - kwargs = {"deployment_id": config.get_azure_deployment_id_for_model(model)} + kwargs = config.get_azure_kwargs(model) else: kwargs = {"model": model} @@ -141,9 +139,8 @@ def create_chat_completion( chat_completion_kwargs["api_key"] = config.openai_api_key if config.use_azure: - chat_completion_kwargs[ - "deployment_id" - ] = config.get_azure_deployment_id_for_model(model) + chat_completion_kwargs.update(config.get_azure_kwargs(model)) + if functions: chat_completion_kwargs["functions"] = [ function.__dict__ for function in functions diff --git a/autogpt/memory/vector/utils.py b/autogpt/memory/vector/utils.py index beb2fcf9..74438f28 100644 --- a/autogpt/memory/vector/utils.py +++ b/autogpt/memory/vector/utils.py @@ -42,7 +42,7 @@ def get_embedding( model = config.embedding_model if config.use_azure: - kwargs = {"engine": config.get_azure_deployment_id_for_model(model)} + kwargs = config.get_azure_kwargs(model) else: kwargs = {"model": model} @@ -51,6 +51,8 @@ def get_embedding( f" with model '{model}'" + (f" via Azure deployment '{kwargs['engine']}'" if config.use_azure else "") ) + if config.use_azure: + breakpoint() embeddings = iopenai.create_embedding( input, diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py index 66334253..a0096a9f 100644 --- a/tests/unit/test_config.py +++ b/tests/unit/test_config.py @@ -2,6 +2,7 @@ Test cases for the config class, which handles the configuration settings for the AI and ensures it behaves as a singleton. 
""" +import os from unittest import mock from unittest.mock import patch @@ -145,6 +146,70 @@ def test_missing_azure_config(workspace: Workspace): assert azure_config["azure_model_to_deployment_id_map"] == {} +def test_azure_config(workspace: Workspace) -> None: + yaml_content = """ +azure_api_type: azure +azure_api_base: https://dummy.openai.azure.com +azure_api_version: 2023-06-01-preview +azure_model_map: + fast_llm_model_deployment_id: gpt-3.5-turbo + smart_llm_model_deployment_id: gpt-4 + embedding_model_deployment_id: embedding-deployment-id-for-azure +""" + config_file = workspace.get_path("azure.yaml") + config_file.write_text(yaml_content) + os.environ["USE_AZURE"] = "True" + os.environ["AZURE_CONFIG_FILE"] = str(config_file) + config = ConfigBuilder.build_config_from_env() + + assert config.openai_api_type == "azure" + assert config.openai_api_base == "https://dummy.openai.azure.com" + assert config.openai_api_version == "2023-06-01-preview" + assert config.azure_model_to_deployment_id_map == { + "fast_llm_model_deployment_id": "gpt-3.5-turbo", + "smart_llm_model_deployment_id": "gpt-4", + "embedding_model_deployment_id": "embedding-deployment-id-for-azure", + } + + del os.environ["USE_AZURE"] + del os.environ["AZURE_CONFIG_FILE"] + + +def test_azure_deployment_id_for_model(workspace: Workspace) -> None: + yaml_content = """ +azure_api_type: azure +azure_api_base: https://dummy.openai.azure.com +azure_api_version: 2023-06-01-preview +azure_model_map: + fast_llm_model_deployment_id: gpt-3.5-turbo + smart_llm_model_deployment_id: gpt-4 + embedding_model_deployment_id: embedding-deployment-id-for-azure +""" + config_file = workspace.get_path("azure.yaml") + config_file.write_text(yaml_content) + os.environ["USE_AZURE"] = "True" + os.environ["AZURE_CONFIG_FILE"] = str(config_file) + config = ConfigBuilder.build_config_from_env() + + config.fast_llm_model = "fast_llm_model" + config.smart_llm_model = "smart_llm_model" + + def _get_deployment_id(model): + kwargs = config.get_azure_kwargs(model) + return kwargs.get("deployment_id", kwargs.get("engine")) + + assert _get_deployment_id(config.fast_llm_model) == "gpt-3.5-turbo" + assert _get_deployment_id(config.smart_llm_model) == "gpt-4" + assert ( + _get_deployment_id("text-embedding-ada-002") + == "embedding-deployment-id-for-azure" + ) + assert _get_deployment_id("dummy") is None + + del os.environ["USE_AZURE"] + del os.environ["AZURE_CONFIG_FILE"] + + def test_create_config_gpt4only(config: Config) -> None: fast_llm_model = config.fast_llm_model smart_llm_model = config.smart_llm_model From bde007e6f7e09081e464dd3d6fac58ad066cd9ca Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Fri, 7 Jul 2023 03:42:18 +0200 Subject: [PATCH 20/34] Use GPT-4 in Agent loop by default (#4899) * Use GPT-4 as default smart LLM in Agent * Rename (smart|fast)_llm_model to (smart|fast)_llm everywhere * Fix test_config.py::test_initial_values * Fix test_config.py::test_azure_config * Fix Azure config backwards compatibility --- .env.template | 8 +-- autogpt/agent/agent.py | 14 ++--- autogpt/config/config.py | 26 +++++---- autogpt/configurator.py | 21 +++---- autogpt/llm/chat.py | 4 +- autogpt/llm/utils/__init__.py | 10 ++-- autogpt/memory/message_history.py | 12 ++-- autogpt/processing/text.py | 2 +- autogpt/setup.py | 2 +- azure.yaml.template | 4 +- docs/configuration/options.md | 4 +- docs/setup.md | 6 +- docs/usage.md | 2 +- tests/unit/test_agent_manager.py | 2 +- tests/unit/test_config.py | 90 +++++++++++++++--------------- 
tests/unit/test_message_history.py | 14 ++--- 16 files changed, 109 insertions(+), 112 deletions(-) diff --git a/.env.template b/.env.template index 49b999a1..1c164911 100644 --- a/.env.template +++ b/.env.template @@ -66,11 +66,11 @@ OPENAI_API_KEY=your-openai-api-key ### LLM MODELS ################################################################################ -## SMART_LLM_MODEL - Smart language model (Default: gpt-3.5-turbo) -# SMART_LLM_MODEL=gpt-3.5-turbo +## SMART_LLM - Smart language model (Default: gpt-4) +# SMART_LLM=gpt-4 -## FAST_LLM_MODEL - Fast language model (Default: gpt-3.5-turbo) -# FAST_LLM_MODEL=gpt-3.5-turbo +## FAST_LLM - Fast language model (Default: gpt-3.5-turbo) +# FAST_LLM=gpt-3.5-turbo ## EMBEDDING_MODEL - Model to use for creating embeddings # EMBEDDING_MODEL=text-embedding-ada-002 diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index 3975c0b5..10051956 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -81,9 +81,7 @@ class Agent: self.created_at = datetime.now().strftime("%Y%m%d_%H%M%S") self.cycle_count = 0 self.log_cycle_handler = LogCycleHandler() - self.fast_token_limit = OPEN_AI_CHAT_MODELS.get( - config.fast_llm_model - ).max_tokens + self.smart_token_limit = OPEN_AI_CHAT_MODELS.get(config.smart_llm).max_tokens def start_interaction_loop(self): # Avoid circular imports @@ -138,8 +136,8 @@ class Agent: self, self.system_prompt, self.triggering_prompt, - self.fast_token_limit, - self.config.fast_llm_model, + self.smart_token_limit, + self.config.smart_llm, ) try: @@ -283,12 +281,12 @@ class Agent: result = f"Command {command_name} returned: " f"{command_result}" result_tlength = count_string_tokens( - str(command_result), self.config.fast_llm_model + str(command_result), self.config.smart_llm ) memory_tlength = count_string_tokens( - str(self.history.summary_message()), self.config.fast_llm_model + str(self.history.summary_message()), self.config.smart_llm ) - if result_tlength + memory_tlength + 600 > self.fast_token_limit: + if result_tlength + memory_tlength + 600 > self.smart_token_limit: result = f"Failure: command {command_name} returned too much output. \ Do not execute this command again with the same arguments." 
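The hunk above keys the agent's output-size guard to the smart LLM's context window rather than the fast LLM's. A minimal sketch of that budget check follows, for illustration only: the 600-token reserve is the literal constant from the diff, while the context sizes below are assumed typical values for these models at the time and stand in for the real `OPEN_AI_CHAT_MODELS` registry.

```python
# Illustrative sketch only. MAX_TOKENS holds assumed context sizes; the real
# code looks these up in OPEN_AI_CHAT_MODELS.
MAX_TOKENS = {"gpt-3.5-turbo": 4096, "gpt-4": 8192}
RESERVED_FOR_RESPONSE = 600  # mirrors the literal 600 in the hunk above


def fits_in_context(model: str, result_tokens: int, summary_tokens: int) -> bool:
    """Return True if a command result plus the running summary still fit the
    model's context window while leaving headroom for the LLM's reply."""
    return result_tokens + summary_tokens + RESERVED_FOR_RESPONSE <= MAX_TOKENS[model]


# With GPT-4 as the smart LLM the same output fits; on gpt-3.5-turbo it would
# hit the "returned too much output" failure path shown in the hunk above.
assert fits_in_context("gpt-4", 5000, 1000)
assert not fits_in_context("gpt-3.5-turbo", 5000, 1000)
```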
diff --git a/autogpt/config/config.py b/autogpt/config/config.py index 0de4ccdc..4224cee4 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -17,8 +17,8 @@ from typing import Optional class Config(SystemSettings): - fast_llm_model: str - smart_llm_model: str + fast_llm: str + smart_llm: str continuous_mode: bool skip_news: bool workspace_path: Optional[str] = None @@ -88,11 +88,17 @@ class Config(SystemSettings): def get_azure_kwargs(self, model: str) -> dict[str, str]: """Get the kwargs for the Azure API.""" deployment_id = { - self.fast_llm_model: self.azure_model_to_deployment_id_map.get( - "fast_llm_model_deployment_id" + self.fast_llm: self.azure_model_to_deployment_id_map.get( + "fast_llm_deployment_id", + self.azure_model_to_deployment_id_map.get( + "fast_llm_model_deployment_id" # backwards compatibility + ), ), - self.smart_llm_model: self.azure_model_to_deployment_id_map.get( - "smart_llm_model_deployment_id" + self.smart_llm: self.azure_model_to_deployment_id_map.get( + "smart_llm_deployment_id", + self.azure_model_to_deployment_id_map.get( + "smart_llm_model_deployment_id" # backwards compatibility + ), ), "text-embedding-ada-002": self.azure_model_to_deployment_id_map.get( "embedding_model_deployment_id" @@ -129,8 +135,8 @@ class ConfigBuilder(Configurable[Config]): default_settings = Config( name="Default Server Config", description="This is a default server configuration", - smart_llm_model="gpt-3.5-turbo", - fast_llm_model="gpt-3.5-turbo", + smart_llm="gpt-4", + fast_llm="gpt-3.5-turbo", continuous_mode=False, continuous_limit=0, skip_news=False, @@ -190,8 +196,8 @@ class ConfigBuilder(Configurable[Config]): "shell_command_control": os.getenv("SHELL_COMMAND_CONTROL"), "ai_settings_file": os.getenv("AI_SETTINGS_FILE"), "prompt_settings_file": os.getenv("PROMPT_SETTINGS_FILE"), - "fast_llm_model": os.getenv("FAST_LLM_MODEL"), - "smart_llm_model": os.getenv("SMART_LLM_MODEL"), + "fast_llm": os.getenv("FAST_LLM", os.getenv("FAST_LLM_MODEL")), + "smart_llm": os.getenv("SMART_LLM", os.getenv("SMART_LLM_MODEL")), "embedding_model": os.getenv("EMBEDDING_MODEL"), "browse_spacy_language_model": os.getenv("BROWSE_SPACY_LANGUAGE_MODEL"), "openai_api_key": os.getenv("OPENAI_API_KEY"), diff --git a/autogpt/configurator.py b/autogpt/configurator.py index cc21414c..9d22f092 100644 --- a/autogpt/configurator.py +++ b/autogpt/configurator.py @@ -87,21 +87,18 @@ def create_config( # Set the default LLM models if gpt3only: logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED") - # --gpt3only should always use gpt-3.5-turbo, despite user's FAST_LLM_MODEL config - config.fast_llm_model = GPT_3_MODEL - config.smart_llm_model = GPT_3_MODEL + # --gpt3only should always use gpt-3.5-turbo, despite user's FAST_LLM config + config.fast_llm = GPT_3_MODEL + config.smart_llm = GPT_3_MODEL - elif ( - gpt4only - and check_model(GPT_4_MODEL, model_type="smart_llm_model") == GPT_4_MODEL - ): + elif gpt4only and check_model(GPT_4_MODEL, model_type="smart_llm") == GPT_4_MODEL: logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED") - # --gpt4only should always use gpt-4, despite user's SMART_LLM_MODEL config - config.fast_llm_model = GPT_4_MODEL - config.smart_llm_model = GPT_4_MODEL + # --gpt4only should always use gpt-4, despite user's SMART_LLM config + config.fast_llm = GPT_4_MODEL + config.smart_llm = GPT_4_MODEL else: - config.fast_llm_model = check_model(config.fast_llm_model, "fast_llm_model") - config.smart_llm_model = check_model(config.smart_llm_model, 
"smart_llm_model") + config.fast_llm = check_model(config.fast_llm, "fast_llm") + config.smart_llm = check_model(config.smart_llm, "smart_llm") if memory_type: supported_memory = get_supported_memory_backends() diff --git a/autogpt/llm/chat.py b/autogpt/llm/chat.py index c5d5a945..e97b7936 100644 --- a/autogpt/llm/chat.py +++ b/autogpt/llm/chat.py @@ -35,13 +35,13 @@ def chat_with_ai( system_prompt (str): The prompt explaining the rules to the AI. triggering_prompt (str): The input from the user. token_limit (int): The maximum number of tokens allowed in the API call. - model (str, optional): The model to use. If None, the config.fast_llm_model will be used. Defaults to None. + model (str, optional): The model to use. By default, the config.smart_llm will be used. Returns: str: The AI's response. """ if model is None: - model = config.fast_llm_model + model = config.smart_llm # Reserve 1000 tokens for the response logger.debug(f"Token limit: {token_limit}") diff --git a/autogpt/llm/utils/__init__.py b/autogpt/llm/utils/__init__.py index f1b69c7b..a3f53c33 100644 --- a/autogpt/llm/utils/__init__.py +++ b/autogpt/llm/utils/__init__.py @@ -21,8 +21,8 @@ def call_ai_function( function: str, args: list, description: str, + config: Config, model: Optional[str] = None, - config: Optional[Config] = None, ) -> str: """Call an AI function @@ -39,7 +39,7 @@ def call_ai_function( str: The response from the function """ if model is None: - model = config.smart_llm_model + model = config.smart_llm # For each arg, if any are None, convert to "None": args = [str(arg) if arg is not None else "None" for arg in args] # parse args to comma separated string @@ -67,7 +67,7 @@ def create_text_completion( max_output_tokens: Optional[int], ) -> str: if model is None: - model = config.fast_llm_model + model = config.fast_llm if temperature is None: temperature = config.temperature @@ -173,9 +173,7 @@ def create_chat_completion( ) -def check_model( - model_name: str, model_type: Literal["smart_llm_model", "fast_llm_model"] -) -> str: +def check_model(model_name: str, model_type: Literal["smart_llm", "fast_llm"]) -> str: """Check if model is available for use. If not, return gpt-3.5-turbo.""" api_manager = ApiManager() models = api_manager.get_models() diff --git a/autogpt/memory/message_history.py b/autogpt/memory/message_history.py index f3e1dc30..c7e8b73a 100644 --- a/autogpt/memory/message_history.py +++ b/autogpt/memory/message_history.py @@ -171,14 +171,14 @@ class MessageHistory: # Assume an upper bound length for the summary prompt template, i.e. Your task is to create a concise running summary...., in summarize_batch func # TODO make this default dynamic prompt_template_length = 100 - max_tokens = OPEN_AI_CHAT_MODELS.get(config.fast_llm_model).max_tokens - summary_tlength = count_string_tokens(str(self.summary), config.fast_llm_model) + max_tokens = OPEN_AI_CHAT_MODELS.get(config.fast_llm).max_tokens + summary_tlength = count_string_tokens(str(self.summary), config.fast_llm) batch = [] batch_tlength = 0 # TODO Can put a cap on length of total new events and drop some previous events to save API cost, but need to think thru more how to do it without losing the context for event in new_events: - event_tlength = count_string_tokens(str(event), config.fast_llm_model) + event_tlength = count_string_tokens(str(event), config.fast_llm) if ( batch_tlength + event_tlength @@ -187,7 +187,7 @@ class MessageHistory: # The batch is full. Summarize it and start a new one. 
self.summarize_batch(batch, config) summary_tlength = count_string_tokens( - str(self.summary), config.fast_llm_model + str(self.summary), config.fast_llm ) batch = [event] batch_tlength = event_tlength @@ -217,9 +217,7 @@ Latest Development: """ ''' - prompt = ChatSequence.for_model( - config.fast_llm_model, [Message("user", prompt)] - ) + prompt = ChatSequence.for_model(config.fast_llm, [Message("user", prompt)]) self.agent.log_cycle_handler.log_cycle( self.agent.ai_name, self.agent.created_at, diff --git a/autogpt/processing/text.py b/autogpt/processing/text.py index 24851b1c..6eecbde9 100644 --- a/autogpt/processing/text.py +++ b/autogpt/processing/text.py @@ -82,7 +82,7 @@ def summarize_text( if instruction and question: raise ValueError("Parameters 'question' and 'instructions' cannot both be set") - model = config.fast_llm_model + model = config.fast_llm if question: instruction = ( diff --git a/autogpt/setup.py b/autogpt/setup.py index f17a91e0..fd8d33d8 100644 --- a/autogpt/setup.py +++ b/autogpt/setup.py @@ -178,7 +178,7 @@ def generate_aiconfig_automatic(user_prompt: str, config: Config) -> AIConfig: # Call LLM with the string as user input output = create_chat_completion( ChatSequence.for_model( - config.fast_llm_model, + config.fast_llm, [ Message("system", system_prompt), Message("user", prompt_ai_config_automatic), diff --git a/azure.yaml.template b/azure.yaml.template index ab6e9fb6..6fe2af7a 100644 --- a/azure.yaml.template +++ b/azure.yaml.template @@ -2,6 +2,6 @@ azure_api_type: azure azure_api_base: your-base-url-for-azure azure_api_version: api-version-for-azure azure_model_map: - fast_llm_model_deployment_id: gpt35-deployment-id-for-azure - smart_llm_model_deployment_id: gpt4-deployment-id-for-azure + fast_llm_deployment_id: gpt35-deployment-id-for-azure + smart_llm_deployment_id: gpt4-deployment-id-for-azure embedding_model_deployment_id: embedding-deployment-id-for-azure diff --git a/docs/configuration/options.md b/docs/configuration/options.md index 07e76c68..b9c67806 100644 --- a/docs/configuration/options.md +++ b/docs/configuration/options.md @@ -16,7 +16,7 @@ Configuration is controlled through the `Config` object. You can set configurati - `EMBEDDING_MODEL`: LLM Model to use for embedding tasks. Default: text-embedding-ada-002 - `EXECUTE_LOCAL_COMMANDS`: If shell commands should be executed locally. Default: False - `EXIT_KEY`: Exit key accepted to exit. Default: n -- `FAST_LLM_MODEL`: LLM Model to use for most tasks. Default: gpt-3.5-turbo +- `FAST_LLM`: LLM Model to use for most tasks. Default: gpt-3.5-turbo - `GITHUB_API_KEY`: [Github API Key](https://github.com/settings/tokens). Optional. - `GITHUB_USERNAME`: GitHub Username. Optional. - `GOOGLE_API_KEY`: Google API key. Optional. @@ -43,7 +43,7 @@ Configuration is controlled through the `Config` object. You can set configurati - `SHELL_ALLOWLIST`: List of shell commands that ARE allowed to be executed by Auto-GPT. Only applies if `SHELL_COMMAND_CONTROL` is set to `allowlist`. Default: None - `SHELL_COMMAND_CONTROL`: Whether to use `allowlist` or `denylist` to determine what shell commands can be executed (Default: denylist) - `SHELL_DENYLIST`: List of shell commands that ARE NOT allowed to be executed by Auto-GPT. Only applies if `SHELL_COMMAND_CONTROL` is set to `denylist`. Default: sudo,su -- `SMART_LLM_MODEL`: LLM Model to use for "smart" tasks. Default: gpt-3.5-turbo +- `SMART_LLM`: LLM Model to use for "smart" tasks. Default: gpt-4 - `STREAMELEMENTS_VOICE`: StreamElements voice to use. 
Default: Brian - `TEMPERATURE`: Value of temperature given to OpenAI. Value from 0 to 2. Lower is more deterministic, higher is more random. See https://platform.openai.com/docs/api-reference/completions/create#completions/create-temperature - `TEXT_TO_SPEECH_PROVIDER`: Text to Speech Provider. Options are `gtts`, `macos`, `elevenlabs`, and `streamelements`. Default: gtts diff --git a/docs/setup.md b/docs/setup.md index 43b9d9c8..ba2d6a5f 100644 --- a/docs/setup.md +++ b/docs/setup.md @@ -133,8 +133,8 @@ Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](htt make an Azure configuration file: - Rename `azure.yaml.template` to `azure.yaml` and provide the relevant `azure_api_base`, `azure_api_version` and all the deployment IDs for the relevant models in the `azure_model_map` section: - - `fast_llm_model_deployment_id`: your gpt-3.5-turbo or gpt-4 deployment ID - - `smart_llm_model_deployment_id`: your gpt-4 deployment ID + - `fast_llm_deployment_id`: your gpt-3.5-turbo or gpt-4 deployment ID + - `smart_llm_deployment_id`: your gpt-4 deployment ID - `embedding_model_deployment_id`: your text-embedding-ada-002 v2 deployment ID Example: @@ -143,7 +143,7 @@ Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](htt # Please specify all of these values as double-quoted strings # Replace string in angled brackets (<>) to your own deployment Name azure_model_map: - fast_llm_model_deployment_id: "" + fast_llm_deployment_id: "" ... Details can be found in the [openai-python docs], and in the [Azure OpenAI docs] for the embedding model. diff --git a/docs/usage.md b/docs/usage.md index 42b86eae..a9ef2883 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -72,7 +72,7 @@ If you don't have access to GPT-4, this mode allows you to use Auto-GPT! ./run.sh --gpt3only ``` -You can achieve the same by setting `SMART_LLM_MODEL` in `.env` to `gpt-3.5-turbo`. +You can achieve the same by setting `SMART_LLM` in `.env` to `gpt-3.5-turbo`. ### GPT-4 ONLY Mode diff --git a/tests/unit/test_agent_manager.py b/tests/unit/test_agent_manager.py index 7140db05..113771f7 100644 --- a/tests/unit/test_agent_manager.py +++ b/tests/unit/test_agent_manager.py @@ -35,7 +35,7 @@ def mock_create_chat_completion(mocker, config): wraps=create_chat_completion, ) mock_create_chat_completion.return_value = ChatModelResponse( - model_info=OPEN_AI_CHAT_MODELS[config.fast_llm_model], + model_info=OPEN_AI_CHAT_MODELS[config.fast_llm], content="irrelevant", function_call={}, ) diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py index a0096a9f..d5c9d97d 100644 --- a/tests/unit/test_config.py +++ b/tests/unit/test_config.py @@ -20,8 +20,8 @@ def test_initial_values(config: Config): assert config.debug_mode == False assert config.continuous_mode == False assert config.speak_mode == False - assert config.fast_llm_model == "gpt-3.5-turbo" - assert config.smart_llm_model == "gpt-3.5-turbo" + assert config.fast_llm == "gpt-3.5-turbo" + assert config.smart_llm == "gpt-4" def test_set_continuous_mode(config: Config): @@ -52,32 +52,32 @@ def test_set_speak_mode(config: Config): config.speak_mode = speak_mode -def test_set_fast_llm_model(config: Config): +def test_set_fast_llm(config: Config): """ - Test if the set_fast_llm_model() method updates the fast_llm_model attribute. + Test if the set_fast_llm() method updates the fast_llm attribute. 
""" # Store model name to reset it after the test - fast_llm_model = config.fast_llm_model + fast_llm = config.fast_llm - config.fast_llm_model = "gpt-3.5-turbo-test" - assert config.fast_llm_model == "gpt-3.5-turbo-test" + config.fast_llm = "gpt-3.5-turbo-test" + assert config.fast_llm == "gpt-3.5-turbo-test" # Reset model name - config.fast_llm_model = fast_llm_model + config.fast_llm = fast_llm -def test_set_smart_llm_model(config: Config): +def test_set_smart_llm(config: Config): """ - Test if the set_smart_llm_model() method updates the smart_llm_model attribute. + Test if the set_smart_llm() method updates the smart_llm attribute. """ # Store model name to reset it after the test - smart_llm_model = config.smart_llm_model + smart_llm = config.smart_llm - config.smart_llm_model = "gpt-4-test" - assert config.smart_llm_model == "gpt-4-test" + config.smart_llm = "gpt-4-test" + assert config.smart_llm == "gpt-4-test" # Reset model name - config.smart_llm_model = smart_llm_model + config.smart_llm = smart_llm def test_set_debug_mode(config: Config): @@ -95,15 +95,15 @@ def test_set_debug_mode(config: Config): @patch("openai.Model.list") -def test_smart_and_fast_llm_models_set_to_gpt4(mock_list_models, config: Config): +def test_smart_and_fast_llms_set_to_gpt4(mock_list_models, config: Config): """ Test if models update to gpt-3.5-turbo if both are set to gpt-4. """ - fast_llm_model = config.fast_llm_model - smart_llm_model = config.smart_llm_model + fast_llm = config.fast_llm + smart_llm = config.smart_llm - config.fast_llm_model = "gpt-4" - config.smart_llm_model = "gpt-4" + config.fast_llm = "gpt-4" + config.smart_llm = "gpt-4" mock_list_models.return_value = {"data": [{"id": "gpt-3.5-turbo"}]} @@ -124,12 +124,12 @@ def test_smart_and_fast_llm_models_set_to_gpt4(mock_list_models, config: Config) skip_news=False, ) - assert config.fast_llm_model == "gpt-3.5-turbo" - assert config.smart_llm_model == "gpt-3.5-turbo" + assert config.fast_llm == "gpt-3.5-turbo" + assert config.smart_llm == "gpt-3.5-turbo" # Reset config - config.fast_llm_model = fast_llm_model - config.smart_llm_model = smart_llm_model + config.fast_llm = fast_llm + config.smart_llm = smart_llm def test_missing_azure_config(workspace: Workspace): @@ -152,8 +152,8 @@ azure_api_type: azure azure_api_base: https://dummy.openai.azure.com azure_api_version: 2023-06-01-preview azure_model_map: - fast_llm_model_deployment_id: gpt-3.5-turbo - smart_llm_model_deployment_id: gpt-4 + fast_llm_deployment_id: gpt-3.5-turbo + smart_llm_deployment_id: gpt-4 embedding_model_deployment_id: embedding-deployment-id-for-azure """ config_file = workspace.get_path("azure.yaml") @@ -166,8 +166,8 @@ azure_model_map: assert config.openai_api_base == "https://dummy.openai.azure.com" assert config.openai_api_version == "2023-06-01-preview" assert config.azure_model_to_deployment_id_map == { - "fast_llm_model_deployment_id": "gpt-3.5-turbo", - "smart_llm_model_deployment_id": "gpt-4", + "fast_llm_deployment_id": "gpt-3.5-turbo", + "smart_llm_deployment_id": "gpt-4", "embedding_model_deployment_id": "embedding-deployment-id-for-azure", } @@ -181,8 +181,8 @@ azure_api_type: azure azure_api_base: https://dummy.openai.azure.com azure_api_version: 2023-06-01-preview azure_model_map: - fast_llm_model_deployment_id: gpt-3.5-turbo - smart_llm_model_deployment_id: gpt-4 + fast_llm_deployment_id: gpt-3.5-turbo + smart_llm_deployment_id: gpt-4 embedding_model_deployment_id: embedding-deployment-id-for-azure """ config_file = workspace.get_path("azure.yaml") @@ 
-191,15 +191,15 @@ azure_model_map: os.environ["AZURE_CONFIG_FILE"] = str(config_file) config = ConfigBuilder.build_config_from_env() - config.fast_llm_model = "fast_llm_model" - config.smart_llm_model = "smart_llm_model" + config.fast_llm = "fast_llm" + config.smart_llm = "smart_llm" def _get_deployment_id(model): kwargs = config.get_azure_kwargs(model) return kwargs.get("deployment_id", kwargs.get("engine")) - assert _get_deployment_id(config.fast_llm_model) == "gpt-3.5-turbo" - assert _get_deployment_id(config.smart_llm_model) == "gpt-4" + assert _get_deployment_id(config.fast_llm) == "gpt-3.5-turbo" + assert _get_deployment_id(config.smart_llm) == "gpt-4" assert ( _get_deployment_id("text-embedding-ada-002") == "embedding-deployment-id-for-azure" @@ -211,8 +211,8 @@ azure_model_map: def test_create_config_gpt4only(config: Config) -> None: - fast_llm_model = config.fast_llm_model - smart_llm_model = config.smart_llm_model + fast_llm = config.fast_llm + smart_llm = config.smart_llm with mock.patch("autogpt.llm.api_manager.ApiManager.get_models") as mock_get_models: mock_get_models.return_value = [{"id": GPT_4_MODEL}] create_config( @@ -231,17 +231,17 @@ def test_create_config_gpt4only(config: Config) -> None: allow_downloads=False, skip_news=False, ) - assert config.fast_llm_model == GPT_4_MODEL - assert config.smart_llm_model == GPT_4_MODEL + assert config.fast_llm == GPT_4_MODEL + assert config.smart_llm == GPT_4_MODEL # Reset config - config.fast_llm_model = fast_llm_model - config.smart_llm_model = smart_llm_model + config.fast_llm = fast_llm + config.smart_llm = smart_llm def test_create_config_gpt3only(config: Config) -> None: - fast_llm_model = config.fast_llm_model - smart_llm_model = config.smart_llm_model + fast_llm = config.fast_llm + smart_llm = config.smart_llm with mock.patch("autogpt.llm.api_manager.ApiManager.get_models") as mock_get_models: mock_get_models.return_value = [{"id": GPT_3_MODEL}] create_config( @@ -260,9 +260,9 @@ def test_create_config_gpt3only(config: Config) -> None: allow_downloads=False, skip_news=False, ) - assert config.fast_llm_model == GPT_3_MODEL - assert config.smart_llm_model == GPT_3_MODEL + assert config.fast_llm == GPT_3_MODEL + assert config.smart_llm == GPT_3_MODEL # Reset config - config.fast_llm_model = fast_llm_model - config.smart_llm_model = smart_llm_model + config.fast_llm = fast_llm + config.smart_llm = smart_llm diff --git a/tests/unit/test_message_history.py b/tests/unit/test_message_history.py index a3650005..8ceee63f 100644 --- a/tests/unit/test_message_history.py +++ b/tests/unit/test_message_history.py @@ -40,7 +40,7 @@ def agent(config: Config): def test_message_history_batch_summary(mocker, agent, config): history = MessageHistory(agent) - model = config.fast_llm_model + model = config.fast_llm message_tlength = 0 message_count = 0 @@ -73,7 +73,7 @@ def test_message_history_batch_summary(mocker, agent, config): assistant_reply = '{\n "thoughts": {\n "text": "I will use the \'google_search\' command to find more websites with job openings for software engineering manager role.",\n "reasoning": "Since the previous website did not provide any relevant information, I will use the \'google_search\' command to find more websites with job openings for software engineer role.",\n "plan": "- Use \'google_search\' command to find more websites with job openings for software engineer role",\n "criticism": "I need to ensure that I am able to extract the relevant information from each website and job opening.",\n "speak": "I will now use 
the \'google_search\' command to find more websites with job openings for software engineer role."\n },\n "command": {\n "name": "google_search",\n "args": {\n "query": "software engineer job openings"\n }\n }\n}' msg = Message("assistant", assistant_reply, "ai_response") history.append(msg) - message_tlength += count_string_tokens(str(msg), config.fast_llm_model) + message_tlength += count_string_tokens(str(msg), config.fast_llm) message_count += 1 # mock some websites returned from google search command in the past @@ -83,7 +83,7 @@ def test_message_history_batch_summary(mocker, agent, config): result += "]" msg = Message("system", result, "action_result") history.append(msg) - message_tlength += count_string_tokens(str(msg), config.fast_llm_model) + message_tlength += count_string_tokens(str(msg), config.fast_llm) message_count += 1 user_input = "Determine which next command to use, and respond using the format specified above:'" @@ -99,7 +99,7 @@ def test_message_history_batch_summary(mocker, agent, config): ) msg = Message("assistant", assistant_reply, "ai_response") history.append(msg) - message_tlength += count_string_tokens(str(msg), config.fast_llm_model) + message_tlength += count_string_tokens(str(msg), config.fast_llm) message_count += 1 result = ( @@ -109,7 +109,7 @@ def test_message_history_batch_summary(mocker, agent, config): ) msg = Message("system", result, "action_result") history.append(msg) - message_tlength += count_string_tokens(str(msg), config.fast_llm_model) + message_tlength += count_string_tokens(str(msg), config.fast_llm) message_count += 1 user_input = "Determine which next command to use, and respond using the format specified above:'" @@ -125,7 +125,7 @@ def test_message_history_batch_summary(mocker, agent, config): # count the expected token length of the trimmed message by reducing the token length of messages in the last cycle for message in messages_to_add: if message.role != "user": - message_tlength -= count_string_tokens(str(message), config.fast_llm_model) + message_tlength -= count_string_tokens(str(message), config.fast_llm) message_count -= 1 # test the main trim_message function @@ -134,7 +134,7 @@ def test_message_history_batch_summary(mocker, agent, config): ) expected_call_count = math.ceil( - message_tlength / (OPEN_AI_CHAT_MODELS.get(config.fast_llm_model).max_tokens) + message_tlength / (OPEN_AI_CHAT_MODELS.get(config.fast_llm).max_tokens) ) # Expecting 2 batches because of over max token assert mock_summary.call_count == expected_call_count # 2 at the time of writing From 9706ff8c26608966235982f244859a9f4ff19cbd Mon Sep 17 00:00:00 2001 From: Auto-GPT-Bot Date: Fri, 7 Jul 2023 01:47:53 +0000 Subject: [PATCH 21/34] Update cassette submodule --- tests/Auto-GPT-test-cassettes | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/Auto-GPT-test-cassettes b/tests/Auto-GPT-test-cassettes index acb8c992..f75a16de 160000 --- a/tests/Auto-GPT-test-cassettes +++ b/tests/Auto-GPT-test-cassettes @@ -1 +1 @@ -Subproject commit acb8c9925b961f5c55299aee905ab4b1c6eb6b79 +Subproject commit f75a16de114bae13b7b49cfa376475fbdc674560 From 9e5492bd1338ecfc8a802eb81b2e5dd156957cf2 Mon Sep 17 00:00:00 2001 From: Steve Date: Thu, 6 Jul 2023 19:19:18 -0700 Subject: [PATCH 22/34] Add CLI args for `ai_name`, `ai_role`, and `ai_goals` (#3250) * add capability to specify AI config at cmd line * Make `--ai-goal` multi-parameter * Fix argument forwarding in run.sh --------- Co-authored-by: Reinier van der Leer --- autogpt/cli.py | 24 ++++++ 
autogpt/config/ai_config.py | 13 ++- autogpt/config/config.py | 2 +- autogpt/main.py | 15 +++- autogpt/prompts/prompt.py | 28 ++++++- autogpt/setup.py | 121 +++++++++++++++++----------- run.sh | 2 +- tests/unit/data/test_ai_config.yaml | 5 ++ 8 files changed, 147 insertions(+), 63 deletions(-) create mode 100644 tests/unit/data/test_ai_config.yaml diff --git a/autogpt/cli.py b/autogpt/cli.py index 3b45b501..690c1626 100644 --- a/autogpt/cli.py +++ b/autogpt/cli.py @@ -1,4 +1,6 @@ """Main script for the autogpt package.""" +from typing import Optional + import click @@ -65,6 +67,22 @@ import click is_flag=True, help="Installs external dependencies for 3rd party plugins.", ) +@click.option( + "--ai-name", + type=str, + help="AI name override", +) +@click.option( + "--ai-role", + type=str, + help="AI role override", +) +@click.option( + "--ai-goal", + type=str, + multiple=True, + help="AI goal override; may be used multiple times to pass multiple goals", +) @click.pass_context def main( ctx: click.Context, @@ -83,6 +101,9 @@ def main( skip_news: bool, workspace_directory: str, install_plugin_deps: bool, + ai_name: Optional[str], + ai_role: Optional[str], + ai_goal: tuple[str], ) -> None: """ Welcome to AutoGPT an experimental open-source application showcasing the capabilities of the GPT-4 pushing the boundaries of AI. @@ -109,6 +130,9 @@ def main( skip_news, workspace_directory, install_plugin_deps, + ai_name, + ai_role, + ai_goal, ) diff --git a/autogpt/config/ai_config.py b/autogpt/config/ai_config.py index 3c645abe..a2952c9d 100644 --- a/autogpt/config/ai_config.py +++ b/autogpt/config/ai_config.py @@ -35,7 +35,7 @@ class AIConfig: self, ai_name: str = "", ai_role: str = "", - ai_goals: list | None = None, + ai_goals: list[str] = [], api_budget: float = 0.0, ) -> None: """ @@ -49,8 +49,6 @@ class AIConfig: Returns: None """ - if ai_goals is None: - ai_goals = [] self.ai_name = ai_name self.ai_role = ai_role self.ai_goals = ai_goals @@ -61,13 +59,12 @@ class AIConfig: @staticmethod def load(ai_settings_file: str = SAVE_FILE) -> "AIConfig": """ - Returns class object with parameters (ai_name, ai_role, ai_goals, api_budget) loaded from - yaml file if yaml file exists, - else returns class with no parameters. + Returns class object with parameters (ai_name, ai_role, ai_goals, api_budget) + loaded from yaml file if yaml file exists, else returns class with no parameters. Parameters: - ai_settings_file (int): The path to the config yaml file. - DEFAULT: "../ai_settings.yaml" + ai_settings_file (int): The path to the config yaml file. + DEFAULT: "../ai_settings.yaml" Returns: cls (object): An instance of given cls object diff --git a/autogpt/config/config.py b/autogpt/config/config.py index 4224cee4..fc76d084 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -4,7 +4,7 @@ from __future__ import annotations import contextlib import os import re -from typing import Dict, Union +from typing import Dict, Optional, Union import yaml from colorama import Fore diff --git a/autogpt/main.py b/autogpt/main.py index 30587029..c1b79c78 100644 --- a/autogpt/main.py +++ b/autogpt/main.py @@ -2,6 +2,7 @@ import logging import sys from pathlib import Path +from typing import Optional from colorama import Fore, Style @@ -48,6 +49,9 @@ def run_auto_gpt( skip_news: bool, workspace_directory: str | Path, install_plugin_deps: bool, + ai_name: Optional[str] = None, + ai_role: Optional[str] = None, + ai_goals: tuple[str] = tuple(), ): # Configure logging before we do anything else. 
logger.set_level(logging.DEBUG if debug else logging.INFO) @@ -154,11 +158,14 @@ def run_auto_gpt( f"reason - {command.disabled_reason or 'Disabled by current config.'}" ) - ai_name = "" - ai_config = construct_main_ai_config(config) + ai_config = construct_main_ai_config( + config, + name=ai_name, + role=ai_role, + goals=ai_goals, + ) ai_config.command_registry = command_registry - if ai_config.ai_name: - ai_name = ai_config.ai_name + ai_name = ai_config.ai_name # print(prompt) # Initialize variables next_action_count = 0 diff --git a/autogpt/prompts/prompt.py b/autogpt/prompts/prompt.py index 16d5c7e7..b5a0ec88 100644 --- a/autogpt/prompts/prompt.py +++ b/autogpt/prompts/prompt.py @@ -1,3 +1,5 @@ +from typing import Optional + from colorama import Fore from autogpt.config.ai_config import AIConfig @@ -42,14 +44,32 @@ def build_default_prompt_generator(config: Config) -> PromptGenerator: return prompt_generator -def construct_main_ai_config(config: Config) -> AIConfig: +def construct_main_ai_config( + config: Config, + name: Optional[str] = None, + role: Optional[str] = None, + goals: tuple[str] = tuple(), +) -> AIConfig: """Construct the prompt for the AI to respond to Returns: str: The prompt string """ ai_config = AIConfig.load(config.ai_settings_file) - if config.skip_reprompt and ai_config.ai_name: + + # Apply overrides + if name: + ai_config.ai_name = name + if role: + ai_config.ai_role = role + if goals: + ai_config.ai_goals = list(goals) + + if ( + all([name, role, goals]) + or config.skip_reprompt + and all([ai_config.ai_name, ai_config.ai_role, ai_config.ai_goals]) + ): logger.typewriter_log("Name :", Fore.GREEN, ai_config.ai_name) logger.typewriter_log("Role :", Fore.GREEN, ai_config.ai_role) logger.typewriter_log("Goals:", Fore.GREEN, f"{ai_config.ai_goals}") @@ -58,7 +78,7 @@ def construct_main_ai_config(config: Config) -> AIConfig: Fore.GREEN, "infinite" if ai_config.api_budget <= 0 else f"${ai_config.api_budget}", ) - elif ai_config.ai_name: + elif all([ai_config.ai_name, ai_config.ai_role, ai_config.ai_goals]): logger.typewriter_log( "Welcome back! 
", Fore.GREEN, @@ -77,7 +97,7 @@ Continue ({config.authorise_key}/{config.exit_key}): """, if should_continue.lower() == config.exit_key: ai_config = AIConfig() - if not ai_config.ai_name: + if any([not ai_config.ai_name, not ai_config.ai_role, not ai_config.ai_goals]): ai_config = prompt_user(config) ai_config.save(config.ai_settings_file) diff --git a/autogpt/setup.py b/autogpt/setup.py index fd8d33d8..fc429243 100644 --- a/autogpt/setup.py +++ b/autogpt/setup.py @@ -1,5 +1,6 @@ """Set up the AI and its goals""" import re +from typing import Optional from colorama import Fore, Style from jinja2 import Template @@ -17,14 +18,18 @@ from autogpt.prompts.default_prompts import ( ) -def prompt_user(config: Config) -> AIConfig: +def prompt_user( + config: Config, ai_config_template: Optional[AIConfig] = None +) -> AIConfig: """Prompt the user for input + Params: + config (Config): The Config object + ai_config_template (AIConfig): The AIConfig object to use as a template + Returns: AIConfig: The AIConfig object tailored to the user's input """ - ai_name = "" - ai_config = None # Construct the prompt logger.typewriter_log( @@ -34,29 +39,39 @@ def prompt_user(config: Config) -> AIConfig: speak_text=True, ) - # Get user desire - logger.typewriter_log( - "Create an AI-Assistant:", - Fore.GREEN, - "input '--manual' to enter manual mode.", - speak_text=True, + ai_config_template_provided = ai_config_template is not None and any( + [ + ai_config_template.ai_goals, + ai_config_template.ai_name, + ai_config_template.ai_role, + ] ) - user_desire = utils.clean_input( - config, f"{Fore.LIGHTBLUE_EX}I want Auto-GPT to{Style.RESET_ALL}: " - ) + user_desire = "" + if not ai_config_template_provided: + # Get user desire if command line overrides have not been passed in + logger.typewriter_log( + "Create an AI-Assistant:", + Fore.GREEN, + "input '--manual' to enter manual mode.", + speak_text=True, + ) - if user_desire == "": + user_desire = utils.clean_input( + config, f"{Fore.LIGHTBLUE_EX}I want Auto-GPT to{Style.RESET_ALL}: " + ) + + if user_desire.strip() == "": user_desire = DEFAULT_USER_DESIRE_PROMPT # Default prompt - # If user desire contains "--manual" - if "--manual" in user_desire: + # If user desire contains "--manual" or we have overridden any of the AI configuration + if "--manual" in user_desire or ai_config_template_provided: logger.typewriter_log( "Manual Mode Selected", Fore.GREEN, speak_text=True, ) - return generate_aiconfig_manual(config) + return generate_aiconfig_manual(config, ai_config_template) else: try: @@ -72,7 +87,9 @@ def prompt_user(config: Config) -> AIConfig: return generate_aiconfig_manual(config) -def generate_aiconfig_manual(config: Config) -> AIConfig: +def generate_aiconfig_manual( + config: Config, ai_config_template: Optional[AIConfig] = None +) -> AIConfig: """ Interactively create an AI configuration by prompting the user to provide the name, role, and goals of the AI. @@ -80,6 +97,10 @@ def generate_aiconfig_manual(config: Config) -> AIConfig: an AIConfig object. The user will be asked to provide a name and role for the AI, as well as up to five goals. If the user does not provide a value for any of the fields, default values will be used. + Params: + config (Config): The Config object + ai_config_template (AIConfig): The AIConfig object to use as a template + Returns: AIConfig: An AIConfig object containing the user-defined or default AI name, role, and goals. 
""" @@ -93,11 +114,15 @@ def generate_aiconfig_manual(config: Config) -> AIConfig: speak_text=True, ) - # Get AI Name from User - logger.typewriter_log( - "Name your AI: ", Fore.GREEN, "For example, 'Entrepreneur-GPT'" - ) - ai_name = utils.clean_input(config, "AI Name: ") + if ai_config_template and ai_config_template.ai_name: + ai_name = ai_config_template.ai_name + else: + ai_name = "" + # Get AI Name from User + logger.typewriter_log( + "Name your AI: ", Fore.GREEN, "For example, 'Entrepreneur-GPT'" + ) + ai_name = utils.clean_input(config, "AI Name: ") if ai_name == "": ai_name = "Entrepreneur-GPT" @@ -105,34 +130,40 @@ def generate_aiconfig_manual(config: Config) -> AIConfig: f"{ai_name} here!", Fore.LIGHTBLUE_EX, "I am at your service.", speak_text=True ) - # Get AI Role from User - logger.typewriter_log( - "Describe your AI's role: ", - Fore.GREEN, - "For example, 'an AI designed to autonomously develop and run businesses with" - " the sole goal of increasing your net worth.'", - ) - ai_role = utils.clean_input(config, f"{ai_name} is: ") + if ai_config_template and ai_config_template.ai_role: + ai_role = ai_config_template.ai_role + else: + # Get AI Role from User + logger.typewriter_log( + "Describe your AI's role: ", + Fore.GREEN, + "For example, 'an AI designed to autonomously develop and run businesses with" + " the sole goal of increasing your net worth.'", + ) + ai_role = utils.clean_input(config, f"{ai_name} is: ") if ai_role == "": ai_role = "an AI designed to autonomously develop and run businesses with the" " sole goal of increasing your net worth." - # Enter up to 5 goals for the AI - logger.typewriter_log( - "Enter up to 5 goals for your AI: ", - Fore.GREEN, - "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage" - " multiple businesses autonomously'", - ) - logger.info("Enter nothing to load defaults, enter nothing when finished.") - ai_goals = [] - for i in range(5): - ai_goal = utils.clean_input( - config, f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: " + if ai_config_template and ai_config_template.ai_goals: + ai_goals = ai_config_template.ai_goals + else: + # Enter up to 5 goals for the AI + logger.typewriter_log( + "Enter up to 5 goals for your AI: ", + Fore.GREEN, + "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage" + " multiple businesses autonomously'", ) - if ai_goal == "": - break - ai_goals.append(ai_goal) + logger.info("Enter nothing to load defaults, enter nothing when finished.") + ai_goals = [] + for i in range(5): + ai_goal = utils.clean_input( + config, f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: " + ) + if ai_goal == "": + break + ai_goals.append(ai_goal) if not ai_goals: ai_goals = [ "Increase net worth", diff --git a/run.sh b/run.sh index 287499f8..29ded78a 100755 --- a/run.sh +++ b/run.sh @@ -22,7 +22,7 @@ if $PYTHON_CMD -c "import sys; sys.exit(sys.version_info < (3, 10))"; then echo Installing missing packages... $PYTHON_CMD -m pip install -r requirements.txt fi - $PYTHON_CMD -m autogpt $@ + $PYTHON_CMD -m autogpt "$@" read -p "Press any key to continue..." else echo "Python 3.10 or higher is required to run Auto GPT." 
diff --git a/tests/unit/data/test_ai_config.yaml b/tests/unit/data/test_ai_config.yaml
new file mode 100644
index 00000000..b6bc7cd9
--- /dev/null
+++ b/tests/unit/data/test_ai_config.yaml
@@ -0,0 +1,5 @@
+ai_goals:
+- Test goal 1
+ai_name: testGPT
+ai_role: testRole
+api_budget: 1.0
\ No newline at end of file

From 053caaa22270d18254d7766dcb97f327ad25b618 Mon Sep 17 00:00:00 2001
From: zachRadack
Date: Thu, 6 Jul 2023 20:49:59 -0700
Subject: [PATCH 23/34] Bugfix fixtts (#4902)

* changing config names to tts_provider

* accidentally triggered circular importing.

* added config to places it needs other than the logger

* got it to work on windows

* did all the formatting stuff

---------

Co-authored-by: James Collins
---
 autogpt/agent/agent.py                   | 2 +-
 autogpt/logs.py                          | 8 ++++++--
 autogpt/main.py                          | 3 +++
 autogpt/speech/base.py                   | 2 +-
 autogpt/speech/gtts.py                   | 3 ++-
 autogpt/speech/macos_tts.py              | 3 ++-
 autogpt/speech/say.py                    | 8 ++++----
 autogpt/speech/stream_elements_speech.py | 3 ++-
 8 files changed, 21 insertions(+), 11 deletions(-)

diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py
index 10051956..fd476e56 100644
--- a/autogpt/agent/agent.py
+++ b/autogpt/agent/agent.py
@@ -165,7 +165,7 @@ class Agent:
                 assistant_reply_json, assistant_reply, self.config
             )
             if self.config.speak_mode:
-                say_text(f"I want to execute {command_name}")
+                say_text(f"I want to execute {command_name}", self.config)
 
             arguments = self._resolve_pathlike_command_args(arguments)
 
diff --git a/autogpt/logs.py b/autogpt/logs.py
index 9d99f274..535cce32 100644
--- a/autogpt/logs.py
+++ b/autogpt/logs.py
@@ -16,7 +16,6 @@ if TYPE_CHECKING:
 
 from autogpt.log_cycle.json_handler import JsonFileHandler, JsonFormatter
 from autogpt.singleton import Singleton
-from autogpt.speech import say_text
 
 
 class Logger(metaclass=Singleton):
@@ -87,13 +86,16 @@ class Logger(metaclass=Singleton):
         self.json_logger.setLevel(logging.DEBUG)
 
         self.speak_mode = False
+        self.config = None
         self.chat_plugins = []
 
     def typewriter_log(
         self, title="", title_color="", content="", speak_text=False, level=logging.INFO
     ):
+        from autogpt.speech import say_text
+
         if speak_text and self.speak_mode:
-            say_text(f"{title}. {content}")
+            say_text(f"{title}. {content}", self.config)
 
         for plugin in self.chat_plugins:
             plugin.report(f"{title}. {content}")
@@ -265,6 +267,8 @@ def print_assistant_thoughts(
     assistant_reply_json_valid: object,
     config: Config,
 ) -> None:
+    from autogpt.speech import say_text
+
     assistant_thoughts_reasoning = None
     assistant_thoughts_plan = None
     assistant_thoughts_speak = None
diff --git a/autogpt/main.py b/autogpt/main.py
index c1b79c78..26e8e331 100644
--- a/autogpt/main.py
+++ b/autogpt/main.py
@@ -58,6 +58,9 @@ def run_auto_gpt(
     logger.speak_mode = speak
 
     config = ConfigBuilder.build_config_from_env()
+    # HACK: This is a hack to allow the config into the logger without having to pass it around everywhere
+    # or import it directly.
+    logger.config = config
 
     # TODO: fill in llm values here
     check_openai_api_key(config)
diff --git a/autogpt/speech/base.py b/autogpt/speech/base.py
index 44e90eaa..b3dd03bd 100644
--- a/autogpt/speech/base.py
+++ b/autogpt/speech/base.py
@@ -45,7 +45,7 @@ class VoiceBase(AbstractSingleton):
         return self._speech(text, voice_index)
 
     @abc.abstractmethod
-    def _setup(self) -> None:
+    def _setup(self, config: Config) -> None:
         """
         Setup the voices, API key, etc.
""" diff --git a/autogpt/speech/gtts.py b/autogpt/speech/gtts.py index 1c3e9cae..deef4f27 100644 --- a/autogpt/speech/gtts.py +++ b/autogpt/speech/gtts.py @@ -4,13 +4,14 @@ import os import gtts from playsound import playsound +from autogpt.config import Config from autogpt.speech.base import VoiceBase class GTTSVoice(VoiceBase): """GTTS Voice.""" - def _setup(self) -> None: + def _setup(self, config: Config) -> None: pass def _speech(self, text: str, _: int = 0) -> bool: diff --git a/autogpt/speech/macos_tts.py b/autogpt/speech/macos_tts.py index 4c072ce2..e48522cf 100644 --- a/autogpt/speech/macos_tts.py +++ b/autogpt/speech/macos_tts.py @@ -1,13 +1,14 @@ """ MacOS TTS Voice. """ import os +from autogpt.config import Config from autogpt.speech.base import VoiceBase class MacOSTTS(VoiceBase): """MacOS TTS Voice.""" - def _setup(self) -> None: + def _setup(self, config: Config) -> None: pass def _speech(self, text: str, voice_index: int = 0) -> bool: diff --git a/autogpt/speech/say.py b/autogpt/speech/say.py index 3d71a392..5d04c76f 100644 --- a/autogpt/speech/say.py +++ b/autogpt/speech/say.py @@ -41,10 +41,10 @@ def _get_voice_engine(config: Config) -> tuple[VoiceBase, VoiceBase]: if tts_provider == "elevenlabs": voice_engine = ElevenLabsSpeech(config) elif tts_provider == "macos": - voice_engine = MacOSTTS() + voice_engine = MacOSTTS(config) elif tts_provider == "streamelements": - voice_engine = StreamElementsSpeech() + voice_engine = StreamElementsSpeech(config) else: - voice_engine = GTTSVoice() + voice_engine = GTTSVoice(config) - return GTTSVoice(), voice_engine + return GTTSVoice(config), voice_engine diff --git a/autogpt/speech/stream_elements_speech.py b/autogpt/speech/stream_elements_speech.py index 9019cf09..e4e4e8bf 100644 --- a/autogpt/speech/stream_elements_speech.py +++ b/autogpt/speech/stream_elements_speech.py @@ -4,13 +4,14 @@ import os import requests from playsound import playsound +from autogpt.config import Config from autogpt.speech.base import VoiceBase class StreamElementsSpeech(VoiceBase): """Streamelements speech module for autogpt""" - def _setup(self) -> None: + def _setup(self, config: Config) -> None: """Setup the voices, API key, etc.""" def _speech(self, text: str, voice: str, _: int = 0) -> bool: From 3b7e1014f64cc75cee8ba6b8f0d3da0d452299e4 Mon Sep 17 00:00:00 2001 From: Ian Date: Fri, 7 Jul 2023 12:08:47 +0800 Subject: [PATCH 24/34] Fix `--gpt3only` and `--gpt4only` for Azure (#4098) * Fix --gpt3only and --gpt4only * Fix and consolidate test_config.py::test_azure_config (x2) --------- Co-authored-by: Luke K (pr-0f3t) <2609441+lc0rp@users.noreply.github.com> Co-authored-by: Ryan Co-authored-by: Reinier van der Leer --- autogpt/config/config.py | 37 ++++++++++++++------ autogpt/configurator.py | 4 +-- azure.yaml.template | 2 +- tests/unit/test_config.py | 71 +++++++++++---------------------------- 4 files changed, 48 insertions(+), 66 deletions(-) diff --git a/autogpt/config/config.py b/autogpt/config/config.py index fc76d084..05590eb6 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -13,7 +13,8 @@ from autogpt.core.configuration.schema import Configurable, SystemSettings from autogpt.plugins.plugins_config import PluginsConfig AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "../..", "azure.yaml") -from typing import Optional +GPT_4_MODEL = "gpt-4" +GPT_3_MODEL = "gpt-3.5-turbo" class Config(SystemSettings): @@ -87,20 +88,39 @@ class Config(SystemSettings): def get_azure_kwargs(self, model: str) -> dict[str, str]: 
"""Get the kwargs for the Azure API.""" + + # Fix --gpt3only and --gpt4only in combination with Azure + fast_llm = ( + self.fast_llm + if not ( + self.fast_llm == self.smart_llm + and self.fast_llm.startswith(GPT_4_MODEL) + ) + else f"not_{self.fast_llm}" + ) + smart_llm = ( + self.smart_llm + if not ( + self.smart_llm == self.fast_llm + and self.smart_llm.startswith(GPT_3_MODEL) + ) + else f"not_{self.smart_llm}" + ) + deployment_id = { - self.fast_llm: self.azure_model_to_deployment_id_map.get( + fast_llm: self.azure_model_to_deployment_id_map.get( "fast_llm_deployment_id", self.azure_model_to_deployment_id_map.get( "fast_llm_model_deployment_id" # backwards compatibility ), ), - self.smart_llm: self.azure_model_to_deployment_id_map.get( + smart_llm: self.azure_model_to_deployment_id_map.get( "smart_llm_deployment_id", self.azure_model_to_deployment_id_map.get( "smart_llm_model_deployment_id" # backwards compatibility ), ), - "text-embedding-ada-002": self.azure_model_to_deployment_id_map.get( + self.embedding_model: self.azure_model_to_deployment_id_map.get( "embedding_model_deployment_id" ), }.get(model, None) @@ -110,7 +130,7 @@ class Config(SystemSettings): "api_base": self.openai_api_base, "api_version": self.openai_api_version, } - if model == "text-embedding-ada-002": + if model == self.embedding_model: kwargs["engine"] = deployment_id else: kwargs["deployment_id"] = deployment_id @@ -272,12 +292,7 @@ class ConfigBuilder(Configurable[Config]): if config_dict["use_azure"]: azure_config = cls.load_azure_config(config_dict["azure_config_file"]) - config_dict["openai_api_type"] = azure_config["openai_api_type"] - config_dict["openai_api_base"] = azure_config["openai_api_base"] - config_dict["openai_api_version"] = azure_config["openai_api_version"] - config_dict["azure_model_to_deployment_id_map"] = azure_config[ - "azure_model_to_deployment_id_map" - ] + config_dict.update(azure_config) elif os.getenv("OPENAI_API_BASE_URL"): config_dict["openai_api_base"] = os.getenv("OPENAI_API_BASE_URL") diff --git a/autogpt/configurator.py b/autogpt/configurator.py index 9d22f092..2da5c58b 100644 --- a/autogpt/configurator.py +++ b/autogpt/configurator.py @@ -7,6 +7,7 @@ import click from colorama import Back, Fore, Style from autogpt import utils +from autogpt.config.config import GPT_3_MODEL, GPT_4_MODEL from autogpt.llm.utils import check_model from autogpt.logs import logger from autogpt.memory.vector import get_supported_memory_backends @@ -14,9 +15,6 @@ from autogpt.memory.vector import get_supported_memory_backends if TYPE_CHECKING: from autogpt.config import Config -GPT_4_MODEL = "gpt-4" -GPT_3_MODEL = "gpt-3.5-turbo" - def create_config( config: Config, diff --git a/azure.yaml.template b/azure.yaml.template index 6fe2af7a..685b7087 100644 --- a/azure.yaml.template +++ b/azure.yaml.template @@ -3,5 +3,5 @@ azure_api_base: your-base-url-for-azure azure_api_version: api-version-for-azure azure_model_map: fast_llm_deployment_id: gpt35-deployment-id-for-azure - smart_llm_deployment_id: gpt4-deployment-id-for-azure + smart_llm_deployment_id: gpt4-deployment-id-for-azure embedding_model_deployment_id: embedding-deployment-id-for-azure diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py index d5c9d97d..b441aa94 100644 --- a/tests/unit/test_config.py +++ b/tests/unit/test_config.py @@ -146,18 +146,19 @@ def test_missing_azure_config(workspace: Workspace): assert azure_config["azure_model_to_deployment_id_map"] == {} -def test_azure_config(workspace: Workspace) -> None: - 
yaml_content = """ +def test_azure_config(config: Config, workspace: Workspace) -> None: + config_file = workspace.get_path("azure_config.yaml") + yaml_content = f""" azure_api_type: azure azure_api_base: https://dummy.openai.azure.com azure_api_version: 2023-06-01-preview azure_model_map: - fast_llm_deployment_id: gpt-3.5-turbo - smart_llm_deployment_id: gpt-4 + fast_llm_deployment_id: FAST-LLM_ID + smart_llm_deployment_id: SMART-LLM_ID embedding_model_deployment_id: embedding-deployment-id-for-azure """ - config_file = workspace.get_path("azure.yaml") config_file.write_text(yaml_content) + os.environ["USE_AZURE"] = "True" os.environ["AZURE_CONFIG_FILE"] = str(config_file) config = ConfigBuilder.build_config_from_env() @@ -166,53 +167,31 @@ azure_model_map: assert config.openai_api_base == "https://dummy.openai.azure.com" assert config.openai_api_version == "2023-06-01-preview" assert config.azure_model_to_deployment_id_map == { - "fast_llm_deployment_id": "gpt-3.5-turbo", - "smart_llm_deployment_id": "gpt-4", + "fast_llm_deployment_id": "FAST-LLM_ID", + "smart_llm_deployment_id": "SMART-LLM_ID", "embedding_model_deployment_id": "embedding-deployment-id-for-azure", } - del os.environ["USE_AZURE"] - del os.environ["AZURE_CONFIG_FILE"] + fast_llm = config.fast_llm + smart_llm = config.smart_llm + assert config.get_azure_kwargs(config.fast_llm)["deployment_id"] == "FAST-LLM_ID" + assert config.get_azure_kwargs(config.smart_llm)["deployment_id"] == "SMART-LLM_ID" + # Emulate --gpt4only + config.fast_llm = smart_llm + assert config.get_azure_kwargs(config.fast_llm)["deployment_id"] == "SMART-LLM_ID" + assert config.get_azure_kwargs(config.smart_llm)["deployment_id"] == "SMART-LLM_ID" -def test_azure_deployment_id_for_model(workspace: Workspace) -> None: - yaml_content = """ -azure_api_type: azure -azure_api_base: https://dummy.openai.azure.com -azure_api_version: 2023-06-01-preview -azure_model_map: - fast_llm_deployment_id: gpt-3.5-turbo - smart_llm_deployment_id: gpt-4 - embedding_model_deployment_id: embedding-deployment-id-for-azure -""" - config_file = workspace.get_path("azure.yaml") - config_file.write_text(yaml_content) - os.environ["USE_AZURE"] = "True" - os.environ["AZURE_CONFIG_FILE"] = str(config_file) - config = ConfigBuilder.build_config_from_env() - - config.fast_llm = "fast_llm" - config.smart_llm = "smart_llm" - - def _get_deployment_id(model): - kwargs = config.get_azure_kwargs(model) - return kwargs.get("deployment_id", kwargs.get("engine")) - - assert _get_deployment_id(config.fast_llm) == "gpt-3.5-turbo" - assert _get_deployment_id(config.smart_llm) == "gpt-4" - assert ( - _get_deployment_id("text-embedding-ada-002") - == "embedding-deployment-id-for-azure" - ) - assert _get_deployment_id("dummy") is None + # Emulate --gpt3only + config.fast_llm = config.smart_llm = fast_llm + assert config.get_azure_kwargs(config.fast_llm)["deployment_id"] == "FAST-LLM_ID" + assert config.get_azure_kwargs(config.smart_llm)["deployment_id"] == "FAST-LLM_ID" del os.environ["USE_AZURE"] del os.environ["AZURE_CONFIG_FILE"] def test_create_config_gpt4only(config: Config) -> None: - fast_llm = config.fast_llm - smart_llm = config.smart_llm with mock.patch("autogpt.llm.api_manager.ApiManager.get_models") as mock_get_models: mock_get_models.return_value = [{"id": GPT_4_MODEL}] create_config( @@ -234,14 +213,8 @@ def test_create_config_gpt4only(config: Config) -> None: assert config.fast_llm == GPT_4_MODEL assert config.smart_llm == GPT_4_MODEL - # Reset config - config.fast_llm = fast_llm - 
config.smart_llm = smart_llm - def test_create_config_gpt3only(config: Config) -> None: - fast_llm = config.fast_llm - smart_llm = config.smart_llm with mock.patch("autogpt.llm.api_manager.ApiManager.get_models") as mock_get_models: mock_get_models.return_value = [{"id": GPT_3_MODEL}] create_config( @@ -262,7 +235,3 @@ def test_create_config_gpt3only(config: Config) -> None: ) assert config.fast_llm == GPT_3_MODEL assert config.smart_llm == GPT_3_MODEL - - # Reset config - config.fast_llm = fast_llm - config.smart_llm = smart_llm From 35b072f7e8862bec4990c47e6cbb1339c93c5e7c Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Fri, 7 Jul 2023 06:53:44 +0200 Subject: [PATCH 25/34] Fix PLAIN_OUTPUT for normal execution (#4904) --- autogpt/logs.py | 18 ++++++++++++++---- autogpt/main.py | 1 - tests/conftest.py | 13 ------------- 3 files changed, 14 insertions(+), 18 deletions(-) diff --git a/autogpt/logs.py b/autogpt/logs.py index 535cce32..7ff80542 100644 --- a/autogpt/logs.py +++ b/autogpt/logs.py @@ -7,7 +7,7 @@ import random import re import time from logging import LogRecord -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING, Any, Optional from colorama import Fore, Style @@ -85,16 +85,26 @@ class Logger(metaclass=Singleton): self.json_logger.addHandler(error_handler) self.json_logger.setLevel(logging.DEBUG) - self.speak_mode = False - self.config = None + self._config: Optional[Config] = None self.chat_plugins = [] + @property + def config(self) -> Config | None: + return self._config + + @config.setter + def config(self, config: Config): + self._config = config + if config.plain_output: + self.typing_logger.removeHandler(self.typing_console_handler) + self.typing_logger.addHandler(self.console_handler) + def typewriter_log( self, title="", title_color="", content="", speak_text=False, level=logging.INFO ): from autogpt.speech import say_text - if speak_text and self.speak_mode: + if speak_text and self.config and self.config.speak_mode: say_text(f"{title}. {content}", self.config) for plugin in self.chat_plugins: diff --git a/autogpt/main.py b/autogpt/main.py index 26e8e331..73d5ea98 100644 --- a/autogpt/main.py +++ b/autogpt/main.py @@ -55,7 +55,6 @@ def run_auto_gpt( ): # Configure logging before we do anything else. 
logger.set_level(logging.DEBUG if debug else logging.INFO) - logger.speak_mode = speak config = ConfigBuilder.build_config_from_env() # HACK: This is a hack to allow the config into the logger without having to pass it around everywhere diff --git a/tests/conftest.py b/tests/conftest.py index f2ca5904..14f6af78 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -10,7 +10,6 @@ from autogpt.agent.agent import Agent from autogpt.config import AIConfig, Config, ConfigBuilder from autogpt.config.ai_config import AIConfig from autogpt.llm.api_manager import ApiManager -from autogpt.logs import TypingConsoleHandler from autogpt.memory.vector import get_memory from autogpt.models.command_registry import CommandRegistry from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT @@ -81,18 +80,6 @@ def api_manager() -> ApiManager: return ApiManager() -@pytest.fixture(autouse=True) -def patch_emit(monkeypatch): - # convert plain_output to a boolean - - if bool(os.environ.get("PLAIN_OUTPUT")): - - def quick_emit(self, record: str): - print(self.format(record)) - - monkeypatch.setattr(TypingConsoleHandler, "emit", quick_emit) - - @pytest.fixture def agent(config: Config, workspace: Workspace) -> Agent: ai_config = AIConfig( From 0f538f6e2c434ab035644e7377ff3ac9e09c6389 Mon Sep 17 00:00:00 2001 From: Venkat Teja Date: Fri, 7 Jul 2023 10:26:29 +0530 Subject: [PATCH 26/34] Fix `summarize_text` usages and self-calls (#4863) * Fix summarize_text function usage * Update memory_item.py --- autogpt/memory/vector/memory_item.py | 1 + autogpt/processing/text.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/autogpt/memory/vector/memory_item.py b/autogpt/memory/vector/memory_item.py index cf00cc87..587a915b 100644 --- a/autogpt/memory/vector/memory_item.py +++ b/autogpt/memory/vector/memory_item.py @@ -74,6 +74,7 @@ class MemoryItem: if len(chunks) == 1 else summarize_text( "\n\n".join(chunk_summaries), + config, instruction=how_to_summarize, question=question_for_summary, )[0] diff --git a/autogpt/processing/text.py b/autogpt/processing/text.py index 6eecbde9..ddb64df1 100644 --- a/autogpt/processing/text.py +++ b/autogpt/processing/text.py @@ -131,12 +131,12 @@ def summarize_text( logger.info( f"Summarizing chunk {i + 1} / {len(chunks)} of length {chunk_length} tokens" ) - summary, _ = summarize_text(chunk, instruction) + summary, _ = summarize_text(chunk, config, instruction) summaries.append(summary) logger.info(f"Summarized {len(chunks)} chunks") - summary, _ = summarize_text("\n\n".join(summaries)) + summary, _ = summarize_text("\n\n".join(summaries), config) return summary.strip(), [ (summaries[i], chunks[i][0]) for i in range(0, len(chunks)) From 9a2a9f743976ba4b99cfcc451bee645ce347049a Mon Sep 17 00:00:00 2001 From: sagarishere <5121817+sagarishere@users.noreply.github.com> Date: Fri, 7 Jul 2023 08:39:36 +0300 Subject: [PATCH 27/34] Add docstring to function `get_memory()` in `autogpt.memory.vector` (#1296) * Document function get_memory in ./scripts/memory/init.py * Update get_memory docstring to current format --------- Co-authored-by: Reinier van der Leer --- autogpt/memory/vector/__init__.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/autogpt/memory/vector/__init__.py b/autogpt/memory/vector/__init__.py index 72abbb00..414a2800 100644 --- a/autogpt/memory/vector/__init__.py +++ b/autogpt/memory/vector/__init__.py @@ -40,6 +40,21 @@ supported_memory = ["json_file", "no_memory"] def get_memory(config: Config) -> VectorMemory: + """Returns a memory 
object corresponding to the memory backend specified in the config. + + The type of memory object returned depends on the value of the `memory_backend` + attribute in the configuration. E.g. if `memory_backend` is set to "pinecone", a + `PineconeMemory` object is returned. If it is set to "redis", a `RedisMemory` + object is returned. + By default, a `JSONFileMemory` object is returned. + + Params: + config: A configuration object that contains information about the memory backend + to be used and other relevant parameters. + + Returns: + VectorMemory: an instance of a memory object based on the configuration provided. + """ memory = None match config.memory_backend: From 57315bddfb499d536b6dbdf00c1db4f740b31b7f Mon Sep 17 00:00:00 2001 From: James Collins Date: Fri, 7 Jul 2023 19:42:26 -0700 Subject: [PATCH 28/34] Bugfix/broken azure config (#4912) --- autogpt/configurator.py | 11 +++++++---- autogpt/llm/api_manager.py | 4 ++-- autogpt/llm/utils/__init__.py | 14 ++++++++++++-- 3 files changed, 21 insertions(+), 8 deletions(-) diff --git a/autogpt/configurator.py b/autogpt/configurator.py index 2da5c58b..fa6b4c58 100644 --- a/autogpt/configurator.py +++ b/autogpt/configurator.py @@ -88,15 +88,18 @@ def create_config( # --gpt3only should always use gpt-3.5-turbo, despite user's FAST_LLM config config.fast_llm = GPT_3_MODEL config.smart_llm = GPT_3_MODEL - - elif gpt4only and check_model(GPT_4_MODEL, model_type="smart_llm") == GPT_4_MODEL: + elif ( + gpt4only + and check_model(GPT_4_MODEL, model_type="smart_llm", config=config) + == GPT_4_MODEL + ): logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED") # --gpt4only should always use gpt-4, despite user's SMART_LLM config config.fast_llm = GPT_4_MODEL config.smart_llm = GPT_4_MODEL else: - config.fast_llm = check_model(config.fast_llm, "fast_llm") - config.smart_llm = check_model(config.smart_llm, "smart_llm") + config.fast_llm = check_model(config.fast_llm, "fast_llm", config=config) + config.smart_llm = check_model(config.smart_llm, "smart_llm", config=config) if memory_type: supported_memory = get_supported_memory_backends() diff --git a/autogpt/llm/api_manager.py b/autogpt/llm/api_manager.py index 4e2aba9d..04e67db6 100644 --- a/autogpt/llm/api_manager.py +++ b/autogpt/llm/api_manager.py @@ -95,7 +95,7 @@ class ApiManager(metaclass=Singleton): """ return self.total_budget - def get_models(self) -> List[Model]: + def get_models(self, **openai_credentials) -> List[Model]: """ Get list of available GPT models. @@ -104,7 +104,7 @@ class ApiManager(metaclass=Singleton): """ if self.models is None: - all_models = openai.Model.list()["data"] + all_models = openai.Model.list(**openai_credentials)["data"] self.models = [model for model in all_models if "gpt" in model["id"]] return self.models diff --git a/autogpt/llm/utils/__init__.py b/autogpt/llm/utils/__init__.py index a3f53c33..3c2835b7 100644 --- a/autogpt/llm/utils/__init__.py +++ b/autogpt/llm/utils/__init__.py @@ -173,10 +173,20 @@ def create_chat_completion( ) -def check_model(model_name: str, model_type: Literal["smart_llm", "fast_llm"]) -> str: +def check_model( + model_name: str, + model_type: Literal["smart_llm", "fast_llm"], + config: Config, +) -> str: """Check if model is available for use. 
If not, return gpt-3.5-turbo.""" + openai_credentials = { + "api_key": config.openai_api_key, + } + if config.use_azure: + openai_credentials.update(config.get_azure_kwargs(model_name)) + api_manager = ApiManager() - models = api_manager.get_models() + models = api_manager.get_models(**openai_credentials) if any(model_name in m["id"] for m in models): return model_name From 8bce02736b7fcc3ef14809fa935c9f147ccc2e0a Mon Sep 17 00:00:00 2001 From: James Collins Date: Fri, 7 Jul 2023 19:51:01 -0700 Subject: [PATCH 29/34] Fix bugs running the core cli-app (#4905) Co-authored-by: Luke <2609441+lc0rp@users.noreply.github.com> --- autogpt/core/README.md | 14 +++++++++++--- autogpt/core/ability/schema.py | 4 ++++ autogpt/core/agent/simple.py | 9 +-------- autogpt/core/planning/strategies/next_ability.py | 4 ++-- autogpt/core/runner/cli_app/cli.py | 2 -- autogpt/core/runner/cli_app/main.py | 4 +++- requirements.txt | 1 + 7 files changed, 22 insertions(+), 16 deletions(-) diff --git a/autogpt/core/README.md b/autogpt/core/README.md index 49a87a09..f7bdf2d7 100644 --- a/autogpt/core/README.md +++ b/autogpt/core/README.md @@ -11,18 +11,26 @@ The first app is a straight CLI application. I have not done anything yet to po - [Entry Point](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_app/cli.py) - [Client Application](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_app/main.py) -To run, you first need a settings file. Run +Auto-GPT must be installed in your python environment to run this application. To do so, run + +``` +pip install -e REPOSITORY_ROOT +``` + +where `REPOSITORY_ROOT` is the root of the Auto-GPT repository on your machine. + +You'll then need a settings file. Run ``` python REPOSITORY_ROOT/autogpt/core/runner/cli_app/cli.py make-settings ``` -where `REPOSITORY_ROOT` is the root of the Auto-GPT repository on your machine. This will write a file called `default_agent_settings.yaml` with all the user-modifiable configuration keys to `~/auto-gpt/default_agent_settings.yml` and make the `auto-gpt` directory in your user directory if it doesn't exist). At a bare minimum, you'll need to set `openai.credentials.api_key` to your OpenAI API Key to run the model. +This will write a file called `default_agent_settings.yaml` with all the user-modifiable configuration keys to `~/auto-gpt/default_agent_settings.yml` and make the `auto-gpt` directory in your user directory if it doesn't exist). At a bare minimum, you'll need to set `openai.credentials.api_key` to your OpenAI API Key to run the model. You can then run Auto-GPT with ``` -python REPOSITORY_ROOT/autogpt/core/runner/cli_app/cli.py make-settings +python REPOSITORY_ROOT/autogpt/core/runner/cli_app/cli.py run ``` to launch the interaction loop. 
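
A note on the settings step just described: the README leaves editing `default_agent_settings.yml` to the reader. Below is a minimal Python sketch of doing the same programmatically. It assumes PyYAML is installed and that the dotted path `openai.credentials.api_key` corresponds to nested mappings in the generated file; both are assumptions, not something the patch itself guarantees.

```
from pathlib import Path

import yaml  # assumes PyYAML is available in the same environment

# Settings path documented in the README above, as created by `make-settings`.
settings_path = Path.home() / "auto-gpt" / "default_agent_settings.yml"
settings = yaml.safe_load(settings_path.read_text())

# Assumes the dotted path `openai.credentials.api_key` maps to nested keys.
settings["openai"]["credentials"]["api_key"] = "sk-..."  # your OpenAI API key

settings_path.write_text(yaml.safe_dump(settings))
```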
diff --git a/autogpt/core/ability/schema.py b/autogpt/core/ability/schema.py index 5bba5b7f..3d20a7b9 100644 --- a/autogpt/core/ability/schema.py +++ b/autogpt/core/ability/schema.py @@ -24,3 +24,7 @@ class AbilityResult(BaseModel): success: bool message: str new_knowledge: Knowledge = None + + def summary(self) -> str: + kwargs = ", ".join(f"{k}={v}" for k, v in self.ability_args.items()) + return f"{self.ability_name}({kwargs}): {self.message}" diff --git a/autogpt/core/agent/simple.py b/autogpt/core/agent/simple.py index bb986b9f..de99c135 100644 --- a/autogpt/core/agent/simple.py +++ b/autogpt/core/agent/simple.py @@ -26,7 +26,6 @@ from autogpt.core.workspace.simple import SimpleWorkspace, WorkspaceSettings class AgentSystems(SystemConfiguration): ability_registry: PluginLocation memory: PluginLocation - embedding_model: PluginLocation openai_provider: PluginLocation planning: PluginLocation workspace: PluginLocation @@ -148,12 +147,6 @@ class SimpleAgent(Agent, Configurable): agent_settings, logger, ) - agent_args["embedding_model"] = cls._get_system_instance( - "embedding_model", - agent_settings, - logger, - model_providers={"openai": agent_args["openai_provider"]}, - ) agent_args["planning"] = cls._get_system_instance( "planning", agent_settings, @@ -226,7 +219,7 @@ class SimpleAgent(Agent, Configurable): self._current_task = None self._next_ability = None - return ability_response + return ability_response.dict() else: raise NotImplementedError diff --git a/autogpt/core/planning/strategies/next_ability.py b/autogpt/core/planning/strategies/next_ability.py index 70ea458a..dff310c3 100644 --- a/autogpt/core/planning/strategies/next_ability.py +++ b/autogpt/core/planning/strategies/next_ability.py @@ -120,12 +120,12 @@ class NextAbility(PromptStrategy): ) template_kwargs["additional_info"] = to_numbered_list( [memory.summary() for memory in task.context.memories] - + [info.summary() for info in task.context.supplementary_info], + + [info for info in task.context.supplementary_info], no_items_response="There is no additional information available at this time.", **template_kwargs, ) template_kwargs["user_input"] = to_numbered_list( - [user_input.summary() for user_input in task.context.user_input], + [user_input for user_input in task.context.user_input], no_items_response="There are no additional considerations at this time.", **template_kwargs, ) diff --git a/autogpt/core/runner/cli_app/cli.py b/autogpt/core/runner/cli_app/cli.py index 8d33c560..56fca975 100644 --- a/autogpt/core/runner/cli_app/cli.py +++ b/autogpt/core/runner/cli_app/cli.py @@ -7,7 +7,6 @@ from autogpt.core.runner.cli_app.main import run_auto_gpt from autogpt.core.runner.client_lib.shared_click_commands import ( DEFAULT_SETTINGS_FILE, make_settings, - status, ) from autogpt.core.runner.client_lib.utils import coroutine, handle_exceptions @@ -19,7 +18,6 @@ def autogpt(): autogpt.add_command(make_settings) -autogpt.add_command(status) @autogpt.command() diff --git a/autogpt/core/runner/cli_app/main.py b/autogpt/core/runner/cli_app/main.py index a8ce6d7f..60af24be 100644 --- a/autogpt/core/runner/cli_app/main.py +++ b/autogpt/core/runner/cli_app/main.py @@ -102,7 +102,9 @@ def parse_next_ability(current_task, next_ability: dict) -> str: def parse_ability_result(ability_result) -> str: + parsed_response = f"Ability: {ability_result['ability_name']}\n" + parsed_response += f"Ability Arguments: {ability_result['ability_args']}\n" parsed_response = f"Ability Result: {ability_result['success']}\n" parsed_response += 
f"Message: {ability_result['message']}\n" - parsed_response += f"Data: {ability_result['data']}\n" + parsed_response += f"Data: {ability_result['new_knowledge']}\n" return parsed_response diff --git a/requirements.txt b/requirements.txt index 30ae8399..47aa08a6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -29,6 +29,7 @@ spacy>=3.0.0,<4.0.0 en-core-web-sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.5.0/en_core_web_sm-3.5.0-py3-none-any.whl prompt_toolkit>=3.0.38 pydantic +inflection # web server fastapi From 8b8b3a2cdd3ef6df5bca66f9f1d373661d6ab470 Mon Sep 17 00:00:00 2001 From: lengweiping1983 Date: Sat, 8 Jul 2023 23:29:55 +0800 Subject: [PATCH 30/34] Improve command system; add aliases for commands (#2635) * Command name supports multiple names * Separate CommandRegistry.commands and .command_aliases * Update test_commands.py * Add __contains__ operator to CommandRegistry * Update error message for unknown commands --------- Co-authored-by: Reinier van der Leer --- autogpt/agent/agent.py | 4 +- autogpt/app.py | 40 +--- autogpt/command_decorator.py | 2 + autogpt/commands/file_operations.py | 1 + autogpt/commands/web_search.py | 4 +- autogpt/main.py | 2 +- autogpt/models/command.py | 2 + autogpt/models/command_registry.py | 42 ++-- autogpt/prompts/generator.py | 54 +++-- tests/unit/test_commands.py | 348 +++++++++++++++------------- tests/unit/test_execute_command.py | 7 +- tests/unit/test_prompt_generator.py | 65 +----- 12 files changed, 278 insertions(+), 293 deletions(-) diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index fd476e56..01a1b133 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -85,7 +85,7 @@ class Agent: def start_interaction_loop(self): # Avoid circular imports - from autogpt.app import execute_command, get_command + from autogpt.app import execute_command, extract_command # Interaction Loop self.cycle_count = 0 @@ -161,7 +161,7 @@ class Agent: print_assistant_thoughts( self.ai_name, assistant_reply_json, self.config ) - command_name, arguments = get_command( + command_name, arguments = extract_command( assistant_reply_json, assistant_reply, self.config ) if self.config.speak_mode: diff --git a/autogpt/app.py b/autogpt/app.py index 06db7938..ea5072f8 100644 --- a/autogpt/app.py +++ b/autogpt/app.py @@ -23,7 +23,7 @@ def is_valid_int(value: str) -> bool: return False -def get_command( +def extract_command( assistant_reply_json: Dict, assistant_reply: ChatModelResponse, config: Config ): """Parse the response and return the command name and arguments @@ -78,21 +78,6 @@ def get_command( return "Error:", str(e) -def map_command_synonyms(command_name: str): - """Takes the original command name given by the AI, and checks if the - string matches a list of common/known hallucinations - """ - synonyms = [ - ("write_file", "write_to_file"), - ("create_file", "write_to_file"), - ("search", "google"), - ] - for seen_command, actual_command_name in synonyms: - if command_name == seen_command: - return actual_command_name - return command_name - - def execute_command( command_name: str, arguments: dict[str, str], @@ -109,28 +94,21 @@ def execute_command( str: The result of the command """ try: - cmd = agent.command_registry.commands.get(command_name) + # Execute a native command with the same name or alias, if it exists + if command := agent.command_registry.get_command(command_name): + return command(**arguments, agent=agent) - # If the command is found, call it with the provided arguments - if cmd: - 
return cmd(**arguments, agent=agent) - - # TODO: Remove commands below after they are moved to the command registry. - command_name = map_command_synonyms(command_name.lower()) - - # TODO: Change these to take in a file rather than pasted code, if - # non-file is given, return instructions "Input should be a python - # filepath, write your code to file and try again + # Handle non-native commands (e.g. from plugins) for command in agent.ai_config.prompt_generator.commands: if ( command_name == command["label"].lower() or command_name == command["name"].lower() ): return command["function"](**arguments) - return ( - f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'" - " list for available commands and only respond in the specified JSON" - " format." + + raise RuntimeError( + f"Cannot execute '{command_name}': unknown command." + " Do not try to use this command again." ) except Exception as e: return f"Error: {str(e)}" diff --git a/autogpt/command_decorator.py b/autogpt/command_decorator.py index f179f978..d082d9bf 100644 --- a/autogpt/command_decorator.py +++ b/autogpt/command_decorator.py @@ -20,6 +20,7 @@ def command( parameters: dict[str, CommandParameterSpec], enabled: bool | Callable[[Config], bool] = True, disabled_reason: Optional[str] = None, + aliases: list[str] = [], ) -> Callable[..., Any]: """The command decorator is used to create Command objects from ordinary functions.""" @@ -40,6 +41,7 @@ def command( parameters=typed_parameters, enabled=enabled, disabled_reason=disabled_reason, + aliases=aliases, ) @functools.wraps(func) diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py index ca248743..1d044822 100644 --- a/autogpt/commands/file_operations.py +++ b/autogpt/commands/file_operations.py @@ -189,6 +189,7 @@ def ingest_file( "required": True, }, }, + aliases=["write_file", "create_file"], ) def write_to_file(filename: str, text: str, agent: Agent) -> str: """Write text to a file diff --git a/autogpt/commands/web_search.py b/autogpt/commands/web_search.py index 5af81058..d47d680b 100644 --- a/autogpt/commands/web_search.py +++ b/autogpt/commands/web_search.py @@ -23,6 +23,7 @@ DUCKDUCKGO_MAX_ATTEMPTS = 3 "required": True, } }, + aliases=["search"], ) def web_search(query: str, agent: Agent, num_results: int = 8) -> str: """Return the results of a Google search @@ -67,6 +68,7 @@ def web_search(query: str, agent: Agent, num_results: int = 8) -> str: lambda config: bool(config.google_api_key) and bool(config.google_custom_search_engine_id), "Configure google_api_key and custom_search_engine_id.", + aliases=["search"], ) def google(query: str, agent: Agent, num_results: int = 8) -> str | list[str]: """Return the results of a Google search using the official Google API @@ -124,7 +126,7 @@ def google(query: str, agent: Agent, num_results: int = 8) -> str | list[str]: def safe_google_results(results: str | list) -> str: """ - Return the results of a google search in a safe format. + Return the results of a Google search in a safe format. Args: results (str | list): The search results. 
diff --git a/autogpt/main.py b/autogpt/main.py index 73d5ea98..08ac4b40 100644 --- a/autogpt/main.py +++ b/autogpt/main.py @@ -154,7 +154,7 @@ def run_auto_gpt( incompatible_commands.append(command) for command in incompatible_commands: - command_registry.unregister(command.name) + command_registry.unregister(command) logger.debug( f"Unregistering incompatible command: {command.name}, " f"reason - {command.disabled_reason or 'Disabled by current config.'}" diff --git a/autogpt/models/command.py b/autogpt/models/command.py index 92cf414a..61469786 100644 --- a/autogpt/models/command.py +++ b/autogpt/models/command.py @@ -22,6 +22,7 @@ class Command: parameters: list[CommandParameter], enabled: bool | Callable[[Config], bool] = True, disabled_reason: Optional[str] = None, + aliases: list[str] = [], ): self.name = name self.description = description @@ -29,6 +30,7 @@ class Command: self.parameters = parameters self.enabled = enabled self.disabled_reason = disabled_reason + self.aliases = aliases def __call__(self, *args, **kwargs) -> Any: if hasattr(kwargs, "config") and callable(self.enabled): diff --git a/autogpt/models/command_registry.py b/autogpt/models/command_registry.py index 96418d26..59d3ae77 100644 --- a/autogpt/models/command_registry.py +++ b/autogpt/models/command_registry.py @@ -1,6 +1,6 @@ import importlib import inspect -from typing import Any, Callable +from typing import Any from autogpt.command_decorator import AUTO_GPT_COMMAND_IDENTIFIER from autogpt.logs import logger @@ -15,10 +15,11 @@ class CommandRegistry: directory. """ - commands: dict[str, Command] + commands: dict[str, Command] = {} + commands_aliases: dict[str, Command] = {} - def __init__(self): - self.commands = {} + def __contains__(self, command_name: str): + return command_name in self.commands or command_name in self.commands_aliases def _import_module(self, module_name: str) -> Any: return importlib.import_module(module_name) @@ -33,11 +34,21 @@ class CommandRegistry: ) self.commands[cmd.name] = cmd - def unregister(self, command_name: str): - if command_name in self.commands: - del self.commands[command_name] + if cmd.name in self.commands_aliases: + logger.warn( + f"Command '{cmd.name}' will overwrite alias with the same name of " + f"'{self.commands_aliases[cmd.name]}'!" 
+ ) + for alias in cmd.aliases: + self.commands_aliases[alias] = cmd + + def unregister(self, command: Command) -> None: + if command.name in self.commands: + del self.commands[command.name] + for alias in command.aliases: + del self.commands_aliases[alias] else: - raise KeyError(f"Command '{command_name}' not found in registry.") + raise KeyError(f"Command '{command.name}' not found in registry.") def reload_commands(self) -> None: """Reloads all loaded command plugins.""" @@ -48,14 +59,17 @@ class CommandRegistry: if hasattr(reloaded_module, "register"): reloaded_module.register(self) - def get_command(self, name: str) -> Callable[..., Any]: - return self.commands[name] + def get_command(self, name: str) -> Command | None: + if name in self.commands: + return self.commands[name] + + if name in self.commands_aliases: + return self.commands_aliases[name] def call(self, command_name: str, **kwargs) -> Any: - if command_name not in self.commands: - raise KeyError(f"Command '{command_name}' not found in registry.") - command = self.commands[command_name] - return command(**kwargs) + if command := self.get_command(command_name): + return command(**kwargs) + raise KeyError(f"Command '{command_name}' not found in registry") def command_prompt(self) -> str: """ diff --git a/autogpt/prompts/generator.py b/autogpt/prompts/generator.py index 3fff9536..bc836f30 100644 --- a/autogpt/prompts/generator.py +++ b/autogpt/prompts/generator.py @@ -1,6 +1,8 @@ """ A module for generating custom prompt strings.""" +from __future__ import annotations + import json -from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, TypedDict from autogpt.config import Config from autogpt.json_utils.utilities import llm_response_schema @@ -15,19 +17,33 @@ class PromptGenerator: resources, and performance evaluations. """ - def __init__(self) -> None: - """ - Initialize the PromptGenerator object with empty lists of constraints, - commands, resources, and performance evaluations. - """ + class Command(TypedDict): + label: str + name: str + params: dict[str, str] + function: Optional[Callable] + + constraints: list[str] + commands: list[Command] + resources: list[str] + performance_evaluation: list[str] + command_registry: CommandRegistry | None + + # TODO: replace with AIConfig + name: str + role: str + goals: list[str] + + def __init__(self): self.constraints = [] self.commands = [] self.resources = [] self.performance_evaluation = [] - self.goals = [] - self.command_registry: CommandRegistry | None = None + self.command_registry = None + self.name = "Bob" self.role = "AI" + self.goals = [] def add_constraint(self, constraint: str) -> None: """ @@ -42,29 +58,29 @@ class PromptGenerator: self, command_label: str, command_name: str, - args=None, + params: dict[str, str] = {}, function: Optional[Callable] = None, ) -> None: """ Add a command to the commands list with a label, name, and optional arguments. + *Should only be used by plugins.* Native commands should be added + directly to the CommandRegistry. + Args: command_label (str): The label of the command. command_name (str): The name of the command. - args (dict, optional): A dictionary containing argument names and their + params (dict, optional): A dictionary containing argument names and their values. Defaults to None. function (callable, optional): A callable function to be called when the command is executed. Defaults to None. 
""" - if args is None: - args = {} + command_params = {name: type for name, type in params.items()} - command_args = {arg_key: arg_value for arg_key, arg_value in args.items()} - - command = { + command: PromptGenerator.Command = { "label": command_label, "name": command_name, - "args": command_args, + "params": command_params, "function": function, } @@ -80,10 +96,10 @@ class PromptGenerator: Returns: str: The formatted command string. """ - args_string = ", ".join( - f'"{key}": "{value}"' for key, value in command["args"].items() + params_string = ", ".join( + f'"{key}": "{value}"' for key, value in command["params"].items() ) - return f'{command["label"]}: "{command["name"]}", args: {args_string}' + return f'{command["label"]}: "{command["name"]}", params: {params_string}' def add_resource(self, resource: str) -> None: """ diff --git a/tests/unit/test_commands.py b/tests/unit/test_commands.py index 9b52cead..2cdf8701 100644 --- a/tests/unit/test_commands.py +++ b/tests/unit/test_commands.py @@ -14,196 +14,218 @@ PARAMETERS = [ ] -class TestCommand: - """Test cases for the Command class.""" - - @staticmethod - def example_command_method(arg1: int, arg2: str) -> str: - """Example function for testing the Command class.""" - # This function is static because it is not used by any other test cases. - return f"{arg1} - {arg2}" - - def test_command_creation(self): - """Test that a Command object can be created with the correct attributes.""" - cmd = Command( - name="example", - description="Example command", - method=self.example_command_method, - parameters=PARAMETERS, - ) - - assert cmd.name == "example" - assert cmd.description == "Example command" - assert cmd.method == self.example_command_method - assert ( - str(cmd) - == "example: Example command, params: (arg1: int, arg2: Optional[str])" - ) - - def test_command_call(self): - """Test that Command(*args) calls and returns the result of method(*args).""" - # Create a Command object with the example_command_method. - cmd = Command( - name="example", - description="Example command", - method=self.example_command_method, - parameters=[ - CommandParameter( - name="prompt", - type="string", - description="The prompt used to generate the image", - required=True, - ), - ], - ) - result = cmd(arg1=1, arg2="test") - assert result == "1 - test" - - def test_command_call_with_invalid_arguments(self): - """Test that calling a Command object with invalid arguments raises a TypeError.""" - cmd = Command( - name="example", - description="Example command", - method=self.example_command_method, - parameters=PARAMETERS, - ) - with pytest.raises(TypeError): - cmd(arg1="invalid", does_not_exist="test") +def example_command_method(arg1: int, arg2: str) -> str: + """Example function for testing the Command class.""" + # This function is static because it is not used by any other test cases. 
+ return f"{arg1} - {arg2}" -class TestCommandRegistry: - @staticmethod - def example_command_method(arg1: int, arg2: str) -> str: - return f"{arg1} - {arg2}" +def test_command_creation(): + """Test that a Command object can be created with the correct attributes.""" + cmd = Command( + name="example", + description="Example command", + method=example_command_method, + parameters=PARAMETERS, + ) - def test_register_command(self): - """Test that a command can be registered to the registry.""" - registry = CommandRegistry() - cmd = Command( - name="example", - description="Example command", - method=self.example_command_method, - parameters=PARAMETERS, - ) + assert cmd.name == "example" + assert cmd.description == "Example command" + assert cmd.method == example_command_method + assert ( + str(cmd) == "example: Example command, params: (arg1: int, arg2: Optional[str])" + ) - registry.register(cmd) - assert cmd.name in registry.commands - assert registry.commands[cmd.name] == cmd +@pytest.fixture +def example_command(): + yield Command( + name="example", + description="Example command", + method=example_command_method, + parameters=PARAMETERS, + ) - def test_unregister_command(self): - """Test that a command can be unregistered from the registry.""" - registry = CommandRegistry() - cmd = Command( - name="example", - description="Example command", - method=self.example_command_method, - parameters=PARAMETERS, - ) - registry.register(cmd) - registry.unregister(cmd.name) +def test_command_call(example_command: Command): + """Test that Command(*args) calls and returns the result of method(*args).""" + result = example_command(arg1=1, arg2="test") + assert result == "1 - test" - assert cmd.name not in registry.commands - def test_get_command(self): - """Test that a command can be retrieved from the registry.""" - registry = CommandRegistry() - cmd = Command( - name="example", - description="Example command", - method=self.example_command_method, - parameters=PARAMETERS, - ) +def test_command_call_with_invalid_arguments(example_command: Command): + """Test that calling a Command object with invalid arguments raises a TypeError.""" + with pytest.raises(TypeError): + example_command(arg1="invalid", does_not_exist="test") - registry.register(cmd) - retrieved_cmd = registry.get_command(cmd.name) - assert retrieved_cmd == cmd +def test_register_command(example_command: Command): + """Test that a command can be registered to the registry.""" + registry = CommandRegistry() - def test_get_nonexistent_command(self): - """Test that attempting to get a nonexistent command raises a KeyError.""" - registry = CommandRegistry() + registry.register(example_command) - with pytest.raises(KeyError): - registry.get_command("nonexistent_command") + assert registry.get_command(example_command.name) == example_command + assert len(registry.commands) == 1 - def test_call_command(self): - """Test that a command can be called through the registry.""" - registry = CommandRegistry() - cmd = Command( - name="example", - description="Example command", - method=self.example_command_method, - parameters=PARAMETERS, - ) - registry.register(cmd) - result = registry.call("example", arg1=1, arg2="test") +def test_unregister_command(example_command: Command): + """Test that a command can be unregistered from the registry.""" + registry = CommandRegistry() - assert result == "1 - test" + registry.register(example_command) + registry.unregister(example_command) - def test_call_nonexistent_command(self): - """Test that attempting to call 
a nonexistent command raises a KeyError.""" - registry = CommandRegistry() + assert len(registry.commands) == 0 + assert example_command.name not in registry - with pytest.raises(KeyError): - registry.call("nonexistent_command", arg1=1, arg2="test") - def test_get_command_prompt(self): - """Test that the command prompt is correctly formatted.""" - registry = CommandRegistry() - cmd = Command( - name="example", - description="Example command", - method=self.example_command_method, - parameters=PARAMETERS, - ) +@pytest.fixture +def example_command_with_aliases(example_command: Command): + example_command.aliases = ["example_alias", "example_alias_2"] + return example_command - registry.register(cmd) - command_prompt = registry.command_prompt() - assert f"(arg1: int, arg2: Optional[str])" in command_prompt +def test_register_command_aliases(example_command_with_aliases: Command): + """Test that a command can be registered to the registry.""" + registry = CommandRegistry() + command = example_command_with_aliases - def test_import_mock_commands_module(self): - """Test that the registry can import a module with mock command plugins.""" - registry = CommandRegistry() - mock_commands_module = "tests.mocks.mock_commands" + registry.register(command) - registry.import_commands(mock_commands_module) + assert command.name in registry + assert registry.get_command(command.name) == command + for alias in command.aliases: + assert registry.get_command(alias) == command + assert len(registry.commands) == 1 - assert "function_based" in registry.commands - assert registry.commands["function_based"].name == "function_based" - assert ( - registry.commands["function_based"].description - == "Function-based test command" - ) - def test_import_temp_command_file_module(self, tmp_path): - """ - Test that the registry can import a command plugins module from a temp file. - Args: - tmp_path (pathlib.Path): Path to a temporary directory. 
- """ - registry = CommandRegistry() +def test_unregister_command_aliases(example_command_with_aliases: Command): + """Test that a command can be unregistered from the registry.""" + registry = CommandRegistry() + command = example_command_with_aliases - # Create a temp command file - src = Path(os.getcwd()) / "tests/mocks/mock_commands.py" - temp_commands_file = tmp_path / "mock_commands.py" - shutil.copyfile(src, temp_commands_file) + registry.register(command) + registry.unregister(command) - # Add the temp directory to sys.path to make the module importable - sys.path.append(str(tmp_path)) + assert len(registry.commands) == 0 + assert command.name not in registry + for alias in command.aliases: + assert alias not in registry - temp_commands_module = "mock_commands" - registry.import_commands(temp_commands_module) - # Remove the temp directory from sys.path - sys.path.remove(str(tmp_path)) +def test_command_in_registry(example_command_with_aliases: Command): + """Test that `command_name in registry` works.""" + registry = CommandRegistry() + command = example_command_with_aliases - assert "function_based" in registry.commands - assert registry.commands["function_based"].name == "function_based" - assert ( - registry.commands["function_based"].description - == "Function-based test command" - ) + assert command.name not in registry + assert "nonexistent_command" not in registry + + registry.register(command) + + assert command.name in registry + assert "nonexistent_command" not in registry + for alias in command.aliases: + assert alias in registry + + +def test_get_command(example_command: Command): + """Test that a command can be retrieved from the registry.""" + registry = CommandRegistry() + + registry.register(example_command) + retrieved_cmd = registry.get_command(example_command.name) + + assert retrieved_cmd == example_command + + +def test_get_nonexistent_command(): + """Test that attempting to get a nonexistent command raises a KeyError.""" + registry = CommandRegistry() + + assert registry.get_command("nonexistent_command") is None + assert "nonexistent_command" not in registry + + +def test_call_command(): + """Test that a command can be called through the registry.""" + registry = CommandRegistry() + cmd = Command( + name="example", + description="Example command", + method=example_command_method, + parameters=PARAMETERS, + ) + + registry.register(cmd) + result = registry.call("example", arg1=1, arg2="test") + + assert result == "1 - test" + + +def test_call_nonexistent_command(): + """Test that attempting to call a nonexistent command raises a KeyError.""" + registry = CommandRegistry() + + with pytest.raises(KeyError): + registry.call("nonexistent_command", arg1=1, arg2="test") + + +def test_get_command_prompt(): + """Test that the command prompt is correctly formatted.""" + registry = CommandRegistry() + cmd = Command( + name="example", + description="Example command", + method=example_command_method, + parameters=PARAMETERS, + ) + + registry.register(cmd) + command_prompt = registry.command_prompt() + + assert f"(arg1: int, arg2: Optional[str])" in command_prompt + + +def test_import_mock_commands_module(): + """Test that the registry can import a module with mock command plugins.""" + registry = CommandRegistry() + mock_commands_module = "tests.mocks.mock_commands" + + registry.import_commands(mock_commands_module) + + assert "function_based" in registry + assert registry.commands["function_based"].name == "function_based" + assert ( + 
registry.commands["function_based"].description == "Function-based test command" + ) + + +def test_import_temp_command_file_module(tmp_path: Path): + """ + Test that the registry can import a command plugins module from a temp file. + Args: + tmp_path (pathlib.Path): Path to a temporary directory. + """ + registry = CommandRegistry() + + # Create a temp command file + src = Path(os.getcwd()) / "tests/mocks/mock_commands.py" + temp_commands_file = tmp_path / "mock_commands.py" + shutil.copyfile(src, temp_commands_file) + + # Add the temp directory to sys.path to make the module importable + sys.path.append(str(tmp_path)) + + temp_commands_module = "mock_commands" + registry.import_commands(temp_commands_module) + + # Remove the temp directory from sys.path + sys.path.remove(str(tmp_path)) + + assert "function_based" in registry + assert registry.commands["function_based"].name == "function_based" + assert ( + registry.commands["function_based"].description == "Function-based test command" + ) diff --git a/tests/unit/test_execute_command.py b/tests/unit/test_execute_command.py index fb3f043a..21fb0b66 100644 --- a/tests/unit/test_execute_command.py +++ b/tests/unit/test_execute_command.py @@ -8,17 +8,16 @@ def check_plan(): def test_execute_command_plugin(agent: Agent): """Test that executing a command that came from a plugin works as expected""" + command_name = "check_plan" agent.ai_config.prompt_generator.add_command( - "check_plan", + command_name, "Read the plan.md with the next goals to achieve", {}, check_plan, ) - command_name = "check_plan" - arguments = {} command_result = execute_command( command_name=command_name, - arguments=arguments, + arguments={}, agent=agent, ) assert command_result == "hi" diff --git a/tests/unit/test_prompt_generator.py b/tests/unit/test_prompt_generator.py index c5ffaf78..44147e6d 100644 --- a/tests/unit/test_prompt_generator.py +++ b/tests/unit/test_prompt_generator.py @@ -17,13 +17,13 @@ def test_add_command(): """ command_label = "Command Label" command_name = "command_name" - args = {"arg1": "value1", "arg2": "value2"} + params = {"arg1": "value1", "arg2": "value2"} generator = PromptGenerator() - generator.add_command(command_label, command_name, args) + generator.add_command(command_label, command_name, params) command = { "label": command_label, "name": command_name, - "args": args, + "params": params, "function": None, } assert command in generator.commands @@ -62,12 +62,12 @@ def test_generate_prompt_string(config): { "label": "Command1", "name": "command_name1", - "args": {"arg1": "value1"}, + "params": {"arg1": "value1"}, }, { "label": "Command2", "name": "command_name2", - "args": {}, + "params": {}, }, ] resources = ["Resource1", "Resource2"] @@ -78,7 +78,7 @@ def test_generate_prompt_string(config): for constraint in constraints: generator.add_constraint(constraint) for command in commands: - generator.add_command(command["label"], command["name"], command["args"]) + generator.add_command(command["label"], command["name"], command["params"]) for resource in resources: generator.add_resource(resource) for evaluation in evaluations: @@ -93,58 +93,7 @@ def test_generate_prompt_string(config): assert constraint in prompt_string for command in commands: assert command["name"] in prompt_string - for key, value in command["args"].items(): - assert f'"{key}": "{value}"' in prompt_string - for resource in resources: - assert resource in prompt_string - for evaluation in evaluations: - assert evaluation in prompt_string - - -def 
test_generate_prompt_string(config): - """ - Test if the generate_prompt_string() method generates a prompt string with all the added - constraints, commands, resources, and evaluations. - """ - - # Define the test data - constraints = ["Constraint1", "Constraint2"] - commands = [ - { - "label": "Command1", - "name": "command_name1", - "args": {"arg1": "value1"}, - }, - { - "label": "Command2", - "name": "command_name2", - "args": {}, - }, - ] - resources = ["Resource1", "Resource2"] - evaluations = ["Evaluation1", "Evaluation2"] - - # Add test data to the generator - generator = PromptGenerator() - for constraint in constraints: - generator.add_constraint(constraint) - for command in commands: - generator.add_command(command["label"], command["name"], command["args"]) - for resource in resources: - generator.add_resource(resource) - for evaluation in evaluations: - generator.add_performance_evaluation(evaluation) - - # Generate the prompt string and verify its correctness - prompt_string = generator.generate_prompt_string(config) - assert prompt_string is not None - - # Check if all constraints, commands, resources, and evaluations are present in the prompt string - for constraint in constraints: - assert constraint in prompt_string - for command in commands: - assert command["name"] in prompt_string - for key, value in command["args"].items(): + for key, value in command["params"].items(): assert f'"{key}": "{value}"' in prompt_string for resource in resources: assert resource in prompt_string From 6cf8ec93150996a1badf65a8064729134067945e Mon Sep 17 00:00:00 2001 From: Auto-GPT-Bot Date: Sat, 8 Jul 2023 15:33:59 +0000 Subject: [PATCH 31/34] Update cassette submodule --- tests/Auto-GPT-test-cassettes | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/Auto-GPT-test-cassettes b/tests/Auto-GPT-test-cassettes index f75a16de..4485d191 160000 --- a/tests/Auto-GPT-test-cassettes +++ b/tests/Auto-GPT-test-cassettes @@ -1 +1 @@ -Subproject commit f75a16de114bae13b7b49cfa376475fbdc674560 +Subproject commit 4485d191a4c989053af99d56470d9e805e4d3d47 From 9adcad8b8aefd20ae62d0826f5c17394b352d09c Mon Sep 17 00:00:00 2001 From: James Collins Date: Sun, 9 Jul 2023 19:32:04 -0700 Subject: [PATCH 32/34] Fix regression: restore api_base and organization configurability (#4933) --- autogpt/config/config.py | 13 ++++++++++++- autogpt/llm/utils/__init__.py | 18 ++++-------------- autogpt/memory/vector/utils.py | 7 ++----- tests/unit/test_config.py | 26 ++++++++++++++++++++------ 4 files changed, 38 insertions(+), 26 deletions(-) diff --git a/autogpt/config/config.py b/autogpt/config/config.py index 05590eb6..b1ff0a0a 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -86,7 +86,18 @@ class Config(SystemSettings): plugins: list[str] authorise_key: str - def get_azure_kwargs(self, model: str) -> dict[str, str]: + def get_openai_credentials(self, model: str) -> dict[str, str]: + credentials = { + "api_key": self.openai_api_key, + "api_base": self.openai_api_base, + "organization": self.openai_organization, + } + if self.use_azure: + azure_credentials = self.get_azure_credentials(model) + credentials.update(azure_credentials) + return credentials + + def get_azure_credentials(self, model: str) -> dict[str, str]: """Get the kwargs for the Azure API.""" # Fix --gpt3only and --gpt4only in combination with Azure diff --git a/autogpt/llm/utils/__init__.py b/autogpt/llm/utils/__init__.py index 3c2835b7..e0ff1473 100644 --- a/autogpt/llm/utils/__init__.py +++ 
b/autogpt/llm/utils/__init__.py @@ -71,17 +71,14 @@ def create_text_completion( if temperature is None: temperature = config.temperature - if config.use_azure: - kwargs = config.get_azure_kwargs(model) - else: - kwargs = {"model": model} + kwargs = {"model": model} + kwargs.update(config.get_openai_credentials(model)) response = iopenai.create_text_completion( prompt=prompt, **kwargs, temperature=temperature, max_tokens=max_output_tokens, - api_key=config.openai_api_key, ) logger.debug(f"Response: {response}") @@ -137,9 +134,7 @@ def create_chat_completion( if message is not None: return message - chat_completion_kwargs["api_key"] = config.openai_api_key - if config.use_azure: - chat_completion_kwargs.update(config.get_azure_kwargs(model)) + chat_completion_kwargs.update(config.get_openai_credentials(model)) if functions: chat_completion_kwargs["functions"] = [ @@ -179,12 +174,7 @@ def check_model( config: Config, ) -> str: """Check if model is available for use. If not, return gpt-3.5-turbo.""" - openai_credentials = { - "api_key": config.openai_api_key, - } - if config.use_azure: - openai_credentials.update(config.get_azure_kwargs(model_name)) - + openai_credentials = config.get_openai_credentials(model_name) api_manager = ApiManager() models = api_manager.get_models(**openai_credentials) diff --git a/autogpt/memory/vector/utils.py b/autogpt/memory/vector/utils.py index 74438f28..eb691256 100644 --- a/autogpt/memory/vector/utils.py +++ b/autogpt/memory/vector/utils.py @@ -41,10 +41,8 @@ def get_embedding( input = [text.replace("\n", " ") for text in input] model = config.embedding_model - if config.use_azure: - kwargs = config.get_azure_kwargs(model) - else: - kwargs = {"model": model} + kwargs = {"model": model} + kwargs.update(config.get_openai_credentials(model)) logger.debug( f"Getting embedding{f's for {len(input)} inputs' if multiple else ''}" @@ -57,7 +55,6 @@ def get_embedding( embeddings = iopenai.create_embedding( input, **kwargs, - api_key=config.openai_api_key, ).data if not multiple: diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py index b441aa94..7abbfcd5 100644 --- a/tests/unit/test_config.py +++ b/tests/unit/test_config.py @@ -174,18 +174,32 @@ azure_model_map: fast_llm = config.fast_llm smart_llm = config.smart_llm - assert config.get_azure_kwargs(config.fast_llm)["deployment_id"] == "FAST-LLM_ID" - assert config.get_azure_kwargs(config.smart_llm)["deployment_id"] == "SMART-LLM_ID" + assert ( + config.get_azure_credentials(config.fast_llm)["deployment_id"] == "FAST-LLM_ID" + ) + assert ( + config.get_azure_credentials(config.smart_llm)["deployment_id"] + == "SMART-LLM_ID" + ) # Emulate --gpt4only config.fast_llm = smart_llm - assert config.get_azure_kwargs(config.fast_llm)["deployment_id"] == "SMART-LLM_ID" - assert config.get_azure_kwargs(config.smart_llm)["deployment_id"] == "SMART-LLM_ID" + assert ( + config.get_azure_credentials(config.fast_llm)["deployment_id"] == "SMART-LLM_ID" + ) + assert ( + config.get_azure_credentials(config.smart_llm)["deployment_id"] + == "SMART-LLM_ID" + ) # Emulate --gpt3only config.fast_llm = config.smart_llm = fast_llm - assert config.get_azure_kwargs(config.fast_llm)["deployment_id"] == "FAST-LLM_ID" - assert config.get_azure_kwargs(config.smart_llm)["deployment_id"] == "FAST-LLM_ID" + assert ( + config.get_azure_credentials(config.fast_llm)["deployment_id"] == "FAST-LLM_ID" + ) + assert ( + config.get_azure_credentials(config.smart_llm)["deployment_id"] == "FAST-LLM_ID" + ) del os.environ["USE_AZURE"] del 
os.environ["AZURE_CONFIG_FILE"] From 4d514694738eb1a9a581136e85cb6aeb0ba27d63 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Mon, 10 Jul 2023 18:13:59 +0200 Subject: [PATCH 33/34] Fix CI cassette checkout --- .github/workflows/ci.yml | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3e21d1d7..dde98cf9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -108,22 +108,27 @@ jobs: if: ${{ startsWith(github.event_name, 'pull_request') }} run: | cassette_branch="${{ github.event.pull_request.user.login }}-${{ github.event.pull_request.head.ref }}" + cassette_base_branch="${{ github.event.pull_request.base.ref }}" cd tests/Auto-GPT-test-cassettes + if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then + cassette_base_branch="master" + fi + if git ls-remote --exit-code --heads origin $cassette_branch ; then git fetch origin $cassette_branch - git fetch origin ${{ github.event.pull_request.base.ref }} + git fetch origin $cassette_base_branch git checkout $cassette_branch # Pick non-conflicting cassette updates from the base branch - git merge --no-commit --strategy-option=ours origin/${{ github.event.pull_request.base.ref }} + git merge --no-commit --strategy-option=ours origin/$cassette_base_branch echo "Using cassettes from mirror branch '$cassette_branch'," \ - "synced to upstream branch '${{ github.event.pull_request.base.ref }}'." + "synced to upstream branch '$cassette_base_branch'." else git checkout -b $cassette_branch echo "Branch '$cassette_branch' does not exist in cassette submodule." \ - "Using cassettes from '${{ github.event.pull_request.base.ref }}'." + "Using cassettes from '$cassette_base_branch'." fi - name: Set up Python ${{ matrix.python-version }} From 46f31cb643a4803c04f0a1cb5af8bde6afd0a90e Mon Sep 17 00:00:00 2001 From: Luke <2609441+lc0rp@users.noreply.github.com> Date: Tue, 11 Jul 2023 07:40:33 -0400 Subject: [PATCH 34/34] Bulletin & version update for 0.4.4 (#4937) Co-authored-by: Reinier van der Leer Co-authored-by: lc0rp <2609411+lc0rp@users.noreply.github.com> --- BULLETIN.md | 43 +++++++++++++++++++++++++------------------ pyproject.toml | 2 +- 2 files changed, 26 insertions(+), 19 deletions(-) diff --git a/BULLETIN.md b/BULLETIN.md index 0b8afeba..117a436a 100644 --- a/BULLETIN.md +++ b/BULLETIN.md @@ -1,22 +1,29 @@ -# Website and Documentation Site 📰📖 -Check out *https://agpt.co*, the official news & updates site for Auto-GPT! -The documentation also has a place here, at *https://docs.agpt.co* +# QUICK LINKS 🔗 +# -------------- +🌎 *Official Website*: https://agpt.co. +📖 *User Guide*: https://docs.agpt.co. +👩 *Contributors Wiki*: https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing. -# For contributors 👷🏼 -Since releasing v0.3.0, whave been working on re-architecting the Auto-GPT core to make it more extensible and make room for structural performance-oriented R&D. +# v0.4.4 RELEASE HIGHLIGHTS! 🚀 +# ----------------------------- +## GPT-4 is back! +Following OpenAI's recent GPT-4 GA announcement, the SMART_LLM .env setting +now defaults to GPT-4, and Auto-GPT will use GPT-4 by default in its main loop. -Check out the contribution guide on our wiki: -https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing +### !! High Costs Warning !! 💰💀🚨 +GPT-4 costs ~20x more than GPT-3.5-turbo. +Please take note of this before using SMART_LLM. 
You can use `--gpt3only` +or `--gpt4only` to force the use of GPT-3.5-turbo or GPT-4, respectively, +at runtime. -# 🚀 v0.4.3 Release 🚀 -We're happy to announce the 0.4.3 maintenance release, which primarily focuses on refining the LLM command execution, -extending support for OpenAI's latest models (including the powerful GPT-3 16k model), and laying the groundwork -for future compatibility with OpenAI's function calling feature. +## Re-arch v1 preview release! +We've released a preview version of the re-arch code, under `autogpt/core`. +This is a major milestone for us, and we're excited to continue working on it. +We look forward to your feedback. Follow the process here: +https://github.com/Significant-Gravitas/Auto-GPT/issues/4770. -Key Highlights: -- OpenAI API Key Prompt: Auto-GPT will now courteously prompt users for their OpenAI API key, if it's not already provided. -- Summarization Enhancements: We've optimized Auto-GPT's use of the LLM context window even further. -- JSON Memory Reading: Support for reading memories from JSON files has been improved, resulting in enhanced task execution. -- Deprecated commands, removed for a leaner, more performant LLM: analyze_code, write_tests, improve_code, audio_text, web_playwright, web_requests -## Take a look at the Release Notes on Github for the full changelog! -https://github.com/Significant-Gravitas/Auto-GPT/releases +## Other highlights +Other fixes include plugins regressions, Azure config and security patches. + +Take a look at the Release Notes on Github for the full changelog! +https://github.com/Significant-Gravitas/Auto-GPT/releases. diff --git a/pyproject.toml b/pyproject.toml index b0aea625..06b2f87f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "hatchling.build" [project] name = "agpt" -version = "0.4.3" +version = "0.4.4" authors = [ { name="Torantulino", email="support@agpt.co" }, ]
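
Two of the fixes in this series are easier to follow with a short sketch. First, PATCH 24: when `--gpt3only` or `--gpt4only` forces `fast_llm == smart_llm`, the two entries of the Azure deployment map collapse onto a single dict key, and whichever entry is written last wins every lookup, so the wrong deployment id could be returned. The patch sidesteps the collision by renaming the losing slot's key with a `not_` prefix. Below is a condensed, self-contained sketch of that logic, not the full `get_azure_kwargs`; the deployment ids are placeholders echoing the test above.

```
GPT_4_MODEL = "gpt-4"
GPT_3_MODEL = "gpt-3.5-turbo"


def deployment_map(fast_llm: str, smart_llm: str) -> dict[str, str]:
    # With --gpt4only the smart deployment must win the key collision, so the
    # fast slot's key is sacrificed; with --gpt3only it is the other way round.
    fast_key = (
        f"not_{fast_llm}"
        if fast_llm == smart_llm and fast_llm.startswith(GPT_4_MODEL)
        else fast_llm
    )
    smart_key = (
        f"not_{smart_llm}"
        if smart_llm == fast_llm and smart_llm.startswith(GPT_3_MODEL)
        else smart_llm
    )
    return {fast_key: "FAST-LLM_ID", smart_key: "SMART-LLM_ID"}


# --gpt4only: both slots are gpt-4; the lookup resolves to the smart deployment.
assert deployment_map("gpt-4", "gpt-4")["gpt-4"] == "SMART-LLM_ID"
# --gpt3only: both slots are gpt-3.5-turbo; the lookup resolves to the fast one.
assert deployment_map("gpt-3.5-turbo", "gpt-3.5-turbo")["gpt-3.5-turbo"] == "FAST-LLM_ID"
# Distinct models: no collision, both keys survive.
assert deployment_map("gpt-3.5-turbo", "gpt-4") == {
    "gpt-3.5-turbo": "FAST-LLM_ID",
    "gpt-4": "SMART-LLM_ID",
}
```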
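
Second, PATCH 30's alias mechanism: the registry keeps canonical commands and their aliases in two separate dicts, `get_command` consults both with canonical names taking precedence, and `__contains__` checks both. A stripped-down sketch follows; the real `Command` also carries a description, typed parameters, and enablement logic that is omitted here.

```
from dataclasses import dataclass, field
from typing import Any, Callable, Optional


@dataclass
class Command:
    name: str
    method: Callable[..., Any]
    aliases: list[str] = field(default_factory=list)


class CommandRegistry:
    def __init__(self) -> None:
        self.commands: dict[str, Command] = {}
        self.commands_aliases: dict[str, Command] = {}

    def __contains__(self, name: str) -> bool:
        return name in self.commands or name in self.commands_aliases

    def register(self, cmd: Command) -> None:
        self.commands[cmd.name] = cmd
        for alias in cmd.aliases:
            self.commands_aliases[alias] = cmd

    def get_command(self, name: str) -> Optional[Command]:
        # Canonical names shadow aliases, matching the lookup order above.
        if name in self.commands:
            return self.commands[name]
        return self.commands_aliases.get(name)


registry = CommandRegistry()
registry.register(
    Command("write_to_file", print, aliases=["write_file", "create_file"])
)

assert registry.get_command("create_file") is registry.get_command("write_to_file")
assert "write_file" in registry and "no_such_command" not in registry
```

This is what lets commonly hallucinated names like `create_file` resolve to `write_to_file` without the old `map_command_synonyms` table that PATCH 30 removes.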