From 8bce02736b7fcc3ef14809fa935c9f147ccc2e0a Mon Sep 17 00:00:00 2001
From: James Collins
Date: Fri, 7 Jul 2023 19:51:01 -0700
Subject: [PATCH] Fix bugs running the core cli-app (#4905)

Co-authored-by: Luke <2609441+lc0rp@users.noreply.github.com>
---
 autogpt/core/README.md                           | 14 +++++++++++---
 autogpt/core/ability/schema.py                   |  4 ++++
 autogpt/core/agent/simple.py                     |  9 +--------
 autogpt/core/planning/strategies/next_ability.py |  4 ++--
 autogpt/core/runner/cli_app/cli.py               |  2 --
 autogpt/core/runner/cli_app/main.py              |  4 +++-
 requirements.txt                                 |  1 +
 7 files changed, 22 insertions(+), 16 deletions(-)

diff --git a/autogpt/core/README.md b/autogpt/core/README.md
index 49a87a09..f7bdf2d7 100644
--- a/autogpt/core/README.md
+++ b/autogpt/core/README.md
@@ -11,18 +11,26 @@ The first app is a straight CLI application. I have not done anything yet to po
 - [Entry Point](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_app/cli.py)
 - [Client Application](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_app/main.py)
 
-To run, you first need a settings file. Run
+Auto-GPT must be installed in your Python environment to run this application. To do so, run
+
+```
+pip install -e REPOSITORY_ROOT
+```
+
+where `REPOSITORY_ROOT` is the root of the Auto-GPT repository on your machine.
+
+You'll then need a settings file. Run
 
 ```
 python REPOSITORY_ROOT/autogpt/core/runner/cli_app/cli.py make-settings
 ```
 
-where `REPOSITORY_ROOT` is the root of the Auto-GPT repository on your machine. This will write a file called `default_agent_settings.yaml` with all the user-modifiable configuration keys to `~/auto-gpt/default_agent_settings.yml` and make the `auto-gpt` directory in your user directory if it doesn't exist). At a bare minimum, you'll need to set `openai.credentials.api_key` to your OpenAI API Key to run the model.
+This will write a file called `default_agent_settings.yaml` with all the user-modifiable configuration keys to `~/auto-gpt/default_agent_settings.yml` (and make the `auto-gpt` directory in your user directory if it doesn't exist). At a bare minimum, you'll need to set `openai.credentials.api_key` to your OpenAI API Key to run the model.
 
 You can then run Auto-GPT with
 
 ```
-python REPOSITORY_ROOT/autogpt/core/runner/cli_app/cli.py make-settings
+python REPOSITORY_ROOT/autogpt/core/runner/cli_app/cli.py run
 ```
 
 to launch the interaction loop.
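A note on the settings step in the README change above: only the dotted key path `openai.credentials.api_key` is given by the source. Assuming that path mirrors the nesting of the generated `~/auto-gpt/default_agent_settings.yml` and that PyYAML is available in the environment, a hypothetical sanity check before launching the `run` command might look like:

```python
# Hypothetical helper, not part of Auto-GPT: confirm the OpenAI key is filled in
# before starting the interaction loop.
from pathlib import Path

import yaml  # PyYAML, assumed to be installed alongside Auto-GPT

settings_path = Path.home() / "auto-gpt" / "default_agent_settings.yml"
settings = yaml.safe_load(settings_path.read_text())

# Assumption: the dotted path openai.credentials.api_key maps to nested YAML keys.
api_key = settings["openai"]["credentials"]["api_key"]
assert api_key, "Set openai.credentials.api_key in default_agent_settings.yml first"
```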
diff --git a/autogpt/core/ability/schema.py b/autogpt/core/ability/schema.py
index 5bba5b7f..3d20a7b9 100644
--- a/autogpt/core/ability/schema.py
+++ b/autogpt/core/ability/schema.py
@@ -24,3 +24,7 @@ class AbilityResult(BaseModel):
     success: bool
     message: str
     new_knowledge: Knowledge = None
+
+    def summary(self) -> str:
+        kwargs = ", ".join(f"{k}={v}" for k, v in self.ability_args.items())
+        return f"{self.ability_name}({kwargs}): {self.message}"
diff --git a/autogpt/core/agent/simple.py b/autogpt/core/agent/simple.py
index bb986b9f..de99c135 100644
--- a/autogpt/core/agent/simple.py
+++ b/autogpt/core/agent/simple.py
@@ -26,7 +26,6 @@ from autogpt.core.workspace.simple import SimpleWorkspace, WorkspaceSettings
 class AgentSystems(SystemConfiguration):
     ability_registry: PluginLocation
     memory: PluginLocation
-    embedding_model: PluginLocation
     openai_provider: PluginLocation
     planning: PluginLocation
     workspace: PluginLocation
@@ -148,12 +147,6 @@ class SimpleAgent(Agent, Configurable):
             agent_settings,
             logger,
         )
-        agent_args["embedding_model"] = cls._get_system_instance(
-            "embedding_model",
-            agent_settings,
-            logger,
-            model_providers={"openai": agent_args["openai_provider"]},
-        )
         agent_args["planning"] = cls._get_system_instance(
             "planning",
             agent_settings,
@@ -226,7 +219,7 @@ class SimpleAgent(Agent, Configurable):
             self._current_task = None
             self._next_ability = None
 
-            return ability_response
+            return ability_response.dict()
         else:
             raise NotImplementedError
 
diff --git a/autogpt/core/planning/strategies/next_ability.py b/autogpt/core/planning/strategies/next_ability.py
index 70ea458a..dff310c3 100644
--- a/autogpt/core/planning/strategies/next_ability.py
+++ b/autogpt/core/planning/strategies/next_ability.py
@@ -120,12 +120,12 @@ class NextAbility(PromptStrategy):
         )
         template_kwargs["additional_info"] = to_numbered_list(
             [memory.summary() for memory in task.context.memories]
-            + [info.summary() for info in task.context.supplementary_info],
+            + [info for info in task.context.supplementary_info],
             no_items_response="There is no additional information available at this time.",
             **template_kwargs,
         )
         template_kwargs["user_input"] = to_numbered_list(
-            [user_input.summary() for user_input in task.context.user_input],
+            [user_input for user_input in task.context.user_input],
             no_items_response="There are no additional considerations at this time.",
             **template_kwargs,
         )
diff --git a/autogpt/core/runner/cli_app/cli.py b/autogpt/core/runner/cli_app/cli.py
index 8d33c560..56fca975 100644
--- a/autogpt/core/runner/cli_app/cli.py
+++ b/autogpt/core/runner/cli_app/cli.py
@@ -7,7 +7,6 @@ from autogpt.core.runner.cli_app.main import run_auto_gpt
 from autogpt.core.runner.client_lib.shared_click_commands import (
     DEFAULT_SETTINGS_FILE,
     make_settings,
-    status,
 )
 from autogpt.core.runner.client_lib.utils import coroutine, handle_exceptions
 
@@ -19,7 +18,6 @@ def autogpt():
 
 
 autogpt.add_command(make_settings)
-autogpt.add_command(status)
 
 
 @autogpt.command()
diff --git a/autogpt/core/runner/cli_app/main.py b/autogpt/core/runner/cli_app/main.py
index a8ce6d7f..60af24be 100644
--- a/autogpt/core/runner/cli_app/main.py
+++ b/autogpt/core/runner/cli_app/main.py
@@ -102,7 +102,9 @@ def parse_next_ability(current_task, next_ability: dict) -> str:
 
 
 def parse_ability_result(ability_result) -> str:
+    parsed_response = f"Ability: {ability_result['ability_name']}\n"
+    parsed_response += f"Ability Arguments: {ability_result['ability_args']}\n"
     parsed_response = f"Ability Result: {ability_result['success']}\n"
     parsed_response += f"Message: {ability_result['message']}\n"
-    parsed_response += f"Data: {ability_result['data']}\n"
+    parsed_response += f"Data: {ability_result['new_knowledge']}\n"
     return parsed_response
diff --git a/requirements.txt b/requirements.txt
index 30ae8399..47aa08a6 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -29,6 +29,7 @@ spacy>=3.0.0,<4.0.0
 en-core-web-sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.5.0/en_core_web_sm-3.5.0-py3-none-any.whl
 prompt_toolkit>=3.0.38
 pydantic
+inflection
 
 # web server
 fastapi