Fix bugs running the core cli-app (#4905)
Co-authored-by: Luke <2609441+lc0rp@users.noreply.github.com>

@@ -11,18 +11,26 @@ The first app is a straight CLI application. I have not done anything yet to po
- [Entry Point](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_app/cli.py)
- [Client Application](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_app/main.py)

-To run, you first need a settings file. Run
+Auto-GPT must be installed in your python environment to run this application. To do so, run
+
+```
+pip install -e REPOSITORY_ROOT
+```
+
+where `REPOSITORY_ROOT` is the root of the Auto-GPT repository on your machine.
+
+You'll then need a settings file. Run

```
python REPOSITORY_ROOT/autogpt/core/runner/cli_app/cli.py make-settings
```

-where `REPOSITORY_ROOT` is the root of the Auto-GPT repository on your machine. This will write a file called `default_agent_settings.yaml` with all the user-modifiable configuration keys to `~/auto-gpt/default_agent_settings.yml` (and make the `auto-gpt` directory in your user directory if it doesn't exist). At a bare minimum, you'll need to set `openai.credentials.api_key` to your OpenAI API Key to run the model.
+This will write a file called `default_agent_settings.yaml` with all the user-modifiable configuration keys to `~/auto-gpt/default_agent_settings.yml` (and make the `auto-gpt` directory in your user directory if it doesn't exist). At a bare minimum, you'll need to set `openai.credentials.api_key` to your OpenAI API Key to run the model.
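
The dotted key path above presumably maps onto nested YAML mappings in the generated file. If you prefer to set the key programmatically instead of editing the file by hand, a minimal Python sketch (assuming PyYAML is installed and that `openai.credentials.api_key` denotes `openai:` → `credentials:` → `api_key:` nesting; everything else comes from the paragraph above) could look like:

```
# Hypothetical helper, not part of the repository: set the OpenAI API key
# in the settings file that `make-settings` generated.
from pathlib import Path

import yaml

settings_path = Path.home() / "auto-gpt" / "default_agent_settings.yml"
settings = yaml.safe_load(settings_path.read_text())

# Assumes the dotted path openai.credentials.api_key maps to nested mappings.
settings["openai"]["credentials"]["api_key"] = "sk-..."  # your OpenAI API key

settings_path.write_text(yaml.safe_dump(settings))
```

Note that rewriting the file with `yaml.safe_dump` will drop any comments the generated file contains, so hand-editing remains the simplest option.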

You can then run Auto-GPT with

```
-python REPOSITORY_ROOT/autogpt/core/runner/cli_app/cli.py make-settings
+python REPOSITORY_ROOT/autogpt/core/runner/cli_app/cli.py run
```

to launch the interaction loop.

@@ -24,3 +24,7 @@ class AbilityResult(BaseModel):
    success: bool
    message: str
    new_knowledge: Knowledge = None
+
+    def summary(self) -> str:
+        kwargs = ", ".join(f"{k}={v}" for k, v in self.ability_args.items())
+        return f"{self.ability_name}({kwargs}): {self.message}"

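To make the new `summary()` helper concrete, here is a standalone sketch that can run outside the repository. The field names mirror the hunk above, but the model is trimmed (the real `AbilityResult` lives in `autogpt.core` and has more fields, such as `new_knowledge`), and the example values are invented:

```
# Trimmed stand-in for AbilityResult, just to exercise summary().
from pydantic import BaseModel


class AbilityResult(BaseModel):
    ability_name: str
    ability_args: dict
    success: bool
    message: str

    def summary(self) -> str:
        kwargs = ", ".join(f"{k}={v}" for k, v in self.ability_args.items())
        return f"{self.ability_name}({kwargs}): {self.message}"


result = AbilityResult(
    ability_name="write_file",              # invented example values
    ability_args={"filename": "notes.txt"},
    success=True,
    message="wrote 120 bytes",
)
print(result.summary())  # -> write_file(filename=notes.txt): wrote 120 bytes
```
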
@@ -26,7 +26,6 @@ from autogpt.core.workspace.simple import SimpleWorkspace, WorkspaceSettings
class AgentSystems(SystemConfiguration):
    ability_registry: PluginLocation
    memory: PluginLocation
-    embedding_model: PluginLocation
    openai_provider: PluginLocation
    planning: PluginLocation
    workspace: PluginLocation

@@ -148,12 +147,6 @@ class SimpleAgent(Agent, Configurable):
            agent_settings,
            logger,
        )
-        agent_args["embedding_model"] = cls._get_system_instance(
-            "embedding_model",
-            agent_settings,
-            logger,
-            model_providers={"openai": agent_args["openai_provider"]},
-        )
        agent_args["planning"] = cls._get_system_instance(
            "planning",
            agent_settings,

@@ -226,7 +219,7 @@ class SimpleAgent(Agent, Configurable):
            self._current_task = None
            self._next_ability = None

-            return ability_response
+            return ability_response.dict()
        else:
            raise NotImplementedError

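The `.dict()` call matters because the CLI-side parser (see the `parse_ability_result` hunk further down) indexes the result with string keys, which a pydantic model instance does not support. A minimal illustration with an invented stand-in model, not the real `AbilityResult`:

```
from pydantic import BaseModel


class Result(BaseModel):  # invented stand-in, not the real AbilityResult
    ability_name: str = "write_file"


res = Result()
print(res.dict()["ability_name"])  # works on the plain dict: 'write_file'
try:
    res["ability_name"]  # a BaseModel instance is not subscriptable
except TypeError as err:
    print(err)
```
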
@@ -120,12 +120,12 @@ class NextAbility(PromptStrategy):
        )
        template_kwargs["additional_info"] = to_numbered_list(
            [memory.summary() for memory in task.context.memories]
-            + [info.summary() for info in task.context.supplementary_info],
+            + [info for info in task.context.supplementary_info],
            no_items_response="There is no additional information available at this time.",
            **template_kwargs,
        )
        template_kwargs["user_input"] = to_numbered_list(
-            [user_input.summary() for user_input in task.context.user_input],
+            [user_input for user_input in task.context.user_input],
            no_items_response="There are no additional considerations at this time.",
            **template_kwargs,
        )

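The change above passes the items of `supplementary_info` and `user_input` through unchanged instead of calling `.summary()` on them, presumably because they are plain strings. A simplified stand-in for `to_numbered_list` (its behaviour here is assumed from this call site, not copied from the repository) shows the effect:

```
# Assumed behaviour: number the items, or fall back to a default message.
def to_numbered_list(items, no_items_response="", **template_kwargs):
    if not items:
        return no_items_response
    return "\n".join(
        f"{i}. {item.format(**template_kwargs)}" for i, item in enumerate(items, 1)
    )


print(to_numbered_list(["Budget: $50", "Deadline: Friday"]))
# 1. Budget: $50
# 2. Deadline: Friday
print(to_numbered_list([], no_items_response="There is no additional information available at this time."))
```
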
@@ -7,7 +7,6 @@ from autogpt.core.runner.cli_app.main import run_auto_gpt
from autogpt.core.runner.client_lib.shared_click_commands import (
    DEFAULT_SETTINGS_FILE,
    make_settings,
-    status,
)
from autogpt.core.runner.client_lib.utils import coroutine, handle_exceptions

@@ -19,7 +18,6 @@ def autogpt():


autogpt.add_command(make_settings)
-autogpt.add_command(status)


@autogpt.command()

@@ -102,7 +102,9 @@ def parse_next_ability(current_task, next_ability: dict) -> str:


def parse_ability_result(ability_result) -> str:
    parsed_response = f"Ability: {ability_result['ability_name']}\n"
    parsed_response += f"Ability Arguments: {ability_result['ability_args']}\n"
    parsed_response = f"Ability Result: {ability_result['success']}\n"
    parsed_response += f"Message: {ability_result['message']}\n"
    parsed_response += f"Data: {ability_result['data']}\n"
    parsed_response += f"Data: {ability_result['new_knowledge']}\n"
    return parsed_response

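As a sanity check on the parser above, here is a usage sketch with an invented result dict shaped like the one `execute_next_ability` now returns. The duplicated `Data:` lines in the hunk show both the `data` and `new_knowledge` spellings; this sketch assumes `new_knowledge` is the field present on the result, matching the `AbilityResult` model earlier in the diff, and uses a compact, cleaned-up stand-in for the parser so it runs on its own:

```
def parse_ability_result(ability_result) -> str:
    # Cleaned-up stand-in mirroring the parser above (not copied verbatim).
    parsed_response = f"Ability: {ability_result['ability_name']}\n"
    parsed_response += f"Ability Arguments: {ability_result['ability_args']}\n"
    parsed_response += f"Ability Result: {ability_result['success']}\n"
    parsed_response += f"Message: {ability_result['message']}\n"
    parsed_response += f"Data: {ability_result['new_knowledge']}\n"
    return parsed_response


example = {
    "ability_name": "write_file",            # invented example values
    "ability_args": {"filename": "notes.txt"},
    "success": True,
    "message": "wrote 120 bytes",
    "new_knowledge": None,
}
print(parse_ability_result(example))
```
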
@@ -29,6 +29,7 @@ spacy>=3.0.0,<4.0.0
en-core-web-sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.5.0/en_core_web_sm-3.5.0-py3-none-any.whl
prompt_toolkit>=3.0.38
pydantic
inflection

# web server
fastapi