mirror of
https://github.com/aljazceru/Auto-GPT.git
synced 2026-01-02 22:04:30 +01:00
Rough sketching out of a hello world using our refactored autogpt library. See the tracking issue here: #4770. # Run instructions There are two client applications for Auto-GPT included. ## CLI Application 🌟 **This is the reference application I'm working with for now** 🌟 The first app is a straight CLI application. I have not done anything yet to port all the friendly display stuff from the `logger.typewriter_log` logic. - [Entry Point](https://github.com/Significant-Gravitas/Auto-GPT/blob/re-arch/hello-world/autogpt/core/runner/cli_app/cli.py) - [Client Application](https://github.com/Significant-Gravitas/Auto-GPT/blob/re-arch/hello-world/autogpt/core/runner/cli_app/main.py) To run, you first need a settings file. Run ``` python REPOSITORY_ROOT/autogpt/core/runner/cli_app/cli.py make-settings ``` where `REPOSITORY_ROOT` is the root of the Auto-GPT repository on your machine. This will write a file called `default_agent_settings.yaml` with all the user-modifiable configuration keys to `~/auto-gpt/default_agent_settings.yml` (and make the `auto-gpt` directory in your user directory if it doesn't exist). At a bare minimum, you'll need to set `openai.credentials.api_key` to your OpenAI API Key to run the model. You can then run Auto-GPT with ``` python REPOSITORY_ROOT/autogpt/core/runner/cli_app/cli.py run ``` to launch the interaction loop. ## CLI Web App The second app is still a CLI, but it sets up a local webserver that the client application talks to rather than invoking calls to the Agent library code directly. This application is essentially a sketch at this point as the folks who were driving it have had less time (and likely not enough clarity) to proceed. 
- [Entry Point](https://github.com/Significant-Gravitas/Auto-GPT/blob/re-arch/hello-world/autogpt/core/runner/cli_web_app/cli.py) - [Client Application](https://github.com/Significant-Gravitas/Auto-GPT/blob/re-arch/hello-world/autogpt/core/runner/cli_web_app/client/client.py) - [Server API](https://github.com/Significant-Gravitas/Auto-GPT/blob/re-arch/hello-world/autogpt/core/runner/cli_web_app/server/api.py) To run, you still need to generate a default configuration. You can do ``` python REPOSITORY_ROOT/autogpt/core/runner/cli_web_app/cli.py make-settings ``` It invokes the same command as the bare CLI app, so follow the instructions above about setting your API key. To run, do ``` python REPOSITORY_ROOT/autogpt/core/runner/cli_web_app/cli.py client ``` This will launch a webserver and then start the client cli application to communicate with it. ⚠️ I am not actively developing this application. It is a very good place to get involved if you have web application design experience and are looking to get involved in the re-arch. --------- Co-authored-by: David Wurtz <davidjwurtz@gmail.com> Co-authored-by: Media <12145726+rihp@users.noreply.github.com> Co-authored-by: Richard Beales <rich@richbeales.net> Co-authored-by: Daryl Rodrigo <darylrodrigo@gmail.com> Co-authored-by: Daryl Rodrigo <daryl@orkestro.com> Co-authored-by: Swifty <craigswift13@gmail.com> Co-authored-by: Nicholas Tindle <nick@ntindle.com> Co-authored-by: Merwane Hamadi <merwanehamadi@gmail.com>
109 lines
4.2 KiB
Python
109 lines
4.2 KiB
Python
import click
|
|
|
|
from autogpt.core.agent import AgentSettings, SimpleAgent
|
|
from autogpt.core.runner.client_lib.logging import get_client_logger
|
|
|
|
|
|
async def run_auto_gpt(user_configuration: dict):
    """Run the Auto-GPT CLI client."""
    logger = get_client_logger()
    logger.debug("Getting agent settings")

    workspace_root = (
        user_configuration.get("workspace", {}).get("configuration", {}).get("root", "")
    )

    # An empty workspace root means no agent has been provisioned yet, so
    # bootstrap one from the user's configuration before launching.
    if not workspace_root:
        # Step 1. Collate the user's settings with the default system settings.
        agent_settings: AgentSettings = SimpleAgent.compile_settings(
            logger,
            user_configuration,
        )

        # Step 2. Ask the user what they want the agent to do, then have a
        # language model propose a suitable name and set of goals for it.
        user_objective = click.prompt("What do you want Auto-GPT to do?")
        name_and_goals = await SimpleAgent.determine_agent_name_and_goals(
            user_objective,
            agent_settings,
            logger,
        )
        print(parse_agent_name_and_goals(name_and_goals))
        agent_settings.update_agent_name_and_goals(name_and_goals)

        # Step 3. Provision the agent.
        workspace_root = SimpleAgent.provision_agent(agent_settings, logger)
        print("agent is provisioned")

    # Launch the agent interaction loop.
    agent = SimpleAgent.from_workspace(
        workspace_root,
        logger,
    )
    print("agent is loaded")

    plan = await agent.build_initial_plan()
    print(parse_agent_plan(plan))

    while True:
        current_task, next_ability = await agent.determine_next_ability(plan)
        print(parse_next_ability(current_task, next_ability))
        user_input = click.prompt(
            "Should the agent proceed with this ability?",
            default="y",
        )
        ability_result = await agent.execute_next_ability(user_input)
        print(parse_ability_result(ability_result))
|
|
|
|
|
|
def parse_agent_name_and_goals(name_and_goals: dict) -> str:
    """Format an agent's name, role, and numbered goals as display text."""
    lines = [
        f"Agent Name: {name_and_goals['agent_name']}",
        f"Agent Role: {name_and_goals['agent_role']}",
        "Agent Goals:",
    ]
    lines.extend(
        f"{position}. {goal}"
        for position, goal in enumerate(name_and_goals["agent_goals"], start=1)
    )
    # Join with newlines and keep the trailing newline after the last goal.
    return "\n".join(lines) + "\n"
|
|
|
|
|
|
def parse_agent_plan(plan: dict) -> str:
    """Format an agent plan as display text.

    Args:
        plan: Mapping with a "task_list" of task dicts; each task provides
            "objective", "type", "priority", "ready_criteria", and
            "acceptance_criteria".

    Returns:
        A multi-line, human-readable description of the plan, one numbered
        task per section.
    """
    # Fix: the placeholder-free strings were f-strings (ruff F541); plain
    # literals produce identical output.
    parsed_response = "Agent Plan:\n"
    for i, task in enumerate(plan["task_list"]):
        parsed_response += f"{i+1}. {task['objective']}\n"
        parsed_response += f"Task type: {task['type']} "
        parsed_response += f"Priority: {task['priority']}\n"
        parsed_response += "Ready Criteria:\n"
        for j, criteria in enumerate(task["ready_criteria"]):
            parsed_response += f"    {j+1}. {criteria}\n"
        parsed_response += "Acceptance Criteria:\n"
        for j, criteria in enumerate(task["acceptance_criteria"]):
            parsed_response += f"    {j+1}. {criteria}\n"
        parsed_response += "\n"

    return parsed_response
|
|
|
|
|
|
def parse_next_ability(current_task, next_ability: dict) -> str:
    """Describe the current task and the agent's proposed next ability."""
    rendered_args = ", ".join(
        f"{name}={value}" for name, value in next_ability["ability_arguments"].items()
    )
    pieces = [
        f"Current Task: {current_task.objective}\n",
        f"Next Ability: {next_ability['next_ability']}({rendered_args})\n",
        f"Motivation: {next_ability['motivation']}\n",
        f"Self-criticism: {next_ability['self_criticism']}\n",
        f"Reasoning: {next_ability['reasoning']}\n",
    ]
    return "".join(pieces)
|
|
|
|
|
|
def parse_ability_result(ability_result) -> str:
    """Summarize the outcome of an executed ability as display text."""
    fields = (
        ("Ability Result", ability_result["success"]),
        ("Message", ability_result["message"]),
        ("Data", ability_result["data"]),
    )
    return "".join(f"{label}: {value}\n" for label, value in fields)
|