Mirror of https://github.com/aljazceru/Auto-GPT.git (synced 2026-02-09 00:04:31 +01:00)
AutoGPT: Move all the Agent's prompt generation code into a PromptStrategy (#5363)
* AutoGPT: fix execution of async commands
* AutoGPT: Move all the Agent's prompt building code into OneShotAgentPromptStrategy
* Remove generic type annotations from PromptStrategy
* Fix tests
* Fix lint
Committed by GitHub. Parent: 747f27d26f, commit: d466689c94
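In outline, this change gives the Agent a pluggable prompt builder: everything about composing the prompt and parsing the reply moves behind a strategy object. A minimal sketch of the resulting shape (names simplified from the diff below; not the full interface):

```python
from abc import ABC, abstractmethod


class PromptStrategy(ABC):
    """Owns how a prompt is built and how the LLM's reply is parsed."""

    @abstractmethod
    def build_prompt(self, **kwargs):  # -> ChatPrompt
        ...

    @abstractmethod
    def parse_response_content(self, response):  # -> (command, args, thoughts)
        ...


class Agent:
    """The agent keeps the propose/execute loop; prompting is delegated."""

    def __init__(self, prompt_strategy: PromptStrategy):
        self.prompt_strategy = prompt_strategy

    def build_prompt(self, **kwargs):
        return self.prompt_strategy.build_prompt(**kwargs)
```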
@@ -42,6 +42,8 @@ def bootstrap_agent(task: str, continuous_mode: bool) -> Agent:
        ai_goals=[task],
    )

    agent_prompt_config = Agent.default_settings.prompt_config.copy(deep=True)
    agent_prompt_config.use_functions_api = config.openai_functions
    agent_settings = AgentSettings(
        name=Agent.default_settings.name,
        description=Agent.default_settings.description,
@@ -52,6 +54,7 @@ def bootstrap_agent(task: str, continuous_mode: bool) -> Agent:
            use_functions_api=config.openai_functions,
            plugins=config.plugins,
        ),
        prompt_config=agent_prompt_config,
        history=Agent.default_settings.history.copy(deep=True),
    )

@@ -1,17 +1,17 @@
from __future__ import annotations

import json
import inspect
import logging
import time
from datetime import datetime
from typing import TYPE_CHECKING
from typing import TYPE_CHECKING, Optional

if TYPE_CHECKING:
    from autogpt.config import Config
    from autogpt.memory.vector import VectorMemory
    from autogpt.models.command_registry import CommandRegistry

from autogpt.config.ai_config import AIConfig
from autogpt.config import AIConfig
from autogpt.core.configuration import Configurable
from autogpt.core.prompting import ChatPrompt
from autogpt.core.resource.model_providers import (
@@ -19,8 +19,6 @@ from autogpt.core.resource.model_providers import (
    ChatModelProvider,
    ChatModelResponse,
)
from autogpt.core.utils.json_schema import JSONSchema
from autogpt.json_utils.utilities import extract_dict_from_response
from autogpt.llm.api_manager import ApiManager
from autogpt.logs.log_cycle import (
    CURRENT_CONTEXT_FILE_NAME,
@@ -42,12 +40,11 @@ from .base import BaseAgent, BaseAgentConfiguration, BaseAgentSettings
from .features.context import ContextMixin
from .features.watchdog import WatchdogMixin
from .features.workspace import WorkspaceMixin
from .utils.exceptions import (
    AgentException,
    CommandExecutionError,
    InvalidAgentResponseError,
    UnknownCommandError,
from .prompt_strategies.one_shot import (
    OneShotAgentPromptConfiguration,
    OneShotAgentPromptStrategy,
)
from .utils.exceptions import AgentException, CommandExecutionError, UnknownCommandError

logger = logging.getLogger(__name__)

@@ -58,6 +55,7 @@ class AgentConfiguration(BaseAgentConfiguration):

class AgentSettings(BaseAgentSettings):
    config: AgentConfiguration
    prompt_config: OneShotAgentPromptConfiguration


class Agent(
@@ -69,11 +67,12 @@ class Agent(
):
    """AutoGPT's primary Agent; uses one-shot prompting."""

    default_settings = AgentSettings(
    default_settings: AgentSettings = AgentSettings(
        name="Agent",
        description=__doc__,
        ai_config=AIConfig(ai_name="AutoGPT"),
        config=AgentConfiguration(),
        prompt_config=OneShotAgentPromptStrategy.default_configuration,
        history=BaseAgent.default_settings.history,
    )

@@ -85,9 +84,14 @@ class Agent(
        memory: VectorMemory,
        legacy_config: Config,
    ):
        prompt_strategy = OneShotAgentPromptStrategy(
            configuration=settings.prompt_config,
            logger=logger,
        )
        super().__init__(
            settings=settings,
            llm_provider=llm_provider,
            prompt_strategy=prompt_strategy,
            command_registry=command_registry,
            legacy_config=legacy_config,
        )
@@ -101,12 +105,15 @@ class Agent(
        self.log_cycle_handler = LogCycleHandler()
        """LogCycleHandler for structured debug logging."""

    def construct_base_prompt(self, *args, **kwargs) -> ChatPrompt:
        if kwargs.get("prepend_messages") is None:
            kwargs["prepend_messages"] = []

    def build_prompt(
        self,
        *args,
        extra_messages: list[ChatMessage] = [],
        include_os_info: Optional[bool] = None,
        **kwargs,
    ) -> ChatPrompt:
        # Clock
        kwargs["prepend_messages"].append(
        extra_messages.append(
            ChatMessage.system(f"The current time and date is {time.strftime('%c')}"),
        )

@@ -132,12 +139,17 @@ class Agent(
                ),
            )
            logger.debug(budget_msg)
            extra_messages.append(budget_msg)

        if kwargs.get("append_messages") is None:
            kwargs["append_messages"] = []
        kwargs["append_messages"].append(budget_msg)
        if include_os_info is None:
            include_os_info = self.legacy_config.execute_local_commands

        return super().construct_base_prompt(*args, **kwargs)
        return super().build_prompt(
            *args,
            extra_messages=extra_messages,
            include_os_info=include_os_info,
            **kwargs,
        )

    def on_before_think(self, *args, **kwargs) -> ChatPrompt:
        prompt = super().on_before_think(*args, **kwargs)
@@ -152,6 +164,40 @@ class Agent(
        )
        return prompt

    def parse_and_process_response(
        self, llm_response: ChatModelResponse, *args, **kwargs
    ) -> Agent.ThoughtProcessOutput:
        for plugin in self.config.plugins:
            if not plugin.can_handle_post_planning():
                continue
            llm_response.response["content"] = plugin.post_planning(
                llm_response.response.get("content", "")
            )

        (
            command_name,
            arguments,
            assistant_reply_dict,
        ) = self.prompt_strategy.parse_response_content(llm_response.response)

        self.log_cycle_handler.log_cycle(
            self.ai_config.ai_name,
            self.created_at,
            self.config.cycle_count,
            assistant_reply_dict,
            NEXT_ACTION_FILE_NAME,
        )

        self.event_history.register_action(
            Action(
                name=command_name,
                args=arguments,
                reasoning=assistant_reply_dict["thoughts"]["reasoning"],
            )
        )

        return command_name, arguments, assistant_reply_dict

    async def execute(
        self,
        command_name: str,
@@ -174,10 +220,12 @@ class Agent(
        for plugin in self.config.plugins:
            if not plugin.can_handle_pre_command():
                continue
            command_name, arguments = plugin.pre_command(command_name, command_args)
            command_name, command_args = plugin.pre_command(
                command_name, command_args
            )

        try:
            return_value = execute_command(
            return_value = await execute_command(
                command_name=command_name,
                arguments=command_args,
                agent=self,
@@ -199,10 +247,7 @@ class Agent(
            result = ActionErrorResult(reason=e.message, error=e)

        result_tlength = self.llm_provider.count_tokens(str(result), self.llm.name)
        history_tlength = self.llm_provider.count_tokens(
            self.event_history.fmt_paragraph(), self.llm.name
        )
        if result_tlength + history_tlength > self.send_token_limit:
        if result_tlength > self.send_token_limit // 3:
            result = ActionErrorResult(
                reason=f"Command {command_name} returned too much output. "
                "Do not execute this command again with the same arguments."
@@ -221,162 +266,13 @@ class Agent(

        return result

    def parse_and_process_response(
        self, llm_response: ChatModelResponse, *args, **kwargs
    ) -> Agent.ThoughtProcessOutput:
        if "content" not in llm_response.response:
            raise InvalidAgentResponseError("Assistant response has no text content")

        response_content = llm_response.response["content"]

        for plugin in self.config.plugins:
            if not plugin.can_handle_post_planning():
                continue
            response_content = plugin.post_planning(response_content)

        assistant_reply_dict = extract_dict_from_response(response_content)

        _, errors = RESPONSE_SCHEMA.validate_object(assistant_reply_dict, logger)
        if errors:
            raise InvalidAgentResponseError(
                "Validation of response failed:\n "
                + ";\n ".join([str(e) for e in errors])
            )

        # Get command name and arguments
        command_name, arguments = extract_command(
            assistant_reply_dict, llm_response, self.config.use_functions_api
        )
        response = command_name, arguments, assistant_reply_dict

        self.log_cycle_handler.log_cycle(
            self.ai_config.ai_name,
            self.created_at,
            self.config.cycle_count,
            assistant_reply_dict,
            NEXT_ACTION_FILE_NAME,
        )

        self.event_history.register_action(
            Action(
                name=command_name,
                args=arguments,
                reasoning=assistant_reply_dict["thoughts"]["reasoning"],
            )
        )

        return response
    #############
    # Utilities #
    #############


RESPONSE_SCHEMA = JSONSchema(
    type=JSONSchema.Type.OBJECT,
    properties={
        "thoughts": JSONSchema(
            type=JSONSchema.Type.OBJECT,
            required=True,
            properties={
                "text": JSONSchema(
                    description="thoughts",
                    type=JSONSchema.Type.STRING,
                    required=True,
                ),
                "reasoning": JSONSchema(
                    type=JSONSchema.Type.STRING,
                    required=True,
                ),
                "plan": JSONSchema(
                    description="- short bulleted\n- list that conveys\n- long-term plan",
                    type=JSONSchema.Type.STRING,
                    required=True,
                ),
                "criticism": JSONSchema(
                    description="constructive self-criticism",
                    type=JSONSchema.Type.STRING,
                    required=True,
                ),
                "speak": JSONSchema(
                    description="thoughts summary to say to user",
                    type=JSONSchema.Type.STRING,
                    required=True,
                ),
            },
        ),
        "command": JSONSchema(
            type=JSONSchema.Type.OBJECT,
            required=True,
            properties={
                "name": JSONSchema(
                    type=JSONSchema.Type.STRING,
                    required=True,
                ),
                "args": JSONSchema(
                    type=JSONSchema.Type.OBJECT,
                    required=True,
                ),
            },
        ),
    },
)


def extract_command(
    assistant_reply_json: dict,
    assistant_reply: ChatModelResponse,
    use_openai_functions_api: bool,
) -> tuple[str, dict[str, str]]:
    """Parse the response and return the command name and arguments

    Args:
        assistant_reply_json (dict): The response object from the AI
        assistant_reply (ChatModelResponse): The model response from the AI
        config (Config): The config object

    Returns:
        tuple: The command name and arguments

    Raises:
        json.decoder.JSONDecodeError: If the response is not valid JSON

        Exception: If any other error occurs
    """
    if use_openai_functions_api:
        if "function_call" not in assistant_reply.response:
            raise InvalidAgentResponseError("No 'function_call' in assistant reply")
        assistant_reply_json["command"] = {
            "name": assistant_reply.response["function_call"]["name"],
            "args": json.loads(assistant_reply.response["function_call"]["arguments"]),
        }
    try:
        if not isinstance(assistant_reply_json, dict):
            raise InvalidAgentResponseError(
                f"The previous message sent was not a dictionary {assistant_reply_json}"
            )

        if "command" not in assistant_reply_json:
            raise InvalidAgentResponseError("Missing 'command' object in JSON")

        command = assistant_reply_json["command"]
        if not isinstance(command, dict):
            raise InvalidAgentResponseError("'command' object is not a dictionary")

        if "name" not in command:
            raise InvalidAgentResponseError("Missing 'name' field in 'command' object")

        command_name = command["name"]

        # Use an empty dictionary if 'args' field is not present in 'command' object
        arguments = command.get("args", {})

        return command_name, arguments

    except json.decoder.JSONDecodeError:
        raise InvalidAgentResponseError("Invalid JSON")

    except Exception as e:
        raise InvalidAgentResponseError(str(e))


def execute_command(
async def execute_command(
    command_name: str,
    arguments: dict[str, str],
    agent: Agent,
@@ -394,21 +290,28 @@ def execute_command(
    # Execute a native command with the same name or alias, if it exists
    if command := agent.command_registry.get_command(command_name):
        try:
            return command(**arguments, agent=agent)
            result = command(**arguments, agent=agent)
            if inspect.isawaitable(result):
                return await result
            return result
        except AgentException:
            raise
        except Exception as e:
            raise CommandExecutionError(str(e))

    # Handle non-native commands (e.g. from plugins)
    for name, command in agent.prompt_generator.commands.items():
        if command_name == name or command_name.lower() == command.description.lower():
            try:
                return command.function(**arguments)
            except AgentException:
                raise
            except Exception as e:
                raise CommandExecutionError(str(e))
    if agent._prompt_scratchpad:
        for name, command in agent._prompt_scratchpad.commands.items():
            if (
                command_name == name
                or command_name.lower() == command.description.lower()
            ):
                try:
                    return command.method(**arguments)
                except AgentException:
                    raise
                except Exception as e:
                    raise CommandExecutionError(str(e))

    raise UnknownCommandError(
        f"Cannot execute command '{command_name}': unknown command."
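The async-aware dispatch above works for both sync and async commands because `inspect.isawaitable` inspects the *result* of the call rather than the function object. A minimal standalone sketch of the same pattern (the `greet` and `fetch` commands are invented for illustration):

```python
import asyncio
import inspect
from typing import Any, Callable


async def call_maybe_async(fn: Callable, *args: Any, **kwargs: Any) -> Any:
    """Invoke fn; await the result only if it is awaitable."""
    result = fn(*args, **kwargs)
    if inspect.isawaitable(result):
        return await result  # async def -> coroutine, must be awaited
    return result  # plain function -> value is already final


def greet(name: str) -> str:
    return f"Hello, {name}!"


async def fetch(url: str) -> str:
    await asyncio.sleep(0)  # stand-in for real I/O
    return f"fetched {url}"


async def main() -> None:
    print(await call_maybe_async(greet, "world"))
    print(await call_maybe_async(fetch, "https://example.com"))


asyncio.run(main())
```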
@@ -1,7 +1,6 @@
from __future__ import annotations

import logging
import re
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, Literal, Optional

@@ -10,6 +9,7 @@ from pydantic import Field, validator

if TYPE_CHECKING:
    from autogpt.config import Config
    from autogpt.core.prompting.base import PromptStrategy
    from autogpt.core.resource.model_providers.schema import (
        ChatModelInfo,
        ChatModelProvider,
@@ -17,6 +17,7 @@ if TYPE_CHECKING:
    )
    from autogpt.models.command_registry import CommandRegistry

from autogpt.agents.utils.prompt_scratchpad import PromptScratchpad
from autogpt.config.ai_config import AIConfig
from autogpt.config.ai_directives import AIDirectives
from autogpt.core.configuration import (
@@ -25,7 +26,11 @@ from autogpt.core.configuration import (
    SystemSettings,
    UserConfigurable,
)
from autogpt.core.prompting.schema import ChatMessage, ChatPrompt
from autogpt.core.prompting.schema import (
    ChatMessage,
    ChatPrompt,
    CompletionModelFunction,
)
from autogpt.core.resource.model_providers.openai import (
    OPEN_AI_CHAT_MODELS,
    OpenAIModelName,
@@ -33,7 +38,6 @@ from autogpt.core.resource.model_providers.openai import (
from autogpt.core.runner.client_lib.logging.helpers import dump_prompt
from autogpt.llm.providers.openai import get_openai_command_specs
from autogpt.models.action_history import ActionResult, EpisodicActionHistory
from autogpt.prompts.generator import PromptGenerator
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT

logger = logging.getLogger(__name__)

@@ -142,41 +146,34 @@ class BaseAgent(Configurable[BaseAgentSettings], ABC):
        self,
        settings: BaseAgentSettings,
        llm_provider: ChatModelProvider,
        prompt_strategy: PromptStrategy,
        command_registry: CommandRegistry,
        legacy_config: Config,
    ):
        self.ai_config = settings.ai_config
        self.ai_directives = AIDirectives.from_file(legacy_config.prompt_settings_file)

        self.llm_provider = llm_provider

        self.prompt_strategy = prompt_strategy

        self.command_registry = command_registry
        """The registry containing all commands available to the agent."""

        self.llm_provider = llm_provider

        self.prompt_generator = PromptGenerator(
            ai_config=settings.ai_config,
            ai_directives=AIDirectives.from_file(legacy_config.prompt_settings_file),
            command_registry=command_registry,
        )
        """The prompt generator used for generating the system prompt."""

        self.legacy_config = legacy_config
        self.config = settings.config
        """The applicable application configuration."""

        self.event_history = settings.history

        self._prompt_scratchpad: PromptScratchpad | None = None

        # Support multi-inheritance and mixins for subclasses
        super(BaseAgent, self).__init__()

    @property
    def system_prompt(self) -> str:
        """
        The system prompt sets up the AI's personality and explains its goals,
        available resources, and restrictions.
        """
        return self.prompt_generator.construct_system_prompt(self)
        logger.debug(f"Created {__class__} '{self.ai_config.ai_name}'")

    @property
    def llm(self) -> ChatModelInfo:

@@ -190,11 +187,7 @@ class BaseAgent(Configurable[BaseAgentSettings], ABC):
    def send_token_limit(self) -> int:
        return self.config.send_token_limit or self.llm.max_tokens * 3 // 4

    async def think(
        self,
        instruction: Optional[str] = None,
        thought_process_id: ThoughtProcessID = "one-shot",
    ) -> ThoughtProcessOutput:
    async def propose_action(self) -> ThoughtProcessOutput:
        """Runs the agent for one cycle.

        Params:
@@ -204,22 +197,30 @@ class BaseAgent(Configurable[BaseAgentSettings], ABC):
            The command name and arguments, if any, and the agent's thoughts.
        """

        instruction = instruction or self.config.default_cycle_instruction
        # Scratchpad as surrogate PromptGenerator for plugin hooks
        self._prompt_scratchpad = PromptScratchpad()

        prompt: ChatPrompt = self.construct_prompt(instruction, thought_process_id)
        prompt = self.on_before_think(prompt, thought_process_id, instruction)
        prompt: ChatPrompt = self.build_prompt(scratchpad=self._prompt_scratchpad)
        prompt = self.on_before_think(prompt, scratchpad=self._prompt_scratchpad)

        logger.debug(f"Executing prompt:\n{dump_prompt(prompt)}")
        raw_response = await self.llm_provider.create_chat_completion(
            prompt.messages,
            functions=get_openai_command_specs(self.command_registry)
            functions=get_openai_command_specs(
                self.command_registry.list_available_commands(self)
            )
            + list(self._prompt_scratchpad.commands.values())
            if self.config.use_functions_api
            else [],
            model_name=self.llm.name,
        )
        self.config.cycle_count += 1

        return self.on_response(raw_response, thought_process_id, prompt, instruction)
        return self.on_response(
            llm_response=raw_response,
            prompt=prompt,
            scratchpad=self._prompt_scratchpad,
        )

    @abstractmethod
    async def execute(
@@ -240,46 +241,12 @@ class BaseAgent(Configurable[BaseAgentSettings], ABC):
        """
        ...

    def construct_base_prompt(
    def build_prompt(
        self,
        thought_process_id: ThoughtProcessID,
        prepend_messages: list[ChatMessage] = [],
        append_messages: list[ChatMessage] = [],
        reserve_tokens: int = 0,
    ) -> ChatPrompt:
        """Constructs and returns a prompt with the following structure:
        1. System prompt
        2. `prepend_messages`
        3. `append_messages`

        Params:
            prepend_messages: Messages to insert between the system prompt and message history
            append_messages: Messages to insert after the message history
            reserve_tokens: Number of tokens to reserve for content that is added later
        """

        if self.event_history:
            prepend_messages.insert(
                0,
                ChatMessage.system(
                    "## Progress\n\n" f"{self.event_history.fmt_paragraph()}"
                ),
            )

        prompt = ChatPrompt(
            messages=[
                ChatMessage.system(self.system_prompt),
            ]
            + prepend_messages
            + (append_messages or []),
        )

        return prompt

    def construct_prompt(
        self,
        cycle_instruction: str,
        thought_process_id: ThoughtProcessID,
        scratchpad: PromptScratchpad,
        extra_commands: list[CompletionModelFunction] = [],
        extra_messages: list[ChatMessage] = [],
        **extras,
    ) -> ChatPrompt:
        """Constructs and returns a prompt with the following structure:
        1. System prompt
@@ -290,92 +257,40 @@ class BaseAgent(Configurable[BaseAgentSettings], ABC):
            cycle_instruction: The final instruction for a thinking cycle
        """

        if not cycle_instruction:
            raise ValueError("No instruction given")
        # Apply additions from plugins
        for plugin in self.config.plugins:
            if not plugin.can_handle_post_prompt():
                continue
            plugin.post_prompt(scratchpad)
        ai_directives = self.ai_directives.copy(deep=True)
        ai_directives.resources += scratchpad.resources
        ai_directives.constraints += scratchpad.constraints
        ai_directives.best_practices += scratchpad.best_practices
        extra_commands += list(scratchpad.commands.values())

        cycle_instruction_msg = ChatMessage.user(cycle_instruction)
        cycle_instruction_tlength = self.llm_provider.count_message_tokens(
            cycle_instruction_msg, self.llm.name
        prompt = self.prompt_strategy.build_prompt(
            ai_config=self.ai_config,
            ai_directives=ai_directives,
            commands=get_openai_command_specs(
                self.command_registry.list_available_commands(self)
            )
            + extra_commands,
            event_history=self.event_history,
            max_prompt_tokens=self.send_token_limit,
            count_tokens=lambda x: self.llm_provider.count_tokens(x, self.llm.name),
            count_message_tokens=lambda x: self.llm_provider.count_message_tokens(
                x, self.llm.name
            ),
            extra_messages=extra_messages,
            **extras,
        )

        append_messages: list[ChatMessage] = []

        response_format_instr = self.response_format_instruction(thought_process_id)
        if response_format_instr:
            append_messages.append(ChatMessage.system(response_format_instr))

        prompt = self.construct_base_prompt(
            thought_process_id,
            append_messages=append_messages,
            reserve_tokens=cycle_instruction_tlength,
        )

        # ADD user input message ("triggering prompt")
        prompt.messages.append(cycle_instruction_msg)

        return prompt

    # This can be expanded to support multiple types of (inter)actions within an agent
    def response_format_instruction(self, thought_process_id: ThoughtProcessID) -> str:
        if thought_process_id != "one-shot":
            raise NotImplementedError(f"Unknown thought process '{thought_process_id}'")

        RESPONSE_FORMAT_WITH_COMMAND = """```ts
        interface Response {
            thoughts: {
                // Thoughts
                text: string;
                reasoning: string;
                // Short markdown-style bullet list that conveys the long-term plan
                plan: string;
                // Constructive self-criticism
                criticism: string;
                // Summary of thoughts to say to the user
                speak: string;
            };
            command: {
                name: string;
                args: Record<string, any>;
            };
        }
        ```"""

        RESPONSE_FORMAT_WITHOUT_COMMAND = """```ts
        interface Response {
            thoughts: {
                // Thoughts
                text: string;
                reasoning: string;
                // Short markdown-style bullet list that conveys the long-term plan
                plan: string;
                // Constructive self-criticism
                criticism: string;
                // Summary of thoughts to say to the user
                speak: string;
            };
        }
        ```"""

        response_format = re.sub(
            r"\n\s+",
            "\n",
            RESPONSE_FORMAT_WITHOUT_COMMAND
            if self.config.use_functions_api
            else RESPONSE_FORMAT_WITH_COMMAND,
        )

        use_functions = self.config.use_functions_api and self.command_registry.commands
        return (
            f"Respond strictly with JSON{', and also specify a command to use through a function_call' if use_functions else ''}. "
            "The JSON should be compatible with the TypeScript type `Response` from the following:\n"
            f"{response_format}"
        )

    def on_before_think(
        self,
        prompt: ChatPrompt,
        thought_process_id: ThoughtProcessID,
        instruction: str,
        scratchpad: PromptScratchpad,
    ) -> ChatPrompt:
        """Called after constructing the prompt but before executing it.

@@ -395,7 +310,7 @@ class BaseAgent(Configurable[BaseAgentSettings], ABC):
        for i, plugin in enumerate(self.config.plugins):
            if not plugin.can_handle_on_planning():
                continue
            plugin_response = plugin.on_planning(self.prompt_generator, prompt.raw())
            plugin_response = plugin.on_planning(scratchpad, prompt.raw())
            if not plugin_response or plugin_response == "":
                continue
            message_to_add = ChatMessage.system(plugin_response)
@@ -415,9 +330,8 @@ class BaseAgent(Configurable[BaseAgentSettings], ABC):
    def on_response(
        self,
        llm_response: ChatModelResponse,
        thought_process_id: ThoughtProcessID,
        prompt: ChatPrompt,
        instruction: str,
        scratchpad: PromptScratchpad,
    ) -> ThoughtProcessOutput:
        """Called upon receiving a response from the chat model.

@@ -434,7 +348,9 @@ class BaseAgent(Configurable[BaseAgentSettings], ABC):
        """

        return self.parse_and_process_response(
            llm_response, thought_process_id, prompt, instruction
            llm_response,
            prompt,
            scratchpad=scratchpad,
        )

    # TODO: update memory/context
@@ -443,9 +359,8 @@ class BaseAgent(Configurable[BaseAgentSettings], ABC):
    def parse_and_process_response(
        self,
        llm_response: ChatModelResponse,
        thought_process_id: ThoughtProcessID,
        prompt: ChatPrompt,
        instruction: str,
        scratchpad: PromptScratchpad,
    ) -> ThoughtProcessOutput:
        """Validate, parse & process the LLM's response.


@@ -46,13 +46,15 @@ class ContextMixin:

        super(ContextMixin, self).__init__(**kwargs)

    def construct_base_prompt(self, *args: Any, **kwargs: Any) -> ChatPrompt:
        if kwargs.get("append_messages") is None:
            kwargs["append_messages"] = []

    def build_prompt(
        self,
        *args: Any,
        extra_messages: list[ChatMessage] = [],
        **kwargs: Any,
    ) -> ChatPrompt:
        # Add context section to prompt
        if self.context:
            kwargs["append_messages"].insert(
            extra_messages.insert(
                0,
                ChatMessage.system(
                    "## Context\n"
@@ -63,7 +65,11 @@ class ContextMixin:
                ),
            )

        return super(ContextMixin, self).construct_base_prompt(*args, **kwargs)  # type: ignore
        return super(ContextMixin, self).build_prompt(
            *args,
            extra_messages=extra_messages,
            **kwargs,
        )  # type: ignore


def get_agent_context(agent: BaseAgent) -> AgentContext | None:

@@ -32,10 +32,10 @@ class WatchdogMixin:
            f"{__class__.__name__} can only be applied to BaseAgent derivatives"
        )

    async def think(self, *args, **kwargs) -> BaseAgent.ThoughtProcessOutput:
        command_name, command_args, thoughts = await super(WatchdogMixin, self).think(
            *args, **kwargs
        )
    async def propose_action(self, *args, **kwargs) -> BaseAgent.ThoughtProcessOutput:
        command_name, command_args, thoughts = await super(
            WatchdogMixin, self
        ).propose_action(*args, **kwargs)

        if (
            not self.config.big_brain
@@ -63,6 +63,6 @@ class WatchdogMixin:

        # Switch to SMART_LLM and re-think
        self.big_brain = True
        return await self.think(*args, **kwargs)
        return await self.propose_action(*args, **kwargs)

        return command_name, command_args, thoughts

autogpts/autogpt/autogpt/agents/prompt_strategies/one_shot.py (new file, 508 lines)
@@ -0,0 +1,508 @@
from __future__ import annotations

import json
import platform
import re
from logging import Logger
from typing import TYPE_CHECKING, Callable, Optional

import distro

if TYPE_CHECKING:
    from autogpt.agents.agent import Agent
    from autogpt.models.action_history import Episode

from autogpt.agents.utils.exceptions import InvalidAgentResponseError
from autogpt.config import AIConfig, AIDirectives
from autogpt.core.configuration.schema import SystemConfiguration, UserConfigurable
from autogpt.core.prompting import (
    ChatPrompt,
    LanguageModelClassification,
    PromptStrategy,
)
from autogpt.core.resource.model_providers.schema import (
    AssistantChatMessageDict,
    ChatMessage,
    CompletionModelFunction,
)
from autogpt.core.utils.json_schema import JSONSchema
from autogpt.json_utils.utilities import extract_dict_from_response
from autogpt.prompts.utils import format_numbered_list, indent

RESPONSE_SCHEMA = JSONSchema(
    type=JSONSchema.Type.OBJECT,
    properties={
        "thoughts": JSONSchema(
            type=JSONSchema.Type.OBJECT,
            required=True,
            properties={
                "text": JSONSchema(
                    description="Thoughts",
                    type=JSONSchema.Type.STRING,
                    required=True,
                ),
                "reasoning": JSONSchema(
                    type=JSONSchema.Type.STRING,
                    required=True,
                ),
                "plan": JSONSchema(
                    description="Short markdown-style bullet list that conveys the long-term plan",
                    type=JSONSchema.Type.STRING,
                    required=True,
                ),
                "criticism": JSONSchema(
                    description="Constructive self-criticism",
                    type=JSONSchema.Type.STRING,
                    required=True,
                ),
                "speak": JSONSchema(
                    description="Summary of thoughts, to say to user",
                    type=JSONSchema.Type.STRING,
                    required=True,
                ),
            },
        ),
        "command": JSONSchema(
            type=JSONSchema.Type.OBJECT,
            required=True,
            properties={
                "name": JSONSchema(
                    type=JSONSchema.Type.STRING,
                    required=True,
                ),
                "args": JSONSchema(
                    type=JSONSchema.Type.OBJECT,
                    required=True,
                ),
            },
        ),
    },
)


class OneShotAgentPromptConfiguration(SystemConfiguration):
    DEFAULT_BODY_TEMPLATE: str = (
        "## Constraints\n"
        "You operate within the following constraints:\n"
        "{constraints}\n"
        "\n"
        "## Resources\n"
        "You can leverage access to the following resources:\n"
        "{resources}\n"
        "\n"
        "## Commands\n"
        "You have access to the following commands:\n"
        "{commands}\n"
        "\n"
        "## Best practices\n"
        "{best_practices}"
    )

    DEFAULT_CHOOSE_ACTION_INSTRUCTION: str = (
        "Determine exactly one command to use next based on the given goals "
        "and the progress you have made so far, "
        "and respond using the JSON schema specified previously:"
    )

    DEFAULT_RESPONSE_SCHEMA = JSONSchema(
        type=JSONSchema.Type.OBJECT,
        properties={
            "thoughts": JSONSchema(
                type=JSONSchema.Type.OBJECT,
                required=True,
                properties={
                    "text": JSONSchema(
                        description="Thoughts",
                        type=JSONSchema.Type.STRING,
                        required=True,
                    ),
                    "reasoning": JSONSchema(
                        type=JSONSchema.Type.STRING,
                        required=True,
                    ),
                    "plan": JSONSchema(
                        description="Short markdown-style bullet list that conveys the long-term plan",
                        type=JSONSchema.Type.STRING,
                        required=True,
                    ),
                    "criticism": JSONSchema(
                        description="Constructive self-criticism",
                        type=JSONSchema.Type.STRING,
                        required=True,
                    ),
                    "speak": JSONSchema(
                        description="Summary of thoughts, to say to user",
                        type=JSONSchema.Type.STRING,
                        required=True,
                    ),
                },
            ),
            "command": JSONSchema(
                type=JSONSchema.Type.OBJECT,
                required=True,
                properties={
                    "name": JSONSchema(
                        type=JSONSchema.Type.STRING,
                        required=True,
                    ),
                    "args": JSONSchema(
                        type=JSONSchema.Type.OBJECT,
                        required=True,
                    ),
                },
            ),
        },
    )

    body_template: str = UserConfigurable(default=DEFAULT_BODY_TEMPLATE)
    response_schema: dict = UserConfigurable(
        default_factory=DEFAULT_RESPONSE_SCHEMA.to_dict
    )
    choose_action_instruction: str = UserConfigurable(
        default=DEFAULT_CHOOSE_ACTION_INSTRUCTION
    )
    use_functions_api: bool = UserConfigurable(default=False)

    #########
    # State #
    #########
    progress_summaries: dict[tuple[int, int], str] = {(0, 0): ""}


class OneShotAgentPromptStrategy(PromptStrategy):
    default_configuration: OneShotAgentPromptConfiguration = (
        OneShotAgentPromptConfiguration()
    )

    def __init__(
        self,
        configuration: OneShotAgentPromptConfiguration,
        logger: Logger,
    ):
        self.config = configuration
        self.response_schema = JSONSchema.from_dict(configuration.response_schema)
        self.logger = logger

    @property
    def model_classification(self) -> LanguageModelClassification:
        return LanguageModelClassification.FAST_MODEL  # FIXME: dynamic switching

    def build_prompt(
        self,
        *,
        ai_config: AIConfig,
        ai_directives: AIDirectives,
        commands: list[CompletionModelFunction],
        event_history: list[Episode],
        include_os_info: bool,
        max_prompt_tokens: int,
        count_tokens: Callable[[str], int],
        count_message_tokens: Callable[[ChatMessage | list[ChatMessage]], int],
        extra_messages: list[ChatMessage] = [],
        **extras,
    ) -> ChatPrompt:
        """Constructs and returns a prompt with the following structure:
        1. System prompt
        2. Message history of the agent, truncated & prepended with running summary as needed
        3. `cycle_instruction`

        Params:
            cycle_instruction: The final instruction for a thinking cycle
        """

        system_prompt = self.build_system_prompt(
            ai_config,
            ai_directives,
            commands,
            include_os_info,
        )
        system_prompt_tlength = count_message_tokens(ChatMessage.system(system_prompt))

        response_format_instr = self.response_format_instruction(
            self.config.use_functions_api
        )
        extra_messages.append(ChatMessage.system(response_format_instr))

        final_instruction_msg = ChatMessage.user(self.config.choose_action_instruction)
        final_instruction_tlength = count_message_tokens(final_instruction_msg)

        if event_history:
            progress = self.compile_progress(
                event_history,
                count_tokens=count_tokens,
                max_tokens=(
                    max_prompt_tokens
                    - system_prompt_tlength
                    - final_instruction_tlength
                    - count_message_tokens(extra_messages)
                ),
            )
            extra_messages.insert(
                0,
                ChatMessage.system(f"## Progress\n\n{progress}"),
            )

        prompt = ChatPrompt(
            messages=[
                ChatMessage.system(system_prompt),
                *extra_messages,
                final_instruction_msg,
            ],
        )

        return prompt

    def build_system_prompt(
        self,
        ai_config: AIConfig,
        ai_directives: AIDirectives,
        commands: list[CompletionModelFunction],
        include_os_info: bool,
    ) -> str:
        system_prompt_parts = (
            self._generate_intro_prompt(ai_config)
            + (self._generate_os_info() if include_os_info else [])
            + [
                self.config.body_template.format(
                    constraints=format_numbered_list(
                        ai_directives.constraints
                        + self._generate_budget_constraint(ai_config.api_budget)
                    ),
                    resources=format_numbered_list(ai_directives.resources),
                    commands=self._generate_commands_list(commands),
                    best_practices=format_numbered_list(ai_directives.best_practices),
                )
            ]
            + self._generate_goals_info(ai_config.ai_goals)
        )

        # Join non-empty parts together into paragraph format
        return "\n\n".join(filter(None, system_prompt_parts)).strip("\n")

    def compile_progress(
        self,
        episode_history: list[Episode],
        max_tokens: Optional[int] = None,
        count_tokens: Optional[Callable[[str], int]] = None,
    ) -> str:
        if max_tokens and not count_tokens:
            raise ValueError("count_tokens is required if max_tokens is set")

        steps: list[str] = []
        tokens: int = 0
        start: int = len(episode_history)

        for i, c in reversed(list(enumerate(episode_history))):
            step = f"### Step {i+1}: Executed `{c.action.format_call()}`\n"
            step += f'- **Reasoning:** "{c.action.reasoning}"\n'
            step += (
                f"- **Status:** `{c.result.status if c.result else 'did_not_finish'}`\n"
            )
            if c.result:
                if c.result.status == "success":
                    result = str(c.result)
                    result = "\n" + indent(result) if "\n" in result else result
                    step += f"- **Output:** {result}"
                elif c.result.status == "error":
                    step += f"- **Reason:** {c.result.reason}\n"
                    if c.result.error:
                        step += f"- **Error:** {c.result.error}\n"
                elif c.result.status == "interrupted_by_human":
                    step += f"- **Feedback:** {c.result.feedback}\n"

            if max_tokens and count_tokens:
                step_tokens = count_tokens(step)
                if tokens + step_tokens > max_tokens:
                    break
                tokens += step_tokens

            steps.insert(0, step)
            start = i

        # TODO: summarize remaining

        part = slice(0, start)

        return "\n\n".join(steps)

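The token budgeting in `compile_progress` walks the history newest-first and stops once the budget is exhausted, so the most recent steps always survive truncation. A standalone sketch of that pattern (the whitespace-split `count` below is a crude stand-in for a real tokenizer):

```python
from typing import Callable


def newest_first_within_budget(
    steps: list[str],
    max_tokens: int,
    count_tokens: Callable[[str], int],
) -> list[str]:
    """Keep as many of the most recent steps as fit in max_tokens."""
    kept: list[str] = []
    used = 0
    for step in reversed(steps):
        cost = count_tokens(step)
        if used + cost > max_tokens:
            break  # older steps are dropped first
        used += cost
        kept.insert(0, step)  # restore chronological order
    return kept


count = lambda s: len(s.split())  # crude stand-in tokenizer
history = ["step one output", "step two longer output here", "step three"]
print(newest_first_within_budget(history, max_tokens=7, count_tokens=count))
# -> ['step two longer output here', 'step three']
```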
    def response_format_instruction(self, use_functions_api: bool) -> str:
        response_schema = RESPONSE_SCHEMA.copy(deep=True)
        if (
            use_functions_api
            and response_schema.properties
            and "command" in response_schema.properties
        ):
            del response_schema.properties["command"]

        # Unindent for performance
        response_format = re.sub(
            r"\n\s+",
            "\n",
            response_schema.to_typescript_object_interface("Response"),
        )

        return (
            f"Respond strictly with JSON{', and also specify a command to use through a function_call' if use_functions_api else ''}. "
            "The JSON should be compatible with the TypeScript type `Response` from the following:\n"
            f"{response_format}"
        )

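The "Unindent for performance" step is just a whitespace collapse: indentation in the generated interface text costs prompt tokens without adding meaning. A quick illustration with a made-up interface string:

```python
import re

# A nested, indented interface string similar to what
# to_typescript_object_interface() might emit (sample text, for illustration).
pretty = """interface Response {
    thoughts: {
        text: string;
    };
}"""

# Collapsing indentation removes prompt tokens that carry no information.
flat = re.sub(r"\n\s+", "\n", pretty)
print(flat)
# interface Response {
# thoughts: {
# text: string;
# };
# }
```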
    def _generate_intro_prompt(self, ai_config: AIConfig) -> list[str]:
        """Generates the introduction part of the prompt.

        Returns:
            list[str]: A list of strings forming the introduction part of the prompt.
        """
        return [
            f"You are {ai_config.ai_name}, {ai_config.ai_role.rstrip('.')}.",
            "Your decisions must always be made independently without seeking "
            "user assistance. Play to your strengths as an LLM and pursue "
            "simple strategies with no legal complications.",
        ]

    def _generate_os_info(self) -> list[str]:
        """Generates the OS information part of the prompt.

        Params:
            config (Config): The configuration object.

        Returns:
            str: The OS information part of the prompt.
        """
        os_name = platform.system()
        os_info = (
            platform.platform(terse=True)
            if os_name != "Linux"
            else distro.name(pretty=True)
        )
        return [f"The OS you are running on is: {os_info}"]

    def _generate_budget_constraint(self, api_budget: float) -> list[str]:
        """Generates the budget information part of the prompt.

        Returns:
            list[str]: The budget information part of the prompt, or an empty list.
        """
        if api_budget > 0.0:
            return [
                f"It takes money to let you run. "
                f"Your API budget is ${api_budget:.3f}"
            ]
        return []

    def _generate_goals_info(self, goals: list[str]) -> list[str]:
        """Generates the goals information part of the prompt.

        Returns:
            str: The goals information part of the prompt.
        """
        if goals:
            return [
                "\n".join(
                    [
                        "## Goals",
                        "For your task, you must fulfill the following goals:",
                        *[f"{i+1}. {goal}" for i, goal in enumerate(goals)],
                    ]
                )
            ]
        return []

    def _generate_commands_list(self, commands: list[CompletionModelFunction]) -> str:
        """Lists the commands available to the agent.

        Params:
            agent: The agent for which the commands are being listed.

        Returns:
            str: A string containing a numbered list of commands.
        """
        try:
            return format_numbered_list([cmd.fmt_line() for cmd in commands])
        except AttributeError:
            self.logger.warn(f"Formatting commands failed. {commands}")
            raise

    def parse_response_content(
        self,
        response: AssistantChatMessageDict,
    ) -> Agent.ThoughtProcessOutput:
        if "content" not in response:
            raise InvalidAgentResponseError("Assistant response has no text content")

        assistant_reply_dict = extract_dict_from_response(response["content"])

        _, errors = RESPONSE_SCHEMA.validate_object(assistant_reply_dict, self.logger)
        if errors:
            raise InvalidAgentResponseError(
                "Validation of response failed:\n "
                + ";\n ".join([str(e) for e in errors])
            )

        # Get command name and arguments
        command_name, arguments = extract_command(
            assistant_reply_dict, response, self.config.use_functions_api
        )
        return command_name, arguments, assistant_reply_dict


#############
# Utilities #
#############


def extract_command(
    assistant_reply_json: dict,
    assistant_reply: AssistantChatMessageDict,
    use_openai_functions_api: bool,
) -> tuple[str, dict[str, str]]:
    """Parse the response and return the command name and arguments

    Args:
        assistant_reply_json (dict): The response object from the AI
        assistant_reply (ChatModelResponse): The model response from the AI
        config (Config): The config object

    Returns:
        tuple: The command name and arguments

    Raises:
        json.decoder.JSONDecodeError: If the response is not valid JSON

        Exception: If any other error occurs
    """
    if use_openai_functions_api:
        if "function_call" not in assistant_reply:
            raise InvalidAgentResponseError("No 'function_call' in assistant reply")
        assistant_reply_json["command"] = {
            "name": assistant_reply["function_call"]["name"],
            "args": json.loads(assistant_reply["function_call"]["arguments"]),
        }
    try:
        if not isinstance(assistant_reply_json, dict):
            raise InvalidAgentResponseError(
                f"The previous message sent was not a dictionary {assistant_reply_json}"
            )

        if "command" not in assistant_reply_json:
            raise InvalidAgentResponseError("Missing 'command' object in JSON")

        command = assistant_reply_json["command"]
        if not isinstance(command, dict):
            raise InvalidAgentResponseError("'command' object is not a dictionary")

        if "name" not in command:
            raise InvalidAgentResponseError("Missing 'name' field in 'command' object")

        command_name = command["name"]

        # Use an empty dictionary if 'args' field is not present in 'command' object
        arguments = command.get("args", {})

        return command_name, arguments

    except json.decoder.JSONDecodeError:
        raise InvalidAgentResponseError("Invalid JSON")

    except Exception as e:
        raise InvalidAgentResponseError(str(e))

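When the functions API is in use, `extract_command` folds the model's `function_call` into the same `command` slot the JSON-only path uses, so downstream code sees one shape. A minimal standalone version of that mapping (the sample reply dict only mimics the shape of an OpenAI-style response; field values are invented):

```python
import json

# Sample assistant reply, illustrative only.
assistant_reply = {
    "content": '{"thoughts": {"text": "...", "reasoning": "..."}}',
    "function_call": {
        "name": "write_to_file",
        "arguments": '{"filename": "hello.txt", "contents": "hi"}',
    },
}

reply_json = json.loads(assistant_reply["content"])
# The model returns arguments as a JSON *string*, so they must be parsed.
reply_json["command"] = {
    "name": assistant_reply["function_call"]["name"],
    "args": json.loads(assistant_reply["function_call"]["arguments"]),
}
print(reply_json["command"]["args"]["filename"])  # -> hello.txt
```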
autogpts/autogpt/autogpt/agents/utils/prompt_scratchpad.py (new file, 102 lines)
@@ -0,0 +1,102 @@
import logging
from typing import Callable

from pydantic import BaseModel, Field

from autogpt.core.resource.model_providers.schema import CompletionModelFunction
from autogpt.core.utils.json_schema import JSONSchema

logger = logging.getLogger("PromptScratchpad")


class CallableCompletionModelFunction(CompletionModelFunction):
    method: Callable


class PromptScratchpad(BaseModel):
    commands: dict[str, CallableCompletionModelFunction] = Field(default_factory=dict)
    resources: list[str] = Field(default_factory=list)
    constraints: list[str] = Field(default_factory=list)
    best_practices: list[str] = Field(default_factory=list)

    def add_constraint(self, constraint: str) -> None:
        """
        Add a constraint to the constraints list.

        Params:
            constraint (str): The constraint to be added.
        """
        if constraint not in self.constraints:
            self.constraints.append(constraint)

    def add_command(
        self,
        name: str,
        description: str,
        params: dict[str, str],
        function: Callable,
    ) -> None:
        """
        Registers a command.

        *Should only be used by plugins.* Native commands should be added
        directly to the CommandRegistry.

        Params:
            name (str): The name of the command (e.g. `command_name`).
            description (str): The description of the command.
            params (dict, optional): A dictionary containing argument names and their
                types. Defaults to an empty dictionary.
            function (callable, optional): A callable function to be called when
                the command is executed. Defaults to None.
        """
        for p, t in params.items():
            invalid = False
            if t not in JSONSchema.Type._value2member_map_:
                invalid = True
                logger.warning(
                    f"Cannot add command '{name}':"
                    f" parameter '{p}' has invalid type '{t}'."
                    f" Valid types are: {JSONSchema.Type._value2member_map_.keys()}"
                )
            if invalid:
                return

        command = CallableCompletionModelFunction(
            name=name,
            description=description,
            parameters={
                # TODO: require plugins to specify parameters as a JSON schema
                name: JSONSchema(type=JSONSchema.Type._value2member_map_[type])
                for name, type in params.items()
            },
            method=function,
        )

        if name in self.commands:
            if description == self.commands[name].description:
                return
            logger.warning(
                f"Replacing command {self.commands[name]} with conflicting {command}"
            )
        self.commands[name] = command

    def add_resource(self, resource: str) -> None:
        """
        Add a resource to the resources list.

        Params:
            resource (str): The resource to be added.
        """
        if resource not in self.resources:
            self.resources.append(resource)

    def add_best_practice(self, best_practice: str) -> None:
        """
        Add an item to the list of best practices.

        Params:
            best_practice (str): The best practice item to be added.
        """
        if best_practice not in self.best_practices:
            self.best_practices.append(best_practice)
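The scratchpad is the plugin-facing surface that replaces the old PromptGenerator during a single thinking cycle. A usage sketch under those assumptions (the `random_number` command and the directives are invented for illustration; parameter types must be valid JSONSchema type names such as `"integer"`):

```python
import random

from autogpt.agents.utils.prompt_scratchpad import PromptScratchpad

# What a plugin hook might do with the scratchpad during one cycle.
scratchpad = PromptScratchpad()
scratchpad.add_constraint("Do not spend more than $1 of API budget.")
scratchpad.add_resource("A local random number generator.")
scratchpad.add_command(
    name="random_number",
    description="Return a random integer between low and high",
    params={"low": "integer", "high": "integer"},
    function=lambda low, high: random.randint(low, high),
)

# Each cycle, BaseAgent folds these into the directives and command list:
print(list(scratchpad.commands))  # -> ['random_number']
```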
@@ -175,6 +175,9 @@ async def run_auto_gpt(

    print_attribute("Configured Browser", config.selenium_web_browser)

    agent_prompt_config = Agent.default_settings.prompt_config.copy(deep=True)
    agent_prompt_config.use_functions_api = config.openai_functions

    agent_settings = AgentSettings(
        name=Agent.default_settings.name,
        description=Agent.default_settings.description,
@@ -185,6 +188,7 @@ async def run_auto_gpt(
            use_functions_api=config.openai_functions,
            plugins=config.plugins,
        ),
        prompt_config=agent_prompt_config,
        history=Agent.default_settings.history.copy(deep=True),
    )

@@ -263,8 +267,6 @@ async def run_interaction_loop(
    ai_config = agent.ai_config
    logger = logging.getLogger(__name__)

    logger.debug(f"{ai_config.ai_name} System Prompt:\n{agent.system_prompt}")

    cycle_budget = cycles_remaining = _get_cycle_budget(
        legacy_config.continuous_mode, legacy_config.continuous_limit
    )
@@ -306,7 +308,11 @@ async def run_interaction_loop(
        # Have the agent determine the next action to take.
        with spinner:
            try:
                command_name, command_args, assistant_reply_dict = await agent.think()
                (
                    command_name,
                    command_args,
                    assistant_reply_dict,
                ) = await agent.propose_action()
            except InvalidAgentResponseError as e:
                logger.warn(f"The agent's thoughts could not be parsed: {e}")
                consecutive_failures += 1

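A condensed sketch of the new cycle contract as a driver loop might use it, assuming an `agent` built as in `bootstrap_agent()` above; error handling and user interaction are omitted, and the `execute()` argument list is abbreviated for illustration:

```python
async def one_cycle(agent) -> str:
    # propose_action() replaces think(): it takes no instruction argument,
    # because the prompt strategy now supplies the cycle instruction itself.
    command_name, command_args, thoughts = await agent.propose_action()

    # execute() is awaited, since commands may now be coroutines.
    result = await agent.execute(command_name, command_args)
    return str(result)
```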
@@ -1,28 +1,24 @@
from __future__ import annotations

import functools
from typing import TYPE_CHECKING, Any, Callable, Literal, Optional, TypedDict
import inspect
from typing import TYPE_CHECKING, Any, Callable, Literal, Optional

if TYPE_CHECKING:
    from autogpt.agents.base import BaseAgent
    from autogpt.config import Config

from autogpt.core.utils.json_schema import JSONSchema
from autogpt.models.command import Command, CommandOutput, CommandParameter

# Unique identifier for AutoGPT commands
AUTO_GPT_COMMAND_IDENTIFIER = "auto_gpt_command"


class CommandParameterSpec(TypedDict):
    type: str
    description: str
    required: bool


def command(
    name: str,
    description: str,
    parameters: dict[str, CommandParameterSpec],
    parameters: dict[str, JSONSchema],
    enabled: Literal[True] | Callable[[Config], bool] = True,
    disabled_reason: Optional[str] = None,
    aliases: list[str] = [],
@@ -34,11 +30,9 @@ def command(
    typed_parameters = [
        CommandParameter(
            name=param_name,
            description=parameter.get("description"),
            type=parameter.get("type", "string"),
            required=parameter.get("required", False),
            spec=spec,
        )
        for param_name, parameter in parameters.items()
        for param_name, spec in parameters.items()
    ]
    cmd = Command(
        name=name,
@@ -51,9 +45,17 @@ def command(
        available=available,
    )

    @functools.wraps(func)
    def wrapper(*args, **kwargs) -> Any:
        return func(*args, **kwargs)
    if inspect.iscoroutinefunction(func):

        @functools.wraps(func)
        async def wrapper(*args, **kwargs) -> Any:
            return await func(*args, **kwargs)

    else:

        @functools.wraps(func)
        def wrapper(*args, **kwargs) -> Any:
            return func(*args, **kwargs)

    setattr(wrapper, "command", cmd)
    setattr(wrapper, AUTO_GPT_COMMAND_IDENTIFIER, True)

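The branch on `inspect.iscoroutinefunction` matters because a decorator that wraps an `async def` in a plain function would hand callers a bare coroutine where they expected the wrapped behavior. A toy decorator using the same branching (the `traced`/`ping` names are invented):

```python
import asyncio
import functools
import inspect
from typing import Any, Callable


def traced(func: Callable) -> Callable:
    """Log the call, preserving whether func is sync or async."""
    if inspect.iscoroutinefunction(func):

        @functools.wraps(func)
        async def wrapper(*args: Any, **kwargs: Any) -> Any:
            print(f"-> {func.__name__}")
            return await func(*args, **kwargs)

    else:

        @functools.wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            print(f"-> {func.__name__}")
            return func(*args, **kwargs)

    return wrapper


@traced
async def ping() -> str:
    return "pong"


print(inspect.iscoroutinefunction(ping))  # True: async-ness is preserved
print(asyncio.run(ping()))                # -> ping, then pong
```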
@@ -22,6 +22,7 @@ from autogpt.agents.utils.exceptions import (
)
from autogpt.command_decorator import command
from autogpt.config import Config
from autogpt.core.utils.json_schema import JSONSchema

from .decorators import sanitize_path_arg

@@ -36,11 +37,11 @@ DENYLIST_CONTROL = "denylist"
    "Executes the given Python code inside a single-use Docker container"
    " with access to your workspace folder",
    {
        "code": {
            "type": "string",
            "description": "The Python code to run",
            "required": True,
        },
        "code": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The Python code to run",
            required=True,
        ),
    },
)
def execute_python_code(code: str, agent: Agent) -> str:
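After the migration, a command declares its parameters as `JSONSchema` objects instead of loose `{"type": ..., "required": ...}` dicts, which lets the decorator validate and convert them uniformly. A sketch of how a declaration reads in the new style (the `say_hello` command is hypothetical; the imports are the ones introduced in the diff):

```python
from autogpt.command_decorator import command
from autogpt.core.utils.json_schema import JSONSchema


@command(
    "say_hello",  # hypothetical command, for illustration only
    "Greet someone by name",
    {
        "name": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="Who to greet",
            required=True,
        ),
    },
)
def say_hello(name: str, agent) -> str:
    return f"Hello, {name}!"
```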
@@ -74,16 +75,17 @@ def execute_python_code(code: str, agent: Agent) -> str:
|
||||
"Execute an existing Python file inside a single-use Docker container"
|
||||
" with access to your workspace folder",
|
||||
{
|
||||
"filename": {
|
||||
"type": "string",
|
||||
"description": "The name of te file to execute",
|
||||
"required": True,
|
||||
},
|
||||
"args": {
|
||||
"type": "list[str]",
|
||||
"description": "The (command line) arguments to pass to the script",
|
||||
"required": False,
|
||||
},
|
||||
"filename": JSONSchema(
|
||||
type=JSONSchema.Type.STRING,
|
||||
description="The name of the file to execute",
|
||||
required=True,
|
||||
),
|
||||
"args": JSONSchema(
|
||||
type=JSONSchema.Type.ARRAY,
|
||||
description="The (command line) arguments to pass to the script",
|
||||
required=False,
|
||||
items=JSONSchema(type=JSONSchema.Type.STRING),
|
||||
),
|
||||
},
|
||||
)
|
||||
@sanitize_path_arg("filename")
|
||||
@@ -218,11 +220,11 @@ def validate_command(command: str, config: Config) -> bool:
|
||||
"execute_shell",
|
||||
"Execute a Shell Command, non-interactive commands only",
|
||||
{
|
||||
"command_line": {
|
||||
"type": "string",
|
||||
"description": "The command line to execute",
|
||||
"required": True,
|
||||
}
|
||||
"command_line": JSONSchema(
|
||||
type=JSONSchema.Type.STRING,
|
||||
description="The command line to execute",
|
||||
required=True,
|
||||
)
|
||||
},
|
||||
enabled=lambda config: config.execute_local_commands,
|
||||
disabled_reason="You are not allowed to run local shell commands. To execute"
|
||||
@@ -264,11 +266,11 @@ def execute_shell(command_line: str, agent: Agent) -> str:
|
||||
"execute_shell_popen",
|
||||
"Execute a Shell Command, non-interactive commands only",
|
||||
{
|
||||
"command_line": {
|
||||
"type": "string",
|
||||
"description": "The command line to execute",
|
||||
"required": True,
|
||||
}
|
||||
"command_line": JSONSchema(
|
||||
type=JSONSchema.Type.STRING,
|
||||
description="The command line to execute",
|
||||
required=True,
|
||||
)
|
||||
},
|
||||
lambda config: config.execute_local_commands,
|
||||
"You are not allowed to run local shell commands. To execute"
|
||||
|
||||
@@ -18,6 +18,7 @@ from autogpt.agents.utils.exceptions import (
|
||||
DuplicateOperationError,
|
||||
)
|
||||
from autogpt.command_decorator import command
|
||||
from autogpt.core.utils.json_schema import JSONSchema
|
||||
from autogpt.models.context_item import FileContextItem, FolderContextItem
|
||||
|
||||
from .decorators import sanitize_path_arg
|
||||
@@ -32,11 +33,11 @@ def agent_implements_context(agent: BaseAgent) -> bool:
    "Open a file for editing or continued viewing; create it if it does not exist yet."
    " Note: if you only need to read or write a file once, use `write_to_file` instead.",
    {
        "file_path": {
            "type": "string",
            "description": "The path of the file to open",
            "required": True,
        }
        "file_path": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The path of the file to open",
            required=True,
        )
    },
    available=agent_implements_context,
)
@@ -84,11 +85,11 @@ def open_file(file_path: Path, agent: Agent) -> tuple[str, FileContextItem]:
    "open_folder",
    "Open a folder to keep track of its content",
    {
        "path": {
            "type": "string",
            "description": "The path of the folder to open",
            "required": True,
        }
        "path": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The path of the folder to open",
            required=True,
        )
    },
    available=agent_implements_context,
)

@@ -16,6 +16,7 @@ from typing import Iterator, Literal
from autogpt.agents.agent import Agent
from autogpt.agents.utils.exceptions import DuplicateOperationError
from autogpt.command_decorator import command
from autogpt.core.utils.json_schema import JSONSchema
from autogpt.memory.vector import MemoryItem, VectorMemory

from .decorators import sanitize_path_arg
@@ -136,11 +137,11 @@ def log_operation(
    "read_file",
    "Read an existing file",
    {
        "filename": {
            "type": "string",
            "description": "The path of the file to read",
            "required": True,
        }
        "filename": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The path of the file to read",
            required=True,
        )
    },
)
@sanitize_path_arg("filename")
@@ -193,18 +194,18 @@ def ingest_file(
    "write_file",
    "Write a file, creating it if necessary. If the file exists, it is overwritten.",
    {
        "filename": {
            "type": "string",
            "description": "The name of the file to write to",
            "required": True,
        },
        "contents": {
            "type": "string",
            "description": "The contents to write to the file",
            "required": True,
        },
        "filename": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The name of the file to write to",
            required=True,
        ),
        "contents": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The contents to write to the file",
            required=True,
        ),
    },
    aliases=["write_file", "create_file"],
    aliases=["create_file"],
)
@sanitize_path_arg("filename")
def write_to_file(filename: Path, contents: str, agent: Agent) -> str:
@@ -255,11 +256,11 @@ def append_to_file(
    "list_folder",
    "List the items in a folder",
    {
        "folder": {
            "type": "string",
            "description": "The folder to list files in",
            "required": True,
        }
        "folder": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The folder to list files in",
            required=True,
        )
    },
)
@sanitize_path_arg("folder")

@@ -10,6 +10,7 @@ from git.repo import Repo
from autogpt.agents.agent import Agent
from autogpt.agents.utils.exceptions import CommandExecutionError
from autogpt.command_decorator import command
from autogpt.core.utils.json_schema import JSONSchema
from autogpt.url_utils.validators import validate_url

from .decorators import sanitize_path_arg
@@ -19,16 +20,16 @@ from .decorators import sanitize_path_arg
    "clone_repository",
    "Clones a Repository",
    {
        "url": {
            "type": "string",
            "description": "The URL of the repository to clone",
            "required": True,
        },
        "clone_path": {
            "type": "string",
            "description": "The path to clone the repository to",
            "required": True,
        },
        "url": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The URL of the repository to clone",
            required=True,
        ),
        "clone_path": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The path to clone the repository to",
            required=True,
        ),
    },
    lambda config: bool(config.github_username and config.github_api_key),
    "Configure github_username and github_api_key.",

@@ -16,6 +16,7 @@ from PIL import Image

from autogpt.agents.agent import Agent
from autogpt.command_decorator import command
from autogpt.core.utils.json_schema import JSONSchema

logger = logging.getLogger(__name__)

@@ -24,11 +25,11 @@ logger = logging.getLogger(__name__)
    "generate_image",
    "Generates an Image",
    {
        "prompt": {
            "type": "string",
            "description": "The prompt used to generate the image",
            "required": True,
        },
        "prompt": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The prompt used to generate the image",
            required=True,
        ),
    },
    lambda config: bool(config.image_provider),
    "Requires a image provider to be set.",

@@ -14,6 +14,7 @@ if TYPE_CHECKING:
from autogpt.agents.features.context import get_agent_context
from autogpt.agents.utils.exceptions import InvalidArgumentError
from autogpt.command_decorator import command
from autogpt.core.utils.json_schema import JSONSchema

logger = logging.getLogger(__name__)

@@ -24,11 +25,11 @@ logger = logging.getLogger(__name__)
    " or when there are insurmountable problems that make it impossible"
    " for you to finish your task.",
    {
        "reason": {
            "type": "string",
            "description": "A summary to the user of how the goals were accomplished",
            "required": True,
        }
        "reason": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="A summary to the user of how the goals were accomplished",
            required=True,
        )
    },
)
def finish(reason: str, agent: Agent) -> None:
@@ -49,11 +50,11 @@ def finish(reason: str, agent: Agent) -> None:
    "hide_context_item",
    "Hide an open file, folder or other context item, to save memory.",
    {
        "number": {
            "type": "integer",
            "description": "The 1-based index of the context item to hide",
            "required": True,
        }
        "number": JSONSchema(
            type=JSONSchema.Type.INTEGER,
            description="The 1-based index of the context item to hide",
            required=True,
        )
    },
    available=lambda a: bool(get_agent_context(a)),
)

@@ -8,6 +8,7 @@ COMMAND_CATEGORY_TITLE = "User Interaction"
from autogpt.agents.agent import Agent
from autogpt.app.utils import clean_input
from autogpt.command_decorator import command
from autogpt.core.utils.json_schema import JSONSchema


@command(
@@ -17,11 +18,11 @@ from autogpt.command_decorator import command
        " you can ask the user for input"
    ),
    {
        "question": {
            "type": "string",
            "description": "The question or prompt to the user",
            "required": True,
        }
        "question": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The question or prompt to the user",
            required=True,
        )
    },
    enabled=lambda config: not config.noninteractive_mode,
)

@@ -14,6 +14,7 @@ from duckduckgo_search import DDGS
from autogpt.agents.agent import Agent
from autogpt.agents.utils.exceptions import ConfigurationError
from autogpt.command_decorator import command
from autogpt.core.utils.json_schema import JSONSchema

DUCKDUCKGO_MAX_ATTEMPTS = 3

@@ -22,11 +23,11 @@ DUCKDUCKGO_MAX_ATTEMPTS = 3
    "web_search",
    "Searches the web",
    {
        "query": {
            "type": "string",
            "description": "The search query",
            "required": True,
        }
        "query": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The search query",
            required=True,
        )
    },
    aliases=["search"],
)
@@ -64,11 +65,11 @@ def web_search(query: str, agent: Agent, num_results: int = 8) -> str:
    "google",
    "Google Search",
    {
        "query": {
            "type": "string",
            "description": "The search query",
            "required": True,
        }
        "query": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The search query",
            required=True,
        )
    },
    lambda config: bool(config.google_api_key)
    and bool(config.google_custom_search_engine_id),

@@ -39,6 +39,7 @@ if TYPE_CHECKING:

from autogpt.agents.utils.exceptions import CommandExecutionError
from autogpt.command_decorator import command
from autogpt.core.utils.json_schema import JSONSchema
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
from autogpt.processing.text import summarize_text
from autogpt.url_utils.validators import validate_url
@@ -60,12 +61,16 @@ class BrowsingError(CommandExecutionError):
    " If you are looking to extract specific information from the webpage, you should"
    " specify a question.",
    {
        "url": {"type": "string", "description": "The URL to visit", "required": True},
        "question": {
            "type": "string",
            "description": "A question that you want to answer using the content of the webpage.",
            "required": False,
        },
        "url": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The URL to visit",
            required=True,
        ),
        "question": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="A question that you want to answer using the content of the webpage.",
            required=False,
        ),
    },
)
@validate_url

@@ -9,6 +9,7 @@ from pydantic import BaseModel, Field
@functools.wraps(Field)
def UserConfigurable(*args, **kwargs):
    return Field(*args, **kwargs, user_configurable=True)
    # TODO: use this to auto-generate docs for the application configuration


class SystemConfiguration(BaseModel):

@@ -1,16 +1,12 @@
import abc
from typing import Generic, TypeVar

from autogpt.core.configuration import SystemConfiguration
from autogpt.core.resource.model_providers import AssistantChatMessageDict

from .schema import ChatPrompt, LanguageModelClassification

IN = TypeVar("IN", bound=dict)
OUT = TypeVar("OUT")


class PromptStrategy(abc.ABC, Generic[IN, OUT]):
class PromptStrategy(abc.ABC):
    default_configuration: SystemConfiguration

    @property
@@ -19,9 +15,9 @@ class PromptStrategy(abc.ABC, Generic[IN, OUT]):
        ...

    @abc.abstractmethod
    def build_prompt(self, *_, **kwargs: IN) -> ChatPrompt:
    def build_prompt(self, *_, **kwargs) -> ChatPrompt:
        ...

    @abc.abstractmethod
    def parse_response_content(self, response_content: AssistantChatMessageDict) -> OUT:
    def parse_response_content(self, response_content: AssistantChatMessageDict):
        ...

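With the `IN`/`OUT` type variables gone, a concrete strategy now subclasses `PromptStrategy` directly with plain, untyped `build_prompt`/`parse_response_content` signatures. An illustrative sketch; `EchoStrategy` is hypothetical, and the abstract @property whose name is truncated in the hunk above would also need to be implemented:

class EchoStrategy(PromptStrategy):
    default_configuration = SystemConfiguration()

    def build_prompt(self, *_, **kwargs) -> ChatPrompt:
        # **kwargs is untyped now, instead of the old `**kwargs: IN`
        return ChatPrompt(messages=[])

    def parse_response_content(self, response_content: AssistantChatMessageDict):
        # The return type is unconstrained, instead of the old `-> OUT`
        return response_content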
@@ -122,6 +122,12 @@ class CompletionModelFunction(BaseModel):
            parameters=JSONSchema.parse_properties(schema["parameters"]),
        )

    def fmt_line(self) -> str:
        params = ", ".join(
            f"{name}: {p.type.value}" for name, p in self.parameters.items()
        )
        return f"{self.name}: {self.description}. Params: ({params})"


class ModelInfo(BaseModel):
    """Struct for model information.

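The new `fmt_line()` helper renders a function spec as a single prompt-friendly line. A sketch of its output for a hand-built spec (the example values are assumed, not from the commit's tests):

func = CompletionModelFunction(
    name="read_file",
    description="Read an existing file",
    parameters={
        "filename": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The path of the file to read",
            required=True,
        ),
    },
)
print(func.fmt_line())
# read_file: Read an existing file. Params: (filename: string)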
@@ -1,6 +1,7 @@
import enum
import json
from logging import Logger
from textwrap import indent
from typing import Literal, Optional

from jsonschema import Draft7Validator
@@ -114,3 +115,40 @@ class JSONSchema(BaseModel):
            logger.debug("The JSON object is valid.")

        return True, None

    def to_typescript_object_interface(self, interface_name: str = "") -> str:
        if self.type != JSONSchema.Type.OBJECT:
            raise NotImplementedError("Only `object` schemas are supported")

        if self.properties:
            attributes: list[str] = []
            for name, property in self.properties.items():
                if property.description:
                    attributes.append(f"// {property.description}")
                attributes.append(f"{name}: {property.typescript_type};")
            attributes_string = "\n".join(attributes)
        else:
            attributes_string = "[key: string]: any"

        return (
            f"interface {interface_name} " if interface_name else ""
        ) + f"{{\n{indent(attributes_string, '  ')}\n}}"

    @property
    def typescript_type(self) -> str:
        if self.type == JSONSchema.Type.BOOLEAN:
            return "boolean"
        elif self.type in {JSONSchema.Type.INTEGER, JSONSchema.Type.NUMBER}:
            return "number"
        elif self.type == JSONSchema.Type.STRING:
            return "string"
        elif self.type == JSONSchema.Type.ARRAY:
            return f"Array<{self.items.typescript_type}>" if self.items else "Array"
        elif self.type == JSONSchema.Type.OBJECT:
            if not self.properties:
                return "Record<string, any>"
            return self.to_typescript_object_interface()
        else:
            raise NotImplementedError(
                f"JSONSchema.typescript_type does not support Type.{self.type.name} yet"
            )

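`to_typescript_object_interface()` and the `typescript_type` property together render an object schema the way it can be shown to the model. A sketch using an assumed example schema:

schema = JSONSchema(
    type=JSONSchema.Type.OBJECT,
    properties={
        "url": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The URL to visit",
            required=True,
        ),
        "tags": JSONSchema(
            type=JSONSchema.Type.ARRAY,
            items=JSONSchema(type=JSONSchema.Type.STRING),
        ),
    },
)
print(schema.to_typescript_object_interface("Response"))
# interface Response {
#   // The URL to visit
#   url: string;
#   tags: Array<string>;
# }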
@@ -1,10 +1,9 @@
from __future__ import annotations

import enum
import functools
import logging
import time
from typing import Callable, TypeVar
from typing import Callable, Iterable, TypeVar
from unittest.mock import patch

import openai
@@ -16,7 +15,7 @@ from openai.openai_object import OpenAIObject
from autogpt.core.resource.model_providers import CompletionModelFunction
from autogpt.core.utils.json_schema import JSONSchema
from autogpt.logs.helpers import request_user_double_check
from autogpt.models.command_registry import CommandRegistry
from autogpt.models.command import Command

logger = logging.getLogger(__name__)

@@ -160,7 +159,7 @@ def format_openai_function_for_prompt(func: CompletionModelFunction) -> str:


def get_openai_command_specs(
    command_registry: CommandRegistry,
    commands: Iterable[Command],
) -> list[CompletionModelFunction]:
    """Get OpenAI-consumable function specs for the agent's available commands.
    see https://platform.openai.com/docs/guides/gpt/function-calling
@@ -169,19 +168,9 @@ def get_openai_command_specs(
        CompletionModelFunction(
            name=command.name,
            description=command.description,
            parameters={
                param.name: JSONSchema(
                    type=param.type if type(param.type) == JSONSchema.Type else None,
                    enum=[v.value for v in type(param.type)]
                    if type(param.type) == enum.Enum
                    else None,
                    required=param.required,
                    description=param.description,
                )
                for param in command.parameters
            },
            parameters={param.name: param.spec for param in command.parameters},
        )
        for command in command_registry.commands.values()
        for command in commands
    ]

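`get_openai_command_specs` now accepts any iterable of `Command` objects instead of a whole `CommandRegistry`, and reuses each parameter's `JSONSchema` spec verbatim rather than rebuilding one from loose fields. A sketch of the new call shape (variable names here are assumed):

# Pass any iterable of Command objects, e.g. the commands available to one
# agent, instead of a registry:
commands: list[Command] = list(my_registry.commands.values())
functions = get_openai_command_specs(commands)
for func in functions:
    print(func.fmt_line())  # one prompt-friendly line per function spec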
@@ -1,5 +1,6 @@
from __future__ import annotations

import inspect
from typing import TYPE_CHECKING, Any, Callable, Literal, Optional

if TYPE_CHECKING:
@@ -42,8 +43,12 @@ class Command:
        self.aliases = aliases
        self.available = available

    @property
    def is_async(self) -> bool:
        return inspect.iscoroutinefunction(self.method)

    def __call__(self, *args, agent: BaseAgent, **kwargs) -> Any:
        if callable(self.enabled) and not self.enabled(agent.config):
        if callable(self.enabled) and not self.enabled(agent.legacy_config):
            if self.disabled_reason:
                raise RuntimeError(
                    f"Command '{self.name}' is disabled: {self.disabled_reason}"
@@ -57,7 +62,7 @@ class Command:

    def __str__(self) -> str:
        params = [
            f"{param.name}: {param.type if param.required else f'Optional[{param.type}]'}"
            f"{param.name}: {param.spec.type.value if param.spec.required else f'Optional[{param.spec.type.value}]'}"
            for param in self.parameters
        ]
        return f"{self.name}: {self.description.rstrip('.')}. Params: ({', '.join(params)})"

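Two behavioural changes here: `is_async` lets the executor detect coroutine commands (matching the async-command fix in this commit), and `__str__` now reads parameter types from each spec. A sketch with an assumed constructor; the keyword names are a guess, not confirmed by this diff:

async def fetch(url: str, agent) -> str:
    ...

cmd = Command(
    name="fetch",
    description="Fetch a URL.",
    method=fetch,
    parameters=[],
)
assert cmd.is_async  # inspect.iscoroutinefunction(fetch) is True
print(cmd)  # fetch: Fetch a URL. Params: ()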
@@ -1,5 +1,4 @@
import dataclasses
import enum

from autogpt.core.utils.json_schema import JSONSchema

@@ -7,9 +6,7 @@ from autogpt.core.utils.json_schema import JSONSchema
@dataclasses.dataclass
class CommandParameter:
    name: str
    type: JSONSchema.Type | enum.Enum
    description: str
    required: bool
    spec: JSONSchema

    def __repr__(self):
        return f"CommandParameter('{self.name}', '{self.type}', '{self.description}', {self.required})"
        return f"CommandParameter('{self.name}', '{self.spec.type}', '{self.spec.description}', {self.spec.required})"

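`CommandParameter` is now just a name plus a full `JSONSchema`; the old flat `type`/`description`/`required` fields are reached through the spec. A minimal sketch, mirroring the test fixtures further down:

param = CommandParameter(
    "query",
    spec=JSONSchema(
        type=JSONSchema.Type.STRING,
        description="The search query",
        required=True,
    ),
)
assert param.spec.type == JSONSchema.Type.STRING
assert param.spec.required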
@@ -1,278 +0,0 @@
""" A module for generating custom prompt strings."""
from __future__ import annotations

import logging
import platform
from dataclasses import dataclass
from typing import TYPE_CHECKING, Callable, Optional

import distro

if TYPE_CHECKING:
    from autogpt.agents.base import BaseAgent
    from autogpt.config import AIConfig, AIDirectives, Config
    from autogpt.models.command_registry import CommandRegistry

from .utils import format_numbered_list

logger = logging.getLogger(__name__)


class PromptGenerator:
    """
    A class for generating custom prompt strings based on constraints, commands,
    resources, and performance evaluations.
    """

    ai_config: AIConfig

    best_practices: list[str]
    constraints: list[str]
    resources: list[str]

    commands: dict[str, Command]
    command_registry: CommandRegistry

    def __init__(
        self,
        ai_config: AIConfig,
        ai_directives: AIDirectives,
        command_registry: CommandRegistry,
    ):
        self.ai_config = ai_config
        self.best_practices = ai_directives.best_practices
        self.constraints = ai_directives.constraints
        self.resources = ai_directives.resources
        self.commands = {}
        self.command_registry = command_registry

    @dataclass
    class Command:
        name: str
        description: str
        params: dict[str, str]
        function: Optional[Callable]

        def __str__(self) -> str:
            """Returns a string representation of the command."""
            params_string = ", ".join(
                f'"{key}": "{value}"' for key, value in self.params.items()
            )
            return f'{self.name}: "{self.description.rstrip(".")}". Params: ({params_string})'

    def add_constraint(self, constraint: str) -> None:
        """
        Add a constraint to the constraints list.

        Params:
            constraint (str): The constraint to be added.
        """
        if constraint not in self.constraints:
            self.constraints.append(constraint)

    def add_command(
        self,
        name: str,
        description: str,
        params: dict[str, str] = {},
        function: Optional[Callable] = None,
    ) -> None:
        """
        Registers a command.

        *Should only be used by plugins.* Native commands should be added
        directly to the CommandRegistry.

        Params:
            name (str): The name of the command (e.g. `command_name`).
            description (str): The description of the command.
            params (dict, optional): A dictionary containing argument names and their
              types. Defaults to an empty dictionary.
            function (callable, optional): A callable function to be called when
                the command is executed. Defaults to None.
        """
        command = PromptGenerator.Command(
            name=name,
            description=description,
            params={name: type for name, type in params.items()},
            function=function,
        )

        if name in self.commands:
            if description == self.commands[name].description:
                return
            logger.warning(
                f"Replacing command {self.commands[name]} with conflicting {command}"
            )
        self.commands[name] = command

    def add_resource(self, resource: str) -> None:
        """
        Add a resource to the resources list.

        Params:
            resource (str): The resource to be added.
        """
        if resource not in self.resources:
            self.resources.append(resource)

    def add_best_practice(self, best_practice: str) -> None:
        """
        Add an item to the list of best practices.

        Params:
            best_practice (str): The best practice item to be added.
        """
        if best_practice not in self.best_practices:
            self.best_practices.append(best_practice)

    def construct_system_prompt(self, agent: BaseAgent) -> str:
        """Constructs a system prompt containing the most important information for the AI.

        Params:
            agent: The agent for which the system prompt is being constructed.

        Returns:
            str: The constructed system prompt.
        """

        for plugin in agent.config.plugins:
            if not plugin.can_handle_post_prompt():
                continue
            plugin.post_prompt(self)

        # Construct full prompt
        full_prompt_parts = (
            self._generate_intro_prompt()
            + self._generate_os_info(agent.legacy_config)
            + self._generate_body(
                agent=agent,
                additional_constraints=self._generate_budget_info(),
            )
            + self._generate_goals_info()
        )

        # Join non-empty parts together into paragraph format
        return "\n\n".join(filter(None, full_prompt_parts)).strip("\n")

    def _generate_intro_prompt(self) -> list[str]:
        """Generates the introduction part of the prompt.

        Returns:
            list[str]: A list of strings forming the introduction part of the prompt.
        """
        return [
            f"You are {self.ai_config.ai_name}, {self.ai_config.ai_role.rstrip('.')}.",
            "Your decisions must always be made independently without seeking "
            "user assistance. Play to your strengths as an LLM and pursue "
            "simple strategies with no legal complications.",
        ]

    def _generate_os_info(self, config: Config) -> list[str]:
        """Generates the OS information part of the prompt.

        Params:
            config (Config): The configuration object.

        Returns:
            str: The OS information part of the prompt.
        """
        if config.execute_local_commands:
            os_name = platform.system()
            os_info = (
                platform.platform(terse=True)
                if os_name != "Linux"
                else distro.name(pretty=True)
            )
            return [f"The OS you are running on is: {os_info}"]
        return []

    def _generate_budget_info(self) -> list[str]:
        """Generates the budget information part of the prompt.

        Returns:
            list[str]: The budget information part of the prompt, or an empty list.
        """
        if self.ai_config.api_budget > 0.0:
            return [
                f"It takes money to let you run. "
                f"Your API budget is ${self.ai_config.api_budget:.3f}"
            ]
        return []

    def _generate_goals_info(self) -> list[str]:
        """Generates the goals information part of the prompt.

        Returns:
            str: The goals information part of the prompt.
        """
        if self.ai_config.ai_goals:
            return [
                "\n".join(
                    [
                        "## Goals",
                        "For your task, you must fulfill the following goals:",
                        *[
                            f"{i+1}. {goal}"
                            for i, goal in enumerate(self.ai_config.ai_goals)
                        ],
                    ]
                )
            ]
        return []

    def _generate_body(
        self,
        agent: BaseAgent,
        *,
        additional_constraints: list[str] = [],
        additional_resources: list[str] = [],
        additional_best_practices: list[str] = [],
    ) -> list[str]:
        """
        Generates a prompt section containing the constraints, commands, resources,
        and best practices.

        Params:
            agent: The agent for which the prompt string is being generated.
            additional_constraints: Additional constraints to be included in the prompt string.
            additional_resources: Additional resources to be included in the prompt string.
            additional_best_practices: Additional best practices to be included in the prompt string.

        Returns:
            str: The generated prompt section.
        """

        return [
            "## Constraints\n"
            "You operate within the following constraints:\n"
            f"{format_numbered_list(self.constraints + additional_constraints)}",
            "## Resources\n"
            "You can leverage access to the following resources:\n"
            f"{format_numbered_list(self.resources + additional_resources)}",
            "## Commands\n"
            "You have access to the following commands:\n"
            f"{self.list_commands(agent)}",
            "## Best practices\n"
            f"{format_numbered_list(self.best_practices + additional_best_practices)}",
        ]

    def list_commands(self, agent: BaseAgent) -> str:
        """Lists the commands available to the agent.

        Params:
            agent: The agent for which the commands are being listed.

        Returns:
            str: A string containing a numbered list of commands.
        """
        command_strings = []
        if self.command_registry:
            command_strings += [
                str(cmd) for cmd in self.command_registry.list_available_commands(agent)
            ]

        # Add commands from plugins etc.
        command_strings += [str(cmd) for cmd in self.commands.values()]

        return format_numbered_list(command_strings)
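For context on what this deleted module produced: construct_system_prompt() concatenated the intro, OS info, body sections (with budget info folded into the constraints), and goals into one system prompt. An abbreviated illustration of that output; names, the OS string, and list items are placeholders:

# Illustrative only, derived from the methods above:
#
#   You are AutoGPT, <ai_role>.
#   Your decisions must always be made independently without seeking user
#   assistance. Play to your strengths as an LLM and pursue simple strategies
#   with no legal complications.
#
#   The OS you are running on is: <os_info>
#
#   ## Constraints
#   You operate within the following constraints:
#   1. ...
#
#   ## Resources
#   You can leverage access to the following resources:
#   1. ...
#
#   ## Commands
#   You have access to the following commands:
#   1. ...
#
#   ## Best practices
#   1. ...
#
#   ## Goals
#   For your task, you must fulfill the following goals:
#   1. ...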
@@ -107,6 +107,9 @@ def agent(config: Config, llm_provider: ChatModelProvider) -> Agent:
    memory_json_file = get_memory(config)
    memory_json_file.clear()

    agent_prompt_config = Agent.default_settings.prompt_config.copy(deep=True)
    agent_prompt_config.use_functions_api = config.openai_functions

    agent_settings = AgentSettings(
        name=Agent.default_settings.name,
        description=Agent.default_settings.description,
@@ -117,6 +120,7 @@ def agent(config: Config, llm_provider: ChatModelProvider) -> Agent:
            use_functions_api=config.openai_functions,
            plugins=config.plugins,
        ),
        prompt_config=agent_prompt_config,
        history=Agent.default_settings.history.copy(deep=True),
    )

@@ -30,6 +30,8 @@ def dummy_agent(config: Config, llm_provider, memory_json_file):
        ],
    )

    agent_prompt_config = Agent.default_settings.prompt_config.copy(deep=True)
    agent_prompt_config.use_functions_api = config.openai_functions
    agent_settings = AgentSettings(
        name=Agent.default_settings.name,
        description=Agent.default_settings.description,
@@ -40,6 +42,7 @@ def dummy_agent(config: Config, llm_provider, memory_json_file):
            use_functions_api=config.openai_functions,
            plugins=config.plugins,
        ),
        prompt_config=agent_prompt_config,
        history=Agent.default_settings.history.copy(deep=True),
    )

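A note on the pattern both fixtures use, in isolation:

# pydantic's .copy(deep=True) clones the default prompt configuration, so
# flipping use_functions_api for one test run never mutates the shared
# Agent.default_settings.
prompt_config = Agent.default_settings.prompt_config.copy(deep=True)
prompt_config.use_functions_api = True  # stand-in for config.openai_functions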
@@ -1,29 +0,0 @@
from autogpt.agents.agent import Agent, execute_command


def test_agent_initialization(agent: Agent):
    assert agent.ai_config.ai_name == "Base"
    assert agent.event_history.episodes == []
    assert agent.config.cycle_budget is 1
    assert "You are Base" in agent.system_prompt


def test_execute_command_plugin(agent: Agent):
    """Test that executing a command that came from a plugin works as expected"""
    command_name = "check_plan"
    agent.prompt_generator.add_command(
        command_name,
        "Read the plan.md with the next goals to achieve",
        {},
        lambda: "hi",
    )
    command_result = execute_command(
        command_name=command_name,
        arguments={},
        agent=agent,
    )
    assert command_result == "hi"


# More test methods can be added for specific agent interactions
# For example, mocking chat_with_ai and testing the agent's interaction loop
@@ -11,12 +11,27 @@ import pytest
if TYPE_CHECKING:
    from autogpt.agents import Agent, BaseAgent

from autogpt.core.utils.json_schema import JSONSchema
from autogpt.models.command import Command, CommandParameter
from autogpt.models.command_registry import CommandRegistry

PARAMETERS = [
    CommandParameter("arg1", "int", description="Argument 1", required=True),
    CommandParameter("arg2", "str", description="Argument 2", required=False),
    CommandParameter(
        "arg1",
        spec=JSONSchema(
            type=JSONSchema.Type.INTEGER,
            description="Argument 1",
            required=True,
        ),
    ),
    CommandParameter(
        "arg2",
        spec=JSONSchema(
            type=JSONSchema.Type.STRING,
            description="Argument 2",
            required=False,
        ),
    ),
]

@@ -39,7 +54,8 @@ def test_command_creation():
    assert cmd.description == "Example command"
    assert cmd.method == example_command_method
    assert (
        str(cmd) == "example: Example command. Params: (arg1: int, arg2: Optional[str])"
        str(cmd)
        == "example: Example command. Params: (arg1: integer, arg2: Optional[string])"
    )

@@ -12,7 +12,7 @@ from autogpt.logs.utils import remove_color_codes
        ),
        (
            "{'Schaue dir meine Projekte auf github () an, als auch meine Webseiten': 'https://github.com/Significant-Gravitas/AutoGPT, https://discord.gg/autogpt und https://twitter.com/Auto_GPT'}",
            "{'Schaue dir meine Projekte auf github () an, als auch meine Webseiten': 'https://github.com/Significant-Gravitas/AutoGPT, https://discord.gg/autogpt und https://twitter.com/SigGravitas'}",
            "{'Schaue dir meine Projekte auf github () an, als auch meine Webseiten': 'https://github.com/Significant-Gravitas/AutoGPT, https://discord.gg/autogpt und https://twitter.com/Auto_GPT'}",
        ),
        ("", ""),
        ("hello", "hello"),