AutoGPT: started replacing monolithic Config by .core.configuration

Author: Reinier van der Leer
Date: 2023-09-21 16:46:13 +02:00
parent 7720f6af24
commit c1494ba1ef
22 changed files with 324 additions and 168 deletions

View File

@@ -2,14 +2,13 @@ import asyncio
import sys
from pathlib import Path
-from autogpt.agents import Agent
+from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
from autogpt.app.main import _configure_openai_provider, run_interaction_loop
from autogpt.commands import COMMAND_CATEGORIES
from autogpt.config import AIConfig, ConfigBuilder
from autogpt.logs.config import configure_logging
from autogpt.memory.vector import get_memory
from autogpt.models.command_registry import CommandRegistry
-from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
from autogpt.workspace import Workspace
PROJECT_DIR = Path().resolve()
@@ -42,13 +41,26 @@ def bootstrap_agent(task: str, continuous_mode: bool) -> Agent:
ai_role="a multi-purpose AI assistant.",
ai_goals=[task],
)
+agent_settings = AgentSettings(
+name=Agent.default_settings.name,
+description=Agent.default_settings.description,
+ai_config=ai_config,
+config=AgentConfiguration(
+fast_llm=config.fast_llm,
+smart_llm=config.smart_llm,
+use_functions_api=config.openai_functions,
+plugins=config.plugins,
+),
+history=Agent.default_settings.history.copy(deep=True),
+)
return Agent(
-memory=get_memory(config),
+settings=agent_settings,
llm_provider=_configure_openai_provider(config),
command_registry=command_registry,
-ai_config=ai_config,
-config=config,
-triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
+memory=get_memory(config),
+legacy_config=config,
)
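The bootstrap now builds the agent in two steps: everything the agent owns goes into an AgentSettings object, and the monolithic Config is handed over only as legacy_config. Since SystemSettings in autogpt.core.configuration is a pydantic model, the settings should round-trip through JSON; a minimal sketch, assuming pydantic v1 semantics and that all nested fields are themselves pydantic models:

```python
# Continuing from bootstrap_agent above: the settings object serializes as
# one unit. `plugins` is skipped automatically via Field(exclude=True).
serialized = agent_settings.json()
restored = AgentSettings.parse_raw(serialized)
assert restored.config.smart_llm == agent_settings.config.smart_llm
```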

View File

@@ -4,13 +4,15 @@ import json
import logging
import time
from datetime import datetime
-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING
if TYPE_CHECKING:
-from autogpt.config import AIConfig, Config
+from autogpt.config import Config
from autogpt.memory.vector import VectorMemory
from autogpt.models.command_registry import CommandRegistry
+from autogpt.config.ai_config import AIConfig
+from autogpt.core.configuration import Configurable
from autogpt.core.prompting import ChatPrompt
from autogpt.core.resource.model_providers import (
ChatMessage,
@@ -36,7 +38,7 @@ from autogpt.models.action_history import (
from autogpt.models.command import CommandOutput
from autogpt.models.context_item import ContextItem
-from .base import BaseAgent
+from .base import BaseAgent, BaseAgentConfiguration, BaseAgentSettings
from .features.context import ContextMixin
from .features.watchdog import WatchdogMixin
from .features.workspace import WorkspaceMixin
@@ -50,26 +52,44 @@ from .utils.exceptions import (
logger = logging.getLogger(__name__)
-class Agent(ContextMixin, WorkspaceMixin, WatchdogMixin, BaseAgent):
-"""Agent class for interacting with Auto-GPT."""
+class AgentConfiguration(BaseAgentConfiguration):
+pass
+class AgentSettings(BaseAgentSettings):
+config: AgentConfiguration
+class Agent(
+ContextMixin,
+WorkspaceMixin,
+WatchdogMixin,
+BaseAgent,
+Configurable[AgentSettings],
+):
+"""AutoGPT's primary Agent; uses one-shot prompting."""
+default_settings = AgentSettings(
+name="Agent",
+description=__doc__,
+ai_config=AIConfig(ai_name="AutoGPT"),
+config=AgentConfiguration(),
+history=BaseAgent.default_settings.history,
+)
def __init__(
self,
-ai_config: AIConfig,
+settings: AgentSettings,
llm_provider: ChatModelProvider,
command_registry: CommandRegistry,
memory: VectorMemory,
-triggering_prompt: str,
-config: Config,
-cycle_budget: Optional[int] = None,
+legacy_config: Config,
):
super().__init__(
-ai_config=ai_config,
+settings=settings,
llm_provider=llm_provider,
command_registry=command_registry,
-config=config,
-default_cycle_instruction=triggering_prompt,
-cycle_budget=cycle_budget,
+legacy_config=legacy_config,
)
self.memory = memory
@@ -126,7 +146,7 @@ class Agent(ContextMixin, WorkspaceMixin, WatchdogMixin, BaseAgent):
self.log_cycle_handler.log_cycle(
self.ai_config.ai_name,
self.created_at,
-self.cycle_count,
+self.config.cycle_count,
prompt.raw(),
CURRENT_CONTEXT_FILE_NAME,
)
@@ -145,7 +165,7 @@ class Agent(ContextMixin, WorkspaceMixin, WatchdogMixin, BaseAgent):
self.log_cycle_handler.log_cycle(
self.ai_config.ai_name,
self.created_at,
-self.cycle_count,
+self.config.cycle_count,
user_input,
USER_INPUT_FILE_NAME,
)
@@ -225,14 +245,14 @@ class Agent(ContextMixin, WorkspaceMixin, WatchdogMixin, BaseAgent):
# Get command name and arguments
command_name, arguments = extract_command(
-assistant_reply_dict, llm_response, self.config
+assistant_reply_dict, llm_response, self.config.use_functions_api
)
response = command_name, arguments, assistant_reply_dict
self.log_cycle_handler.log_cycle(
self.ai_config.ai_name,
self.created_at,
-self.cycle_count,
+self.config.cycle_count,
assistant_reply_dict,
NEXT_ACTION_FILE_NAME,
)
@@ -300,7 +320,9 @@ RESPONSE_SCHEMA = JSONSchema(
def extract_command(
-assistant_reply_json: dict, assistant_reply: ChatModelResponse, config: Config
+assistant_reply_json: dict,
+assistant_reply: ChatModelResponse,
+use_openai_functions_api: bool,
) -> tuple[str, dict[str, str]]:
"""Parse the response and return the command name and arguments
@@ -317,7 +339,7 @@ def extract_command(
Exception: If any other error occurs
"""
-if config.openai_functions:
+if use_openai_functions_api:
if "function_call" not in assistant_reply.response:
raise InvalidAgentResponseError("No 'function_call' in assistant reply")
assistant_reply_json["command"] = {

View File

@@ -5,8 +5,11 @@ import re
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, Literal, Optional
+from auto_gpt_plugin_template import AutoGPTPluginTemplate
+from pydantic import Field, validator
if TYPE_CHECKING:
-from autogpt.config import AIConfig, Config
+from autogpt.config import Config
from autogpt.core.resource.model_providers.schema import (
ChatModelInfo,
ChatModelProvider,
@@ -14,9 +17,19 @@ if TYPE_CHECKING:
)
from autogpt.models.command_registry import CommandRegistry
+from autogpt.config.ai_config import AIConfig
from autogpt.config.ai_directives import AIDirectives
+from autogpt.core.configuration import (
+Configurable,
+SystemConfiguration,
+SystemSettings,
+UserConfigurable,
+)
from autogpt.core.prompting.schema import ChatMessage, ChatPrompt
-from autogpt.core.resource.model_providers.openai import OPEN_AI_CHAT_MODELS
+from autogpt.core.resource.model_providers.openai import (
+OPEN_AI_CHAT_MODELS,
+OpenAIModelName,
+)
from autogpt.core.runner.client_lib.logging.helpers import dump_prompt
from autogpt.llm.providers.openai import get_openai_command_specs
from autogpt.models.action_history import ActionResult, EpisodicActionHistory
@@ -30,73 +43,129 @@ CommandArgs = dict[str, str]
AgentThoughts = dict[str, Any]
-class BaseAgent(ABC):
-"""Base class for all Auto-GPT agents."""
+class BaseAgentConfiguration(SystemConfiguration):
+fast_llm: OpenAIModelName = UserConfigurable(default=OpenAIModelName.GPT3_16k)
+smart_llm: OpenAIModelName = UserConfigurable(default=OpenAIModelName.GPT4)
+use_functions_api: bool = UserConfigurable(default=False)
+default_cycle_instruction: str = DEFAULT_TRIGGERING_PROMPT
+"""The default instruction passed to the AI for a thinking cycle."""
+big_brain: bool = UserConfigurable(default=True)
+"""
+Whether this agent uses the configured smart LLM (default) to think,
+as opposed to the configured fast LLM. Enabling this disables hybrid mode.
+"""
+cycle_budget: Optional[int] = 1
+"""
+The number of cycles that the agent is allowed to run unsupervised.
+`None` for unlimited continuous execution,
+`1` to require user approval for every step,
+`0` to stop the agent.
+"""
+cycles_remaining = cycle_budget
+"""The number of cycles remaining within the `cycle_budget`."""
+cycle_count = 0
+"""The number of cycles that the agent has run since its initialization."""
+send_token_limit: Optional[int] = None
+"""
+The token limit for prompt construction. Should leave room for the completion;
+defaults to 75% of `llm.max_tokens`.
+"""
+summary_max_tlength: Optional[
+int
+] = None  # TODO: move to ActionHistoryConfiguration
+plugins: list[AutoGPTPluginTemplate] = Field(default_factory=list, exclude=True)
+class Config:
+arbitrary_types_allowed = True  # Necessary for plugins
+@validator("plugins", each_item=True)
+def validate_plugins(cls, p: AutoGPTPluginTemplate | Any):
+assert issubclass(
+p.__class__, AutoGPTPluginTemplate
+), f"{p} does not subclass AutoGPTPluginTemplate"
+assert (
+p.__class__.__name__ != "AutoGPTPluginTemplate"
+), f"Plugins must subclass AutoGPTPluginTemplate; {p} is a template instance"
+return p
+@validator("use_functions_api")
+def validate_openai_functions(cls, v: bool, values: dict[str, Any]):
+if v:
+smart_llm = values["smart_llm"]
+fast_llm = values["fast_llm"]
+assert all(
+[
+not any(s in name for s in {"-0301", "-0314"})
+for name in {smart_llm, fast_llm}
+]
+), (
+f"Model {smart_llm} does not support OpenAI Functions. "
+"Please disable OPENAI_FUNCTIONS or choose a suitable model."
+)
+return v
+class BaseAgentSettings(SystemSettings):
+ai_config: AIConfig
+"""The AIConfig or "personality" object associated with this agent."""
+config: BaseAgentConfiguration
+"""The configuration for this BaseAgent subsystem instance."""
+history: EpisodicActionHistory
+"""(STATE) The action history of the agent."""
+class BaseAgent(Configurable[BaseAgentSettings], ABC):
+"""Base class for all AutoGPT agent classes."""
ThoughtProcessID = Literal["one-shot"]
ThoughtProcessOutput = tuple[CommandName, CommandArgs, AgentThoughts]
+default_settings = BaseAgentSettings(
+name="BaseAgent",
+description=__doc__,
+ai_config=AIConfig(),
+config=BaseAgentConfiguration(),
+history=EpisodicActionHistory(),
+)
def __init__(
self,
-ai_config: AIConfig,
+settings: BaseAgentSettings,
llm_provider: ChatModelProvider,
command_registry: CommandRegistry,
-config: Config,
-big_brain: bool = True,
-default_cycle_instruction: str = DEFAULT_TRIGGERING_PROMPT,
-cycle_budget: Optional[int] = 1,
-send_token_limit: Optional[int] = None,
-summary_max_tlength: Optional[int] = None,
+legacy_config: Config,
):
-self.ai_config = ai_config
-"""The AIConfig or "personality" object associated with this agent."""
+self.ai_config = settings.ai_config
+self.llm_provider = llm_provider
self.command_registry = command_registry
"""The registry containing all commands available to the agent."""
-self.llm_provider = llm_provider
self.prompt_generator = PromptGenerator(
-ai_config=ai_config,
-ai_directives=AIDirectives.from_file(config.prompt_settings_file),
+ai_config=settings.ai_config,
+ai_directives=AIDirectives.from_file(legacy_config.prompt_settings_file),
command_registry=command_registry,
)
"""The prompt generator used for generating the system prompt."""
-self.config = config
+self.legacy_config = legacy_config
+self.config = settings.config
"""The applicable application configuration."""
-self.big_brain = big_brain
-"""
-Whether this agent uses the configured smart LLM (default) to think,
-as opposed to the configured fast LLM.
-"""
-self.default_cycle_instruction = default_cycle_instruction
-"""The default instruction passed to the AI for a thinking cycle."""
-self.cycle_budget = cycle_budget
-"""
-The number of cycles that the agent is allowed to run unsupervised.
-`None` for unlimited continuous execution,
-`1` to require user approval for every step,
-`0` to stop the agent.
-"""
-self.cycles_remaining = cycle_budget
-"""The number of cycles remaining within the `cycle_budget`."""
-self.cycle_count = 0
-"""The number of cycles that the agent has run since its initialization."""
-self.send_token_limit = send_token_limit or self.llm.max_tokens * 3 // 4
-"""
-The token limit for prompt construction. Should leave room for the completion;
-defaults to 75% of `llm.max_tokens`.
-"""
-self.event_history = EpisodicActionHistory()
+self.event_history = settings.history
# Support multi-inheritance and mixins for subclasses
super(BaseAgent, self).__init__()
@@ -112,9 +181,15 @@ class BaseAgent(ABC):
@property
def llm(self) -> ChatModelInfo:
"""The LLM that the agent uses to think."""
-llm_name = self.config.smart_llm if self.big_brain else self.config.fast_llm
+llm_name = (
+self.config.smart_llm if self.config.big_brain else self.config.fast_llm
+)
return OPEN_AI_CHAT_MODELS[llm_name]
+@property
+def send_token_limit(self) -> int:
+return self.config.send_token_limit or self.llm.max_tokens * 3 // 4
async def think(
self,
instruction: Optional[str] = None,
@@ -129,7 +204,7 @@ class BaseAgent(ABC):
The command name and arguments, if any, and the agent's thoughts.
"""
-instruction = instruction or self.default_cycle_instruction
+instruction = instruction or self.config.default_cycle_instruction
prompt: ChatPrompt = self.construct_prompt(instruction, thought_process_id)
prompt = self.on_before_think(prompt, thought_process_id, instruction)
@@ -138,11 +213,11 @@ class BaseAgent(ABC):
raw_response = await self.llm_provider.create_chat_completion(
prompt.messages,
functions=get_openai_command_specs(self.command_registry)
-if self.config.openai_functions
+if self.config.use_functions_api
else [],
model_name=self.llm.name,
)
-self.cycle_count += 1
+self.config.cycle_count += 1
return self.on_response(raw_response, thought_process_id, prompt, instruction)
@@ -285,11 +360,11 @@ class BaseAgent(ABC):
r"\n\s+",
"\n",
RESPONSE_FORMAT_WITHOUT_COMMAND
-if self.config.openai_functions
+if self.config.use_functions_api
else RESPONSE_FORMAT_WITH_COMMAND,
)
-use_functions = self.config.openai_functions and self.command_registry.commands
+use_functions = self.config.use_functions_api and self.command_registry.commands
return (
f"Respond strictly with JSON{', and also specify a command to use through a function_call' if use_functions else ''}. "
"The JSON should be compatible with the TypeScript type `Response` from the following:\n"

View File

@@ -2,6 +2,10 @@ from __future__ import annotations
import logging
from contextlib import ExitStack
+from typing import TYPE_CHECKING
+if TYPE_CHECKING:
+from ..base import BaseAgentConfiguration
from autogpt.models.action_history import EpisodicActionHistory
@@ -16,6 +20,7 @@ class WatchdogMixin:
looping, the watchdog will switch from the FAST_LLM to the SMART_LLM and re-think.
"""
+config: BaseAgentConfiguration
event_history: EpisodicActionHistory
def __init__(self, **kwargs) -> None:
@@ -33,7 +38,7 @@ class WatchdogMixin:
)
if (
-not self.big_brain
+not self.config.big_brain
and len(self.event_history) > 1
and self.config.fast_llm != self.config.smart_llm
):
@@ -51,7 +56,7 @@ class WatchdogMixin:
@stack.callback
def restore_state() -> None:
# Executed after exiting the ExitStack context
-self.big_brain = False
+self.config.big_brain = False
# Remove partial record of current cycle
self.event_history.rewind()
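The watchdog now reads and flips big_brain on the shared BaseAgentConfiguration rather than on an instance attribute. Schematically the behavior looks like this; a simplified synchronous sketch, where the real mixin hooks the think cycle via an ExitStack callback and the loop test shown is an assumption:

```python
def looks_like_a_loop(history) -> bool:
    # Assumption for this sketch: two identical consecutive episodes
    # are treated as the agent getting stuck.
    return len(history) > 1 and history[-1] == history[-2]

def think_with_watchdog(agent):
    result = agent.think()  # first attempt, normally with the fast LLM
    if (
        not agent.config.big_brain
        and agent.config.fast_llm != agent.config.smart_llm
        and looks_like_a_loop(agent.event_history)
    ):
        agent.config.big_brain = True   # escalate to the smart LLM
        agent.event_history.rewind()    # drop the partial record of this cycle
        result = agent.think()          # re-think the same instruction
        agent.config.big_brain = False  # restore fast-LLM mode afterwards
    return result
```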

View File

@@ -19,13 +19,17 @@ class WorkspaceMixin:
# Initialize other bases first, because we need the config from BaseAgent
super(WorkspaceMixin, self).__init__(**kwargs)
-config: Config = getattr(self, "config")
-if not isinstance(config, Config):
+legacy_config: Config = getattr(self, "legacy_config")
+if not isinstance(legacy_config, Config):
raise ValueError(f"Cannot initialize Workspace for Agent without Config")
-if not config.workspace_path:
-raise ValueError(f"Cannot set up Workspace: no WORKSPACE_PATH in config")
+if not legacy_config.workspace_path:
+raise ValueError(
+f"Cannot set up Workspace: no WORKSPACE_PATH in legacy_config"
+)
-self.workspace = Workspace(config.workspace_path, config.restrict_to_workspace)
+self.workspace = Workspace(
+legacy_config.workspace_path, legacy_config.restrict_to_workspace
+)
def get_agent_workspace(agent: BaseAgent) -> Workspace | None:

View File

@@ -227,7 +227,7 @@ class PlanningAgent(ContextMixin, WorkspaceMixin, BaseAgent):
self.created_at,
self.cycle_count,
self.event_history.episodes,
"action_history.json",
"event_history.json",
)
self.log_cycle_handler.log_cycle(
self.ai_config.ai_name,

View File

@@ -11,7 +11,8 @@ from typing import Optional
from colorama import Fore, Style
from pydantic import SecretStr
-from autogpt.agents import Agent, AgentThoughts, CommandArgs, CommandName
+from autogpt.agents import AgentThoughts, CommandArgs, CommandName
+from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
from autogpt.agents.utils.exceptions import InvalidAgentResponseError
from autogpt.app.configurator import create_config
from autogpt.app.setup import interactive_ai_config_setup
@@ -37,7 +38,6 @@ from autogpt.logs.helpers import print_attribute, speak
from autogpt.memory.vector import get_memory
from autogpt.models.command_registry import CommandRegistry
from autogpt.plugins import scan_plugins
-from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
from autogpt.workspace import Workspace
from scripts.install_plugin_deps import install_plugin_dependencies
@@ -175,13 +175,25 @@ async def run_auto_gpt(
print_attribute("Configured Browser", config.selenium_web_browser)
+agent_settings = AgentSettings(
+name=Agent.default_settings.name,
+description=Agent.default_settings.description,
+ai_config=ai_config,
+config=AgentConfiguration(
+fast_llm=config.fast_llm,
+smart_llm=config.smart_llm,
+use_functions_api=config.openai_functions,
+plugins=config.plugins,
+),
+history=Agent.default_settings.history.copy(deep=True),
+)
agent = Agent(
-memory=memory,
+settings=agent_settings,
llm_provider=llm_provider,
command_registry=command_registry,
-triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
-ai_config=ai_config,
-config=config,
+memory=memory,
+legacy_config=config,
)
await run_interaction_loop(agent)
@@ -247,16 +259,16 @@ async def run_interaction_loop(
None
"""
# These contain both application config and agent config, so grab them here.
-config = agent.config
+legacy_config = agent.legacy_config
ai_config = agent.ai_config
logger = logging.getLogger(__name__)
logger.debug(f"{ai_config.ai_name} System Prompt:\n{agent.system_prompt}")
cycle_budget = cycles_remaining = _get_cycle_budget(
-config.continuous_mode, config.continuous_limit
+legacy_config.continuous_mode, legacy_config.continuous_limit
)
spinner = Spinner("Thinking...", plain_output=config.plain_output)
spinner = Spinner("Thinking...", plain_output=legacy_config.plain_output)
def graceful_agent_interrupt(signum: int, frame: Optional[FrameType]) -> None:
nonlocal cycle_budget, cycles_remaining, spinner
@@ -312,14 +324,16 @@ async def run_interaction_loop(
# Update User #
###############
# Print the assistant's thoughts and the next command to the user.
-update_user(config, ai_config, command_name, command_args, assistant_reply_dict)
+update_user(
+legacy_config, ai_config, command_name, command_args, assistant_reply_dict
+)
##################
# Get user input #
##################
if cycles_remaining == 1: # Last cycle
user_feedback, user_input, new_cycles_remaining = await get_user_feedback(
-config,
+legacy_config,
ai_config,
)

View File

@@ -100,7 +100,7 @@ def execute_python_file(
str: The output of the file
"""
logger.info(
f"Executing python file '{filename}' in working directory '{agent.config.workspace_path}'"
f"Executing python file '{filename}' in working directory '{agent.legacy_config.workspace_path}'"
)
if isinstance(args, str):
@@ -238,7 +238,7 @@ def execute_shell(command_line: str, agent: Agent) -> str:
Returns:
str: The output of the command
"""
-if not validate_command(command_line, agent.config):
+if not validate_command(command_line, agent.legacy_config):
logger.info(f"Command '{command_line}' not allowed")
raise OperationNotAllowedError("This shell command is not allowed.")
@@ -285,7 +285,7 @@ def execute_shell_popen(command_line: str, agent: Agent) -> str:
Returns:
str: Description of the fact that the process started and its id
"""
-if not validate_command(command_line, agent.config):
+if not validate_command(command_line, agent.legacy_config):
logger.info(f"Command '{command_line}' not allowed")
raise OperationNotAllowedError("This shell command is not allowed.")

View File

@@ -100,7 +100,7 @@ def is_duplicate_operation(
with contextlib.suppress(ValueError):
file_path = file_path.relative_to(agent.workspace.root)
-state = file_operations_state(agent.config.file_logger_path)
+state = file_operations_state(agent.legacy_config.file_logger_path)
if operation == "delete" and str(file_path) not in state:
return True
if operation == "write" and state.get(str(file_path)) == checksum:
@@ -128,7 +128,7 @@ def log_operation(
log_entry += f" #{checksum}"
logger.debug(f"Logging file operation: {log_entry}")
append_to_file(
-agent.config.file_logger_path, f"{log_entry}\n", agent, should_log=False
+agent.legacy_config.file_logger_path, f"{log_entry}\n", agent, should_log=False
)
@@ -279,7 +279,7 @@ def list_folder(folder: Path, agent: Agent) -> list[str]:
if file.startswith("."):
continue
relative_path = os.path.relpath(
-os.path.join(root, file), agent.config.workspace_path
+os.path.join(root, file), agent.legacy_config.workspace_path
)
found_files.append(relative_path)

View File

@@ -46,10 +46,8 @@ def clone_repository(url: str, clone_path: Path, agent: Agent) -> str:
str: The result of the clone operation.
"""
split_url = url.split("//")
-auth_repo_url = (
-f"//{agent.config.github_username}:{agent.config.github_api_key}@".join(
-split_url
-)
-)
+auth_repo_url = f"//{agent.legacy_config.github_username}:{agent.legacy_config.github_api_key}@".join(
+split_url
+)
try:
Repo.clone_from(url=auth_repo_url, to_path=clone_path)
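The refactor collapses the parenthesized expression into a single str.join over the "//" split, which splices credentials into the clone URL. A quick standalone check of that string trick, with dummy credentials:

```python
url = "https://github.com/Significant-Gravitas/Auto-GPT.git"
split_url = url.split("//")  # ['https:', 'github.com/Significant-Gravitas/Auto-GPT.git']
auth_repo_url = "//user:token@".join(split_url)
print(auth_repo_url)  # https://user:token@github.com/Significant-Gravitas/Auto-GPT.git
```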

View File

@@ -43,16 +43,16 @@ def generate_image(prompt: str, agent: Agent, size: int = 256) -> str:
Returns:
str: The filename of the image
"""
-filename = agent.config.workspace_path / f"{str(uuid.uuid4())}.jpg"
+filename = agent.legacy_config.workspace_path / f"{str(uuid.uuid4())}.jpg"
# DALL-E
if agent.config.image_provider == "dalle":
if agent.legacy_config.image_provider == "dalle":
return generate_image_with_dalle(prompt, filename, size, agent)
# HuggingFace
elif agent.config.image_provider == "huggingface":
elif agent.legacy_config.image_provider == "huggingface":
return generate_image_with_hf(prompt, filename, agent)
# SD WebUI
elif agent.config.image_provider == "sdwebui":
elif agent.legacy_config.image_provider == "sdwebui":
return generate_image_with_sd_webui(prompt, filename, agent, size)
return "No Image Provider Set"
@@ -67,13 +67,13 @@ def generate_image_with_hf(prompt: str, filename: str, agent: Agent) -> str:
Returns:
str: The filename of the image
"""
API_URL = f"https://api-inference.huggingface.co/models/{agent.config.huggingface_image_model}"
if agent.config.huggingface_api_token is None:
API_URL = f"https://api-inference.huggingface.co/models/{agent.legacy_config.huggingface_image_model}"
if agent.legacy_config.huggingface_api_token is None:
raise ValueError(
"You need to set your Hugging Face API token in the config file."
)
headers = {
"Authorization": f"Bearer {agent.config.huggingface_api_token}",
"Authorization": f"Bearer {agent.legacy_config.huggingface_api_token}",
"X-Use-Cache": "false",
}
@@ -142,7 +142,7 @@ def generate_image_with_dalle(
n=1,
size=f"{size}x{size}",
response_format="b64_json",
-api_key=agent.config.openai_api_key,
+api_key=agent.legacy_config.openai_api_key,
)
logger.info(f"Image Generated for prompt:{prompt}")
@@ -175,13 +175,13 @@ def generate_image_with_sd_webui(
"""
# Create a session and set the basic auth if needed
s = requests.Session()
-if agent.config.sd_webui_auth:
-username, password = agent.config.sd_webui_auth.split(":")
+if agent.legacy_config.sd_webui_auth:
+username, password = agent.legacy_config.sd_webui_auth.split(":")
s.auth = (username, password or "")
# Generate the images
response = requests.post(
f"{agent.config.sd_webui_url}/sdapi/v1/txt2img",
f"{agent.legacy_config.sd_webui_url}/sdapi/v1/txt2img",
json={
"prompt": prompt,
"negative_prompt": negative_prompt,

View File

@@ -25,6 +25,8 @@ from autogpt.command_decorator import command
},
enabled=lambda config: not config.noninteractive_mode,
)
-def ask_user(question: str, agent: Agent) -> str:
-resp = clean_input(agent.config, f"{agent.ai_config.ai_name} asks: '{question}': ")
+async def ask_user(question: str, agent: Agent) -> str:
+resp = await clean_input(
+agent.legacy_config, f"{agent.ai_config.ai_name} asks: '{question}': "
+)
return f"The user's answer: '{resp}'"

View File

@@ -91,8 +91,8 @@ def google(query: str, agent: Agent, num_results: int = 8) -> str | list[str]:
try:
# Get the Google API key and Custom Search Engine ID from the config file
-api_key = agent.config.google_api_key
-custom_search_engine_id = agent.config.google_custom_search_engine_id
+api_key = agent.legacy_config.google_api_key
+custom_search_engine_id = agent.legacy_config.google_custom_search_engine_id
# Initialize the Custom Search API service
service = build("customsearch", "v1", developerKey=api_key)

View File

@@ -81,7 +81,8 @@ async def read_webpage(url: str, agent: Agent, question: str = "") -> str:
"""
driver = None
try:
-driver = open_page_in_browser(url, agent.config)
+# FIXME: agent.config -> something else
+driver = open_page_in_browser(url, agent.legacy_config)
text = scrape_text_with_selenium(driver)
links = scrape_links_with_selenium(driver, url)
@@ -273,15 +274,15 @@ async def summarize_memorize_webpage(
text_length = len(text)
logger.info(f"Text length: {text_length} characters")
-# memory = get_memory(agent.config)
+# memory = get_memory(agent.legacy_config)
-# new_memory = MemoryItem.from_webpage(text, url, agent.config, question=question)
+# new_memory = MemoryItem.from_webpage(text, url, agent.legacy_config, question=question)
# memory.add(new_memory)
summary, _ = await summarize_text(
text,
question=question,
llm_provider=agent.llm_provider,
-config=agent.config,  # FIXME
+config=agent.legacy_config,  # FIXME
)
return summary

View File

@@ -144,7 +144,7 @@ class PromptGenerator:
# Construct full prompt
full_prompt_parts = (
self._generate_intro_prompt()
-+ self._generate_os_info(agent.config)
++ self._generate_os_info(agent.legacy_config)
+ self._generate_body(
agent=agent,
additional_constraints=self._generate_budget_info(),

View File

@@ -6,7 +6,7 @@ import pytest
import yaml
from pytest_mock import MockerFixture
-from autogpt.agents import Agent
+from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
from autogpt.app.main import _configure_openai_provider
from autogpt.config import AIConfig, Config, ConfigBuilder
from autogpt.core.resource.model_providers import ChatModelProvider, OpenAIProvider
@@ -14,7 +14,6 @@ from autogpt.llm.api_manager import ApiManager
from autogpt.logs.config import configure_logging
from autogpt.memory.vector import get_memory
from autogpt.models.command_registry import CommandRegistry
-from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
from autogpt.workspace import Workspace
pytest_plugins = [
@@ -108,11 +107,23 @@ def agent(config: Config, llm_provider: ChatModelProvider) -> Agent:
memory_json_file = get_memory(config)
memory_json_file.clear()
+agent_settings = AgentSettings(
+name=Agent.default_settings.name,
+description=Agent.default_settings.description,
+ai_config=ai_config,
+config=AgentConfiguration(
+fast_llm=config.fast_llm,
+smart_llm=config.smart_llm,
+use_functions_api=config.openai_functions,
+plugins=config.plugins,
+),
+history=Agent.default_settings.history.copy(deep=True),
+)
return Agent(
-memory=memory_json_file,
+settings=agent_settings,
llm_provider=llm_provider,
command_registry=command_registry,
-ai_config=ai_config,
-config=config,
-triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
+memory=memory_json_file,
+legacy_config=config,
)

View File

@@ -1,6 +1,6 @@
import pytest
-from autogpt.agents import Agent
+from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
from autogpt.config import AIConfig, Config
from autogpt.memory.vector import get_memory
from autogpt.models.command_registry import CommandRegistry
@@ -30,13 +30,25 @@ def dummy_agent(config: Config, llm_provider, memory_json_file):
],
)
+agent_settings = AgentSettings(
+name=Agent.default_settings.name,
+description=Agent.default_settings.description,
+ai_config=ai_config,
+config=AgentConfiguration(
+fast_llm=config.fast_llm,
+smart_llm=config.smart_llm,
+use_functions_api=config.openai_functions,
+plugins=config.plugins,
+),
+history=Agent.default_settings.history.copy(deep=True),
+)
agent = Agent(
-memory=memory_json_file,
+settings=agent_settings,
llm_provider=llm_provider,
command_registry=command_registry,
-ai_config=ai_config,
-config=config,
-triggering_prompt="dummy triggering prompt",
+memory=memory_json_file,
+legacy_config=config,
)
return agent

View File

@@ -89,30 +89,30 @@ def test_execute_shell_local_commands_not_allowed(random_string: str, agent: Agent):
def test_execute_shell_denylist_should_deny(agent: Agent, random_string: str):
agent.config.shell_denylist = ["echo"]
agent.legacy_config.shell_denylist = ["echo"]
with pytest.raises(OperationNotAllowedError, match="not allowed"):
sut.execute_shell(f"echo 'Hello {random_string}!'", agent)
def test_execute_shell_denylist_should_allow(agent: Agent, random_string: str):
agent.config.shell_denylist = ["cat"]
agent.legacy_config.shell_denylist = ["cat"]
result = sut.execute_shell(f"echo 'Hello {random_string}!'", agent)
assert "Hello" in result and random_string in result
def test_execute_shell_allowlist_should_deny(agent: Agent, random_string: str):
-agent.config.shell_command_control = sut.ALLOWLIST_CONTROL
-agent.config.shell_allowlist = ["cat"]
+agent.legacy_config.shell_command_control = sut.ALLOWLIST_CONTROL
+agent.legacy_config.shell_allowlist = ["cat"]
with pytest.raises(OperationNotAllowedError, match="not allowed"):
sut.execute_shell(f"echo 'Hello {random_string}!'", agent)
def test_execute_shell_allowlist_should_allow(agent: Agent, random_string: str):
-agent.config.shell_command_control = sut.ALLOWLIST_CONTROL
-agent.config.shell_allowlist = ["echo"]
+agent.legacy_config.shell_command_control = sut.ALLOWLIST_CONTROL
+agent.legacy_config.shell_allowlist = ["echo"]
result = sut.execute_shell(f"echo 'Hello {random_string}!'", agent)
assert "Hello" in result and random_string in result

View File

@@ -95,8 +95,8 @@ def generate_and_validate(
**kwargs,
):
"""Generate an image and validate the output."""
-agent.config.image_provider = image_provider
-agent.config.huggingface_image_model = hugging_face_image_model
+agent.legacy_config.image_provider = image_provider
+agent.legacy_config.huggingface_image_model = hugging_face_image_model
prompt = "astronaut riding a horse"
image_path = lst(generate_image(prompt, agent, image_size, **kwargs))
@@ -138,8 +138,8 @@ def test_huggingface_fail_request_with_delay(
mock_post.return_value.ok = False
mock_post.return_value.text = return_text
agent.config.image_provider = "huggingface"
agent.config.huggingface_image_model = image_model
agent.legacy_config.image_provider = "huggingface"
agent.legacy_config.huggingface_image_model = image_model
prompt = "astronaut riding a horse"
with patch("time.sleep") as mock_sleep:
@@ -155,7 +155,7 @@ def test_huggingface_fail_request_with_delay(
def test_huggingface_fail_request_with_delay(mocker, agent: Agent):
agent.config.huggingface_api_token = "1"
agent.legacy_config.huggingface_api_token = "1"
# Mock requests.post
mock_post = mocker.patch("requests.post")
@@ -166,8 +166,8 @@ def test_huggingface_fail_request_with_delay(mocker, agent: Agent):
# Mock time.sleep
mock_sleep = mocker.patch("time.sleep")
agent.config.image_provider = "huggingface"
agent.config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"
agent.legacy_config.image_provider = "huggingface"
agent.legacy_config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"
result = generate_image("astronaut riding a horse", agent, 512)
@@ -178,7 +178,7 @@ def test_huggingface_fail_request_with_delay(mocker, agent: Agent):
def test_huggingface_fail_request_no_delay(mocker, agent: Agent):
agent.config.huggingface_api_token = "1"
agent.legacy_config.huggingface_api_token = "1"
# Mock requests.post
mock_post = mocker.patch("requests.post")
@@ -191,8 +191,8 @@ def test_huggingface_fail_request_no_delay(mocker, agent: Agent):
# Mock time.sleep
mock_sleep = mocker.patch("time.sleep")
agent.config.image_provider = "huggingface"
agent.config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"
agent.legacy_config.image_provider = "huggingface"
agent.legacy_config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"
result = generate_image("astronaut riding a horse", agent, 512)
@@ -203,7 +203,7 @@ def test_huggingface_fail_request_no_delay(mocker, agent: Agent):
def test_huggingface_fail_request_bad_json(mocker, agent: Agent):
agent.config.huggingface_api_token = "1"
agent.legacy_config.huggingface_api_token = "1"
# Mock requests.post
mock_post = mocker.patch("requests.post")
@@ -214,8 +214,8 @@ def test_huggingface_fail_request_bad_json(mocker, agent: Agent):
# Mock time.sleep
mock_sleep = mocker.patch("time.sleep")
agent.config.image_provider = "huggingface"
agent.config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"
agent.legacy_config.image_provider = "huggingface"
agent.legacy_config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"
result = generate_image("astronaut riding a horse", agent, 512)
@@ -226,14 +226,14 @@ def test_huggingface_fail_request_bad_json(mocker, agent: Agent):
def test_huggingface_fail_request_bad_image(mocker, agent: Agent):
agent.config.huggingface_api_token = "1"
agent.legacy_config.huggingface_api_token = "1"
# Mock requests.post
mock_post = mocker.patch("requests.post")
mock_post.return_value.status_code = 200
agent.config.image_provider = "huggingface"
agent.config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"
agent.legacy_config.image_provider = "huggingface"
agent.legacy_config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"
result = generate_image("astronaut riding a horse", agent, 512)
@@ -241,8 +241,8 @@ def test_huggingface_fail_request_bad_image(mocker, agent: Agent):
def test_huggingface_fail_missing_api_token(mocker, agent: Agent):
agent.config.image_provider = "huggingface"
agent.config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"
agent.legacy_config.image_provider = "huggingface"
agent.legacy_config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"
# Mock requests.post to raise ValueError
mock_post = mocker.patch("requests.post", side_effect=ValueError)

View File

@@ -4,7 +4,7 @@ from autogpt.agents.agent import Agent, execute_command
def test_agent_initialization(agent: Agent):
assert agent.ai_config.ai_name == "Base"
assert agent.event_history.episodes == []
-assert agent.cycle_budget is None
+assert agent.config.cycle_budget == 1
assert "You are Base" in agent.system_prompt

View File

@@ -169,7 +169,7 @@ def test_is_duplicate_operation(agent: Agent, mocker: MockerFixture):
# Test logging a file operation
def test_log_operation(agent: Agent):
file_ops.log_operation("log_test", "path/to/test", agent=agent)
-with open(agent.config.file_logger_path, "r", encoding="utf-8") as f:
+with open(agent.legacy_config.file_logger_path, "r", encoding="utf-8") as f:
content = f.read()
assert f"log_test: path/to/test\n" in content
@@ -183,7 +183,7 @@ def test_text_checksum(file_content: str):
def test_log_operation_with_checksum(agent: Agent):
file_ops.log_operation("log_test", "path/to/test", agent=agent, checksum="ABCDEF")
-with open(agent.config.file_logger_path, "r", encoding="utf-8") as f:
+with open(agent.legacy_config.file_logger_path, "r", encoding="utf-8") as f:
content = f.read()
assert f"log_test: path/to/test #ABCDEF\n" in content
@@ -224,7 +224,7 @@ def test_write_file_logs_checksum(test_file_name: Path, agent: Agent):
new_content = "This is new content.\n"
new_checksum = file_ops.text_checksum(new_content)
file_ops.write_to_file(str(test_file_name), new_content, agent=agent)
-with open(agent.config.file_logger_path, "r", encoding="utf-8") as f:
+with open(agent.legacy_config.file_logger_path, "r", encoding="utf-8") as f:
log_entry = f.read()
assert log_entry == f"write: {test_file_name} #{new_checksum}\n"
@@ -266,7 +266,7 @@ def test_append_to_file_uses_checksum_from_appended_file(
append_text = "This is appended text.\n"
file_ops.append_to_file(test_file_name, append_text, agent=agent)
file_ops.append_to_file(test_file_name, append_text, agent=agent)
-with open(agent.config.file_logger_path, "r", encoding="utf-8") as f:
+with open(agent.legacy_config.file_logger_path, "r", encoding="utf-8") as f:
log_contents = f.read()
digest = hashlib.md5()

View File

@@ -26,7 +26,7 @@ def test_clone_auto_gpt_repository(workspace, mock_clone_from, agent: Agent):
assert clone_result == expected_output
mock_clone_from.assert_called_once_with(
url=f"{scheme}{agent.config.github_username}:{agent.config.github_api_key}@{repo}",
url=f"{scheme}{agent.legacy_config.github_username}:{agent.legacy_config.github_api_key}@{repo}",
to_path=clone_path,
)