mirror of https://github.com/aljazceru/Auto-GPT.git (synced 2026-01-31 20:04:28 +01:00)
AutoGPT: Implement Agent Protocol (#5612)
.github/workflows/autogpt-ci.yml | 19 (vendored)
@@ -6,19 +6,16 @@ on:
    paths:
      - 'autogpts/autogpt/**'
      - '!autogpts/autogpt/tests/vcr_cassettes'
      - '!autogpts/autogpt/tests/challenges/current_score.json'
  pull_request:
    branches: [ stable, master, release-* ]
    paths:
      - 'autogpts/autogpt/**'
      - '!autogpts/autogpt/tests/vcr_cassettes'
      - '!autogpts/autogpt/tests/challenges/current_score.json'
  pull_request_target:
    branches: [ master, release-*, ci-test* ]
    paths:
      - 'autogpts/autogpt/**'
      - '!autogpts/autogpt/tests/vcr_cassettes'
      - '!autogpts/autogpt/tests/challenges/current_score.json'

concurrency:
  group: ${{ format('autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}

@@ -169,8 +166,7 @@ jobs:
          poetry run pytest -vv \
            --cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
            --numprocesses=logical --durations=10 \
            tests/unit tests/integration tests/challenges
          poetry run python tests/challenges/utils/build_current_score.py
            tests/unit tests/integration
        env:
          CI: true
          PROXY: ${{ github.event_name == 'pull_request_target' && secrets.PROXY || '' }}

@@ -199,19 +195,6 @@ jobs:

          echo "config_key=$config_key" >> $GITHUB_OUTPUT

      - name: Push updated challenge scores
        if: github.event_name == 'push'
        run: |
          score_file="tests/challenges/current_score.json"

          if ! git diff --quiet $score_file; then
            git add $score_file
            git commit -m "Update challenge scores"
            git push origin HEAD:${{ github.ref_name }}
          else
            echo "The challenge scores didn't change."
          fi

      - id: push_cassettes
        name: Push updated cassettes
        # For pull requests, push updated cassettes even when tests fail
.github/workflows/autogpt-docker-ci.yml | 2 (vendored)
@@ -6,13 +6,11 @@ on:
    paths:
      - 'autogpts/autogpt/**'
      - '!autogpts/autogpt/tests/vcr_cassettes'
      - '!autogpts/autogpt/tests/challenges/current_score.json'
  pull_request:
    branches: [ master, release-*, stable ]
    paths:
      - 'autogpts/autogpt/**'
      - '!autogpts/autogpt/tests/vcr_cassettes'
      - '!autogpts/autogpt/tests/challenges/current_score.json'

concurrency:
  group: ${{ format('autogpt-docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
.github/workflows/pr-label.yml | 1 (vendored)
@@ -6,7 +6,6 @@ on:
    branches: [ master, release-* ]
    paths-ignore:
      - 'autogpts/autogpt/tests/vcr_cassettes'
      - 'autogpts/autogpt/tests/challenges/current_score.json'
      - 'benchmark/reports/**'
  # So that the `dirtyLabel` is removed if conflicts are resolved
  # We recommend `pull_request_target` so that github secrets are available.
autogpts/autogpt/.gitignore | 2 (vendored)
@@ -17,6 +17,7 @@ log-ingestion.txt
*.mp3
mem.sqlite3
venvAutoGPT
data/*

# Byte-compiled / optimized / DLL files
__pycache__/

@@ -163,6 +164,7 @@ CURRENT_BULLETIN.md

# AgBenchmark
agbenchmark_config/reports/
agbenchmark_config/workspace/

# Nodejs
package-lock.json
@@ -36,12 +36,12 @@ CMD []

# dev build -> include everything
FROM autogpt-base as autogpt-dev
RUN poetry install --no-root --without benchmark
RUN poetry install --no-root
ONBUILD COPY . ./

# release build -> include bare minimum
FROM autogpt-base as autogpt-release
RUN poetry install --no-root --without dev,benchmark
RUN poetry install --no-root --without dev
ONBUILD COPY autogpt/ ./autogpt
ONBUILD COPY scripts/ ./scripts
ONBUILD COPY plugins/ ./plugins
@@ -5,13 +5,10 @@ from pathlib import Path
from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
from autogpt.app.main import _configure_openai_provider, run_interaction_loop
from autogpt.commands import COMMAND_CATEGORIES
from autogpt.config import AIConfig, ConfigBuilder
from autogpt.config import AIProfile, ConfigBuilder
from autogpt.logs.config import configure_logging
from autogpt.memory.vector import get_memory
from autogpt.models.command_registry import CommandRegistry
from autogpt.workspace import Workspace

PROJECT_DIR = Path().resolve()
LOG_DIR = Path(__file__).parent / "logs"


@@ -21,7 +18,7 @@ def run_specific_agent(task: str, continuous_mode: bool = False) -> None:


def bootstrap_agent(task: str, continuous_mode: bool) -> Agent:
    config = ConfigBuilder.build_config_from_env(workdir=PROJECT_DIR)
    config = ConfigBuilder.build_config_from_env()
    config.debug_mode = False
    config.continuous_mode = continuous_mode
    config.continuous_limit = 20

@@ -29,14 +26,16 @@ def bootstrap_agent(task: str, continuous_mode: bool) -> Agent:
    config.noninteractive_mode = True
    config.plain_output = True
    config.memory_backend = "no_memory"
    config.workspace_path = Workspace.init_workspace_directory(config)
    config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path)

    configure_logging(config, LOG_DIR)
    configure_logging(
        debug_mode=config.debug_mode,
        plain_output=config.plain_output,
        log_dir=LOG_DIR,
    )

    command_registry = CommandRegistry.with_command_modules(COMMAND_CATEGORIES, config)

    ai_config = AIConfig(
    ai_profile = AIProfile(
        ai_name="AutoGPT",
        ai_role="a multi-purpose AI assistant.",
        ai_goals=[task],

@@ -47,10 +46,11 @@ def bootstrap_agent(task: str, continuous_mode: bool) -> Agent:
    agent_settings = AgentSettings(
        name=Agent.default_settings.name,
        description=Agent.default_settings.description,
        ai_config=ai_config,
        ai_profile=ai_profile,
        config=AgentConfiguration(
            fast_llm=config.fast_llm,
            smart_llm=config.smart_llm,
            allow_fs_access=not config.restrict_to_workspace,
            use_functions_api=config.openai_functions,
            plugins=config.plugins,
        ),

@@ -58,13 +58,14 @@ def bootstrap_agent(task: str, continuous_mode: bool) -> Agent:
        history=Agent.default_settings.history.copy(deep=True),
    )

    return Agent(
    agent = Agent(
        settings=agent_settings,
        llm_provider=_configure_openai_provider(config),
        command_registry=command_registry,
        memory=get_memory(config),
        legacy_config=config,
    )
    agent.attach_fs(config.app_data_dir / "agents" / "AutoGPT-benchmark")  # HACK
    return agent


if __name__ == "__main__":
@@ -1 +1,8 @@
{"workspace": {"input": "auto_gpt_workspace", "output":"auto_gpt_workspace" }, "entry_path": "agbenchmark.benchmarks"}
{
  "workspace": {
    "input": "agbenchmark_config/workspace",
    "output": "agbenchmark_config/workspace"
  },
  "entry_path": "agbenchmark.benchmarks",
  "host": "http://localhost:8000"
}
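The new "host" field points agbenchmark at the Agent Protocol server added later in this commit. As an illustrative sketch only, this is roughly what a client would do against that endpoint, assuming the standard Agent Protocol v1 routes mounted under the /ap/v1 prefix (the task string is invented):

# Illustrative client calls against the "host" configured above.
import requests

host = "http://localhost:8000"
task = requests.post(
    f"{host}/ap/v1/agent/tasks",
    json={"input": "Write 'Hello World' to a file named hello.txt"},
).json()
step = requests.post(
    f"{host}/ap/v1/agent/tasks/{task['task_id']}/steps",
    json={"input": ""},  # the server ignores the input of the first step request
).json()
print(step["output"])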
@@ -2,4 +2,4 @@
import autogpt.app.cli

if __name__ == "__main__":
    autogpt.app.cli.main()
    autogpt.app.cli.cli()
autogpts/autogpt/autogpt/agent_factory/configurators.py | 116 (new file)
@@ -0,0 +1,116 @@
from typing import Optional

from autogpt.agent_manager import AgentManager
from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
from autogpt.commands import COMMAND_CATEGORIES
from autogpt.config import AIDirectives, AIProfile, Config
from autogpt.core.resource.model_providers import ChatModelProvider
from autogpt.logs.config import configure_chat_plugins
from autogpt.logs.helpers import print_attribute
from autogpt.models.command_registry import CommandRegistry
from autogpt.plugins import scan_plugins


def create_agent(
    task: str,
    ai_profile: AIProfile,
    app_config: Config,
    llm_provider: ChatModelProvider,
    directives: Optional[AIDirectives] = None,
) -> Agent:
    if not task:
        raise ValueError("No task specified for new agent")
    if not directives:
        directives = AIDirectives.from_file(app_config.prompt_settings_file)

    agent = _configure_agent(
        task=task,
        ai_profile=ai_profile,
        directives=directives,
        app_config=app_config,
        llm_provider=llm_provider,
    )

    agent.state.agent_id = AgentManager.generate_id(agent.ai_profile.ai_name)

    return agent


def configure_agent_with_state(
    state: AgentSettings,
    app_config: Config,
    llm_provider: ChatModelProvider,
) -> Agent:
    return _configure_agent(
        state=state,
        app_config=app_config,
        llm_provider=llm_provider,
    )


def _configure_agent(
    app_config: Config,
    llm_provider: ChatModelProvider,
    task: str = "",
    ai_profile: Optional[AIProfile] = None,
    directives: Optional[AIDirectives] = None,
    state: Optional[AgentSettings] = None,
) -> Agent:
    if not (state or task and ai_profile and directives):
        raise TypeError(
            "Either (state) or (task, ai_profile, directives) must be specified"
        )

    app_config.plugins = scan_plugins(app_config, app_config.debug_mode)
    configure_chat_plugins(app_config)

    # Create a CommandRegistry instance and scan default folder
    command_registry = CommandRegistry.with_command_modules(
        modules=COMMAND_CATEGORIES,
        config=app_config,
    )

    agent_state = state or create_agent_state(
        task=task,
        ai_profile=ai_profile,
        directives=directives,
        app_config=app_config,
    )

    # TODO: configure memory

    print_attribute("Configured Browser", app_config.selenium_web_browser)

    return Agent(
        settings=agent_state,
        llm_provider=llm_provider,
        command_registry=command_registry,
        legacy_config=app_config,
    )


def create_agent_state(
    task: str,
    ai_profile: AIProfile,
    directives: AIDirectives,
    app_config: Config,
) -> AgentSettings:
    agent_prompt_config = Agent.default_settings.prompt_config.copy(deep=True)
    agent_prompt_config.use_functions_api = app_config.openai_functions

    return AgentSettings(
        name=Agent.default_settings.name,
        description=Agent.default_settings.description,
        task=task,
        ai_profile=ai_profile,
        directives=directives,
        config=AgentConfiguration(
            fast_llm=app_config.fast_llm,
            smart_llm=app_config.smart_llm,
            allow_fs_access=not app_config.restrict_to_workspace,
            use_functions_api=app_config.openai_functions,
            plugins=app_config.plugins,
        ),
        prompt_config=agent_prompt_config,
        history=Agent.default_settings.history.copy(deep=True),
    )
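As a rough usage sketch, not part of this commit's diff: create_agent and _configure_openai_provider are defined in this PR and in autogpt.app.main respectively, while the task string and profile values below are invented for illustration.

# Illustrative only: wiring up the new agent factory.
from autogpt.agent_factory.configurators import create_agent
from autogpt.app.main import _configure_openai_provider
from autogpt.config import AIProfile, ConfigBuilder

config = ConfigBuilder.build_config_from_env()
agent = create_agent(
    task="Summarize README.md",  # invented example task
    ai_profile=AIProfile(ai_name="AutoGPT", ai_role="a multi-purpose AI assistant."),
    app_config=config,
    llm_provider=_configure_openai_provider(config),
)
# Persist the agent the same way create_task() does later in this diff:
agent.attach_fs(config.app_data_dir / "agents" / agent.state.agent_id)
agent.state.save_to_json_file(agent.file_manager.state_file_path)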
autogpts/autogpt/autogpt/agent_factory/generators.py | 31 (new file)
@@ -0,0 +1,31 @@
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from autogpt.agents.agent import Agent
    from autogpt.config import Config
    from autogpt.core.resource.model_providers.schema import ChatModelProvider

from autogpt.config.ai_directives import AIDirectives

from .configurators import _configure_agent
from .profile_generator import generate_agent_profile_for_task


async def generate_agent_for_task(
    task: str,
    app_config: "Config",
    llm_provider: "ChatModelProvider",
) -> "Agent":
    base_directives = AIDirectives.from_file(app_config.prompt_settings_file)
    ai_profile, task_directives = await generate_agent_profile_for_task(
        task=task,
        app_config=app_config,
        llm_provider=llm_provider,
    )
    return _configure_agent(
        task=task,
        ai_profile=ai_profile,
        directives=base_directives + task_directives,
        app_config=app_config,
        llm_provider=llm_provider,
    )
autogpts/autogpt/autogpt/agent_factory/profile_generator.py | 222 (new file)
@@ -0,0 +1,222 @@
import logging

from autogpt.config import AIDirectives, AIProfile, Config
from autogpt.core.configuration import SystemConfiguration, UserConfigurable
from autogpt.core.prompting import (
    ChatPrompt,
    LanguageModelClassification,
    PromptStrategy,
)
from autogpt.core.prompting.utils import json_loads
from autogpt.core.resource.model_providers.schema import (
    AssistantChatMessageDict,
    ChatMessage,
    ChatModelProvider,
    CompletionModelFunction,
)
from autogpt.core.utils.json_schema import JSONSchema

logger = logging.getLogger(__name__)


class AgentProfileGeneratorConfiguration(SystemConfiguration):
    model_classification: LanguageModelClassification = UserConfigurable(
        default=LanguageModelClassification.SMART_MODEL
    )
    system_prompt: str = UserConfigurable(
        default=(
            "Your job is to respond to a user-defined task, given in triple quotes, by "
            "invoking the `create_agent` function to generate an autonomous agent to "
            "complete the task. "
            "You should supply a role-based name for the agent (_GPT), "
            "an informative description for what the agent does, and "
            "1 to 5 directives in each of the categories Best Practices and Constraints, "
            "that are optimally aligned with the successful completion "
            "of its assigned task.\n"
            "\n"
            "Example Input:\n"
            '"""Help me with marketing my business"""\n\n'
            "Example Function Call:\n"
            "```\n"
            "{"
            '"name": "create_agent",'
            ' "arguments": {'
            '"name": "CMOGPT",'
            ' "description": "a professional digital marketer AI that assists Solopreneurs in'
            " growing their businesses by providing world-class expertise in solving"
            ' marketing problems for SaaS, content products, agencies, and more.",'
            ' "directives": {'
            ' "best_practices": ['
            '"Engage in effective problem-solving, prioritization, planning, and'
            " supporting execution to address your marketing needs as your virtual Chief"
            ' Marketing Officer.",'
            ' "Provide specific, actionable, and concise advice to help you make'
            " informed decisions without the use of platitudes or overly wordy"
            ' explanations.",'
            ' "Identify and prioritize quick wins and cost-effective campaigns that'
            ' maximize results with minimal time and budget investment.",'
            ' "Proactively take the lead in guiding you and offering suggestions when'
            " faced with unclear information or uncertainty to ensure your marketing"
            ' strategy remains on track."'
            "],"  # best_practices
            ' "constraints": ['
            '"Do not suggest illegal or unethical plans or strategies.",'
            ' "Take reasonable budgetary limits into account."'
            "]"  # constraints
            "}"  # directives
            "}"  # arguments
            "}\n"
            "```"
        )
    )
    user_prompt_template: str = UserConfigurable(default='"""{user_objective}"""')
    create_agent_function: dict = UserConfigurable(
        default=CompletionModelFunction(
            name="create_agent",
            description="Create a new autonomous AI agent to complete a given task.",
            parameters={
                "name": JSONSchema(
                    type=JSONSchema.Type.STRING,
                    description="A short role-based name for an autonomous agent.",
                    required=True,
                ),
                "description": JSONSchema(
                    type=JSONSchema.Type.STRING,
                    description="An informative one sentence description of what the AI agent does",
                    required=True,
                ),
                "directives": JSONSchema(
                    type=JSONSchema.Type.OBJECT,
                    properties={
                        "best_practices": JSONSchema(
                            type=JSONSchema.Type.ARRAY,
                            minItems=1,
                            maxItems=5,
                            items=JSONSchema(
                                type=JSONSchema.Type.STRING,
                            ),
                            description=(
                                "One to five highly effective best practices that are"
                                " optimally aligned with the completion of the given task."
                            ),
                            required=True,
                        ),
                        "constraints": JSONSchema(
                            type=JSONSchema.Type.ARRAY,
                            minItems=1,
                            maxItems=5,
                            items=JSONSchema(
                                type=JSONSchema.Type.STRING,
                            ),
                            description=(
                                "One to five highly effective constraints that are"
                                " optimally aligned with the completion of the given task."
                            ),
                            required=True,
                        ),
                    },
                    required=True,
                ),
            },
        ).schema
    )


class AgentProfileGenerator(PromptStrategy):
    default_configuration: AgentProfileGeneratorConfiguration = (
        AgentProfileGeneratorConfiguration()
    )

    def __init__(
        self,
        model_classification: LanguageModelClassification,
        system_prompt: str,
        user_prompt_template: str,
        create_agent_function: dict,
    ):
        self._model_classification = model_classification
        self._system_prompt_message = system_prompt
        self._user_prompt_template = user_prompt_template
        self._create_agent_function = CompletionModelFunction.parse(
            create_agent_function
        )

    @property
    def model_classification(self) -> LanguageModelClassification:
        return self._model_classification

    def build_prompt(self, user_objective: str = "", **kwargs) -> ChatPrompt:
        system_message = ChatMessage.system(self._system_prompt_message)
        user_message = ChatMessage.user(
            self._user_prompt_template.format(
                user_objective=user_objective,
            )
        )
        prompt = ChatPrompt(
            messages=[system_message, user_message],
            functions=[self._create_agent_function],
        )
        return prompt

    def parse_response_content(
        self,
        response_content: AssistantChatMessageDict,
    ) -> tuple[AIProfile, AIDirectives]:
        """Parse the actual text response from the objective model.

        Args:
            response_content: The raw response content from the objective model.

        Returns:
            The parsed response.
        """
        try:
            arguments = json_loads(response_content["function_call"]["arguments"])
            ai_profile = AIProfile(
                ai_name=arguments.get("name"),
                ai_role=arguments.get("description"),
            )
            ai_directives = AIDirectives(
                best_practices=arguments["directives"].get("best_practices"),
                constraints=arguments["directives"].get("constraints"),
                resources=[],
            )
        except KeyError:
            logger.debug(f"Failed to parse this response content: {response_content}")
            raise
        return ai_profile, ai_directives


async def generate_agent_profile_for_task(
    task: str,
    app_config: Config,
    llm_provider: ChatModelProvider,
) -> tuple[AIProfile, AIDirectives]:
    """Generates an AIProfile object from the given task string.

    Returns:
        AIProfile: The AIProfile object tailored to the user's input
    """
    agent_profile_generator = AgentProfileGenerator(
        **AgentProfileGenerator.default_configuration.dict()  # HACK
    )

    prompt = agent_profile_generator.build_prompt(task)

    # Call LLM with the string as user input
    output = (
        await llm_provider.create_chat_completion(
            prompt.messages,
            model_name=app_config.smart_llm,
            functions=prompt.functions,
        )
    ).response

    # Debug LLM Output
    logger.debug(f"AI Config Generator Raw Output: {output}")

    # Parse the output
    ai_profile, ai_directives = agent_profile_generator.parse_response_content(output)

    return ai_profile, ai_directives
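To make the parsing contract concrete, here is a hypothetical function-call payload and what parse_response_content would derive from it. The field names follow create_agent_function above; the literal values are invented:

# Hypothetical assistant message, shaped like the `create_agent` function call
# defined above (all values are made up for illustration).
response_content = {
    "function_call": {
        "name": "create_agent",
        "arguments": (
            '{"name": "CMOGPT",'
            ' "description": "a professional digital marketing assistant",'
            ' "directives": {"best_practices": ["Be concise."],'
            ' "constraints": ["Stay within budget."]}}'
        ),
    }
}
# parse_response_content(response_content) would then yield:
#   AIProfile(ai_name="CMOGPT", ai_role="a professional digital marketing assistant")
#   AIDirectives(best_practices=["Be concise."],
#                constraints=["Stay within budget."], resources=[])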
autogpts/autogpt/autogpt/agent_manager/__init__.py | 3 (new file)
@@ -0,0 +1,3 @@
from .agent_manager import AgentManager

__all__ = ["AgentManager"]
autogpts/autogpt/autogpt/agent_manager/agent_manager.py | 47 (new file)
@@ -0,0 +1,47 @@
from __future__ import annotations

import uuid
from pathlib import Path
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from autogpt.agents.agent import AgentSettings

from autogpt.agents.utils.agent_file_manager import AgentFileManager


class AgentManager:
    def __init__(self, app_data_dir: Path):
        self.agents_dir = app_data_dir / "agents"
        if not self.agents_dir.exists():
            self.agents_dir.mkdir()

    @staticmethod
    def generate_id(agent_name: str) -> str:
        unique_id = str(uuid.uuid4())[:8]
        return f"{agent_name}-{unique_id}"

    def list_agents(self) -> list[str]:
        return [
            dir.name
            for dir in self.agents_dir.iterdir()
            if dir.is_dir() and AgentFileManager(dir).state_file_path.exists()
        ]

    def get_agent_dir(self, agent_id: str, must_exist: bool = False) -> Path:
        agent_dir = self.agents_dir / agent_id
        if must_exist and not agent_dir.exists():
            raise FileNotFoundError(f"No agent with ID '{agent_id}'")
        return agent_dir

    def retrieve_state(self, agent_id: str) -> AgentSettings:
        from autogpt.agents.agent import AgentSettings

        agent_dir = self.get_agent_dir(agent_id, True)
        state_file = AgentFileManager(agent_dir).state_file_path
        if not state_file.exists():
            raise FileNotFoundError(f"Agent with ID '{agent_id}' has no state.json")

        state = AgentSettings.load_from_json_file(state_file)
        state.agent_data_dir = agent_dir
        return state
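A brief sketch of the persist/restore round trip this class supports; the paths are illustrative, and save_to_json_file comes from BaseAgentSettings later in this diff:

# Illustrative round trip: persist an agent's state, then restore it by ID.
from pathlib import Path

from autogpt.agent_manager import AgentManager

manager = AgentManager(app_data_dir=Path("./data"))  # path is illustrative
agent_id = AgentManager.generate_id("AutoGPT")       # e.g. "AutoGPT-1a2b3c4d"

# After an agent has saved its state into manager.get_agent_dir(agent_id)
# (via agent.state.save_to_json_file(agent.file_manager.state_file_path)),
# it can be restored by ID:
state = manager.retrieve_state(agent_id)  # FileNotFoundError if no state.json
print(manager.list_agents())              # agent dirs that contain a state.json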
@@ -8,10 +8,10 @@ from typing import TYPE_CHECKING, Optional

if TYPE_CHECKING:
    from autogpt.config import Config
    from autogpt.memory.vector import VectorMemory
    from autogpt.models.command_registry import CommandRegistry

from autogpt.config import AIConfig
from pydantic import Field

from autogpt.core.configuration import Configurable
from autogpt.core.prompting import ChatPrompt
from autogpt.core.resource.model_providers import (

@@ -38,8 +38,8 @@ from autogpt.models.context_item import ContextItem

from .base import BaseAgent, BaseAgentConfiguration, BaseAgentSettings
from .features.context import ContextMixin
from .features.file_workspace import FileWorkspaceMixin
from .features.watchdog import WatchdogMixin
from .features.workspace import WorkspaceMixin
from .prompt_strategies.one_shot import (
    OneShotAgentPromptConfiguration,
    OneShotAgentPromptStrategy,

@@ -54,13 +54,17 @@ class AgentConfiguration(BaseAgentConfiguration):


class AgentSettings(BaseAgentSettings):
    config: AgentConfiguration
    prompt_config: OneShotAgentPromptConfiguration
    config: AgentConfiguration = Field(default_factory=AgentConfiguration)
    prompt_config: OneShotAgentPromptConfiguration = Field(
        default_factory=(
            lambda: OneShotAgentPromptStrategy.default_configuration.copy(deep=True)
        )
    )


class Agent(
    ContextMixin,
    WorkspaceMixin,
    FileWorkspaceMixin,
    WatchdogMixin,
    BaseAgent,
    Configurable[AgentSettings],

@@ -70,10 +74,6 @@ class Agent(
    default_settings: AgentSettings = AgentSettings(
        name="Agent",
        description=__doc__,
        ai_config=AIConfig(ai_name="AutoGPT"),
        config=AgentConfiguration(),
        prompt_config=OneShotAgentPromptStrategy.default_configuration,
        history=BaseAgent.default_settings.history,
    )

    def __init__(

@@ -81,7 +81,6 @@ class Agent(
        settings: AgentSettings,
        llm_provider: ChatModelProvider,
        command_registry: CommandRegistry,
        memory: VectorMemory,
        legacy_config: Config,
    ):
        prompt_strategy = OneShotAgentPromptStrategy(

@@ -96,9 +95,6 @@ class Agent(
            legacy_config=legacy_config,
        )

        self.memory = memory
        """VectorMemoryProvider used to manage the agent's context (TODO)"""

        self.created_at = datetime.now().strftime("%Y%m%d_%H%M%S")
        """Timestamp the agent was created; only used for structured debug logging."""

@@ -159,7 +155,7 @@ class Agent(

        self.log_cycle_handler.log_count_within_cycle = 0
        self.log_cycle_handler.log_cycle(
            self.ai_config.ai_name,
            self.ai_profile.ai_name,
            self.created_at,
            self.config.cycle_count,
            prompt.raw(),

@@ -184,7 +180,7 @@ class Agent(
        ) = self.prompt_strategy.parse_response_content(llm_response.response)

        self.log_cycle_handler.log_cycle(
            self.ai_config.ai_name,
            self.ai_profile.ai_name,
            self.created_at,
            self.config.cycle_count,
            assistant_reply_dict,

@@ -212,7 +208,7 @@ class Agent(
        if command_name == "human_feedback":
            result = ActionInterruptedByHuman(feedback=user_input)
            self.log_cycle_handler.log_cycle(
                self.ai_config.ai_name,
                self.ai_profile.ai_name,
                self.created_at,
                self.config.cycle_count,
                user_input,
@@ -2,7 +2,8 @@ from __future__ import annotations

import logging
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, Literal, Optional
from pathlib import Path
from typing import TYPE_CHECKING, Any, Optional

from auto_gpt_plugin_template import AutoGPTPluginTemplate
from pydantic import Field, validator

@@ -18,8 +19,9 @@ if TYPE_CHECKING:
    from autogpt.models.command_registry import CommandRegistry

from autogpt.agents.utils.prompt_scratchpad import PromptScratchpad
from autogpt.config.ai_config import AIConfig
from autogpt.config import ConfigBuilder
from autogpt.config.ai_directives import AIDirectives
from autogpt.config.ai_profile import AIProfile
from autogpt.core.configuration import (
    Configurable,
    SystemConfiguration,

@@ -40,6 +42,8 @@ from autogpt.llm.providers.openai import get_openai_command_specs
from autogpt.models.action_history import ActionResult, EpisodicActionHistory
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT

from .utils.agent_file_manager import AgentFileManager

logger = logging.getLogger(__name__)

CommandName = str

@@ -48,6 +52,8 @@ AgentThoughts = dict[str, Any]


class BaseAgentConfiguration(SystemConfiguration):
    allow_fs_access: bool = UserConfigurable(default=False)

    fast_llm: OpenAIModelName = UserConfigurable(default=OpenAIModelName.GPT3_16k)
    smart_llm: OpenAIModelName = UserConfigurable(default=OpenAIModelName.GPT4)
    use_functions_api: bool = UserConfigurable(default=False)

@@ -82,9 +88,8 @@ class BaseAgentConfiguration(SystemConfiguration):
    defaults to 75% of `llm.max_tokens`.
    """

    summary_max_tlength: Optional[
        int
    ] = None  # TODO: move to ActionHistoryConfiguration
    summary_max_tlength: Optional[int] = None
    # TODO: move to ActionHistoryConfiguration

    plugins: list[AutoGPTPluginTemplate] = Field(default_factory=list, exclude=True)

@@ -115,31 +120,49 @@ class BaseAgentConfiguration(SystemConfiguration):
                f"Model {smart_llm} does not support OpenAI Functions. "
                "Please disable OPENAI_FUNCTIONS or choose a suitable model."
            )
        return v


class BaseAgentSettings(SystemSettings):
    ai_config: AIConfig
    """The AIConfig or "personality" object associated with this agent."""
    agent_id: Optional[str] = None
    agent_data_dir: Optional[Path] = None

    config: BaseAgentConfiguration
    ai_profile: AIProfile = Field(default_factory=lambda: AIProfile(ai_name="AutoGPT"))
    """The AI profile or "personality" of the agent."""

    directives: AIDirectives = Field(
        default_factory=lambda: AIDirectives.from_file(
            ConfigBuilder.default_settings.prompt_settings_file
        )
    )
    """Directives (general instructional guidelines) for the agent."""

    task: str = "Terminate immediately"  # FIXME: placeholder for forge.sdk.schema.Task
    """The user-given task that the agent is working on."""

    config: BaseAgentConfiguration = Field(default_factory=BaseAgentConfiguration)
    """The configuration for this BaseAgent subsystem instance."""

    history: EpisodicActionHistory
    history: EpisodicActionHistory = Field(default_factory=EpisodicActionHistory)
    """(STATE) The action history of the agent."""

    def save_to_json_file(self, file_path: Path) -> None:
        with file_path.open("w") as f:
            f.write(self.json())

    @classmethod
    def load_from_json_file(cls, file_path: Path):
        return cls.parse_file(file_path)


class BaseAgent(Configurable[BaseAgentSettings], ABC):
    """Base class for all AutoGPT agent classes."""

    ThoughtProcessID = Literal["one-shot"]
    ThoughtProcessOutput = tuple[CommandName, CommandArgs, AgentThoughts]

    default_settings = BaseAgentSettings(
        name="BaseAgent",
        description=__doc__,
        ai_config=AIConfig(),
        config=BaseAgentConfiguration(),
        history=EpisodicActionHistory(),
    )

    def __init__(

@@ -150,8 +173,20 @@ class BaseAgent(Configurable[BaseAgentSettings], ABC):
        command_registry: CommandRegistry,
        legacy_config: Config,
    ):
        self.ai_config = settings.ai_config
        self.ai_directives = AIDirectives.from_file(legacy_config.prompt_settings_file)
        self.state = settings
        self.config = settings.config
        self.ai_profile = settings.ai_profile
        self.directives = settings.directives
        self.event_history = settings.history

        self.legacy_config = legacy_config
        """LEGACY: Monolithic application configuration."""

        self.file_manager: AgentFileManager = (
            AgentFileManager(settings.agent_data_dir)
            if settings.agent_data_dir
            else None
        )  # type: ignore

        self.llm_provider = llm_provider

@@ -160,20 +195,27 @@ class BaseAgent(Configurable[BaseAgentSettings], ABC):
        self.command_registry = command_registry
        """The registry containing all commands available to the agent."""

        self.llm_provider = llm_provider

        self.legacy_config = legacy_config
        self.config = settings.config
        """The applicable application configuration."""

        self.event_history = settings.history

        self._prompt_scratchpad: PromptScratchpad | None = None

        # Support multi-inheritance and mixins for subclasses
        super(BaseAgent, self).__init__()

        logger.debug(f"Created {__class__} '{self.ai_config.ai_name}'")
        logger.debug(f"Created {__class__} '{self.ai_profile.ai_name}'")

    def set_id(self, new_id: str, new_agent_dir: Optional[Path] = None):
        self.state.agent_id = new_id
        if self.state.agent_data_dir:
            if not new_agent_dir:
                raise ValueError(
                    "new_agent_dir must be specified if one is currently configured"
                )
            self.attach_fs(new_agent_dir)

    def attach_fs(self, agent_dir: Path) -> AgentFileManager:
        self.file_manager = AgentFileManager(agent_dir)
        self.file_manager.initialize()
        self.state.agent_data_dir = agent_dir
        return self.file_manager

    @property
    def llm(self) -> ChatModelInfo:

@@ -196,6 +238,10 @@ class BaseAgent(Configurable[BaseAgentSettings], ABC):
        Returns:
            The command name and arguments, if any, and the agent's thoughts.
        """
        assert self.file_manager, (
            f"Agent has no FileManager: call {__class__.__name__}.attach_fs()"
            " before trying to run the agent."
        )

        # Scratchpad as surrogate PromptGenerator for plugin hooks
        self._prompt_scratchpad = PromptScratchpad()

@@ -266,14 +312,15 @@ class BaseAgent(Configurable[BaseAgentSettings], ABC):
            if not plugin.can_handle_post_prompt():
                continue
            plugin.post_prompt(scratchpad)
        ai_directives = self.ai_directives.copy(deep=True)
        ai_directives = self.directives.copy(deep=True)
        ai_directives.resources += scratchpad.resources
        ai_directives.constraints += scratchpad.constraints
        ai_directives.best_practices += scratchpad.best_practices
        extra_commands += list(scratchpad.commands.values())

        prompt = self.prompt_strategy.build_prompt(
            ai_config=self.ai_config,
            task=self.state.task,
            ai_profile=self.ai_profile,
            ai_directives=ai_directives,
            commands=get_openai_command_specs(
                self.command_registry.list_available_commands(self)
autogpts/autogpt/autogpt/agents/features/file_workspace.py | 57 (new file)
@@ -0,0 +1,57 @@
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from pathlib import Path

    from ..base import BaseAgent

from autogpt.file_workspace import FileWorkspace

from ..base import AgentFileManager, BaseAgentConfiguration


class FileWorkspaceMixin:
    """Mixin that adds workspace support to a class"""

    workspace: FileWorkspace = None
    """Workspace that the agent has access to, e.g. for reading/writing files."""

    def __init__(self, **kwargs):
        # Initialize other bases first, because we need the config from BaseAgent
        super(FileWorkspaceMixin, self).__init__(**kwargs)

        config: BaseAgentConfiguration = getattr(self, "config")
        if not isinstance(config, BaseAgentConfiguration):
            raise ValueError(
                "Cannot initialize Workspace for Agent without compatible .config"
            )
        file_manager: AgentFileManager = getattr(self, "file_manager")
        if not file_manager:
            return

        self.workspace = _setup_workspace(file_manager, config)

    def attach_fs(self, agent_dir: Path):
        res = super(FileWorkspaceMixin, self).attach_fs(agent_dir)

        self.workspace = _setup_workspace(self.file_manager, self.config)

        return res


def _setup_workspace(file_manager: AgentFileManager, config: BaseAgentConfiguration):
    workspace = FileWorkspace(
        file_manager.root / "workspace",
        restrict_to_root=not config.allow_fs_access,
    )
    workspace.initialize()
    return workspace


def get_agent_workspace(agent: BaseAgent) -> FileWorkspace | None:
    if isinstance(agent, FileWorkspaceMixin):
        return agent.workspace

    return None
@@ -1,39 +0,0 @@
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from ..base import BaseAgent

from autogpt.config import Config
from autogpt.workspace import Workspace


class WorkspaceMixin:
    """Mixin that adds workspace support to a class"""

    workspace: Workspace
    """Workspace that the agent has access to, e.g. for reading/writing files."""

    def __init__(self, **kwargs):
        # Initialize other bases first, because we need the config from BaseAgent
        super(WorkspaceMixin, self).__init__(**kwargs)

        legacy_config: Config = getattr(self, "legacy_config")
        if not isinstance(legacy_config, Config):
            raise ValueError(f"Cannot initialize Workspace for Agent without Config")
        if not legacy_config.workspace_path:
            raise ValueError(
                f"Cannot set up Workspace: no WORKSPACE_PATH in legacy_config"
            )

        self.workspace = Workspace(
            legacy_config.workspace_path, legacy_config.restrict_to_workspace
        )


def get_agent_workspace(agent: BaseAgent) -> Workspace | None:
    if isinstance(agent, WorkspaceMixin):
        return agent.workspace

    return None
@@ -6,7 +6,7 @@ from datetime import datetime
from typing import TYPE_CHECKING, Literal, Optional

if TYPE_CHECKING:
    from autogpt.config import AIConfig, Config
    from autogpt.config import Config
    from autogpt.llm.base import ChatModelResponse, ChatSequence
    from autogpt.memory.vector import VectorMemory
    from autogpt.models.command_registry import CommandRegistry

@@ -32,19 +32,18 @@ from autogpt.models.context_item import ContextItem

from .agent import execute_command, extract_command
from .base import BaseAgent
from .features.context import ContextMixin
from .features.workspace import WorkspaceMixin
from .features.file_workspace import FileWorkspaceMixin

logger = logging.getLogger(__name__)


class PlanningAgent(ContextMixin, WorkspaceMixin, BaseAgent):
class PlanningAgent(ContextMixin, FileWorkspaceMixin, BaseAgent):
    """Agent class for interacting with AutoGPT."""

    ThoughtProcessID = Literal["plan", "action", "evaluate"]

    def __init__(
        self,
        ai_config: AIConfig,
        command_registry: CommandRegistry,
        memory: VectorMemory,
        triggering_prompt: str,

@@ -52,7 +51,6 @@ class PlanningAgent(ContextMixin, WorkspaceMixin, BaseAgent):
        cycle_budget: Optional[int] = None,
    ):
        super().__init__(
            ai_config=ai_config,
            command_registry=command_registry,
            config=config,
            default_cycle_instruction=triggering_prompt,

@@ -223,14 +221,14 @@ class PlanningAgent(ContextMixin, WorkspaceMixin, BaseAgent):

        self.log_cycle_handler.log_count_within_cycle = 0
        self.log_cycle_handler.log_cycle(
            self.ai_config.ai_name,
            self.ai_profile.ai_name,
            self.created_at,
            self.cycle_count,
            self.event_history.episodes,
            "event_history.json",
        )
        self.log_cycle_handler.log_cycle(
            self.ai_config.ai_name,
            self.ai_profile.ai_name,
            self.created_at,
            self.cycle_count,
            prompt.raw(),

@@ -249,7 +247,7 @@ class PlanningAgent(ContextMixin, WorkspaceMixin, BaseAgent):
        if command_name == "human_feedback":
            result = ActionInterruptedByHuman(feedback=user_input)
            self.log_cycle_handler.log_cycle(
                self.ai_config.ai_name,
                self.ai_profile.ai_name,
                self.created_at,
                self.cycle_count,
                user_input,

@@ -333,7 +331,7 @@ class PlanningAgent(ContextMixin, WorkspaceMixin, BaseAgent):
        response = command_name, arguments, assistant_reply_dict

        self.log_cycle_handler.log_cycle(
            self.ai_config.ai_name,
            self.ai_profile.ai_name,
            self.created_at,
            self.cycle_count,
            assistant_reply_dict,
@@ -7,13 +7,14 @@ from logging import Logger
from typing import TYPE_CHECKING, Callable, Optional

import distro
from pydantic import Field

if TYPE_CHECKING:
    from autogpt.agents.agent import Agent
    from autogpt.models.action_history import Episode

from autogpt.agents.utils.exceptions import InvalidAgentResponseError
from autogpt.config import AIConfig, AIDirectives
from autogpt.config import AIDirectives, AIProfile
from autogpt.core.configuration.schema import SystemConfiguration, UserConfigurable
from autogpt.core.prompting import (
    ChatPrompt,

@@ -29,56 +30,6 @@ from autogpt.core.utils.json_schema import JSONSchema
from autogpt.json_utils.utilities import extract_dict_from_response
from autogpt.prompts.utils import format_numbered_list, indent

RESPONSE_SCHEMA = JSONSchema(
    type=JSONSchema.Type.OBJECT,
    properties={
        "thoughts": JSONSchema(
            type=JSONSchema.Type.OBJECT,
            required=True,
            properties={
                "text": JSONSchema(
                    description="Thoughts",
                    type=JSONSchema.Type.STRING,
                    required=True,
                ),
                "reasoning": JSONSchema(
                    type=JSONSchema.Type.STRING,
                    required=True,
                ),
                "plan": JSONSchema(
                    description="Short markdown-style bullet list that conveys the long-term plan",
                    type=JSONSchema.Type.STRING,
                    required=True,
                ),
                "criticism": JSONSchema(
                    description="Constructive self-criticism",
                    type=JSONSchema.Type.STRING,
                    required=True,
                ),
                "speak": JSONSchema(
                    description="Summary of thoughts, to say to user",
                    type=JSONSchema.Type.STRING,
                    required=True,
                ),
            },
        ),
        "command": JSONSchema(
            type=JSONSchema.Type.OBJECT,
            required=True,
            properties={
                "name": JSONSchema(
                    type=JSONSchema.Type.STRING,
                    required=True,
                ),
                "args": JSONSchema(
                    type=JSONSchema.Type.OBJECT,
                    required=True,
                ),
            },
        ),
    },
)


class OneShotAgentPromptConfiguration(SystemConfiguration):
    DEFAULT_BODY_TEMPLATE: str = (

@@ -166,7 +117,9 @@ class OneShotAgentPromptConfiguration(SystemConfiguration):
    #########
    # State #
    #########
    progress_summaries: dict[tuple[int, int], str] = {(0, 0): ""}
    # progress_summaries: dict[tuple[int, int], str] = Field(
    #     default_factory=lambda: {(0, 0): ""}
    # )


class OneShotAgentPromptStrategy(PromptStrategy):

@@ -190,7 +143,8 @@ class OneShotAgentPromptStrategy(PromptStrategy):
    def build_prompt(
        self,
        *,
        ai_config: AIConfig,
        task: str,
        ai_profile: AIProfile,
        ai_directives: AIDirectives,
        commands: list[CompletionModelFunction],
        event_history: list[Episode],

@@ -213,13 +167,16 @@ class OneShotAgentPromptStrategy(PromptStrategy):
        extra_messages = []

        system_prompt = self.build_system_prompt(
            ai_config=ai_config,
            ai_profile=ai_profile,
            ai_directives=ai_directives,
            commands=commands,
            include_os_info=include_os_info,
        )
        system_prompt_tlength = count_message_tokens(ChatMessage.system(system_prompt))

        user_task = f'"""{task}"""'
        user_task_tlength = count_message_tokens(ChatMessage.user(user_task))

        response_format_instr = self.response_format_instruction(
            self.config.use_functions_api
        )

@@ -235,6 +192,7 @@ class OneShotAgentPromptStrategy(PromptStrategy):
            max_tokens=(
                max_prompt_tokens
                - system_prompt_tlength
                - user_task_tlength
                - final_instruction_tlength
                - count_message_tokens(extra_messages)
            ),

@@ -247,6 +205,7 @@ class OneShotAgentPromptStrategy(PromptStrategy):
        prompt = ChatPrompt(
            messages=[
                ChatMessage.system(system_prompt),
                ChatMessage.user(user_task),
                *extra_messages,
                final_instruction_msg,
            ],

@@ -256,26 +215,31 @@ class OneShotAgentPromptStrategy(PromptStrategy):

    def build_system_prompt(
        self,
        ai_config: AIConfig,
        ai_profile: AIProfile,
        ai_directives: AIDirectives,
        commands: list[CompletionModelFunction],
        include_os_info: bool,
    ) -> str:
        system_prompt_parts = (
            self._generate_intro_prompt(ai_config)
            self._generate_intro_prompt(ai_profile)
            + (self._generate_os_info() if include_os_info else [])
            + [
                self.config.body_template.format(
                    constraints=format_numbered_list(
                        ai_directives.constraints
                        + self._generate_budget_constraint(ai_config.api_budget)
                        + self._generate_budget_constraint(ai_profile.api_budget)
                    ),
                    resources=format_numbered_list(ai_directives.resources),
                    commands=self._generate_commands_list(commands),
                    best_practices=format_numbered_list(ai_directives.best_practices),
                )
            ]
            + self._generate_goals_info(ai_config.ai_goals)
            + [
                "## Your Task\n"
                "The user will specify a task for you to execute, in triple quotes,"
                " in the next message. Your job is to complete the task while following"
                " your directives as given above, and terminate when your task is done."
            ]
        )

        # Join non-empty parts together into paragraph format

@@ -328,7 +292,7 @@ class OneShotAgentPromptStrategy(PromptStrategy):
        return "\n\n".join(steps)

    def response_format_instruction(self, use_functions_api: bool) -> str:
        response_schema = RESPONSE_SCHEMA.copy(deep=True)
        response_schema = self.response_schema.copy(deep=True)
        if (
            use_functions_api
            and response_schema.properties

@@ -349,14 +313,14 @@ class OneShotAgentPromptStrategy(PromptStrategy):
            f"{response_format}"
        )

    def _generate_intro_prompt(self, ai_config: AIConfig) -> list[str]:
    def _generate_intro_prompt(self, ai_profile: AIProfile) -> list[str]:
        """Generates the introduction part of the prompt.

        Returns:
            list[str]: A list of strings forming the introduction part of the prompt.
        """
        return [
            f"You are {ai_config.ai_name}, {ai_config.ai_role.rstrip('.')}.",
            f"You are {ai_profile.ai_name}, {ai_profile.ai_role.rstrip('.')}.",
            "Your decisions must always be made independently without seeking "
            "user assistance. Play to your strengths as an LLM and pursue "
            "simple strategies with no legal complications.",

@@ -392,24 +356,6 @@ class OneShotAgentPromptStrategy(PromptStrategy):
            ]
        return []

    def _generate_goals_info(self, goals: list[str]) -> list[str]:
        """Generates the goals information part of the prompt.

        Returns:
            str: The goals information part of the prompt.
        """
        if goals:
            return [
                "\n".join(
                    [
                        "## Goals",
                        "For your task, you must fulfill the following goals:",
                        *[f"{i+1}. {goal}" for i, goal in enumerate(goals)],
                    ]
                )
            ]
        return []

    def _generate_commands_list(self, commands: list[CompletionModelFunction]) -> str:
        """Lists the commands available to the agent.

@@ -434,7 +380,10 @@ class OneShotAgentPromptStrategy(PromptStrategy):

        assistant_reply_dict = extract_dict_from_response(response["content"])

        _, errors = RESPONSE_SCHEMA.validate_object(assistant_reply_dict, self.logger)
        _, errors = self.response_schema.validate_object(
            object=assistant_reply_dict,
            logger=self.logger,
        )
        if errors:
            raise InvalidAgentResponseError(
                "Validation of response failed:\n "
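For orientation, a hypothetical assistant reply that would pass validation against the response schema being relocated here; every value is invented, and the command name is only an example:

# Hypothetical reply matching the (relocated) RESPONSE_SCHEMA shape.
assistant_reply = {
    "thoughts": {
        "text": "I should write the file first.",
        "reasoning": "The task asks for a file on disk.",
        "plan": "- write hello.txt\n- verify contents\n- finish",
        "criticism": "Avoid unnecessary steps.",
        "speak": "Writing hello.txt now.",
    },
    "command": {
        "name": "write_to_file",  # example command name
        "args": {"filename": "hello.txt", "text": "Hello World"},
    },
}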
autogpts/autogpt/autogpt/agents/utils/agent_file_manager.py | 37 (new file)
@@ -0,0 +1,37 @@
from __future__ import annotations

import logging
from pathlib import Path

logger = logging.getLogger(__name__)


class AgentFileManager:
    """A class that represents a workspace for an AutoGPT agent."""

    def __init__(self, agent_data_dir: Path):
        self._root = agent_data_dir.resolve()

    @property
    def root(self) -> Path:
        """The root directory of the workspace."""
        return self._root

    def initialize(self) -> None:
        self.root.mkdir(exist_ok=True, parents=True)
        self.init_file_ops_log(self.file_ops_log_path)

    @property
    def state_file_path(self) -> Path:
        return self.root / "state.json"

    @property
    def file_ops_log_path(self) -> Path:
        return self.root / "file_logger.log"

    @staticmethod
    def init_file_ops_log(file_logger_path: Path) -> Path:
        if not file_logger_path.exists():
            with file_logger_path.open(mode="w", encoding="utf-8") as f:
                f.write("")
        return file_logger_path
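Taken together with the FileWorkspaceMixin above, the per-agent layout on disk looks roughly like this; a sketch inferred from state_file_path, file_ops_log_path, and _setup_workspace, not spelled out anywhere in the commit:

# Sketch of the per-agent data directory implied by this diff:
#
#   <app_data_dir>/agents/<agent_id>/
#     state.json        # AgentSettings, written by save_to_json_file()
#     file_logger.log   # created by AgentFileManager.init_file_ops_log()
#     workspace/        # FileWorkspace root from _setup_workspace();
#                       # restricted to its root unless config.allow_fs_access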
@@ -14,6 +14,10 @@ class AgentException(Exception):
        super().__init__(message, *args)


class AgentTerminated(AgentException):
    """The agent terminated or was terminated"""


class ConfigurationError(AgentException):
    """Error caused by invalid, incompatible or otherwise incorrect configuration"""
autogpts/autogpt/autogpt/app/agent_protocol_server.py | 374 (new file)
@@ -0,0 +1,374 @@
|
||||
import logging
|
||||
import os
|
||||
import pathlib
|
||||
from io import BytesIO
|
||||
from uuid import uuid4
|
||||
|
||||
from fastapi import APIRouter, FastAPI, UploadFile
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
from fastapi.responses import FileResponse, RedirectResponse, StreamingResponse
|
||||
from fastapi.staticfiles import StaticFiles
|
||||
from forge.sdk.db import AgentDB
|
||||
from forge.sdk.errors import NotFoundError
|
||||
from forge.sdk.middlewares import AgentMiddleware
|
||||
from forge.sdk.routes.agent_protocol import base_router
|
||||
from forge.sdk.schema import (
|
||||
Artifact,
|
||||
Step,
|
||||
StepRequestBody,
|
||||
Task,
|
||||
TaskArtifactsListResponse,
|
||||
TaskListResponse,
|
||||
TaskRequestBody,
|
||||
TaskStepsListResponse,
|
||||
)
|
||||
from hypercorn.asyncio import serve as hypercorn_serve
|
||||
from hypercorn.config import Config as HypercornConfig
|
||||
|
||||
from autogpt.agent_factory.configurators import configure_agent_with_state
|
||||
from autogpt.agent_factory.generators import generate_agent_for_task
|
||||
from autogpt.agent_manager import AgentManager
|
||||
from autogpt.commands.system import finish
|
||||
from autogpt.commands.user_interaction import ask_user
|
||||
from autogpt.config import Config
|
||||
from autogpt.core.resource.model_providers import ChatModelProvider
|
||||
from autogpt.file_workspace import FileWorkspace
|
||||
from autogpt.models.action_history import ActionSuccessResult
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AgentProtocolServer:
|
||||
def __init__(
|
||||
self,
|
||||
app_config: Config,
|
||||
database: AgentDB,
|
||||
llm_provider: ChatModelProvider,
|
||||
):
|
||||
self.app_config = app_config
|
||||
self.db = database
|
||||
self.llm_provider = llm_provider
|
||||
self.agent_manager = AgentManager(app_data_dir=app_config.app_data_dir)
|
||||
|
||||
async def start(self, port: int = 8000, router: APIRouter = base_router):
|
||||
"""Start the agent server."""
|
||||
logger.debug("Starting the agent server...")
|
||||
config = HypercornConfig()
|
||||
config.bind = [f"localhost:{port}"]
|
||||
app = FastAPI(
|
||||
title="AutoGPT Server",
|
||||
description="Forked from AutoGPT Forge; Modified version of The Agent Protocol.",
|
||||
version="v0.4",
|
||||
)
|
||||
|
||||
# Add CORS middleware
|
||||
origins = [
|
||||
"http://localhost:5000",
|
||||
"http://127.0.0.1:5000",
|
||||
"http://localhost:8000",
|
||||
"http://127.0.0.1:8000",
|
||||
"http://localhost:8080",
|
||||
"http://127.0.0.1:8080",
|
||||
# Add any other origins you want to whitelist
|
||||
]
|
||||
|
||||
app.add_middleware(
|
||||
CORSMiddleware,
|
||||
allow_origins=origins,
|
||||
allow_credentials=True,
|
||||
allow_methods=["*"],
|
||||
allow_headers=["*"],
|
||||
)
|
||||
|
||||
app.include_router(router, prefix="/ap/v1")
|
||||
script_dir = os.path.dirname(os.path.realpath(__file__))
|
||||
frontend_path = (
|
||||
pathlib.Path(script_dir)
|
||||
.joinpath("../../../../frontend/build/web")
|
||||
.resolve()
|
||||
)
|
||||
|
||||
if os.path.exists(frontend_path):
|
||||
app.mount("/app", StaticFiles(directory=frontend_path), name="app")
|
||||
|
||||
@app.get("/", include_in_schema=False)
|
||||
async def root():
|
||||
return RedirectResponse(url="/app/index.html", status_code=307)
|
||||
|
||||
else:
|
||||
logger.warning(
|
||||
f"Frontend not found. {frontend_path} does not exist. The frontend will not be available."
|
||||
)
|
||||
|
||||
# Used to access the methods on this class from API route handlers
|
||||
app.add_middleware(AgentMiddleware, agent=self)
|
||||
|
||||
config.loglevel = "ERROR"
|
||||
config.bind = [f"0.0.0.0:{port}"]
|
||||
|
||||
logger.info(f"AutoGPT server starting on http://localhost:{port}")
|
||||
await hypercorn_serve(app, config)
|
||||
|
    async def create_task(self, task_request: TaskRequestBody) -> Task:
        """
        Create a task for the agent.
        """
        logger.debug(f"Creating agent for task: '{task_request.input}'")
        task_agent = await generate_agent_for_task(
            task=task_request.input,
            app_config=self.app_config,
            llm_provider=self.llm_provider,
        )
        task = await self.db.create_task(
            input=task_request.input,
            additional_input=task_request.additional_input,
        )
        agent_id = task_agent.state.agent_id = task_agent_id(task.task_id)
        logger.debug(f"New agent ID: {agent_id}")
        task_agent.attach_fs(self.app_config.app_data_dir / "agents" / agent_id)
        task_agent.state.save_to_json_file(task_agent.file_manager.state_file_path)
        return task

    async def list_tasks(self, page: int = 1, pageSize: int = 10) -> TaskListResponse:
        """
        List all tasks that the agent has created.
        """
        logger.debug("Listing all tasks...")
        tasks, pagination = await self.db.list_tasks(page, pageSize)
        response = TaskListResponse(tasks=tasks, pagination=pagination)
        return response

    async def get_task(self, task_id: int) -> Task:
        """
        Get a task by ID.
        """
        logger.debug(f"Getting task with ID: {task_id}...")
        task = await self.db.get_task(task_id)
        return task

    async def list_steps(
        self, task_id: str, page: int = 1, pageSize: int = 10
    ) -> TaskStepsListResponse:
        """
        List the IDs of all steps that the task has created.
        """
        logger.debug(f"Listing all steps created by task with ID: {task_id}...")
        steps, pagination = await self.db.list_steps(task_id, page, pageSize)
        response = TaskStepsListResponse(steps=steps, pagination=pagination)
        return response

    async def execute_step(self, task_id: str, step_request: StepRequestBody) -> Step:
        """Create a step for the task."""
        logger.debug(f"Creating a step for task with ID: {task_id}...")

        # Restore Agent instance
        agent = configure_agent_with_state(
            state=self.agent_manager.retrieve_state(task_agent_id(task_id)),
            app_config=self.app_config,
            llm_provider=self.llm_provider,
        )
        agent.workspace.on_write_file = lambda path: self.db.create_artifact(
            task_id=task_id,
            file_name=path.parts[-1],
            relative_path=str(path),
        )

        # According to the Agent Protocol spec, the first execute_step request contains
        # the same task input as the parent create_task request.
        # To prevent this from interfering with the agent's process, we ignore the input
        # of this first step request, and just generate the first step proposal.
        is_init_step = not bool(agent.event_history)
        execute_command, execute_command_args, execute_result = None, None, None
        execute_approved = False
        if is_init_step:
            step_request.input = ""
        elif (
            agent.event_history.current_episode
            and not agent.event_history.current_episode.result
        ):
            execute_command = agent.event_history.current_episode.action.name
            execute_command_args = agent.event_history.current_episode.action.args
            execute_approved = not step_request.input or step_request.input == "y"

            logger.debug(
                f"Agent proposed command"
                f" {execute_command}({fmt_kwargs(execute_command_args)})."
                f" User input/feedback: {repr(step_request.input)}"
            )

        # Save step request
        step = await self.db.create_step(
            task_id=task_id,
            input=step_request,
            is_last=execute_command == finish.__name__ and execute_approved,
        )

        # Execute previously proposed action
        if execute_command:
            assert execute_command_args is not None

            if step.is_last and execute_command == finish.__name__:
                assert execute_command_args
                step = await self.db.update_step(
                    task_id=task_id,
                    step_id=step.step_id,
                    output=execute_command_args["reason"],
                )
                return step

            if execute_command == ask_user.__name__:  # HACK
                execute_result = ActionSuccessResult(outputs=step_request.input)
                agent.event_history.register_result(execute_result)
            elif execute_approved:
                step = await self.db.update_step(
                    task_id=task_id,
                    step_id=step.step_id,
                    status="running",
                )
                # Execute previously proposed action
                execute_result = await agent.execute(
                    command_name=execute_command,
                    command_args=execute_command_args,
                )
            else:
                assert step_request.input
                execute_result = await agent.execute(
                    command_name="human_feedback",  # HACK
                    command_args={},
                    user_input=step_request.input,
                )

        # Propose next action
        next_command, next_command_args, raw_output = await agent.propose_action()

        # Format step output
        output = (
            (
                f"Command `{execute_command}({fmt_kwargs(execute_command_args)})` returned:"
                f" {execute_result}\n\n"
            )
            if execute_command_args and execute_command != "ask_user"
            else ""
        )
        output += f"{raw_output['thoughts']['speak']}\n\n"
        output += (
            f"Next Command: {next_command}({fmt_kwargs(next_command_args)})"
            if next_command != "ask_user"
            else next_command_args["question"]
        )

        additional_output = {
            **(
                {
                    "last_action": {
                        "name": execute_command,
                        "args": execute_command_args,
                        "result": execute_result.dict(),
                    },
                }
                if not is_init_step
                else {}
            ),
            **raw_output,
        }

        step = await self.db.update_step(
            task_id=task_id,
            step_id=step.step_id,
            status="completed",
            output=output,
            additional_output=additional_output,
        )

        agent.state.save_to_json_file(agent.file_manager.state_file_path)
        return step
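The branching above decides how a step's input is interpreted: the first step's input is ignored because it duplicates the task input, an empty or "y" input approves the pending command, and anything else is routed back to the agent as human feedback. The same decision restated as a standalone sketch (the names are illustrative, not part of the server API):

def classify_step_input(
    is_init_step: bool, has_pending_action: bool, user_input: str
) -> str:
    """Mirror of execute_step's input handling, for illustration only."""
    if is_init_step:
        return "ignored"   # first step: input duplicates the task input
    if has_pending_action and (not user_input or user_input == "y"):
        return "approve"   # run the previously proposed command
    if has_pending_action:
        return "feedback"  # forwarded to the agent as human_feedback
    return "propose-only"  # nothing pending; just generate the next proposal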
    async def get_step(self, task_id: str, step_id: str) -> Step:
        """
        Get a step by ID.
        """
        step = await self.db.get_step(task_id, step_id)
        return step

    async def list_artifacts(
        self, task_id: str, page: int = 1, pageSize: int = 10
    ) -> TaskArtifactsListResponse:
        """
        List the artifacts that the task has created.
        """
        artifacts, pagination = await self.db.list_artifacts(task_id, page, pageSize)
        return TaskArtifactsListResponse(artifacts=artifacts, pagination=pagination)

    async def create_artifact(
        self, task_id: str, file: UploadFile, relative_path: str
    ) -> Artifact:
        """
        Create an artifact for the task.
        """
        file_name = file.filename or str(uuid4())
        data = b""
        while contents := file.file.read(1024 * 1024):
            data += contents
        # Check if relative path ends with filename
        if relative_path.endswith(file_name):
            file_path = relative_path
        else:
            file_path = os.path.join(relative_path, file_name)

        workspace = get_task_agent_file_workspace(task_id, self.agent_manager)
        workspace.write_file(file_path, data)

        artifact = await self.db.create_artifact(
            task_id=task_id,
            file_name=file_name,
            relative_path=relative_path,
            agent_created=False,
        )
        return artifact

    async def get_artifact(self, task_id: str, artifact_id: str) -> Artifact:
        """
        Get an artifact by ID.
        """
        try:
            artifact = await self.db.get_artifact(artifact_id)
            if artifact.file_name not in artifact.relative_path:
                file_path = os.path.join(artifact.relative_path, artifact.file_name)
            else:
                file_path = artifact.relative_path
            workspace = get_task_agent_file_workspace(task_id, self.agent_manager)
            retrieved_artifact = workspace.read_file(file_path, binary=True)
        except (NotFoundError, FileNotFoundError):
            raise

        return StreamingResponse(
            BytesIO(retrieved_artifact),
            media_type="application/octet-stream",
            headers={
                "Content-Disposition": f"attachment; filename={artifact.file_name}"
            },
        )


def task_agent_id(task_id: str | int) -> str:
    return f"AutoGPT-{task_id}"


def get_task_agent_file_workspace(
    task_id: str | int,
    agent_manager: AgentManager,
) -> FileWorkspace:
    return FileWorkspace(
        root=agent_manager.get_agent_dir(
            agent_id=task_agent_id(task_id),
            must_exist=True,
        ),
        restrict_to_root=True,
    )


def fmt_kwargs(kwargs: dict) -> str:
    return ", ".join(f"{n}={repr(v)}" for n, v in kwargs.items())
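Both module-level helpers above are pure functions, so their behavior is easy to pin down (a quick sketch, runnable next to the definitions):

assert task_agent_id(42) == "AutoGPT-42"
assert fmt_kwargs({"path": "a.txt", "n": 3}) == "path='a.txt', n=3"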
@@ -6,6 +6,14 @@ import click

@click.group(invoke_without_command=True)
@click.pass_context
def cli(ctx: click.Context):
    # Invoke `run` by default
    if ctx.invoked_subcommand is None:
        ctx.invoke(run)


@cli.command()
@click.option("-c", "--continuous", is_flag=True, help="Enable Continuous Mode")
@click.option(
    "--skip-reprompt",
@@ -16,6 +24,7 @@ import click
@click.option(
    "--ai-settings",
    "-C",
    type=click.Path(exists=True, dir_okay=False, path_type=Path),
    help=(
        "Specifies which ai_settings.yaml file to use, relative to the AutoGPT"
        " root directory. Will also automatically skip the re-prompt."
@@ -24,6 +33,7 @@ import click
@click.option(
    "--prompt-settings",
    "-P",
    type=click.Path(exists=True, dir_okay=False, path_type=Path),
    help="Specifies which prompt_settings.yaml file to use.",
)
@click.option(
@@ -82,18 +92,45 @@ import click
    help="AI role override",
)
@click.option(
-    "--ai-goal",
    "--constraint",
    type=str,
    multiple=True,
-    help="AI goal override; may be used multiple times to pass multiple goals",
    help=(
        "Add or override AI constraints to include in the prompt;"
        " may be used multiple times to pass multiple constraints"
    ),
)
-@click.pass_context
-def main(
-    ctx: click.Context,
@click.option(
    "--resource",
    type=str,
    multiple=True,
    help=(
        "Add or override AI resources to include in the prompt;"
        " may be used multiple times to pass multiple resources"
    ),
)
@click.option(
    "--best-practice",
    type=str,
    multiple=True,
    help=(
        "Add or override AI best practices to include in the prompt;"
        " may be used multiple times to pass multiple best practices"
    ),
)
@click.option(
    "--override-directives",
    is_flag=True,
    help=(
        "If specified, --constraint, --resource and --best-practice will override"
        " the AI's directives instead of being appended to them"
    ),
)
def run(
    continuous: bool,
    continuous_limit: int,
-    ai_settings: str,
-    prompt_settings: str,
    ai_settings: Optional[Path],
    prompt_settings: Optional[Path],
    skip_reprompt: bool,
    speak: bool,
    debug: bool,
@@ -107,41 +144,103 @@ def main(
    install_plugin_deps: bool,
    ai_name: Optional[str],
    ai_role: Optional[str],
-    ai_goal: tuple[str],
    resource: tuple[str],
    constraint: tuple[str],
    best_practice: tuple[str],
    override_directives: bool,
) -> None:
    """
-    Welcome to AutoGPT an experimental open-source application showcasing the capabilities of the GPT-4 pushing the boundaries of AI.
-
-    Start an AutoGPT assistant.
    Sets up and runs an agent, based on the task specified by the user, or resumes an
    existing agent.
    """
    # Put imports inside function to avoid importing everything when starting the CLI
    from autogpt.app.main import run_auto_gpt

-    if ctx.invoked_subcommand is None:
-        run_auto_gpt(
-            continuous=continuous,
-            continuous_limit=continuous_limit,
-            ai_settings=ai_settings,
-            prompt_settings=prompt_settings,
-            skip_reprompt=skip_reprompt,
-            speak=speak,
-            debug=debug,
-            gpt3only=gpt3only,
-            gpt4only=gpt4only,
-            memory_type=memory_type,
-            browser_name=browser_name,
-            allow_downloads=allow_downloads,
-            skip_news=skip_news,
-            working_directory=Path(
-                __file__
-            ).parent.parent.parent,  # TODO: make this an option
-            workspace_directory=workspace_directory,
-            install_plugin_deps=install_plugin_deps,
-            ai_name=ai_name,
-            ai_role=ai_role,
-            ai_goals=ai_goal,
-        )
    run_auto_gpt(
        continuous=continuous,
        continuous_limit=continuous_limit,
        ai_settings=ai_settings,
        prompt_settings=prompt_settings,
        skip_reprompt=skip_reprompt,
        speak=speak,
        debug=debug,
        gpt3only=gpt3only,
        gpt4only=gpt4only,
        memory_type=memory_type,
        browser_name=browser_name,
        allow_downloads=allow_downloads,
        skip_news=skip_news,
        workspace_directory=workspace_directory,
        install_plugin_deps=install_plugin_deps,
        override_ai_name=ai_name,
        override_ai_role=ai_role,
        resources=list(resource),
        constraints=list(constraint),
        best_practices=list(best_practice),
        override_directives=override_directives,
    )


@cli.command()
@click.option(
    "--prompt-settings",
    "-P",
    type=click.Path(exists=True, dir_okay=False, path_type=Path),
    help="Specifies which prompt_settings.yaml file to use.",
)
@click.option("--debug", is_flag=True, help="Enable Debug Mode")
@click.option("--gpt3only", is_flag=True, help="Enable GPT3.5 Only Mode")
@click.option("--gpt4only", is_flag=True, help="Enable GPT4 Only Mode")
@click.option(
    "--use-memory",
    "-m",
    "memory_type",
    type=str,
    help="Defines which Memory backend to use",
)
@click.option(
    "-b",
    "--browser-name",
    help="Specifies which web-browser to use when using selenium to scrape the web.",
)
@click.option(
    "--allow-downloads",
    is_flag=True,
    help="Dangerous: Allows AutoGPT to download files natively.",
)
@click.option(
    "--install-plugin-deps",
    is_flag=True,
    help="Installs external dependencies for 3rd party plugins.",
)
def serve(
    prompt_settings: Optional[Path],
    debug: bool,
    gpt3only: bool,
    gpt4only: bool,
    memory_type: str,
    browser_name: str,
    allow_downloads: bool,
    install_plugin_deps: bool,
) -> None:
    """
    Starts an Agent Protocol compliant AutoGPT server, which creates a custom agent for
    every task.
    """
    # Put imports inside function to avoid importing everything when starting the CLI
    from autogpt.app.main import run_auto_gpt_server

    run_auto_gpt_server(
        prompt_settings=prompt_settings,
        debug=debug,
        gpt3only=gpt3only,
        gpt4only=gpt4only,
        memory_type=memory_type,
        browser_name=browser_name,
        allow_downloads=allow_downloads,
        install_plugin_deps=install_plugin_deps,
    )


if __name__ == "__main__":
-    main()
    cli()
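Because run and serve are ordinary click commands, the new entrypoints can be smoke-tested from Python with click's built-in test runner. A hedged sketch; the autogpt.cli import path is an assumption based on this diff:

from click.testing import CliRunner

from autogpt.cli import cli  # assumed module path

runner = CliRunner()
result = runner.invoke(cli, ["serve", "--help"])  # exercises the new subcommand
print(result.exit_code, result.output)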
@@ -2,7 +2,8 @@
from __future__ import annotations

import logging
-from typing import Literal
from pathlib import Path
from typing import Literal, Optional

import click
from colorama import Back, Fore, Style
@@ -17,29 +18,29 @@ from autogpt.memory.vector import get_supported_memory_backends
logger = logging.getLogger(__name__)


-def create_config(
def apply_overrides_to_config(
    config: Config,
-    continuous: bool,
-    continuous_limit: int,
-    ai_settings_file: str,
-    prompt_settings_file: str,
-    skip_reprompt: bool,
-    speak: bool,
-    debug: bool,
-    gpt3only: bool,
-    gpt4only: bool,
-    memory_type: str,
-    browser_name: str,
-    allow_downloads: bool,
-    skip_news: bool,
    continuous: bool = False,
    continuous_limit: Optional[int] = None,
    ai_settings_file: Optional[Path] = None,
    prompt_settings_file: Optional[Path] = None,
    skip_reprompt: bool = False,
    speak: bool = False,
    debug: bool = False,
    gpt3only: bool = False,
    gpt4only: bool = False,
    memory_type: str = "",
    browser_name: str = "",
    allow_downloads: bool = False,
    skip_news: bool = False,
) -> None:
    """Updates the config object with the given arguments.

    Args:
        continuous (bool): Whether to run in continuous mode
        continuous_limit (int): The number of times to run in continuous mode
-        ai_settings_file (str): The path to the ai_settings.yaml file
-        prompt_settings_file (str): The path to the prompt_settings.yaml file
        ai_settings_file (Path): The path to the ai_settings.yaml file
        prompt_settings_file (Path): The path to the prompt_settings.yaml file
        skip_reprompt (bool): Whether to skip the re-prompting messages at the beginning of the script
        speak (bool): Whether to enable speak mode
        debug (bool): Whether to enable debug mode
@@ -52,7 +53,7 @@ def create_config(
    """
    config.debug_mode = False
    config.continuous_mode = False
-    config.speak_mode = False
    config.tts_config.speak_mode = False

    if debug:
        print_attribute("Debug mode", "ENABLED")
@@ -77,7 +78,7 @@ def create_config(

    if speak:
        print_attribute("Speak Mode", "ENABLED")
-        config.speak_mode = True
        config.tts_config.speak_mode = True

    # Set the default LLM models
    if gpt3only:
@@ -130,7 +131,7 @@ def create_config(
            exit(1)

        print_attribute("Using AI Settings File", file)
-        config.ai_settings_file = file
        config.ai_settings_file = config.project_root / file
        config.skip_reprompt = True

    if prompt_settings_file:
@@ -144,7 +145,7 @@ def create_config(
            exit(1)

        print_attribute("Using Prompt Settings File", file)
-        config.prompt_settings_file = file
        config.prompt_settings_file = config.project_root / file

    if browser_name:
        config.selenium_web_browser = browser_name
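The practical effect of the rename is that every parameter except config now defaults to "no override", so entrypoints like serve can pass only the options they actually expose. A usage sketch under that assumption:

from pathlib import Path

from autogpt.app.configurator import apply_overrides_to_config
from autogpt.config import ConfigBuilder

config = ConfigBuilder.build_config_from_env()  # requires a configured environment
apply_overrides_to_config(
    config=config,
    continuous=True,
    continuous_limit=5,
    prompt_settings_file=Path("prompt_settings.yaml"),  # illustrative path
)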
@@ -2,52 +2,59 @@
import enum
import logging
import math
import re
import signal
import sys
from pathlib import Path
from types import FrameType
-from typing import Optional
from typing import TYPE_CHECKING, Optional

from colorama import Fore, Style
from forge.sdk.db import AgentDB
from pydantic import SecretStr

if TYPE_CHECKING:
    from autogpt.agents.agent import Agent

from autogpt.agent_factory.configurators import configure_agent_with_state, create_agent
from autogpt.agent_factory.profile_generator import generate_agent_profile_for_task
from autogpt.agent_manager import AgentManager
from autogpt.agents import AgentThoughts, CommandArgs, CommandName
-from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
-from autogpt.agents.utils.exceptions import InvalidAgentResponseError
-from autogpt.app.configurator import create_config
-from autogpt.app.setup import interactive_ai_config_setup
-from autogpt.app.spinner import Spinner
-from autogpt.app.utils import (
-    clean_input,
-    get_current_git_branch,
-    get_latest_bulletin,
-    get_legal_warning,
-    markdown_to_ansi_style,
-)
from autogpt.commands import COMMAND_CATEGORIES
-from autogpt.config import AIConfig, Config, ConfigBuilder, check_openai_api_key
-from autogpt.core.resource.model_providers import (
-    ChatModelProvider,
-    ModelProviderCredentials,
-)
from autogpt.agents.utils.exceptions import AgentTerminated, InvalidAgentResponseError
from autogpt.config import (
    AIDirectives,
    AIProfile,
    Config,
    ConfigBuilder,
    assert_config_has_openai_api_key,
)
from autogpt.core.resource.model_providers import ModelProviderCredentials
from autogpt.core.resource.model_providers.openai import OpenAIProvider
from autogpt.core.runner.client_lib.utils import coroutine
-from autogpt.llm.api_manager import ApiManager
from autogpt.logs.config import configure_chat_plugins, configure_logging
from autogpt.logs.helpers import print_attribute, speak
from autogpt.memory.vector import get_memory
from autogpt.models.command_registry import CommandRegistry
from autogpt.plugins import scan_plugins
-from autogpt.workspace import Workspace
from scripts.install_plugin_deps import install_plugin_dependencies

from .configurator import apply_overrides_to_config
from .setup import apply_overrides_to_ai_settings, interactively_revise_ai_settings
from .spinner import Spinner
from .utils import (
    clean_input,
    get_legal_warning,
    markdown_to_ansi_style,
    print_git_branch_info,
    print_motd,
    print_python_version_info,
)


@coroutine
async def run_auto_gpt(
    continuous: bool,
    continuous_limit: int,
-    ai_settings: str,
-    prompt_settings: str,
    ai_settings: Optional[Path],
    prompt_settings: Optional[Path],
    skip_reprompt: bool,
    speak: bool,
    debug: bool,
@@ -57,37 +64,43 @@ async def run_auto_gpt(
    browser_name: str,
    allow_downloads: bool,
    skip_news: bool,
-    working_directory: Path,
-    workspace_directory: str | Path,
    workspace_directory: Path,
    install_plugin_deps: bool,
-    ai_name: Optional[str] = None,
-    ai_role: Optional[str] = None,
-    ai_goals: tuple[str] = tuple(),
    override_ai_name: str = "",
    override_ai_role: str = "",
    resources: Optional[list[str]] = None,
    constraints: Optional[list[str]] = None,
    best_practices: Optional[list[str]] = None,
    override_directives: bool = False,
):
-    config = ConfigBuilder.build_config_from_env(workdir=working_directory)
    config = ConfigBuilder.build_config_from_env()

    # TODO: fill in llm values here
-    check_openai_api_key(config)
    assert_config_has_openai_api_key(config)

-    create_config(
-        config,
-        continuous,
-        continuous_limit,
-        ai_settings,
-        prompt_settings,
-        skip_reprompt,
-        speak,
-        debug,
-        gpt3only,
-        gpt4only,
-        memory_type,
-        browser_name,
-        allow_downloads,
-        skip_news,
    apply_overrides_to_config(
        config=config,
        continuous=continuous,
        continuous_limit=continuous_limit,
        ai_settings_file=ai_settings,
        prompt_settings_file=prompt_settings,
        skip_reprompt=skip_reprompt,
        speak=speak,
        debug=debug,
        gpt3only=gpt3only,
        gpt4only=gpt4only,
        memory_type=memory_type,
        browser_name=browser_name,
        allow_downloads=allow_downloads,
        skip_news=skip_news,
    )

    # Set up logging module
-    configure_logging(config)
    configure_logging(
        debug_mode=debug,
        plain_output=config.plain_output,
        tts_config=config.tts_config,
    )

    llm_provider = _configure_openai_provider(config)

@@ -105,102 +118,229 @@ async def run_auto_gpt(
    )

    if not config.skip_news:
-        motd, is_new_motd = get_latest_bulletin()
-        if motd:
-            motd = markdown_to_ansi_style(motd)
-            for motd_line in motd.split("\n"):
-                logger.info(
-                    extra={
-                        "title": "NEWS:",
-                        "title_color": Fore.GREEN,
-                        "preserve_color": True,
-                    },
-                    msg=motd_line,
-                )
-            if is_new_motd and not config.chat_messages_enabled:
-                input(
-                    Fore.MAGENTA
-                    + Style.BRIGHT
-                    + "NEWS: Bulletin was updated! Press Enter to continue..."
-                    + Style.RESET_ALL
-                )
-
-    git_branch = get_current_git_branch()
-    if git_branch and git_branch != "stable":
-        logger.warn(
-            f"You are running on `{git_branch}` branch"
-            " - this is not a supported branch."
-        )
-    if sys.version_info < (3, 10):
-        logger.error(
-            "WARNING: You are running on an older version of Python. "
-            "Some people have observed problems with certain "
-            "parts of AutoGPT with this version. "
-            "Please consider upgrading to Python 3.10 or higher.",
-        )
        print_motd(config, logger)
    print_git_branch_info(logger)
    print_python_version_info(logger)

    if install_plugin_deps:
        install_plugin_dependencies()

-    # TODO: have this directory live outside the repository (e.g. in a user's
-    # home directory) and have it come in as a command line argument or part of
-    # the env file.
-    config.workspace_path = Workspace.init_workspace_directory(
-        config, workspace_directory
-    )
-
-    # HACK: doing this here to collect some globals that depend on the workspace.
-    config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path)
-
    config.plugins = scan_plugins(config, config.debug_mode)
    configure_chat_plugins(config)

-    # Create a CommandRegistry instance and scan default folder
-    command_registry = CommandRegistry.with_command_modules(COMMAND_CATEGORIES, config)
    # Let user choose an existing agent to run
    agent_manager = AgentManager(config.app_data_dir)
    existing_agents = agent_manager.list_agents()
    load_existing_agent = ""
    if existing_agents:
        print(
            "Existing agents\n---------------\n"
            + "\n".join(f"{i} - {id}" for i, id in enumerate(existing_agents, 1))
        )
        load_existing_agent = await clean_input(
            config,
            "Enter the number or name of the agent to run, or hit enter to create a new one:",
        )
        if re.match(r"^\d+$", load_existing_agent):
            load_existing_agent = existing_agents[int(load_existing_agent) - 1]
        elif load_existing_agent and load_existing_agent not in existing_agents:
            raise ValueError(f"Unknown agent '{load_existing_agent}'")

-    ai_config = await construct_main_ai_config(
-        config,
-        llm_provider=llm_provider,
-        name=ai_name,
-        role=ai_role,
-        goals=ai_goals,
-    )
-    # print(prompt)
    # Either load existing or set up new agent state
    agent = None
    agent_state = None

-    # Initialize memory and make sure it is empty.
-    # this is particularly important for indexing and referencing pinecone memory
-    memory = get_memory(config)
-    memory.clear()
-    print_attribute("Configured Memory", memory.__class__.__name__)
    ############################
    # Resume an Existing Agent #
    ############################
    if load_existing_agent:
        agent_state = agent_manager.retrieve_state(load_existing_agent)
        while True:
            answer = await clean_input(config, "Resume? [Y/n]")
            if answer.lower() == "y":
                break
            elif answer.lower() == "n":
                agent_state = None
                break
            else:
                print("Please respond with 'y' or 'n'")

-    print_attribute("Configured Browser", config.selenium_web_browser)
    if agent_state:
        agent = configure_agent_with_state(
            state=agent_state,
            app_config=config,
            llm_provider=llm_provider,
        )
        apply_overrides_to_ai_settings(
            ai_profile=agent.state.ai_profile,
            directives=agent.state.directives,
            override_name=override_ai_name,
            override_role=override_ai_role,
            resources=resources,
            constraints=constraints,
            best_practices=best_practices,
            replace_directives=override_directives,
        )

-    agent_prompt_config = Agent.default_settings.prompt_config.copy(deep=True)
-    agent_prompt_config.use_functions_api = config.openai_functions
        # If any of these are specified as arguments,
        # assume the user doesn't want to revise them
        if not any(
            [
                override_ai_name,
                override_ai_role,
                resources,
                constraints,
                best_practices,
            ]
        ):
            ai_profile, ai_directives = await interactively_revise_ai_settings(
                ai_profile=agent.state.ai_profile,
                directives=agent.state.directives,
                app_config=config,
            )
        else:
            logger.info("AI config overrides specified through CLI; skipping revision")

-    agent_settings = AgentSettings(
-        name=Agent.default_settings.name,
-        description=Agent.default_settings.description,
-        ai_config=ai_config,
-        config=AgentConfiguration(
-            fast_llm=config.fast_llm,
-            smart_llm=config.smart_llm,
-            use_functions_api=config.openai_functions,
-            plugins=config.plugins,
-        ),
-        prompt_config=agent_prompt_config,
-        history=Agent.default_settings.history.copy(deep=True),
    ######################
    # Set up a new Agent #
    ######################
    if not agent:
        task = await clean_input(
            config,
            "Enter the task that you want AutoGPT to execute,"
            " with as much detail as possible:",
        )
        base_ai_directives = AIDirectives.from_file(config.prompt_settings_file)

        ai_profile, task_oriented_ai_directives = await generate_agent_profile_for_task(
            task,
            app_config=config,
            llm_provider=llm_provider,
        )
        ai_directives = base_ai_directives + task_oriented_ai_directives
        apply_overrides_to_ai_settings(
            ai_profile=ai_profile,
            directives=ai_directives,
            override_name=override_ai_name,
            override_role=override_ai_role,
            resources=resources,
            constraints=constraints,
            best_practices=best_practices,
            replace_directives=override_directives,
        )

        # If any of these are specified as arguments,
        # assume the user doesn't want to revise them
        if not any(
            [
                override_ai_name,
                override_ai_role,
                resources,
                constraints,
                best_practices,
            ]
        ):
            ai_profile, ai_directives = await interactively_revise_ai_settings(
                ai_profile=ai_profile,
                directives=ai_directives,
                app_config=config,
            )
        else:
            logger.info("AI config overrides specified through CLI; skipping revision")

        agent = create_agent(
            task=task,
            ai_profile=ai_profile,
            directives=ai_directives,
            app_config=config,
            llm_provider=llm_provider,
        )
        agent.attach_fs(agent_manager.get_agent_dir(agent.state.agent_id))

    if not agent.config.allow_fs_access:
        logger.info(
            f"{Fore.YELLOW}NOTE: All files/directories created by this agent"
            f" can be found inside its workspace at:{Fore.RESET} {agent.workspace.root}",
            extra={"preserve_color": True},
        )

    #################
    # Run the Agent #
    #################
    try:
        await run_interaction_loop(agent)
    except AgentTerminated:
        agent_id = agent.state.agent_id
        logger.info(f"Saving state of {agent_id}...")

        # Allow user to Save As other ID
        save_as_id = (
            await clean_input(
                config,
                f"Press enter to save as '{agent_id}', or enter a different ID to save to:",
            )
            or agent_id
        )
        if save_as_id and save_as_id != agent_id:
            agent.set_id(
                new_id=save_as_id,
                new_agent_dir=agent_manager.get_agent_dir(save_as_id),
            )
            # TODO: clone workspace if user wants that
            # TODO: ... OR allow many-to-one relations of agents and workspaces

        agent.state.save_to_json_file(agent.file_manager.state_file_path)


@coroutine
async def run_auto_gpt_server(
    prompt_settings: Optional[Path],
    debug: bool,
    gpt3only: bool,
    gpt4only: bool,
    memory_type: str,
    browser_name: str,
    allow_downloads: bool,
    install_plugin_deps: bool,
):
    from .agent_protocol_server import AgentProtocolServer

    config = ConfigBuilder.build_config_from_env()

    # TODO: fill in llm values here
    assert_config_has_openai_api_key(config)

    apply_overrides_to_config(
        config=config,
        prompt_settings_file=prompt_settings,
        debug=debug,
        gpt3only=gpt3only,
        gpt4only=gpt4only,
        memory_type=memory_type,
        browser_name=browser_name,
        allow_downloads=allow_downloads,
    )

-    agent = Agent(
-        settings=agent_settings,
-        llm_provider=llm_provider,
-        command_registry=command_registry,
-        memory=memory,
-        legacy_config=config,
    # Set up logging module
    configure_logging(
        debug_mode=debug,
        plain_output=config.plain_output,
        tts_config=config.tts_config,
    )

-    await run_interaction_loop(agent)
    llm_provider = _configure_openai_provider(config)

    if install_plugin_deps:
        install_plugin_dependencies()

    config.plugins = scan_plugins(config, config.debug_mode)

    # Set up & start server
    database = AgentDB("sqlite:///data/ap_server.db", debug_enabled=False)
    server = AgentProtocolServer(
        app_config=config, database=database, llm_provider=llm_provider
    )
    await server.start()


def _configure_openai_provider(config: Config) -> OpenAIProvider:
@@ -252,7 +392,7 @@ class UserFeedback(str, enum.Enum):


async def run_interaction_loop(
-    agent: Agent,
    agent: "Agent",
) -> None:
    """Run the main interaction loop for the agent.

@@ -264,31 +404,42 @@ async def run_interaction_loop(
    """
    # These contain both application config and agent config, so grab them here.
    legacy_config = agent.legacy_config
-    ai_config = agent.ai_config
    ai_profile = agent.ai_profile
    logger = logging.getLogger(__name__)

    cycle_budget = cycles_remaining = _get_cycle_budget(
        legacy_config.continuous_mode, legacy_config.continuous_limit
    )
    spinner = Spinner("Thinking...", plain_output=legacy_config.plain_output)
    stop_reason = None

    def graceful_agent_interrupt(signum: int, frame: Optional[FrameType]) -> None:
-        nonlocal cycle_budget, cycles_remaining, spinner
-        if cycles_remaining in [0, 1]:
-            logger.error("Interrupt signal received. Stopping AutoGPT immediately.")
        nonlocal cycle_budget, cycles_remaining, spinner, stop_reason
        if stop_reason:
            logger.error("Quitting immediately...")
            sys.exit()
        if cycles_remaining in [0, 1]:
            logger.warning("Interrupt signal received: shutting down gracefully.")
            logger.warning(
                "Press Ctrl+C again if you want to stop AutoGPT immediately."
            )
            stop_reason = AgentTerminated("Interrupt signal received")
        else:
            restart_spinner = spinner.running
            if spinner.running:
                spinner.stop()

            logger.error(
-                "Interrupt signal received. Stopping continuous command execution."
                "Interrupt signal received: stopping continuous command execution."
            )
            cycles_remaining = 1
            if restart_spinner:
                spinner.start()

    def handle_stop_signal() -> None:
        if stop_reason:
            raise stop_reason

    # Set up an interrupt signal for the agent.
    signal.signal(signal.SIGINT, graceful_agent_interrupt)
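graceful_agent_interrupt implements a two-stage Ctrl+C: the first interrupt requests a stop via stop_reason, which handle_stop_signal raises at safe points in the loop, and a second interrupt exits immediately. The same pattern in a self-contained sketch (illustrative only; the real handler also manages the spinner and cycle budget):

import signal
import sys
import time
from types import FrameType
from typing import Optional

stop_requested = False

def on_sigint(signum: int, frame: Optional[FrameType]) -> None:
    global stop_requested
    if stop_requested:
        sys.exit(1)  # second Ctrl+C: quit immediately
    stop_requested = True
    print("Interrupt received: finishing current cycle; Ctrl+C again to force-quit.")

signal.signal(signal.SIGINT, on_sigint)

while not stop_requested:
    time.sleep(0.1)  # stand-in for one agent cycle
print("Stopped gracefully.")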
@@ -305,6 +456,7 @@ async def run_interaction_loop(
        ########
        # Plan #
        ########
        handle_stop_signal()
        # Have the agent determine the next action to take.
        with spinner:
            try:
@@ -318,10 +470,13 @@ async def run_interaction_loop(
                consecutive_failures += 1
                if consecutive_failures >= 3:
                    logger.error(
-                        f"The agent failed to output valid thoughts {consecutive_failures} "
-                        "times in a row. Terminating..."
                        "The agent failed to output valid thoughts"
                        f" {consecutive_failures} times in a row. Terminating..."
                    )
                    raise AgentTerminated(
                        "The agent failed to output valid thoughts"
                        f" {consecutive_failures} times in a row."
                    )
-                    sys.exit()
                continue

            consecutive_failures = 0
@@ -331,16 +486,21 @@ async def run_interaction_loop(
        ###############
        # Print the assistant's thoughts and the next command to the user.
        update_user(
-            legacy_config, ai_config, command_name, command_args, assistant_reply_dict
            ai_profile,
            command_name,
            command_args,
            assistant_reply_dict,
            speak_mode=legacy_config.tts_config.speak_mode,
        )

        ##################
        # Get user input #
        ##################
        handle_stop_signal()
        if cycles_remaining == 1:  # Last cycle
            user_feedback, user_input, new_cycles_remaining = await get_user_feedback(
                legacy_config,
-                ai_config,
                ai_profile,
            )

            if user_feedback == UserFeedback.AUTHORIZE:
@@ -394,6 +554,8 @@ async def run_interaction_loop(
        if not command_name:
            continue

        handle_stop_signal()

        result = await agent.execute(command_name, command_args, user_input)

        if result.status == "success":
@@ -405,26 +567,30 @@ async def run_interaction_loop(


def update_user(
-    config: Config,
-    ai_config: AIConfig,
    ai_profile: AIProfile,
    command_name: CommandName,
    command_args: CommandArgs,
    assistant_reply_dict: AgentThoughts,
    speak_mode: bool = False,
) -> None:
    """Prints the assistant's thoughts and the next command to the user.

    Args:
-        config: The program's configuration.
-        ai_config: The AI's configuration.
        ai_profile: The AI's personality/profile
        command_name: The name of the command to execute.
        command_args: The arguments for the command.
        assistant_reply_dict: The assistant's reply.
    """
    logger = logging.getLogger(__name__)

-    print_assistant_thoughts(ai_config.ai_name, assistant_reply_dict, config)
    print_assistant_thoughts(
        ai_name=ai_profile.ai_name,
        assistant_reply_json_valid=assistant_reply_dict,
        speak_mode=speak_mode,
    )

-    if config.speak_mode:
    if speak_mode:
        speak(f"I want to execute {command_name}")

    # First log new-line so user can differentiate sections better in console
@@ -442,13 +608,13 @@ def update_user(

async def get_user_feedback(
    config: Config,
-    ai_config: AIConfig,
    ai_profile: AIProfile,
) -> tuple[UserFeedback, str, int | None]:
    """Gets the user's feedback on the assistant's reply.

    Args:
        config: The program's configuration.
-        ai_config: The AI's configuration.
        ai_profile: The AI's configuration.

    Returns:
        A tuple of the user's feedback, the user's input, and the number of
@@ -463,7 +629,7 @@ async def get_user_feedback(
        f"Enter '{config.authorise_key}' to authorise command, "
        f"'{config.authorise_key} -N' to run N continuous commands, "
        f"'{config.exit_key}' to exit program, or enter feedback for "
-        f"{ai_config.ai_name}..."
        f"{ai_profile.ai_name}..."
    )

    user_feedback = None
@@ -503,93 +669,10 @@ async def get_user_feedback(
    return user_feedback, user_input, new_cycles_remaining


-async def construct_main_ai_config(
-    config: Config,
-    llm_provider: ChatModelProvider,
-    name: Optional[str] = None,
-    role: Optional[str] = None,
-    goals: tuple[str] = tuple(),
-) -> AIConfig:
-    """Construct the prompt for the AI to respond to
-
-    Returns:
-        str: The prompt string
-    """
-    logger = logging.getLogger(__name__)
-
-    ai_config = AIConfig.load(config.workdir / config.ai_settings_file)
-
-    # Apply overrides
-    if name:
-        ai_config.ai_name = name
-    if role:
-        ai_config.ai_role = role
-    if goals:
-        ai_config.ai_goals = list(goals)
-
-    if (
-        all([name, role, goals])
-        or config.skip_reprompt
-        and all([ai_config.ai_name, ai_config.ai_role, ai_config.ai_goals])
-    ):
-        print_attribute("Name :", ai_config.ai_name)
-        print_attribute("Role :", ai_config.ai_role)
-        print_attribute("Goals:", ai_config.ai_goals)
-        print_attribute(
-            "API Budget:",
-            "infinite" if ai_config.api_budget <= 0 else f"${ai_config.api_budget}",
-        )
-    elif all([ai_config.ai_name, ai_config.ai_role, ai_config.ai_goals]):
-        logger.info(
-            extra={"title": f"{Fore.GREEN}Welcome back!{Fore.RESET}"},
-            msg=f"Would you like me to return to being {ai_config.ai_name}?",
-        )
-        should_continue = await clean_input(
-            config,
-            f"""Continue with the last settings?
-Name: {ai_config.ai_name}
-Role: {ai_config.ai_role}
-Goals: {ai_config.ai_goals}
-API Budget: {"infinite" if ai_config.api_budget <= 0 else f"${ai_config.api_budget}"}
-Continue ({config.authorise_key}/{config.exit_key}):""",
-        )
-        if should_continue.lower() == config.exit_key:
-            ai_config = AIConfig()
-
-    if any([not ai_config.ai_name, not ai_config.ai_role, not ai_config.ai_goals]):
-        ai_config = await interactive_ai_config_setup(config, llm_provider)
-        ai_config.save(config.workdir / config.ai_settings_file)
-
-    if config.restrict_to_workspace:
-        logger.info(
-            f"{Fore.YELLOW}NOTE: All files/directories created by this agent"
-            f" can be found inside its workspace at:{Fore.RESET} {config.workspace_path}",
-            extra={"preserve_color": True},
-        )
-    # set the total api budget
-    api_manager = ApiManager()
-    api_manager.set_total_budget(ai_config.api_budget)
-
-    # Agent Created, print message
-    logger.info(
-        f"{Fore.LIGHTBLUE_EX}{ai_config.ai_name}{Fore.RESET} has been created with the following details:",
-        extra={"preserve_color": True},
-    )
-
-    # Print the ai_config details
-    print_attribute("Name :", ai_config.ai_name)
-    print_attribute("Role :", ai_config.ai_role)
-    print_attribute("Goals:", "")
-    for goal in ai_config.ai_goals:
-        logger.info(f"- {goal}")
-
-    return ai_config
-
-
def print_assistant_thoughts(
    ai_name: str,
    assistant_reply_json_valid: dict,
-    config: Config,
    speak_mode: bool = False,
) -> None:
    logger = logging.getLogger(__name__)

@@ -634,7 +717,7 @@ def print_assistant_thoughts(

    # Speak the assistant's thoughts
    if assistant_thoughts_speak:
-        if config.speak_mode:
        if speak_mode:
            speak(assistant_thoughts_speak)
        else:
            print_attribute("SPEAK", assistant_thoughts_speak, title_color=Fore.YELLOW)
@@ -1,253 +1,190 @@
"""Set up the AI and its goals"""
import logging
-import re
from typing import Optional

-from colorama import Fore, Style
-from jinja2 import Template
-
-from autogpt.app import utils
-from autogpt.config import Config
-from autogpt.config.ai_config import AIConfig
-from autogpt.core.resource.model_providers import ChatMessage, ChatModelProvider
-from autogpt.logs.helpers import user_friendly_output
-from autogpt.prompts.default_prompts import (
-    DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC,
-    DEFAULT_TASK_PROMPT_AICONFIG_AUTOMATIC,
-    DEFAULT_USER_DESIRE_PROMPT,
-)
from autogpt.app.utils import clean_input
from autogpt.config import AIDirectives, AIProfile, Config
from autogpt.logs.helpers import print_attribute

logger = logging.getLogger(__name__)


-async def interactive_ai_config_setup(
-    config: Config,
-    llm_provider: ChatModelProvider,
-    ai_config_template: Optional[AIConfig] = None,
-) -> AIConfig:
-    """Prompt the user for input
def apply_overrides_to_ai_settings(
    ai_profile: AIProfile,
    directives: AIDirectives,
    override_name: str = "",
    override_role: str = "",
    replace_directives: bool = False,
    resources: Optional[list[str]] = None,
    constraints: Optional[list[str]] = None,
    best_practices: Optional[list[str]] = None,
):
    if override_name:
        ai_profile.ai_name = override_name
    if override_role:
        ai_profile.ai_role = override_role

-    Params:
-        config (Config): The Config object
-        ai_config_template (AIConfig): The AIConfig object to use as a template
    if replace_directives:
        if resources:
            directives.resources = resources
        if constraints:
            directives.constraints = constraints
        if best_practices:
            directives.best_practices = best_practices
    else:
        if resources:
            directives.resources += resources
        if constraints:
            directives.constraints += constraints
        if best_practices:
            directives.best_practices += best_practices
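In short: name and role overrides always win, while the list-valued directives are appended by default and replaced wholesale when replace_directives=True. A sketch of the append path follows; the constructor field names are assumptions taken from this diff:

from autogpt.app.setup import apply_overrides_to_ai_settings
from autogpt.config import AIDirectives, AIProfile

profile = AIProfile(ai_name="AutoGPT", ai_role="a generalist agent")
directives = AIDirectives(
    resources=["internet access"], constraints=[], best_practices=[]
)

apply_overrides_to_ai_settings(
    ai_profile=profile,
    directives=directives,
    override_name="ResearchGPT",
    resources=["local file access"],
    replace_directives=False,  # append instead of replace
)
assert profile.ai_name == "ResearchGPT"
assert directives.resources == ["internet access", "local file access"]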
async def interactively_revise_ai_settings(
|
||||
ai_profile: AIProfile,
|
||||
directives: AIDirectives,
|
||||
app_config: Config,
|
||||
):
|
||||
"""Interactively revise the AI settings.
|
||||
|
||||
Args:
|
||||
ai_profile (AIConfig): The current AI profile.
|
||||
ai_directives (AIDirectives): The current AI directives.
|
||||
app_config (Config): The application configuration.
|
||||
|
||||
Returns:
|
||||
AIConfig: The AIConfig object tailored to the user's input
|
||||
AIConfig: The revised AI settings.
|
||||
"""
|
||||
logger = logging.getLogger("revise_ai_profile")
|
||||
|
||||
# Construct the prompt
|
||||
user_friendly_output(
|
||||
title="Welcome to AutoGPT! ",
|
||||
message="run with '--help' for more information.",
|
||||
title_color=Fore.GREEN,
|
||||
)
|
||||
revised = False
|
||||
|
||||
ai_config_template_provided = ai_config_template is not None and any(
|
||||
[
|
||||
ai_config_template.ai_goals,
|
||||
ai_config_template.ai_name,
|
||||
ai_config_template.ai_role,
|
||||
]
|
||||
)
|
||||
|
||||
user_desire = ""
|
||||
if not ai_config_template_provided:
|
||||
# Get user desire if command line overrides have not been passed in
|
||||
user_friendly_output(
|
||||
title="Create an AI-Assistant:",
|
||||
message="input '--manual' to enter manual mode.",
|
||||
title_color=Fore.GREEN,
|
||||
while True:
|
||||
# Print the current AI configuration
|
||||
print_ai_settings(
|
||||
title="Current AI Settings" if not revised else "Revised AI Settings",
|
||||
ai_profile=ai_profile,
|
||||
directives=directives,
|
||||
logger=logger,
|
||||
)
|
||||
|
||||
user_desire = await utils.clean_input(
|
||||
config, f"{Fore.LIGHTBLUE_EX}I want AutoGPT to{Style.RESET_ALL}:"
|
||||
)
|
||||
if (
|
||||
await clean_input(app_config, "Continue with these settings? [Y/n]")
|
||||
or app_config.authorise_key
|
||||
) == app_config.authorise_key:
|
||||
break
|
||||
|
||||
if user_desire.strip() == "":
|
||||
user_desire = DEFAULT_USER_DESIRE_PROMPT # Default prompt
|
||||
|
||||
# If user desire contains "--manual" or we have overridden any of the AI configuration
|
||||
if "--manual" in user_desire or ai_config_template_provided:
|
||||
user_friendly_output(
|
||||
"",
|
||||
title="Manual Mode Selected",
|
||||
title_color=Fore.GREEN,
|
||||
)
|
||||
return await generate_aiconfig_manual(config, ai_config_template)
|
||||
|
||||
else:
|
||||
try:
|
||||
return await generate_aiconfig_automatic(user_desire, config, llm_provider)
|
||||
except Exception as e:
|
||||
user_friendly_output(
|
||||
title="Unable to automatically generate AI Config based on user desire.",
|
||||
message="Falling back to manual mode.",
|
||||
title_color=Fore.RED,
|
||||
# Ask for revised ai_profile
|
||||
ai_profile.ai_name = (
|
||||
await clean_input(
|
||||
app_config, "Enter AI name (or press enter to keep current):"
|
||||
)
|
||||
logger.debug(f"Error during AIConfig generation: {e}")
|
||||
|
||||
return await generate_aiconfig_manual(config)
|
||||
|
||||
|
||||
async def generate_aiconfig_manual(
|
||||
config: Config, ai_config_template: Optional[AIConfig] = None
|
||||
) -> AIConfig:
|
||||
"""
|
||||
Interactively create an AI configuration by prompting the user to provide the name, role, and goals of the AI.
|
||||
|
||||
This function guides the user through a series of prompts to collect the necessary information to create
|
||||
an AIConfig object. The user will be asked to provide a name and role for the AI, as well as up to five
|
||||
goals. If the user does not provide a value for any of the fields, default values will be used.
|
||||
|
||||
Params:
|
||||
config (Config): The Config object
|
||||
ai_config_template (AIConfig): The AIConfig object to use as a template
|
||||
|
||||
Returns:
|
||||
AIConfig: An AIConfig object containing the user-defined or default AI name, role, and goals.
|
||||
"""
|
||||
|
||||
# Manual Setup Intro
|
||||
user_friendly_output(
|
||||
title="Create an AI-Assistant:",
|
||||
message="Enter the name of your AI and its role below. Entering nothing will load"
|
||||
" defaults.",
|
||||
title_color=Fore.GREEN,
|
||||
)
|
||||
|
||||
if ai_config_template and ai_config_template.ai_name:
|
||||
ai_name = ai_config_template.ai_name
|
||||
else:
|
||||
ai_name = ""
|
||||
# Get AI Name from User
|
||||
user_friendly_output(
|
||||
title="Name your AI:",
|
||||
message="For example, 'Entrepreneur-GPT'",
|
||||
title_color=Fore.GREEN,
|
||||
or ai_profile.ai_name
|
||||
)
|
||||
ai_name = await utils.clean_input(config, "AI Name:")
|
||||
if ai_name == "":
|
||||
ai_name = "Entrepreneur-GPT"
|
||||
|
||||
user_friendly_output(
|
||||
title=f"{ai_name} here!",
|
||||
message="I am at your service.",
|
||||
title_color=Fore.LIGHTBLUE_EX,
|
||||
)
|
||||
|
||||
if ai_config_template and ai_config_template.ai_role:
|
||||
ai_role = ai_config_template.ai_role
|
||||
else:
|
||||
# Get AI Role from User
|
||||
user_friendly_output(
|
||||
title="Describe your AI's role:",
|
||||
message="For example, 'an AI designed to autonomously develop and run businesses with"
|
||||
" the sole goal of increasing your net worth.'",
|
||||
title_color=Fore.GREEN,
|
||||
)
|
||||
ai_role = await utils.clean_input(config, f"{ai_name} is:")
|
||||
if ai_role == "":
|
||||
ai_role = "an AI designed to autonomously develop and run businesses with the"
|
||||
" sole goal of increasing your net worth."
|
||||
|
||||
if ai_config_template and ai_config_template.ai_goals:
|
||||
ai_goals = ai_config_template.ai_goals
|
||||
else:
|
||||
# Enter up to 5 goals for the AI
|
||||
user_friendly_output(
|
||||
title="Enter up to 5 goals for your AI:",
|
||||
message="For example: \nIncrease net worth, Grow Twitter Account, Develop and manage"
|
||||
" multiple businesses autonomously'",
|
||||
title_color=Fore.GREEN,
|
||||
)
|
||||
logger.info("Enter nothing to load defaults, enter nothing when finished.")
|
||||
ai_goals = []
|
||||
for i in range(5):
|
||||
ai_goal = await utils.clean_input(
|
||||
config, f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}:"
|
||||
ai_profile.ai_role = (
|
||||
await clean_input(
|
||||
app_config, "Enter new AI role (or press enter to keep current):"
|
||||
)
|
||||
if ai_goal == "":
|
||||
or ai_profile.ai_role
|
||||
)
|
||||
|
||||
# Revise constraints
|
||||
for i, constraint in enumerate(directives.constraints):
|
||||
print_attribute(f"Constraint {i+1}:", f'"{constraint}"')
|
||||
new_constraint = (
|
||||
await clean_input(
|
||||
app_config,
|
||||
f"Enter new constraint {i+1} (press enter to keep current, or '-' to remove):",
|
||||
)
|
||||
or constraint
|
||||
)
|
||||
if new_constraint == "-":
|
||||
directives.constraints.remove(constraint)
|
||||
elif new_constraint:
|
||||
directives.constraints[i] = new_constraint
|
||||
|
||||
# Add new constraints
|
||||
while True:
|
||||
new_constraint = await clean_input(
|
||||
app_config,
|
||||
"Press enter to finish, or enter a constraint to add:",
|
||||
)
|
||||
if not new_constraint:
|
||||
break
|
||||
ai_goals.append(ai_goal)
|
||||
if not ai_goals:
|
||||
ai_goals = [
|
||||
"Increase net worth",
|
||||
"Grow Twitter Account",
|
||||
"Develop and manage multiple businesses autonomously",
|
||||
]
|
||||
directives.constraints.append(new_constraint)
|
||||
|
||||
# Get API Budget from User
|
||||
user_friendly_output(
|
||||
title="Enter your budget for API calls:",
|
||||
message="For example: $1.50",
|
||||
title_color=Fore.GREEN,
|
||||
)
|
||||
logger.info("Enter nothing to let the AI run without monetary limit")
|
||||
api_budget_input = await utils.clean_input(
|
||||
config, f"{Fore.LIGHTBLUE_EX}Budget ($){Style.RESET_ALL}:"
|
||||
)
|
||||
if api_budget_input == "":
|
||||
api_budget = 0.0
|
||||
else:
|
||||
try:
|
||||
api_budget = float(api_budget_input.replace("$", ""))
|
||||
except ValueError:
|
||||
user_friendly_output(
|
||||
level=logging.WARNING,
|
||||
title="Invalid budget input.",
|
||||
message="Setting budget to unlimited.",
|
||||
title_color=Fore.RED,
|
||||
# Revise resources
|
||||
for i, resource in enumerate(directives.resources):
|
||||
print_attribute(f"Resource {i+1}:", f'"{resource}"')
|
||||
new_resource = (
|
||||
await clean_input(
|
||||
app_config,
|
||||
f"Enter new resource {i+1} (press enter to keep current, or '-' to remove):",
|
||||
)
|
||||
or resource
|
||||
)
|
||||
api_budget = 0.0
|
||||
if new_resource == "-":
|
||||
directives.resources.remove(resource)
|
||||
elif new_resource:
|
||||
directives.resources[i] = new_resource
|
||||
|
||||
return AIConfig(
|
||||
        ai_name=ai_name, ai_role=ai_role, ai_goals=ai_goals, api_budget=api_budget
    )

    # Add new resources
    while True:
        new_resource = await clean_input(
            app_config,
            "Press enter to finish, or enter a resource to add:",
        )
        if not new_resource:
            break
        directives.resources.append(new_resource)

    # Revise best practices
    for i, best_practice in enumerate(directives.best_practices):
        print_attribute(f"Best Practice {i+1}:", f'"{best_practice}"')
        new_best_practice = (
            await clean_input(
                app_config,
                f"Enter new best practice {i+1} (press enter to keep current, or '-' to remove):",
            )
            or best_practice
        )
        if new_best_practice == "-":
            directives.best_practices.remove(best_practice)
        elif new_best_practice:
            directives.best_practices[i] = new_best_practice

    # Add new best practices
    while True:
        new_best_practice = await clean_input(
            app_config,
            "Press enter to finish, or enter a best practice to add:",
        )
        if not new_best_practice:
            break
        directives.best_practices.append(new_best_practice)

    revised = True

    return ai_profile, directives


async def generate_aiconfig_automatic(
    user_prompt: str,
    config: Config,
    llm_provider: ChatModelProvider,
) -> AIConfig:
    """Generates an AIConfig object from the given string.

    Returns:
        AIConfig: The AIConfig object tailored to the user's input
    """

    system_prompt = DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC
    prompt_ai_config_automatic = Template(
        DEFAULT_TASK_PROMPT_AICONFIG_AUTOMATIC
    ).render(user_prompt=user_prompt)
    # Call LLM with the string as user input
    output = (
        await llm_provider.create_chat_completion(
            [
                ChatMessage.system(system_prompt),
                ChatMessage.user(prompt_ai_config_automatic),
            ],
            config.smart_llm,
        )
    ).response["content"]

    # Debug LLM Output
    logger.debug(f"AI Config Generator Raw Output: {output}")

    # Parse the output
    ai_name = re.search(r"Name(?:\s*):(?:\s*)(.*)", output, re.IGNORECASE).group(1)
    ai_role = (
        re.search(
            r"Description(?:\s*):(?:\s*)(.*?)(?:(?:\n)|Goals)",
            output,
            re.IGNORECASE | re.DOTALL,
        )
        .group(1)
        .strip()
    )
    ai_goals = re.findall(r"(?<=\n)-\s*(.*)", output)
    api_budget = 0.0  # TODO: parse api budget using a regular expression

    return AIConfig(
        ai_name=ai_name, ai_role=ai_role, ai_goals=ai_goals, api_budget=api_budget
    )


def print_ai_settings(
    ai_profile: AIProfile,
    directives: AIDirectives,
    logger: logging.Logger,
    title: str = "AI Settings",
):
    print_attribute(title, "")
    print_attribute("-" * len(title), "")
    print_attribute("Name :", ai_profile.ai_name)
    print_attribute("Role :", ai_profile.ai_role)
    print_attribute("Constraints:", "" if directives.constraints else "(none)")
    for constraint in directives.constraints:
        logger.info(f"- {constraint}")
    print_attribute("Resources:", "" if directives.resources else "(none)")
    for resource in directives.resources:
        logger.info(f"- {resource}")
    print_attribute("Best practices:", "" if directives.best_practices else "(none)")
    for best_practice in directives.best_practices:
        logger.info(f"- {best_practice}")
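For reference, the parsing step above can be exercised in isolation. A minimal sketch (not part of the commit; the sample reply text is invented) showing how the three regexes recover the profile fields:

```python
import re

# Hypothetical raw LLM reply in the expected "Name / Description / Goals" shape
output = (
    "Name: ResearchGPT\n"
    "Description: an AI that compiles literature reviews.\n"
    "Goals:\n"
    "- Find relevant papers\n"
    "- Summarize key findings\n"
)

ai_name = re.search(r"Name(?:\s*):(?:\s*)(.*)", output, re.IGNORECASE).group(1)
ai_role = (
    re.search(
        r"Description(?:\s*):(?:\s*)(.*?)(?:(?:\n)|Goals)",
        output,
        re.IGNORECASE | re.DOTALL,
    )
    .group(1)
    .strip()
)
ai_goals = re.findall(r"(?<=\n)-\s*(.*)", output)  # one goal per "- ..." line

assert ai_name == "ResearchGPT"
assert ai_goals == ["Find relevant papers", "Summarize key findings"]
```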
@@ -1,6 +1,7 @@
import logging
import os
import re
import sys

import requests
from colorama import Fore, Style
@@ -146,3 +147,44 @@ behalf. You acknowledge that using the System could expose you to potential liab
By using the System, you agree to indemnify, defend, and hold harmless the Project Parties from and against any and all claims, liabilities, damages, losses, or expenses (including reasonable attorneys' fees and costs) arising out of or in connection with your use of the System, including, without limitation, any actions taken by the System on your behalf, any failure to properly supervise or monitor the System, and any resulting harm or unintended consequences.
"""
    return legal_text


def print_motd(config: Config, logger: logging.Logger):
    motd, is_new_motd = get_latest_bulletin()
    if motd:
        motd = markdown_to_ansi_style(motd)
        for motd_line in motd.split("\n"):
            logger.info(
                extra={
                    "title": "NEWS:",
                    "title_color": Fore.GREEN,
                    "preserve_color": True,
                },
                msg=motd_line,
            )
        if is_new_motd and not config.chat_messages_enabled:
            input(
                Fore.MAGENTA
                + Style.BRIGHT
                + "NEWS: Bulletin was updated! Press Enter to continue..."
                + Style.RESET_ALL
            )


def print_git_branch_info(logger: logging.Logger):
    git_branch = get_current_git_branch()
    if git_branch and git_branch != "stable":
        logger.warn(
            f"You are running on `{git_branch}` branch"
            " - this is not a supported branch."
        )


def print_python_version_info(logger: logging.Logger):
    if sys.version_info < (3, 10):
        logger.error(
            "WARNING: You are running on an older version of Python. "
            "Some people have observed problems with certain "
            "parts of AutoGPT with this version. "
            "Please consider upgrading to Python 3.10 or higher.",
        )
@@ -102,7 +102,7 @@ def execute_python_file(
        str: The output of the file
    """
    logger.info(
        f"Executing python file '{filename}' in working directory '{agent.legacy_config.workspace_path}'"
        f"Executing python file '{filename}' in working directory '{agent.workspace.root}'"
    )

    if isinstance(args, str):
@@ -62,15 +62,15 @@ def operations_from_log(
def file_operations_state(log_path: str | Path) -> dict[str, str]:
    """Iterates over the operations log and returns the expected state.

    Parses a log file at config.file_logger_path to construct a dictionary that maps
    each file path written or appended to its checksum. Deleted files are removed
    from the dictionary.
    Parses a log file at file_manager.file_ops_log_path to construct a dictionary
    that maps each file path written or appended to its checksum. Deleted files are
    removed from the dictionary.

    Returns:
        A dictionary mapping file paths to their checksums.

    Raises:
        FileNotFoundError: If config.file_logger_path is not found.
        FileNotFoundError: If file_manager.file_ops_log_path is not found.
        ValueError: If the log file content is not in the expected format.
    """
    state = {}
@@ -101,7 +101,7 @@ def is_duplicate_operation(
    with contextlib.suppress(ValueError):
        file_path = file_path.relative_to(agent.workspace.root)

    state = file_operations_state(agent.legacy_config.file_logger_path)
    state = file_operations_state(agent.file_manager.file_ops_log_path)
    if operation == "delete" and str(file_path) not in state:
        return True
    if operation == "write" and state.get(str(file_path)) == checksum:
@@ -129,7 +129,7 @@ def log_operation(
        log_entry += f" #{checksum}"
    logger.debug(f"Logging file operation: {log_entry}")
    append_to_file(
        agent.legacy_config.file_logger_path, f"{log_entry}\n", agent, should_log=False
        agent.file_manager.file_ops_log_path, f"{log_entry}\n", agent, should_log=False
    )


@@ -155,6 +155,7 @@ def read_file(filename: Path, agent: Agent) -> str:
        str: The contents of the file
    """
    content = read_textual_file(filename, logger)
    # TODO: content = agent.workspace.read_file(filename)

    # # TODO: invalidate/update memory when file is edited
    # file_memory = MemoryItem.from_text_file(content, str(filename), agent.config)
@@ -224,13 +225,11 @@ def write_to_file(filename: Path, contents: str, agent: Agent) -> str:

    directory = os.path.dirname(filename)
    os.makedirs(directory, exist_ok=True)
    with open(filename, "w", encoding="utf-8") as f:
        f.write(contents)
    agent.workspace.write_file(filename, contents)
    log_operation("write", filename, agent, checksum)
    return f"File {filename.name} has been written successfully."


@sanitize_path_arg("filename")
def append_to_file(
    filename: Path, text: str, agent: Agent, should_log: bool = True
) -> None:
@@ -243,11 +242,11 @@ def append_to_file(
    """
    directory = os.path.dirname(filename)
    os.makedirs(directory, exist_ok=True)
    with open(filename, "a", encoding="utf-8") as f:
    with open(filename, "a") as f:
        f.write(text)

    if should_log:
        with open(filename, "r", encoding="utf-8") as f:
        with open(filename, "r") as f:
            checksum = text_checksum(f.read())
        log_operation("append", filename, agent, checksum=checksum)
@@ -280,7 +279,7 @@ def list_folder(folder: Path, agent: Agent) -> list[str]:
        if file.startswith("."):
            continue
        relative_path = os.path.relpath(
            os.path.join(root, file), agent.legacy_config.workspace_path
            os.path.join(root, file), agent.workspace.root
        )
        found_files.append(relative_path)
@@ -44,7 +44,7 @@ def generate_image(prompt: str, agent: Agent, size: int = 256) -> str:
    Returns:
        str: The filename of the image
    """
    filename = agent.legacy_config.workspace_path / f"{str(uuid.uuid4())}.jpg"
    filename = agent.workspace.root / f"{str(uuid.uuid4())}.jpg"

    # DALL-E
    if agent.legacy_config.image_provider == "dalle":
@@ -12,7 +12,7 @@ if TYPE_CHECKING:
    from autogpt.agents.agent import Agent

from autogpt.agents.features.context import get_agent_context
from autogpt.agents.utils.exceptions import InvalidArgumentError
from autogpt.agents.utils.exceptions import AgentTerminated, InvalidArgumentError
from autogpt.command_decorator import command
from autogpt.core.utils.json_schema import JSONSchema

@@ -42,8 +42,7 @@ def finish(reason: str, agent: Agent) -> None:
        A result string from create chat completion. A list of suggestions to
        improve the code.
    """
    logger.info(reason, extra={"title": "Shutting down...\n"})
    quit()
    raise AgentTerminated(reason)


@command(
@@ -58,7 +58,11 @@ def web_search(query: str, agent: Agent, num_results: int = 8) -> str:
        attempts += 1

    search_results = [
        {"title": r["title"], "url": r["href"], "description": r["body"]}
        {
            "title": r["title"],
            "url": r["href"],
            **({"description": r["body"]} if r.get("body") else {}),
        }
        for r in search_results
    ]
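The conditional `**{...}` spread above drops the `description` key entirely when a result has no body, instead of emitting an empty string. A tiny sketch with made-up results:

```python
raw_results = [
    {"title": "AutoGPT", "href": "https://example.com/a", "body": "An agent."},
    {"title": "Forge", "href": "https://example.com/b", "body": ""},
]

cleaned = [
    {
        "title": r["title"],
        "url": r["href"],
        **({"description": r["body"]} if r.get("body") else {}),
    }
    for r in raw_results
]

print(cleaned[0])  # has a 'description' key
print(cleaned[1])  # no 'description' key at all
```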
@@ -1,13 +1,13 @@
"""
This module contains the configuration classes for AutoGPT.
"""
from .ai_config import AIConfig
from .ai_directives import AIDirectives
from .config import Config, ConfigBuilder, check_openai_api_key
from .ai_profile import AIProfile
from .config import Config, ConfigBuilder, assert_config_has_openai_api_key

__all__ = [
    "check_openai_api_key",
    "AIConfig",
    "assert_config_has_openai_api_key",
    "AIProfile",
    "AIDirectives",
    "Config",
    "ConfigBuilder",
@@ -1,9 +1,8 @@
from __future__ import annotations

import logging
from pathlib import Path

import yaml
from pydantic import BaseModel
from pydantic import BaseModel, Field

from autogpt.logs.helpers import request_user_double_check
from autogpt.utils import validate_yaml_file
@@ -20,17 +19,17 @@ class AIDirectives(BaseModel):
        best_practices (list): A list of best practices that the AI should follow.
    """

    constraints: list[str]
    resources: list[str]
    best_practices: list[str]
    resources: list[str] = Field(default_factory=list)
    constraints: list[str] = Field(default_factory=list)
    best_practices: list[str] = Field(default_factory=list)

    @staticmethod
    def from_file(prompt_settings_file: str) -> AIDirectives:
    def from_file(prompt_settings_file: Path) -> "AIDirectives":
        (validated, message) = validate_yaml_file(prompt_settings_file)
        if not validated:
            logger.error(message, extra={"title": "FAILED FILE VALIDATION"})
            request_user_double_check()
            exit(1)
            raise RuntimeError(f"File validation failed: {message}")

        with open(prompt_settings_file, encoding="utf-8") as file:
            config_params = yaml.load(file, Loader=yaml.FullLoader)
@@ -40,3 +39,10 @@ class AIDirectives(BaseModel):
            resources=config_params.get("resources", []),
            best_practices=config_params.get("best_practices", []),
        )

    def __add__(self, other: "AIDirectives") -> "AIDirectives":
        return AIDirectives(
            resources=self.resources + other.resources,
            constraints=self.constraints + other.constraints,
            best_practices=self.best_practices + other.best_practices,
        ).copy(deep=True)
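With `default_factory` defaults and the new `__add__`, directive sets can be built partially and merged. A usage sketch (import path assumed, values invented):

```python
from autogpt.config.ai_directives import AIDirectives

base = AIDirectives(constraints=["Stay within the workspace"])
extra = AIDirectives(best_practices=["Cite your sources"])

merged = base + extra  # deep-copied combination of both lists
print(merged.constraints)     # ['Stay within the workspace']
print(merged.best_practices)  # ['Cite your sources']
print(merged.resources)       # [] (default_factory, so no shared mutable default)
```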
@@ -1,15 +1,12 @@
"""A module that contains the AIConfig class object that contains the configuration"""
from __future__ import annotations

from pathlib import Path

import yaml
from pydantic import BaseModel, Field


class AIConfig(BaseModel):
class AIProfile(BaseModel):
    """
    A class object that contains the configuration information for the AI
    Object to hold the AI's personality.

    Attributes:
        ai_name (str): The name of the AI.
@@ -24,7 +21,7 @@ class AIConfig(BaseModel):
    api_budget: float = 0.0

    @staticmethod
    def load(ai_settings_file: str | Path) -> "AIConfig":
    def load(ai_settings_file: str | Path) -> "AIProfile":
        """
        Returns class object with parameters (ai_name, ai_role, ai_goals, api_budget)
        loaded from yaml file if yaml file exists, else returns class with no parameters.
@@ -52,7 +49,7 @@ class AIConfig(BaseModel):
        ]
        api_budget = config_params.get("api_budget", 0.0)

        return AIConfig(
        return AIProfile(
            ai_name=ai_name, ai_role=ai_role, ai_goals=ai_goals, api_budget=api_budget
        )
@@ -12,14 +12,17 @@ from auto_gpt_plugin_template import AutoGPTPluginTemplate
from colorama import Fore
from pydantic import Field, validator

import autogpt
from autogpt.core.configuration.schema import Configurable, SystemSettings
from autogpt.core.resource.model_providers.openai import OPEN_AI_CHAT_MODELS
from autogpt.plugins.plugins_config import PluginsConfig
from autogpt.speech import TTSConfig

AI_SETTINGS_FILE = "ai_settings.yaml"
AZURE_CONFIG_FILE = "azure.yaml"
PLUGINS_CONFIG_FILE = "plugins_config.yaml"
PROMPT_SETTINGS_FILE = "prompt_settings.yaml"
PROJECT_ROOT = Path(autogpt.__file__).parent.parent
AI_SETTINGS_FILE = Path("ai_settings.yaml")
AZURE_CONFIG_FILE = Path("azure.yaml")
PLUGINS_CONFIG_FILE = Path("plugins_config.yaml")
PROMPT_SETTINGS_FILE = Path("prompt_settings.yaml")

GPT_4_MODEL = "gpt-4"
GPT_3_MODEL = "gpt-3.5-turbo"
@@ -31,6 +34,8 @@ class Config(SystemSettings, arbitrary_types_allowed=True):
    ########################
    # Application Settings #
    ########################
    project_root: Path = PROJECT_ROOT
    app_data_dir: Path = project_root / "data"
    skip_news: bool = False
    skip_reprompt: bool = False
    authorise_key: str = "y"
@@ -40,20 +45,14 @@ class Config(SystemSettings, arbitrary_types_allowed=True):
    noninteractive_mode: bool = False
    chat_messages_enabled: bool = True
    # TTS configuration
    speak_mode: bool = False
    text_to_speech_provider: str = "gtts"
    streamelements_voice: str = "Brian"
    elevenlabs_voice_id: Optional[str] = None
    tts_config: TTSConfig = TTSConfig()

    ##########################
    # Agent Control Settings #
    ##########################
    # Paths
    ai_settings_file: str = AI_SETTINGS_FILE
    prompt_settings_file: str = PROMPT_SETTINGS_FILE
    workdir: Path = None
    workspace_path: Optional[Path] = None
    file_logger_path: Optional[Path] = None
    ai_settings_file: Path = project_root / AI_SETTINGS_FILE
    prompt_settings_file: Path = project_root / PROMPT_SETTINGS_FILE
    # Model configuration
    fast_llm: str = "gpt-3.5-turbo-16k"
    smart_llm: str = "gpt-4-0314"
@@ -105,7 +104,7 @@ class Config(SystemSettings, arbitrary_types_allowed=True):
    # Plugin Settings #
    ###################
    plugins_dir: str = "plugins"
    plugins_config_file: str = PLUGINS_CONFIG_FILE
    plugins_config_file: Path = project_root / PLUGINS_CONFIG_FILE
    plugins_config: PluginsConfig = Field(
        default_factory=lambda: PluginsConfig(plugins={})
    )
@@ -124,10 +123,8 @@ class Config(SystemSettings, arbitrary_types_allowed=True):
    openai_api_version: Optional[str] = None
    openai_organization: Optional[str] = None
    use_azure: bool = False
    azure_config_file: Optional[str] = AZURE_CONFIG_FILE
    azure_config_file: Optional[Path] = project_root / AZURE_CONFIG_FILE
    azure_model_to_deployment_id_map: Optional[Dict[str, str]] = None
    # Elevenlabs
    elevenlabs_api_key: Optional[str] = None
    # Github
    github_api_key: Optional[str] = None
    github_username: Optional[str] = None
@@ -225,33 +222,34 @@ class ConfigBuilder(Configurable[Config]):
    default_settings = Config()

    @classmethod
    def build_config_from_env(cls, workdir: Path) -> Config:
    def build_config_from_env(cls, project_root: Path = PROJECT_ROOT) -> Config:
        """Initialize the Config class"""
        config_dict = {
            "workdir": workdir,
            "project_root": project_root,
            "authorise_key": os.getenv("AUTHORISE_COMMAND_KEY"),
            "exit_key": os.getenv("EXIT_KEY"),
            "plain_output": os.getenv("PLAIN_OUTPUT", "False") == "True",
            "shell_command_control": os.getenv("SHELL_COMMAND_CONTROL"),
            "ai_settings_file": os.getenv("AI_SETTINGS_FILE", AI_SETTINGS_FILE),
            "prompt_settings_file": os.getenv(
                "PROMPT_SETTINGS_FILE", PROMPT_SETTINGS_FILE
            ),
            "ai_settings_file": project_root
            / Path(os.getenv("AI_SETTINGS_FILE", AI_SETTINGS_FILE)),
            "prompt_settings_file": project_root
            / Path(os.getenv("PROMPT_SETTINGS_FILE", PROMPT_SETTINGS_FILE)),
            "fast_llm": os.getenv("FAST_LLM", os.getenv("FAST_LLM_MODEL")),
            "smart_llm": os.getenv("SMART_LLM", os.getenv("SMART_LLM_MODEL")),
            "embedding_model": os.getenv("EMBEDDING_MODEL"),
            "browse_spacy_language_model": os.getenv("BROWSE_SPACY_LANGUAGE_MODEL"),
            "openai_api_key": os.getenv("OPENAI_API_KEY"),
            "use_azure": os.getenv("USE_AZURE") == "True",
            "azure_config_file": os.getenv("AZURE_CONFIG_FILE", AZURE_CONFIG_FILE),
            "azure_config_file": project_root
            / Path(os.getenv("AZURE_CONFIG_FILE", AZURE_CONFIG_FILE)),
            "execute_local_commands": os.getenv("EXECUTE_LOCAL_COMMANDS", "False")
            == "True",
            "restrict_to_workspace": os.getenv("RESTRICT_TO_WORKSPACE", "True")
            == "True",
            "openai_functions": os.getenv("OPENAI_FUNCTIONS", "False") == "True",
            "elevenlabs_api_key": os.getenv("ELEVENLABS_API_KEY"),
            "streamelements_voice": os.getenv("STREAMELEMENTS_VOICE"),
            "text_to_speech_provider": os.getenv("TEXT_TO_SPEECH_PROVIDER"),
            "tts_config": {
                "provider": os.getenv("TEXT_TO_SPEECH_PROVIDER"),
            },
            "github_api_key": os.getenv("GITHUB_API_KEY"),
            "github_username": os.getenv("GITHUB_USERNAME"),
            "google_api_key": os.getenv("GOOGLE_API_KEY"),
@@ -273,9 +271,8 @@ class ConfigBuilder(Configurable[Config]):
            "redis_password": os.getenv("REDIS_PASSWORD"),
            "wipe_redis_on_start": os.getenv("WIPE_REDIS_ON_START", "True") == "True",
            "plugins_dir": os.getenv("PLUGINS_DIR"),
            "plugins_config_file": os.getenv(
                "PLUGINS_CONFIG_FILE", PLUGINS_CONFIG_FILE
            ),
            "plugins_config_file": project_root
            / Path(os.getenv("PLUGINS_CONFIG_FILE", PLUGINS_CONFIG_FILE)),
            "chat_messages_enabled": os.getenv("CHAT_MESSAGES_ENABLED") == "True",
        }

@@ -294,19 +291,26 @@ class ConfigBuilder(Configurable[Config]):
            "GOOGLE_CUSTOM_SEARCH_ENGINE_ID", os.getenv("CUSTOM_SEARCH_ENGINE_ID")
        )

        config_dict["elevenlabs_voice_id"] = os.getenv(
            "ELEVENLABS_VOICE_ID", os.getenv("ELEVENLABS_VOICE_1_ID")
        )
        if not config_dict["text_to_speech_provider"]:
        if os.getenv("ELEVENLABS_API_KEY"):
            config_dict["tts_config"]["elevenlabs"] = {
                "api_key": os.getenv("ELEVENLABS_API_KEY"),
                "voice_id": os.getenv("ELEVENLABS_VOICE_ID", ""),
            }
        if os.getenv("STREAMELEMENTS_VOICE"):
            config_dict["tts_config"]["streamelements"] = {
                "voice": os.getenv("STREAMELEMENTS_VOICE"),
            }

        if not config_dict["tts_config"]["provider"]:
            if os.getenv("USE_MAC_OS_TTS"):
                default_tts_provider = "macos"
            elif config_dict["elevenlabs_api_key"]:
            elif "elevenlabs" in config_dict["tts_config"]:
                default_tts_provider = "elevenlabs"
            elif os.getenv("USE_BRIAN_TTS"):
                default_tts_provider = "streamelements"
            else:
                default_tts_provider = "gtts"
            config_dict["text_to_speech_provider"] = default_tts_provider
            config_dict["tts_config"]["provider"] = default_tts_provider

        config_dict["plugins_allowlist"] = _safe_split(os.getenv("ALLOWLISTED_PLUGINS"))
        config_dict["plugins_denylist"] = _safe_split(os.getenv("DENYLISTED_PLUGINS"))
@@ -320,7 +324,7 @@ class ConfigBuilder(Configurable[Config]):

        if config_dict["use_azure"]:
            azure_config = cls.load_azure_config(
                workdir / config_dict["azure_config_file"]
                project_root / config_dict["azure_config_file"]
            )
            config_dict.update(azure_config)

@@ -340,7 +344,7 @@ class ConfigBuilder(Configurable[Config]):
        # Set secondary config variables (that depend on other config variables)

        config.plugins_config = PluginsConfig.load_config(
            config.workdir / config.plugins_config_file,
            config.plugins_config_file,
            config.plugins_denylist,
            config.plugins_allowlist,
        )
@@ -374,7 +378,7 @@ class ConfigBuilder(Configurable[Config]):
    }


def check_openai_api_key(config: Config) -> None:
def assert_config_has_openai_api_key(config: Config) -> None:
    """Check if the OpenAI API key is set in config.py or as an environment variable."""
    if not config.openai_api_key:
        print(
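A sketch of the resulting call site (paths invented): config files are now resolved against `project_root`, which defaults to the package checkout, so no separate `workdir` needs to be threaded through:

```python
from pathlib import Path

from autogpt.config import ConfigBuilder

# Default: PROJECT_ROOT derived from the autogpt package location
config = ConfigBuilder.build_config_from_env()

# Or pin an explicit root (hypothetical path):
config = ConfigBuilder.build_config_from_env(project_root=Path("/opt/autogpt"))
print(config.ai_settings_file)     # /opt/autogpt/ai_settings.yaml
print(config.plugins_config_file)  # /opt/autogpt/plugins_config.yaml
```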
@@ -3,7 +3,7 @@ import functools
import logging
import math
import time
from typing import Callable, ParamSpec, TypeVar
from typing import Callable, Optional, ParamSpec, TypeVar

import openai
import tiktoken
@@ -16,6 +16,7 @@ from autogpt.core.configuration import (
)
from autogpt.core.resource.model_providers.schema import (
    AssistantChatMessageDict,
    AssistantFunctionCallDict,
    ChatMessage,
    ChatModelInfo,
    ChatModelProvider,
@@ -33,6 +34,7 @@ from autogpt.core.resource.model_providers.schema import (
    ModelProviderUsage,
    ModelTokenizer,
)
from autogpt.core.utils.json_schema import JSONSchema

_T = TypeVar("_T")
_P = ParamSpec("_P")
@@ -263,11 +265,17 @@ class OpenAIProvider(
        model_prompt: list[ChatMessage],
        model_name: OpenAIModelName,
        completion_parser: Callable[[AssistantChatMessageDict], _T] = lambda _: None,
        functions: list[CompletionModelFunction] = [],
        functions: Optional[list[CompletionModelFunction]] = None,
        **kwargs,
    ) -> ChatModelResponse[_T]:
        """Create a completion using the OpenAI API."""

        completion_kwargs = self._get_completion_kwargs(model_name, functions, **kwargs)
        functions_compat_mode = functions and "functions" not in completion_kwargs
        if "messages" in completion_kwargs:
            model_prompt += completion_kwargs["messages"]
            del completion_kwargs["messages"]

        response = await self._create_chat_completion(
            messages=model_prompt,
            **completion_kwargs,
@@ -279,6 +287,10 @@ class OpenAIProvider(
        }

        response_message = response.choices[0].message.to_dict_recursive()
        if functions_compat_mode:
            response_message["function_call"] = _functions_compat_extract_call(
                response_message["content"]
            )
        response = ChatModelResponse(
            response=response_message,
            parsed_result=completion_parser(response_message),
@@ -313,7 +325,7 @@ class OpenAIProvider(
    def _get_completion_kwargs(
        self,
        model_name: OpenAIModelName,
        functions: list[CompletionModelFunction],
        functions: Optional[list[CompletionModelFunction]] = None,
        **kwargs,
    ) -> dict:
        """Get kwargs for completion API call.
@@ -331,8 +343,13 @@ class OpenAIProvider(
            **kwargs,
            **self._credentials.unmasked(),
        }

        if functions:
            completion_kwargs["functions"] = [f.schema for f in functions]
            if OPEN_AI_CHAT_MODELS[model_name].has_function_call_api:
                completion_kwargs["functions"] = [f.schema for f in functions]
            else:
                # Provide compatibility with older models
                _functions_compat_fix_kwargs(functions, completion_kwargs)

        return completion_kwargs

@@ -459,3 +476,129 @@ class _OpenAIRetryHandler:
            self._backoff(attempt)

    return _wrapped


def format_function_specs_as_typescript_ns(
    functions: list[CompletionModelFunction],
) -> str:
    """Returns a function signature block in the format used by OpenAI internally:
    https://community.openai.com/t/how-to-calculate-the-tokens-when-using-function-call/266573/18

    For use with `count_tokens` to determine token usage of provided functions.

    Example:
    ```ts
    namespace functions {

    // Get the current weather in a given location
    type get_current_weather = (_: {
    // The city and state, e.g. San Francisco, CA
    location: string,
    unit?: "celsius" | "fahrenheit",
    }) => any;

    } // namespace functions
    ```
    """

    return (
        "namespace functions {\n\n"
        + "\n\n".join(format_openai_function_for_prompt(f) for f in functions)
        + "\n\n} // namespace functions"
    )


def format_openai_function_for_prompt(func: CompletionModelFunction) -> str:
    """Returns the function formatted similarly to the way OpenAI does it internally:
    https://community.openai.com/t/how-to-calculate-the-tokens-when-using-function-call/266573/18

    Example:
    ```ts
    // Get the current weather in a given location
    type get_current_weather = (_: {
    // The city and state, e.g. San Francisco, CA
    location: string,
    unit?: "celsius" | "fahrenheit",
    }) => any;
    ```
    """

    def param_signature(name: str, spec: JSONSchema) -> str:
        return (
            f"// {spec.description}\n" if spec.description else ""
        ) + f"{name}{'' if spec.required else '?'}: {spec.typescript_type},"

    return "\n".join(
        [
            f"// {func.description}",
            f"type {func.name} = (_ :{{",
            *[param_signature(name, p) for name, p in func.parameters.items()],
            "}) => any;",
        ]
    )


def count_openai_functions_tokens(
    functions: list[CompletionModelFunction], count_tokens: Callable[[str], int]
) -> int:
    """Returns the number of tokens taken up by a set of function definitions

    Reference: https://community.openai.com/t/how-to-calculate-the-tokens-when-using-function-call/266573/18
    """
    return count_tokens(
        f"# Tools\n\n## functions\n\n{format_function_specs_as_typescript_ns(functions)}"
    )


def _functions_compat_fix_kwargs(
    functions: list[CompletionModelFunction],
    completion_kwargs: dict,
):
    function_definitions = format_function_specs_as_typescript_ns(functions)
    function_call_schema = JSONSchema(
        type=JSONSchema.Type.OBJECT,
        properties={
            "name": JSONSchema(
                description="The name of the function to call",
                enum=[f.name for f in functions],
                required=True,
            ),
            "arguments": JSONSchema(
                description="The arguments for the function call",
                type=JSONSchema.Type.OBJECT,
                required=True,
            ),
        },
    )
    completion_kwargs["messages"] = [
        ChatMessage.system(
            "# function_call instructions\n\n"
            "Specify a '```function_call' block in your response,"
            " enclosing a function call in the form of a valid JSON object"
            " that adheres to the following schema:\n\n"
            f"{function_call_schema.to_dict()}\n\n"
            "Put the function_call block at the end of your response"
            " and include its fences if it is not the only content.\n\n"
            "## functions\n\n"
            "For the function call itself, use one of the following"
            f" functions:\n\n{function_definitions}"
        ),
    ]


def _functions_compat_extract_call(response: str) -> AssistantFunctionCallDict:
    import json
    import re

    logging.debug(f"Trying to extract function call from response:\n{response}")

    if response[0] == "{":
        function_call = json.loads(response)
    else:
        block = re.search(r"```(?:function_call)?\n(.*)\n```\s*$", response, re.DOTALL)
        if not block:
            raise ValueError("Could not find function call block in response")
        function_call = json.loads(block.group(1))

    function_call["arguments"] = str(function_call["arguments"])  # HACK
    return function_call
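To make the fallback concrete: a hedged sketch of how the module-internal `_functions_compat_extract_call` handles a reply from a model without a native function-call API (the reply text is invented; the fence marker is built programmatically only to avoid nesting backticks here):

```python
FENCE = "`" * 3

response = (
    "I will check the weather now.\n"
    f"{FENCE}function_call\n"
    '{"name": "get_current_weather", "arguments": {"location": "Paris"}}\n'
    f"{FENCE}"
)

call = _functions_compat_extract_call(response)
print(call["name"])       # get_current_weather
print(call["arguments"])  # "{'location': 'Paris'}" (stringified by the HACK above)
```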
@@ -333,7 +333,7 @@ class ChatModelProvider(ModelProvider):
        model_prompt: list[ChatMessage],
        model_name: str,
        completion_parser: Callable[[AssistantChatMessageDict], _T] = lambda _: None,
        functions: list[CompletionModelFunction] = [],
        functions: Optional[list[CompletionModelFunction]] = None,
        **kwargs,
    ) -> ChatModelResponse[_T]:
        ...
@@ -6,14 +6,10 @@ from agent_protocol import StepHandler, StepResult
from autogpt.agents import Agent
from autogpt.app.main import UserFeedback
from autogpt.commands import COMMAND_CATEGORIES
from autogpt.config import AIConfig, ConfigBuilder
from autogpt.config import AIProfile, ConfigBuilder
from autogpt.logs.helpers import user_friendly_output
from autogpt.memory.vector import get_memory
from autogpt.models.command_registry import CommandRegistry
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
from autogpt.workspace import Workspace

PROJECT_DIR = Path().resolve()


async def task_handler(task_input) -> StepHandler:
@@ -69,11 +65,11 @@ async def interaction_step(
        )
        return

    next_command_name, next_command_args, assistant_reply_dict = agent.think()
    next_command_name, next_command_args, assistant_reply_dict = agent.propose_action()

    return {
        "config": agent.config,
        "ai_config": agent.ai_config,
        "ai_profile": agent.ai_profile,
        "result": result,
        "assistant_reply_dict": assistant_reply_dict,
        "next_step_command_name": next_command_name,
@@ -82,25 +78,21 @@ async def interaction_step(


def bootstrap_agent(task, continuous_mode) -> Agent:
    config = ConfigBuilder.build_config_from_env(workdir=PROJECT_DIR)
    config = ConfigBuilder.build_config_from_env()
    config.debug_mode = True
    config.continuous_mode = continuous_mode
    config.temperature = 0
    config.plain_output = True
    command_registry = CommandRegistry.with_command_modules(COMMAND_CATEGORIES, config)
    config.memory_backend = "no_memory"
    config.workspace_path = Workspace.init_workspace_directory(config)
    config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path)
    ai_config = AIConfig(
    ai_profile = AIProfile(
        ai_name="AutoGPT",
        ai_role="a multi-purpose AI assistant.",
        ai_goals=[task],
    )
    ai_config.command_registry = command_registry
    return Agent(
        memory=get_memory(config),
        command_registry=command_registry,
        ai_config=ai_config,
        ai_profile=ai_profile,
        config=config,
        triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
    )
@@ -1,7 +1,7 @@
import asyncio
import functools
from bdb import BdbQuit
from typing import Callable, ParamSpec, TypeVar
from typing import Any, Callable, Coroutine, ParamSpec, TypeVar

import click

@@ -53,9 +53,9 @@ def handle_exceptions(
    return wrapped


def coroutine(f):
def coroutine(f: Callable[P, Coroutine[Any, Any, T]]) -> Callable[P, T]:
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
    def wrapper(*args: P.args, **kwargs: P.kwargs):
        return asyncio.run(f(*args, **kwargs))

    return wrapper
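A usage sketch for the now-typed decorator (the decorated command is invented): it lets a synchronous caller, such as a click entry point, invoke an async function directly:

```python
import asyncio
import functools
from typing import Any, Callable, Coroutine, ParamSpec, TypeVar

P = ParamSpec("P")
T = TypeVar("T")


def coroutine(f: Callable[P, Coroutine[Any, Any, T]]) -> Callable[P, T]:
    @functools.wraps(f)
    def wrapper(*args: P.args, **kwargs: P.kwargs):
        return asyncio.run(f(*args, **kwargs))

    return wrapper


@coroutine
async def greet(name: str) -> str:
    await asyncio.sleep(0)  # stand-in for real async work
    return f"hello {name}"


print(greet("AutoGPT"))  # a plain, blocking call; asyncio.run happens inside
```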
5
autogpts/autogpt/autogpt/file_workspace/__init__.py
Normal file
@@ -0,0 +1,5 @@
from .file_workspace import FileWorkspace

__all__ = [
    "FileWorkspace",
]
145
autogpts/autogpt/autogpt/file_workspace/file_workspace.py
Normal file
@@ -0,0 +1,145 @@
"""
The FileWorkspace class provides an interface for interacting with a file workspace.
"""
from __future__ import annotations

import logging
from pathlib import Path
from typing import Any, Callable, Optional

logger = logging.getLogger(__name__)


class FileWorkspace:
    """A class that represents a file workspace."""

    NULL_BYTES = ["\0", "\000", "\x00", "\u0000"]

    on_write_file: Callable[[Path], Any] | None = None
    """
    Event hook, executed after writing a file.

    Params:
        Path: The path of the file that was written, relative to the workspace root.
    """

    def __init__(self, root: str | Path, restrict_to_root: bool):
        self._root = self._sanitize_path(root)
        self._restrict_to_root = restrict_to_root

    @property
    def root(self) -> Path:
        """The root directory of the file workspace."""
        return self._root

    @property
    def restrict_to_root(self):
        """Whether to restrict generated paths to the root."""
        return self._restrict_to_root

    def initialize(self) -> None:
        self.root.mkdir(exist_ok=True, parents=True)

    def get_path(self, relative_path: str | Path) -> Path:
        """Get the full path for an item in the workspace.

        Parameters:
            relative_path: The relative path to resolve in the workspace.

        Returns:
            Path: The resolved path relative to the workspace.
        """
        return self._sanitize_path(
            relative_path,
            root=self.root,
            restrict_to_root=self.restrict_to_root,
        )

    def open_file(self, path: str | Path, mode: str = "r"):
        """Open a file in the workspace."""
        full_path = self.get_path(path)
        return open(full_path, mode)

    def read_file(self, path: str | Path, binary: bool = False):
        """Read a file in the workspace."""
        with self.open_file(path, "rb" if binary else "r") as file:
            return file.read()

    def write_file(self, path: str | Path, content: str | bytes):
        """Write to a file in the workspace."""
        with self.open_file(path, "wb" if type(content) is bytes else "w") as file:
            file.write(content)

        if self.on_write_file:
            path = Path(path)
            if path.is_absolute():
                path = path.relative_to(self.root)
            self.on_write_file(path)

    def list_files(self, path: str | Path = "."):
        """List all files in a directory in the workspace."""
        full_path = self.get_path(path)
        return [str(file) for file in full_path.glob("*") if file.is_file()]

    def delete_file(self, path: str | Path):
        """Delete a file in the workspace."""
        full_path = self.get_path(path)
        full_path.unlink()

    @staticmethod
    def _sanitize_path(
        relative_path: str | Path,
        root: Optional[str | Path] = None,
        restrict_to_root: bool = True,
    ) -> Path:
        """Resolve the relative path within the given root if possible.

        Parameters:
            relative_path: The relative path to resolve.
            root: The root path to resolve the relative path within.
            restrict_to_root: Whether to restrict the path to the root.

        Returns:
            Path: The resolved path.

        Raises:
            ValueError: If the path is absolute and a root is provided.
            ValueError: If the path is outside the root and the root is restricted.
        """

        # Posix systems disallow null bytes in paths. Windows is agnostic about it.
        # Do an explicit check here for all sorts of null byte representations.
        for null_byte in FileWorkspace.NULL_BYTES:
            if null_byte in str(relative_path) or null_byte in str(root):
                raise ValueError("embedded null byte")

        if root is None:
            return Path(relative_path).resolve()

        logger.debug(f"Resolving path '{relative_path}' in workspace '{root}'")

        root, relative_path = Path(root).resolve(), Path(relative_path)

        logger.debug(f"Resolved root as '{root}'")

        # Allow absolute paths if they are contained in the workspace.
        if (
            relative_path.is_absolute()
            and restrict_to_root
            and not relative_path.is_relative_to(root)
        ):
            raise ValueError(
                f"Attempted to access absolute path '{relative_path}' in workspace '{root}'."
            )

        full_path = root.joinpath(relative_path).resolve()

        logger.debug(f"Joined paths as '{full_path}'")

        if restrict_to_root and not full_path.is_relative_to(root):
            raise ValueError(
                f"Attempted to access path '{full_path}' outside of workspace '{root}'."
            )

        return full_path
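A usage sketch for the new class (paths invented): all access goes through `get_path`, so reads and writes are confined to the root when `restrict_to_root=True`:

```python
from pathlib import Path

from autogpt.file_workspace import FileWorkspace

workspace = FileWorkspace(Path("./agent_data"), restrict_to_root=True)
workspace.initialize()  # creates the root directory if needed

workspace.write_file("notes.txt", "hello")
print(workspace.read_file("notes.txt"))  # hello
print(workspace.list_files())            # ['.../agent_data/notes.txt']

# Path traversal out of the root is rejected:
# workspace.get_path("../outside.txt")  -> ValueError
```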
@@ -4,13 +4,14 @@ from __future__ import annotations
import logging
import sys
from pathlib import Path
from typing import TYPE_CHECKING
from typing import TYPE_CHECKING, Optional

from auto_gpt_plugin_template import AutoGPTPluginTemplate
from openai.util import logger as openai_logger

if TYPE_CHECKING:
    from autogpt.config import Config
    from autogpt.speech import TTSConfig

from autogpt.core.runner.client_lib.logging import BelowLevelFilter

@@ -33,15 +34,20 @@ USER_FRIENDLY_OUTPUT_LOGGER = "USER_FRIENDLY_OUTPUT"
_chat_plugins: list[AutoGPTPluginTemplate] = []


def configure_logging(config: Config, log_dir: Path = LOG_DIR) -> None:
def configure_logging(
    debug_mode: bool = False,
    plain_output: bool = False,
    tts_config: Optional[TTSConfig] = None,
    log_dir: Path = LOG_DIR,
) -> None:
    """Configure the native logging module."""

    # create log directory if it doesn't exist
    if not log_dir.exists():
        log_dir.mkdir()

    log_level = logging.DEBUG if config.debug_mode else logging.INFO
    log_format = DEBUG_LOG_FORMAT if config.debug_mode else SIMPLE_LOG_FORMAT
    log_level = logging.DEBUG if debug_mode else logging.INFO
    log_format = DEBUG_LOG_FORMAT if debug_mode else SIMPLE_LOG_FORMAT
    console_formatter = AutoGptFormatter(log_format)

    # Console output handlers
@@ -60,7 +66,7 @@ def configure_logging(
        AutoGptFormatter(SIMPLE_LOG_FORMAT, no_color=True)
    )

    if config.debug_mode:
    if debug_mode:
        # DEBUG log file handler
        debug_log_handler = logging.FileHandler(log_dir / DEBUG_LOG_FILE, "a", "utf-8")
        debug_log_handler.setLevel(logging.DEBUG)
@@ -79,7 +85,7 @@ def configure_logging(
        level=log_level,
        handlers=(
            [stdout, stderr, activity_log_handler, error_log_handler]
            + ([debug_log_handler] if config.debug_mode else [])
            + ([debug_log_handler] if debug_mode else [])
        ),
    )

@@ -93,9 +99,10 @@ def configure_logging(
    user_friendly_output_logger = logging.getLogger(USER_FRIENDLY_OUTPUT_LOGGER)
    user_friendly_output_logger.setLevel(logging.INFO)
    user_friendly_output_logger.addHandler(
        typing_console_handler if not config.plain_output else stdout
        typing_console_handler if not plain_output else stdout
    )
    user_friendly_output_logger.addHandler(TTSHandler(config))
    if tts_config:
        user_friendly_output_logger.addHandler(TTSHandler(tts_config))
    user_friendly_output_logger.addHandler(activity_log_handler)
    user_friendly_output_logger.addHandler(error_log_handler)
    user_friendly_output_logger.addHandler(stderr)
@@ -103,7 +110,8 @@ def configure_logging(

    speech_output_logger = logging.getLogger(SPEECH_OUTPUT_LOGGER)
    speech_output_logger.setLevel(logging.INFO)
    speech_output_logger.addHandler(TTSHandler(config))
    if tts_config:
        speech_output_logger.addHandler(TTSHandler(tts_config))
    speech_output_logger.propagate = False

    # JSON logger with better formatting
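The decoupling above means callers no longer need a fully built `Config`; a sketch of the new call (import location assumed, flags illustrative):

```python
from autogpt.logs.config import configure_logging  # import location assumed

# Debug logging to console plus debug.log, plain (non-typing) console output,
# and no TTS handler because tts_config is omitted:
configure_logging(debug_mode=True, plain_output=True)
```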
@@ -11,7 +11,7 @@ from autogpt.logs.utils import remove_color_codes
from autogpt.speech import TextToSpeechProvider

if TYPE_CHECKING:
    from autogpt.config import Config
    from autogpt.speech import TTSConfig


class TypingConsoleHandler(logging.StreamHandler):
@@ -50,7 +50,7 @@ class TypingConsoleHandler(logging.StreamHandler):
class TTSHandler(logging.Handler):
    """Output messages to the configured TTS engine (if any)"""

    def __init__(self, config: Config):
    def __init__(self, config: TTSConfig):
        super().__init__()
        self.config = config
        self.tts_provider = TextToSpeechProvider(config)
@@ -2,7 +2,7 @@ from __future__ import annotations

from typing import Any, Iterator, Literal, Optional

from pydantic import BaseModel
from pydantic import BaseModel, Field

from autogpt.prompts.utils import format_numbered_list, indent

@@ -60,14 +60,8 @@ class Episode(BaseModel):
class EpisodicActionHistory(BaseModel):
    """Utility container for an action history"""

    cursor: int
    episodes: list[Episode]

    def __init__(self, episodes: list[Episode] = []):
        super().__init__(
            episodes=episodes,
            cursor=len(episodes),
        )
    episodes: list[Episode] = Field(default_factory=list)
    cursor: int = 0

    @property
    def current_episode(self) -> Episode | None:
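With `Field(default_factory=list)` replacing the custom `__init__`, an empty history needs no arguments and no shared mutable default; a sketch (import path assumed):

```python
from autogpt.models.action_history import EpisodicActionHistory

history = EpisodicActionHistory()
assert history.episodes == [] and history.cursor == 0

another = EpisodicActionHistory()
assert history.episodes is not another.episodes  # each instance gets its own list
```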
@@ -1,4 +1,4 @@
"""This module contains the speech recognition and speech synthesis functions."""
from autogpt.speech.say import TextToSpeechProvider
from autogpt.speech.say import TextToSpeechProvider, TTSConfig

__all__ = ["TextToSpeechProvider"]
__all__ = ["TextToSpeechProvider", "TTSConfig"]
@@ -4,10 +4,6 @@ from __future__ import annotations

import abc
import re
from threading import Lock
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from autogpt.config import Config


class VoiceBase:
@@ -15,7 +11,7 @@ class VoiceBase:
    Base class for all voice classes.
    """

    def __init__(self, config: Config):
    def __init__(self, *args, **kwargs):
        """
        Initialize the voice class.
        """
@@ -24,7 +20,7 @@ class VoiceBase:
        self._api_key = None
        self._voices = []
        self._mutex = Lock()
        self._setup(config)
        self._setup(*args, **kwargs)

    def say(self, text: str, voice_index: int = 0) -> bool:
        """
@@ -43,7 +39,7 @@ class VoiceBase:
        return self._speech(text, voice_index)

    @abc.abstractmethod
    def _setup(self, config: Config) -> None:
    def _setup(self, *args, **kwargs) -> None:
        """
        Setup the voices, API key, etc.
        """
@@ -3,13 +3,12 @@ from __future__ import annotations

import logging
import os
from typing import TYPE_CHECKING

import requests
from playsound import playsound

if TYPE_CHECKING:
    from autogpt.config import Config
from autogpt.core.configuration import SystemConfiguration, UserConfigurable

from .base import VoiceBase

logger = logging.getLogger(__name__)
@@ -17,10 +16,15 @@ logger = logging.getLogger(__name__)
PLACEHOLDERS = {"your-voice-id"}


class ElevenLabsConfig(SystemConfiguration):
    api_key: str = UserConfigurable()
    voice_id: str = UserConfigurable()


class ElevenLabsSpeech(VoiceBase):
    """ElevenLabs speech class"""

    def _setup(self, config: Config) -> None:
    def _setup(self, config: ElevenLabsConfig) -> None:
        """Set up the voices, API key, etc.

        Returns:
@@ -41,12 +45,12 @@ class ElevenLabsSpeech(VoiceBase):
        }
        self._headers = {
            "Content-Type": "application/json",
            "xi-api-key": config.elevenlabs_api_key,
            "xi-api-key": config.api_key,
        }
        self._voices = default_voices.copy()
        if config.elevenlabs_voice_id in voice_options:
            config.elevenlabs_voice_id = voice_options[config.elevenlabs_voice_id]
        self._use_custom_voice(config.elevenlabs_voice_id, 0)
        if config.voice_id in voice_options:
            config.voice_id = voice_options[config.voice_id]
        self._use_custom_voice(config.voice_id, 0)

    def _use_custom_voice(self, voice, voice_index) -> None:
        """Use a custom voice if provided and not a placeholder
@@ -2,21 +2,17 @@
from __future__ import annotations

import os
from typing import TYPE_CHECKING

import gtts
from playsound import playsound

if TYPE_CHECKING:
    from autogpt.config import Config

from autogpt.speech.base import VoiceBase


class GTTSVoice(VoiceBase):
    """GTTS Voice."""

    def _setup(self, config: Config) -> None:
    def _setup(self) -> None:
        pass

    def _speech(self, text: str, _: int = 0) -> bool:
@@ -2,10 +2,6 @@
from __future__ import annotations

import os
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from autogpt.config import Config

from autogpt.speech.base import VoiceBase

@@ -13,7 +9,7 @@ from autogpt.speech.base import VoiceBase
class MacOSTTS(VoiceBase):
    """MacOS TTS Voice."""

    def _setup(self, config: Config) -> None:
    def _setup(self) -> None:
        pass

    def _speech(self, text: str, voice_index: int = 0) -> bool:
@@ -3,24 +3,32 @@ from __future__ import annotations

import threading
from threading import Semaphore
from typing import TYPE_CHECKING
from typing import Literal, Optional

if TYPE_CHECKING:
    from autogpt.config import Config
from autogpt.core.configuration.schema import SystemConfiguration, UserConfigurable

from .base import VoiceBase
from .eleven_labs import ElevenLabsSpeech
from .eleven_labs import ElevenLabsConfig, ElevenLabsSpeech
from .gtts import GTTSVoice
from .macos_tts import MacOSTTS
from .stream_elements_speech import StreamElementsSpeech
from .stream_elements_speech import StreamElementsConfig, StreamElementsSpeech

_QUEUE_SEMAPHORE = Semaphore(
    1
)  # The amount of sounds to queue before blocking the main thread


class TTSConfig(SystemConfiguration):
    speak_mode: bool = False
    provider: Literal[
        "elevenlabs", "gtts", "macos", "streamelements"
    ] = UserConfigurable(default="gtts")
    elevenlabs: Optional[ElevenLabsConfig] = None
    streamelements: Optional[StreamElementsConfig] = None


class TextToSpeechProvider:
    def __init__(self, config: Config):
    def __init__(self, config: TTSConfig):
        self._config = config
        self._default_voice_engine, self._voice_engine = self._get_voice_engine(config)

@@ -37,19 +45,19 @@ class TextToSpeechProvider:
            thread.start()

    def __repr__(self):
        return f"{self.__class__.__name__}(enabled={self._config.speak_mode}, provider={self._voice_engine.__class__.__name__})"
        return f"{self.__class__.__name__}(provider={self._voice_engine.__class__.__name__})"

    @staticmethod
    def _get_voice_engine(config: Config) -> tuple[VoiceBase, VoiceBase]:
    def _get_voice_engine(config: TTSConfig) -> tuple[VoiceBase, VoiceBase]:
        """Get the voice engine to use for the given configuration"""
        tts_provider = config.text_to_speech_provider
        tts_provider = config.provider
        if tts_provider == "elevenlabs":
            voice_engine = ElevenLabsSpeech(config)
            voice_engine = ElevenLabsSpeech(config.elevenlabs)
        elif tts_provider == "macos":
            voice_engine = MacOSTTS(config)
            voice_engine = MacOSTTS()
        elif tts_provider == "streamelements":
            voice_engine = StreamElementsSpeech(config)
            voice_engine = StreamElementsSpeech(config.streamelements)
        else:
            voice_engine = GTTSVoice(config)
            voice_engine = GTTSVoice()

        return GTTSVoice(config), voice_engine
        return GTTSVoice(), voice_engine
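A usage sketch of the reshaped TTS wiring (values invented): the engine tree is now selected from `TTSConfig` alone, with `GTTSVoice` kept as the fallback engine:

```python
from autogpt.speech import TextToSpeechProvider, TTSConfig

tts = TextToSpeechProvider(TTSConfig(provider="gtts"))
print(tts)  # TextToSpeechProvider(provider=GTTSVoice)

# Engines needing credentials get their own sub-config, e.g. (hypothetical key):
# TTSConfig(provider="elevenlabs",
#           elevenlabs=ElevenLabsConfig(api_key="...", voice_id="..."))
```

Speech is still queued on a background thread, gated by the semaphore above.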
@@ -2,28 +2,29 @@ from __future__ import annotations

import logging
import os
from typing import TYPE_CHECKING

import requests
from playsound import playsound

if TYPE_CHECKING:
    from autogpt.config import Config

from autogpt.core.configuration import SystemConfiguration, UserConfigurable
from autogpt.speech.base import VoiceBase

logger = logging.getLogger(__name__)


class StreamElementsConfig(SystemConfiguration):
    voice: str = UserConfigurable(default="Brian")


class StreamElementsSpeech(VoiceBase):
    """Streamelements speech module for autogpt"""

    def _setup(self, config: Config) -> None:
    def _setup(self, config: StreamElementsConfig) -> None:
        """Setup the voices, API key, etc."""
        self.config = config

    def _speech(self, text: str, voice: str, _: int = 0) -> bool:
        voice = self.config.streamelements_voice
        voice = self.config.voice
        """Speak text using the streamelements API

        Args:
@@ -1,8 +1,10 @@
from pathlib import Path

import yaml
from colorama import Fore


def validate_yaml_file(file: str):
def validate_yaml_file(file: str | Path):
    try:
        with open(file, encoding="utf-8") as fp:
            yaml.load(fp.read(), Loader=yaml.FullLoader)
@@ -1,5 +0,0 @@
from autogpt.workspace.workspace import Workspace

__all__ = [
    "Workspace",
]
@@ -1,169 +0,0 @@
"""
=========
Workspace
=========

The workspace is a directory containing configuration and working files for an AutoGPT
agent.

"""
from __future__ import annotations

import logging
from pathlib import Path
from typing import Optional

from autogpt.config import Config

logger = logging.getLogger(__name__)


class Workspace:
    """A class that represents a workspace for an AutoGPT agent."""

    NULL_BYTES = ["\0", "\000", "\x00", "\u0000"]

    def __init__(self, workspace_root: str | Path, restrict_to_workspace: bool):
        self._root = self._sanitize_path(workspace_root)
        self._restrict_to_workspace = restrict_to_workspace

    @property
    def root(self) -> Path:
        """The root directory of the workspace."""
        return self._root

    @property
    def restrict_to_workspace(self):
        """Whether to restrict generated paths to the workspace."""
        return self._restrict_to_workspace

    @classmethod
    def make_workspace(cls, workspace_directory: str | Path, *args, **kwargs) -> Path:
        """Create a workspace directory and return the path to it.

        Parameters
        ----------
        workspace_directory
            The path to the workspace directory.

        Returns
        -------
        Path
            The path to the workspace directory.

        """
        # TODO: have this make the env file and ai settings file in the directory.
        workspace_directory = cls._sanitize_path(workspace_directory)
        workspace_directory.mkdir(exist_ok=True, parents=True)
        return workspace_directory

    def get_path(self, relative_path: str | Path) -> Path:
        """Get the full path for an item in the workspace.

        Parameters
        ----------
        relative_path
            The relative path to resolve in the workspace.

        Returns
        -------
        Path
            The resolved path relative to the workspace.

        """
        return self._sanitize_path(
            relative_path,
            root=self.root,
            restrict_to_root=self.restrict_to_workspace,
        )

    @staticmethod
    def _sanitize_path(
        relative_path: str | Path,
        root: Optional[str | Path] = None,
        restrict_to_root: bool = True,
    ) -> Path:
        """Resolve the relative path within the given root if possible.

        Parameters
        ----------
        relative_path
            The relative path to resolve.
        root
            The root path to resolve the relative path within.
        restrict_to_root
            Whether to restrict the path to the root.

        Returns
        -------
        Path
            The resolved path.

        Raises
        ------
        ValueError
            If the path is absolute and a root is provided.
        ValueError
            If the path is outside the root and the root is restricted.

        """

        # Posix systems disallow null bytes in paths. Windows is agnostic about it.
        # Do an explicit check here for all sorts of null byte representations.
        for null_byte in Workspace.NULL_BYTES:
            if null_byte in str(relative_path) or null_byte in str(root):
                raise ValueError("embedded null byte")

        if root is None:
            return Path(relative_path).resolve()

        logger.debug(f"Resolving path '{relative_path}' in workspace '{root}'")

        root, relative_path = Path(root).resolve(), Path(relative_path)

        logger.debug(f"Resolved root as '{root}'")

        # Allow exception for absolute paths if they are contained in your workspace directory.
        if (
            relative_path.is_absolute()
            and restrict_to_root
            and not relative_path.is_relative_to(root)
        ):
            raise ValueError(
                f"Attempted to access absolute path '{relative_path}' in workspace '{root}'."
            )

        full_path = root.joinpath(relative_path).resolve()

        logger.debug(f"Joined paths as '{full_path}'")

        if restrict_to_root and not full_path.is_relative_to(root):
            raise ValueError(
                f"Attempted to access path '{full_path}' outside of workspace '{root}'."
            )

        return full_path

    @staticmethod
    def build_file_logger_path(workspace_directory: Path) -> Path:
        file_logger_path = workspace_directory / "file_logger.log"
        if not file_logger_path.exists():
            with file_logger_path.open(mode="w", encoding="utf-8") as f:
                f.write("File Operation Logger ")
        return file_logger_path

    @staticmethod
    def init_workspace_directory(
        config: Config, override_workspace_path: Optional[str | Path] = None
    ) -> Path:
        if override_workspace_path is None:
            workspace_path = config.workdir / "auto_gpt_workspace"
        elif type(override_workspace_path) == str:
            workspace_path = Path(override_workspace_path)
        else:
            workspace_path = override_workspace_path

        # TODO: pass in the ai_settings file and the env file and have them cloned into
        # the workspace directory so we can bind them to the agent.
        return Workspace.make_workspace(workspace_path)
1297
autogpts/autogpt/poetry.lock
generated
File diff suppressed because it is too large
@@ -16,11 +16,15 @@ packages = [{ include = "autogpt" }]

[tool.poetry.scripts]
autogpt = "autogpt.app.cli:main"
autogpt = "autogpt.app.cli:run"
run = "autogpt.app.cli:run"
serve = "autogpt.app.cli:serve"

[tool.poetry.dependencies]
python = "^3.10"
# autogpt-forge = { path = "../forge" }
autogpt-forge = {git = "https://github.com/Significant-Gravitas/AutoGPT.git", subdirectory = "autogpts/forge", rev = "10aecec"}
beautifulsoup4 = "^4.12.2"
charset-normalizer = "^3.1.0"
click = "*"
@@ -29,9 +33,11 @@ distro = "^1.8.0"
docker = "*"
duckduckgo-search = "^3.0.2"
en-core-web-sm = {url = "https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.5.0/en_core_web_sm-3.5.0-py3-none-any.whl"}
fastapi = "*"
ftfy = "^6.1.1"
google-api-python-client = "*"
gTTS = "^2.3.1"
hypercorn = "^0.14.4"
inflection = "*"
jsonschema = "*"
markdown = "*"
@@ -56,17 +62,18 @@ spacy = "^3.0.0"
tiktoken = "^0.5.0"
webdriver-manager = "*"

# web server
fastapi = "*"
uvicorn = "*"

# OpenAI and Generic plugins import
openapi-python-client = "^0.14.0"

# Benchmarking
agbenchmark = { path = "../../benchmark", optional = true }

[tool.poetry.extras]
benchmark = ["agbenchmark"]

[tool.poetry.group.dev.dependencies]
auto-gpt-plugin-template = {git = "https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template", rev = "0.1.0"}
black = "*"
coverage = "*"
flake8 = "*"
gitpython = "^3.1.32"
isort = "*"
@@ -77,8 +84,9 @@ types-colorama = "*"
types-Markdown = "*"
types-Pillow = "*"

# Testing dependencies
# Testing
asynctest = "*"
coverage = "*"
pytest = "*"
pytest-asyncio = "*"
pytest-benchmark = "*"
@@ -89,9 +97,6 @@ pytest-recording = "*"
pytest-xdist = "*"
vcrpy = {git = "https://github.com/Significant-Gravitas/vcrpy.git", rev = "master"}

[tool.poetry.group.benchmark.dependencies]
agbenchmark = { path = "../../benchmark" }


[build-system]
requires = ["poetry-core"]
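The `[tool.poetry.scripts]` change swaps the old `autogpt.app.cli:main` entry point for `run`/`serve` commands. As a rough sketch, `poetry run serve` then amounts to invoking that function directly (assuming `serve` is a click command, which this diff does not show):

```python
from autogpt.app.cli import serve  # the target declared in [tool.poetry.scripts]

if __name__ == "__main__":
    serve()  # equivalent to `poetry run serve`
```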
10
autogpts/autogpt/run
Executable file
@@ -0,0 +1,10 @@
#!/bin/sh

kill $(lsof -t -i :8000)

if [ ! -f .env ]; then
    cp .env.example .env
    echo "Please add your api keys to the .env file." >&2
    # exit 1
fi
poetry run serve --debug
9
autogpts/autogpt/run_benchmark
Executable file
@@ -0,0 +1,9 @@
#!/bin/sh

# Kill processes using port 8080 if any.
if lsof -t -i :8080; then
    kill $(lsof -t -i :8080)
fi
# This is the cli entry point for the benchmarking tool.
# To run this in server mode pass in `serve` as the first argument.
poetry run agbenchmark "$@"
4
autogpts/autogpt/setup
Executable file
@@ -0,0 +1,4 @@
#!/bin/sh

poetry install --no-interaction --extras benchmark
echo "Setup completed successfully."
@@ -1,10 +0,0 @@
If the goal oriented task pipeline fails, it means:
- you somehow changed the way the system prompt is generated
- or you broke AutoGPT.

To know which one, you can run the following command:
```bash
pytest -s tests/integration/goal_oriented
```

If the test is successful, it will record new cassettes in VCR. Then you can just push these to your branch and the pipeline will pass.
@@ -1,34 +0,0 @@
import pytest

from autogpt.workspace import Workspace
from tests.challenges.challenge_decorator.challenge_decorator import challenge
from tests.challenges.utils import run_challenge

CYCLE_COUNT = 2
USER_INPUTS = [
    "Use the browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html and answer the question 'What is the price of the book?'\nWrite the price of the book to a file named 'browse_website.txt'.\nUse the task_complete command to complete the task.\nDo not use any other commands."
]


@challenge()
def test_browse_website(
    patched_api_requestor: None,
    monkeypatch: pytest.MonkeyPatch,
    level_to_run: int,
    challenge_name: str,
    workspace: Workspace,
    patched_make_workspace: pytest.fixture,
) -> None:
    run_challenge(
        challenge_name,
        level_to_run,
        monkeypatch,
        USER_INPUTS[level_to_run - 1],
        CYCLE_COUNT,
    )

    file_path = workspace.get_path("browse_website.txt")

    with open(file_path, "r") as file:
        content = file.read()
        assert "£25.89" in content, f"Expected £25.89, got {content}"
@@ -1,47 +0,0 @@
import pytest

from autogpt.config import Config
from autogpt.workspace import Workspace
from tests.challenges.challenge_decorator.challenge_decorator import challenge
from tests.challenges.utils import get_workspace_path, run_challenge

CYCLE_COUNT_PER_LEVEL = [1, 1]
EXPECTED_OUTPUTS_PER_LEVEL = [
    {"hello_world.txt": ["Hello World"]},
    {"hello_world_1.txt": ["Hello World"], "hello_world_2.txt": ["Hello World"]},
]
USER_INPUTS = [
    "Write 'Hello World' into a file named \"hello_world.txt\".",
    'Write \'Hello World\' into 2 files named "hello_world_1.txt" and "hello_world_2.txt".',
]


@challenge()
def test_write_file(
    config: Config,
    patched_api_requestor: None,
    monkeypatch: pytest.MonkeyPatch,
    level_to_run: int,
    challenge_name: str,
    workspace: Workspace,
    patched_make_workspace: pytest.fixture,
) -> None:
    run_challenge(
        challenge_name,
        level_to_run,
        monkeypatch,
        USER_INPUTS[level_to_run - 1],
        CYCLE_COUNT_PER_LEVEL[level_to_run - 1],
    )

    expected_outputs = EXPECTED_OUTPUTS_PER_LEVEL[level_to_run - 1]

    for file_name, expected_lines in expected_outputs.items():
        file_path = get_workspace_path(workspace, file_name)
        with open(file_path, "r") as file:
            content = file.read()

        for expected_line in expected_lines:
            assert (
                expected_line in content
            ), f"Expected '{expected_line}' in file {file_name}, but it was not found"
@@ -1,24 +0,0 @@
from typing import Optional


class Challenge:
    BEAT_CHALLENGES = False
    DEFAULT_CHALLENGE_NAME = "default_challenge_name"

    def __init__(
        self,
        name: str,
        category: str,
        max_level: int,
        is_new_challenge: bool,
        max_level_beaten: Optional[int] = None,
        level_to_run: Optional[int] = None,
    ) -> None:
        self.name = name
        self.category = category
        self.max_level_beaten = max_level_beaten
        self.max_level = max_level
        self.succeeded = False
        self.skipped = False
        self.level_to_run = level_to_run
        self.is_new_challenge = is_new_challenge
@@ -1,84 +0,0 @@
import os
from functools import wraps
from typing import Any, Callable, Optional

import pytest

from tests.challenges.challenge_decorator.challenge import Challenge
from tests.challenges.challenge_decorator.challenge_utils import create_challenge
from tests.challenges.challenge_decorator.score_utils import (
    get_scores,
    update_new_score,
)

MAX_LEVEL_TO_IMPROVE_ON = (
    1  # we will attempt to beat 1 level above the current level for now.
)

CHALLENGE_FAILED_MESSAGE = "Challenges can sometimes fail randomly, please run this test again and if it fails reach out to us on https://discord.gg/autogpt in the 'challenges' channel to let us know the challenge you're struggling with."


def challenge() -> Callable[[Callable[..., Any]], Callable[..., None]]:
    def decorator(func: Callable[..., Any]) -> Callable[..., None]:
        @pytest.mark.requires_openai_api_key
        @pytest.mark.vcr
        @wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> None:
            run_remaining = MAX_LEVEL_TO_IMPROVE_ON if Challenge.BEAT_CHALLENGES else 1
            original_error: Optional[Exception] = None

            while run_remaining > 0:
                current_score, new_score, new_score_location = get_scores()
                level_to_run = (
                    kwargs["level_to_run"] if "level_to_run" in kwargs else None
                )
                challenge = create_challenge(
                    func, current_score, Challenge.BEAT_CHALLENGES, level_to_run
                )
                if challenge.level_to_run is not None:
                    kwargs["level_to_run"] = challenge.level_to_run
                    kwargs["challenge_name"] = challenge.name
                    try:
                        func(*args, **kwargs)
                        challenge.succeeded = True
                    except AssertionError as err:
                        original_error = AssertionError(
                            f"{CHALLENGE_FAILED_MESSAGE}\n{err}"
                        )
                        challenge.succeeded = False
                    except Exception as err:
                        original_error = err
                        challenge.succeeded = False
                else:
                    challenge.skipped = True
                if os.environ.get("CI") == "true":
                    new_max_level_beaten = get_new_max_level_beaten(
                        challenge, Challenge.BEAT_CHALLENGES
                    )
                    update_new_score(
                        new_score_location, new_score, challenge, new_max_level_beaten
                    )
                if challenge.level_to_run is None:
                    pytest.skip("This test has not been unlocked yet.")

                if not challenge.succeeded:
                    if Challenge.BEAT_CHALLENGES or challenge.is_new_challenge:
                        pytest.xfail(str(original_error))
                    if original_error:
                        raise original_error
                run_remaining -= 1

        return wrapper

    return decorator


def get_new_max_level_beaten(
    challenge: Challenge, beat_challenges: bool
) -> Optional[int]:
    if challenge.succeeded:
        return challenge.level_to_run
    if challenge.skipped:
        return challenge.max_level_beaten
    # Challenge failed
    return challenge.max_level_beaten if beat_challenges else None
@@ -1,85 +0,0 @@
import os
from typing import Any, Callable, Dict, Optional, Tuple

from tests.challenges.challenge_decorator.challenge import Challenge

CHALLENGE_PREFIX = "test_"


def create_challenge(
    func: Callable[..., Any],
    current_score: Dict[str, Any],
    is_beat_challenges: bool,
    level_to_run: Optional[int] = None,
) -> Challenge:
    challenge_category, challenge_name = get_challenge_identifiers(func)
    is_new_challenge = challenge_name not in current_score.get(challenge_category, {})
    max_level = get_max_level(current_score, challenge_category, challenge_name)
    max_level_beaten = get_max_level_beaten(
        current_score, challenge_category, challenge_name
    )
    level_to_run = get_level_to_run(
        is_beat_challenges, level_to_run, max_level, max_level_beaten, is_new_challenge
    )

    return Challenge(
        name=challenge_name,
        category=challenge_category,
        max_level=max_level,
        max_level_beaten=max_level_beaten,
        level_to_run=level_to_run,
        is_new_challenge=is_new_challenge,
    )


def get_level_to_run(
    is_beat_challenges: bool,
    level_to_run: Optional[int],
    max_level: int,
    max_level_beaten: Optional[int],
    is_new_challenge: bool,
) -> Optional[int]:
    if is_new_challenge:
        return 1
    if level_to_run is not None:
        if level_to_run > max_level:
            raise ValueError(
                f"Level to run ({level_to_run}) is greater than max level ({max_level})"
            )
        return level_to_run
    if is_beat_challenges:
        if max_level_beaten == max_level:
            return None
        return 1 if max_level_beaten is None else max_level_beaten + 1
    return max_level_beaten


def get_challenge_identifiers(func: Callable[..., Any]) -> Tuple[str, str]:
    full_path = os.path.dirname(os.path.abspath(func.__code__.co_filename))
    challenge_category = os.path.basename(full_path)
    challenge_name = func.__name__.replace(CHALLENGE_PREFIX, "")
    return challenge_category, challenge_name


def get_max_level(
    current_score: Dict[str, Any],
    challenge_category: str,
    challenge_name: str,
) -> int:
    return (
        current_score.get(challenge_category, {})
        .get(challenge_name, {})
        .get("max_level", 1)
    )


def get_max_level_beaten(
    current_score: Dict[str, Any],
    challenge_category: str,
    challenge_name: str,
) -> Optional[int]:
    return (
        current_score.get(challenge_category, {})
        .get(challenge_name, {})
        .get("max_level_beaten", None)
    )
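Since `get_level_to_run` encodes the whole unlock/beat policy, a few worked cases help. These asserts (my own; they assume the module deleted above is still importable) all hold:

```python
from tests.challenges.challenge_decorator.challenge_utils import get_level_to_run

# New challenges always start at level 1.
assert get_level_to_run(False, None, max_level=3, max_level_beaten=None, is_new_challenge=True) == 1
# Normal mode replays the highest level already beaten.
assert get_level_to_run(False, None, max_level=3, max_level_beaten=2, is_new_challenge=False) == 2
# Beat mode tries one level above the current best...
assert get_level_to_run(True, None, max_level=3, max_level_beaten=2, is_new_challenge=False) == 3
# ...and returns None (skip) once the top level is beaten.
assert get_level_to_run(True, None, max_level=3, max_level_beaten=3, is_new_challenge=False) is None
```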
@@ -1,59 +0,0 @@
import json
import os
from typing import Any, Dict, Optional, Tuple

from tests.challenges.challenge_decorator.challenge import Challenge

CURRENT_SCORE_LOCATION = "../current_score"
NEW_SCORE_LOCATION = "../new_score"


def update_new_score(
    filename_new_score: str,
    new_score: Dict[str, Any],
    challenge: Challenge,
    new_max_level_beaten: Optional[int],
) -> None:
    write_new_score(new_score, challenge, new_max_level_beaten)
    write_new_score_to_file(new_score, filename_new_score)


def write_new_score(
    new_score: Dict[str, Any], challenge: Challenge, new_max_level_beaten: Optional[int]
) -> Dict[str, Any]:
    new_score.setdefault(challenge.category, {})
    new_score[challenge.category][challenge.name] = {
        "max_level_beaten": new_max_level_beaten,
        "max_level": challenge.max_level,
    }
    return new_score


def write_new_score_to_file(new_score: Dict[str, Any], filename: str) -> None:
    with open(filename, "w") as file:
        json.dump(new_score, file, indent=4)


def get_scores() -> Tuple[Dict[str, Any], Dict[str, Any], str]:
    filename_current_score, filename_new_score = get_score_locations()
    current_score = load_json(filename_current_score)
    new_score = load_json(filename_new_score)
    return current_score, new_score, filename_new_score


def load_json(filename: str) -> Dict[str, Any]:
    if os.path.isfile(filename):
        with open(filename, "r") as file:
            return json.load(file)
    else:
        return {}


def get_score_locations() -> Tuple[str, str]:
    pid = os.getpid()
    project_root = os.path.dirname(os.path.abspath(__file__))
    filename_current_score = os.path.join(
        project_root, f"{CURRENT_SCORE_LOCATION}.json"
    )
    filename_new_score = os.path.join(project_root, f"{NEW_SCORE_LOCATION}_{pid}.json")
    return filename_current_score, filename_new_score
@@ -1,77 +0,0 @@
from typing import Any, Dict, Generator, Optional

import pytest
from _pytest.config import Config
from _pytest.config.argparsing import Parser
from _pytest.fixtures import FixtureRequest
from pytest_mock import MockerFixture

from autogpt.workspace import Workspace
from tests.challenges.challenge_decorator.challenge import Challenge
from tests.vcr import before_record_response


def before_record_response_filter_errors(
    response: Dict[str, Any]
) -> Optional[Dict[str, Any]]:
    """In challenges we don't want to record errors (see issue #4461)"""
    if response["status"]["code"] >= 400:
        return None

    return before_record_response(response)


@pytest.fixture(scope="module")
def vcr_config(get_base_vcr_config: Dict[str, Any]) -> Dict[str, Any]:
    # This fixture is called by the pytest-recording vcr decorator.
    return get_base_vcr_config | {
        "before_record_response": before_record_response_filter_errors,
    }


def pytest_addoption(parser: Parser) -> None:
    parser.addoption(
        "--level", action="store", default=None, type=int, help="Specify test level"
    )
    parser.addoption(
        "--beat-challenges",
        action="store_true",
        help="Specifies whether the test suite should attempt to beat challenges",
    )


def pytest_configure(config: Config) -> None:
    level = config.getoption("--level", default=None)
    config.option.level = level
    beat_challenges = config.getoption("--beat-challenges", default=False)
    config.option.beat_challenges = beat_challenges


@pytest.fixture
def level_to_run(request: FixtureRequest) -> int:
    # Used for challenges in the goal oriented tests.
    return request.config.option.level


@pytest.fixture
def challenge_name() -> str:
    return Challenge.DEFAULT_CHALLENGE_NAME


@pytest.fixture(autouse=True)
def check_beat_challenges(request: FixtureRequest) -> None:
    Challenge.BEAT_CHALLENGES = request.config.getoption("--beat-challenges")


@pytest.fixture
def patched_make_workspace(mocker: MockerFixture, workspace: Workspace) -> Generator:
    def patched_make_workspace(*args: Any, **kwargs: Any) -> str:
        return workspace.root

    mocker.patch.object(
        Workspace,
        "make_workspace",
        new=patched_make_workspace,
    )

    yield
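The `--level` and `--beat-challenges` options registered above are plain pytest options, so a programmatic run (a sketch; equivalent to passing the flags on the command line from `autogpts/autogpt`) looks like:

```python
import pytest

# Equivalent to: pytest --level 2 --beat-challenges tests/challenges
pytest.main(["--level", "2", "--beat-challenges", "tests/challenges"])
```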
@@ -1,56 +0,0 @@
{
    "basic_abilities": {
        "browse_website": {
            "max_level": 1,
            "max_level_beaten": null
        },
        "write_file": {
            "max_level": 2,
            "max_level_beaten": 1
        }
    },
    "debug_code": {
        "debug_code_challenge_a": {
            "max_level": 2,
            "max_level_beaten": 1
        }
    },
    "information_retrieval": {
        "information_retrieval_challenge_a": {
            "max_level": 3,
            "max_level_beaten": null
        },
        "information_retrieval_challenge_b": {
            "max_level": 1,
            "max_level_beaten": null
        },
        "information_retrieval_challenge_c": {
            "max_level": 3,
            "max_level_beaten": null
        }
    },
    "kubernetes": {
        "kubernetes_template_challenge_a": {
            "max_level": 1,
            "max_level_beaten": null
        }
    },
    "memory": {
        "memory_challenge_a": {
            "max_level": 3,
            "max_level_beaten": 3
        },
        "memory_challenge_b": {
            "max_level": 5,
            "max_level_beaten": null
        },
        "memory_challenge_c": {
            "max_level": 5,
            "max_level_beaten": null
        },
        "memory_challenge_d": {
            "max_level": 5,
            "max_level_beaten": null
        }
    }
}
@@ -1,13 +0,0 @@
# mypy: ignore-errors
from typing import List, Optional


def two_sum(nums: List, target: int) -> Optional[List[int]]:
    seen = {}
    for i, num in enumerate(nums):
        typo  # deliberate bug: the agent must remove this line to make test.py pass
        complement = target - num
        if complement in seen:
            return [seen[complement], i]
        seen[num] = i
    return None
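For reference, this is what the fixture looks like once the bare `typo` statement is removed, i.e. the fix the debug-code challenge expects the agent to produce (reconstruction mine):

```python
from typing import List, Optional


def two_sum(nums: List, target: int) -> Optional[List[int]]:
    seen = {}
    for i, num in enumerate(nums):
        complement = target - num
        if complement in seen:  # found the pair that sums to target
            return [seen[complement], i]
        seen[num] = i
    return None
```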
@@ -1,31 +0,0 @@
# mypy: ignore-errors
from code import two_sum
from typing import List


def test_two_sum(nums: List, target: int, expected_result: List[int]) -> None:
    result = two_sum(nums, target)
    print(result)
    assert (
        result == expected_result
    ), f"AssertionError: Expected the output to be {expected_result}"


if __name__ == "__main__":
    # test the trivial case with the first two numbers
    nums = [2, 7, 11, 15]
    target = 9
    expected_result = [0, 1]
    test_two_sum(nums, target, expected_result)

    # test for ability to use zero and the same number twice
    nums = [2, 7, 0, 15, 12, 0]
    target = 0
    expected_result = [2, 5]
    test_two_sum(nums, target, expected_result)

    # test for first and last index usage and negative numbers
    nums = [-6, 7, 11, 4]
    target = -2
    expected_result = [0, 3]
    test_two_sum(nums, target, expected_result)
@@ -1,68 +0,0 @@
from pathlib import Path

import pytest
from pytest_mock import MockerFixture

from autogpt.agents import Agent
from autogpt.commands.execute_code import execute_python_file
from autogpt.workspace import Workspace
from tests.challenges.challenge_decorator.challenge_decorator import challenge
from tests.challenges.utils import (
    copy_file_into_workspace,
    get_workspace_path,
    run_challenge,
)

CYCLE_COUNT = 5
EXPECTED_VALUES = ["[0, 1]", "[2, 5]", "[0, 3]"]
DIRECTORY_PATH = Path(__file__).parent / "data"
CODE_FILE_PATH = "code.py"
TEST_FILE_PATH = "test.py"
USER_INPUTS = [
    "1- Run test.py using the execute_python_file command.\n2- Read code.py using the read_file command.\n3- Modify code.py using the write_to_file command.\nRepeat step 1, 2 and 3 until test.py runs without errors. Do not modify the test.py file.",
    "1- Run test.py.\n2- Read code.py.\n3- Modify code.py.\nRepeat step 1, 2 and 3 until test.py runs without errors.\n",
    "Make test.py run without errors.",
]


@challenge()
def test_debug_code_challenge_a(
    dummy_agent: Agent,
    monkeypatch: pytest.MonkeyPatch,
    patched_api_requestor: MockerFixture,
    level_to_run: int,
    challenge_name: str,
    workspace: Workspace,
    patched_make_workspace: pytest.fixture,
) -> None:
    """
    Test whether the agent can debug a simple code snippet.

    :param dummy_agent: The agent to test.
    :param monkeypatch: pytest's monkeypatch utility for modifying builtins.
    :param patched_api_requestor: Sends api requests to our API CI pipeline.
    :param level_to_run: The level to run.
    """

    copy_file_into_workspace(workspace, DIRECTORY_PATH, CODE_FILE_PATH)
    copy_file_into_workspace(workspace, DIRECTORY_PATH, TEST_FILE_PATH)

    run_challenge(
        challenge_name,
        level_to_run,
        monkeypatch,
        USER_INPUTS[level_to_run - 1],
        CYCLE_COUNT,
    )

    output = execute_python_file(
        get_workspace_path(workspace, TEST_FILE_PATH),
        agent=dummy_agent,
    )

    assert "error" not in output.lower(), f"Errors found in output: {output}!"

    for expected_value in EXPECTED_VALUES:
        assert (
            expected_value in output
        ), f"Expected output to contain {expected_value}, but it was not found in {output}!"
@@ -1,50 +0,0 @@
import pytest
from pytest_mock import MockerFixture

from autogpt.workspace import Workspace
from tests.challenges.challenge_decorator.challenge_decorator import challenge
from tests.challenges.utils import get_workspace_path, run_challenge

CYCLE_COUNT = 3
EXPECTED_REVENUES = [["81"], ["81"], ["81", "53", "24", "21", "11", "7", "4", "3", "2"]]

OUTPUT_LOCATION = "output.txt"
USER_INPUTS = [
    "Write to a file called output.txt containing tesla's revenue in 2022 after searching for 'tesla revenue 2022'.",
    "Write to a file called output.txt containing tesla's revenue in 2022.",
    "Write to a file called output.txt containing tesla's revenue every year since its creation.",
]


@challenge()
def test_information_retrieval_challenge_a(
    monkeypatch: pytest.MonkeyPatch,
    patched_api_requestor: MockerFixture,
    level_to_run: int,
    challenge_name: str,
    workspace: Workspace,
    patched_make_workspace: pytest.fixture,
) -> None:
    """
    Test the challenge_a function in a given agent by mocking user inputs and checking the output file content.

    :param monkeypatch: pytest's monkeypatch utility for modifying builtins.
    """
    run_challenge(
        challenge_name,
        level_to_run,
        monkeypatch,
        USER_INPUTS[level_to_run - 1],
        CYCLE_COUNT,
    )

    file_path = get_workspace_path(workspace, OUTPUT_LOCATION)
    with open(file_path, "r") as file:
        content = file.read()
    expected_revenues = EXPECTED_REVENUES[level_to_run - 1]
    for revenue in expected_revenues:
        assert (
            f"{revenue}." in content or f"{revenue}," in content
        ), f"Expected the file to contain {revenue}"
@@ -1,52 +0,0 @@
import pytest
from pytest_mock import MockerFixture

from autogpt.workspace import Workspace
from tests.challenges.challenge_decorator.challenge_decorator import challenge
from tests.challenges.utils import get_workspace_path, run_challenge

CYCLE_COUNT = 3
OUTPUT_LOCATION = "2010_nobel_prize_winners.txt"
USER_INPUTS = [
    "Write to file the winner's name(s), affiliated university, and discovery of the 2010 nobel prize in physics. Write your final answer to 2010_nobel_prize_winners.txt."
]


@challenge()
def test_information_retrieval_challenge_b(
    monkeypatch: pytest.MonkeyPatch,
    patched_api_requestor: MockerFixture,
    level_to_run: int,
    challenge_name: str,
    workspace: Workspace,
    patched_make_workspace: pytest.fixture,
) -> None:
    """
    Test the challenge_b function in a given agent by mocking user inputs and checking the output file content.

    :param monkeypatch: pytest's monkeypatch utility for modifying builtins.
    :param patched_api_requestor: APIRequestor patch to override the openai.api_requestor module for testing.
    :param level_to_run: The level to run.
    """

    run_challenge(
        challenge_name,
        level_to_run,
        monkeypatch,
        USER_INPUTS[level_to_run - 1],
        CYCLE_COUNT,
    )

    file_path = get_workspace_path(workspace, OUTPUT_LOCATION)

    with open(file_path, "r") as file:
        content = file.read()
        assert "Andre Geim" in content, "Expected the file to contain Andre Geim"
        assert (
            "Konstantin Novoselov" in content
        ), "Expected the file to contain Konstantin Novoselov"
        assert (
            "University of Manchester" in content
        ), "Expected the file to contain University of Manchester"
        assert "graphene" in content, "Expected the file to contain graphene"
@@ -1,47 +0,0 @@
import pytest
from pytest_mock import MockerFixture

from autogpt.workspace import Workspace
from tests.challenges.challenge_decorator.challenge_decorator import challenge
from tests.challenges.utils import get_workspace_path, run_challenge

CYCLE_COUNT = 3
COO = [["Luke Lafreniere"], ["Luke Lafreniere"], ["Luke Lafreniere 2017"]]

OUTPUT_LOCATION = "output.txt"
USER_INPUTS = [
    "Write to a file called output.txt containing the name and title of the current Chief Operating Officer of Floatplane Media.",
    "Write to a file called output.txt containing the name and title of the current Chief Operating Officer of https://www.floatplane.com.",
    "Write to a file called output.txt containing the name and title of the current Chief Operating Officer of https://www.floatplane.com and the year it was formed.",
]


@challenge()
def test_information_retrieval_challenge_c(
    monkeypatch: pytest.MonkeyPatch,
    patched_api_requestor: MockerFixture,
    level_to_run: int,
    challenge_name: str,
    workspace: Workspace,
    patched_make_workspace: pytest.fixture,
) -> None:
    """
    Test the challenge_c function in a given agent by mocking user inputs and checking the output file content.

    :param monkeypatch: pytest's monkeypatch utility for modifying builtins.
    """
    run_challenge(
        challenge_name,
        level_to_run,
        monkeypatch,
        USER_INPUTS[level_to_run - 1],
        CYCLE_COUNT,
    )

    file_path = get_workspace_path(workspace, OUTPUT_LOCATION)
    with open(file_path, "r") as file:
        content = file.read()
    coo_name = COO[level_to_run - 1]
    for chief in coo_name:
        assert chief in content, f"Expected the file to contain {chief}"
@@ -1,53 +0,0 @@
from typing import Any, Dict

import pytest
import yaml
from pytest_mock import MockerFixture

from autogpt.workspace import Workspace
from tests.challenges.challenge_decorator.challenge_decorator import challenge
from tests.challenges.utils import get_workspace_path, run_challenge

CYCLE_COUNT = 3
OUTPUT_LOCATION = "kube.yaml"
USER_INPUTS = ["Write a simple kubernetes deployment file and save it as kube.yaml."]


@challenge()
def test_kubernetes_template_challenge_a(
    monkeypatch: pytest.MonkeyPatch,
    patched_api_requestor: MockerFixture,
    level_to_run: int,
    challenge_name: str,
    workspace: Workspace,
    patched_make_workspace: pytest.fixture,
) -> None:
    """
    Test the challenge_a function in a given agent by mocking user inputs
    and checking the output file content.

    Args:
        monkeypatch (pytest.MonkeyPatch)
        level_to_run (int)
    """
    run_challenge(
        challenge_name,
        level_to_run,
        monkeypatch,
        USER_INPUTS[level_to_run - 1],
        CYCLE_COUNT,
    )

    file_path = get_workspace_path(workspace, OUTPUT_LOCATION)
    with open(file_path, "r") as file:
        content_string = file.read()

    for word in ["apiVersion", "kind", "metadata", "spec"]:
        assert word in content_string, f"Expected the file to contain {word}"

    yaml_as_dict: Dict[str, Any] = yaml.safe_load(content_string)
    for word in ["Service", "Deployment", "Pod"]:
        assert word in yaml_as_dict.get(
            "kind", ""
        ), f"Expected the file to contain {word}"
@@ -1,86 +0,0 @@
import pytest
from pytest_mock import MockerFixture

from autogpt.config import Config
from autogpt.workspace import Workspace
from tests.challenges.challenge_decorator.challenge_decorator import challenge
from tests.challenges.utils import get_workspace_path, run_challenge

OUTPUT_LOCATION = "output.txt"

USER_INPUT = "Use the command read_file to read the instructions_1.txt file\nFollow the instructions in the instructions_1.txt file"


@challenge()
def test_memory_challenge_a(
    config: Config,
    patched_api_requestor: MockerFixture,
    monkeypatch: pytest.MonkeyPatch,
    level_to_run: int,
    challenge_name: str,
    workspace: Workspace,
    patched_make_workspace: pytest.fixture,
) -> None:
    """
    The agent reads a file containing a task_id. Then, it reads a series of other files.
    After reading 'n' files, the agent must write the task_id into a new file.
    Args:
        workspace (Workspace)
        patched_api_requestor (MockerFixture)
        monkeypatch (pytest.MonkeyPatch)
        level_to_run (int)
    """
    task_id = "2314"
    create_instructions_files(workspace, level_to_run, task_id)

    run_challenge(
        challenge_name, level_to_run, monkeypatch, USER_INPUT, level_to_run + 2
    )

    file_path = get_workspace_path(workspace, OUTPUT_LOCATION)
    with open(file_path, "r") as file:
        content = file.read()
        assert task_id in content, f"Expected the file to contain {task_id}"


def create_instructions_files(
    workspace: Workspace,
    num_files: int,
    task_id: str,
    base_filename: str = "instructions_",
) -> None:
    """
    Creates a series of instructions files for the memory challenge.
    Args:
        workspace (Workspace)
        num_files (int)
        task_id (str)
        base_filename (str, optional)
    """
    for i in range(1, num_files + 1):
        content = generate_content(i, task_id, base_filename, num_files)
        file_name = f"{base_filename}{i}.txt"
        file_path = get_workspace_path(workspace, file_name)
        with open(file_path, "w") as file:
            file.write(content)


def generate_content(
    index: int, task_id: str, base_filename: str, num_files: int
) -> str:
    """
    Args:
        index: int
        task_id: str
        base_filename: str
        num_files: int

    Returns: str
    """
    if index == 1:
        return (
            f"This task_id is {task_id}\nRead the file {base_filename}{index + 1}.txt"
        )
    if index != num_files:
        return f"Read the file {base_filename}{index + 1}.txt"
    return "Write the task_id into the file output.txt\nShutdown"
@@ -1,86 +0,0 @@
import pytest
from pytest_mock import MockerFixture

from autogpt.workspace import Workspace
from tests.challenges.challenge_decorator.challenge_decorator import challenge
from tests.challenges.utils import generate_noise, get_workspace_path, run_challenge

NOISE = 1000
OUTPUT_LOCATION = "output.txt"
USER_INPUT = "Use the command read_file to read the instructions_1.txt file\nFollow the instructions in the instructions_1.txt file"


@challenge()
def test_memory_challenge_b(
    patched_api_requestor: MockerFixture,
    monkeypatch: pytest.MonkeyPatch,
    level_to_run: int,
    challenge_name: str,
    workspace: Workspace,
    patched_make_workspace: pytest.fixture,
) -> None:
    """
    The agent reads a series of files, each containing a task_id and noise. After reading 'n' files,
    the agent must write all the task_ids into a new file, filtering out the noise.

    Args:
        workspace (Workspace)
        patched_api_requestor (MockerFixture)
        monkeypatch (pytest.MonkeyPatch)
        level_to_run (int)
    """
    task_ids = [str(i * 1111) for i in range(1, level_to_run + 1)]
    create_instructions_files(workspace, level_to_run, task_ids)

    run_challenge(
        challenge_name, level_to_run, monkeypatch, USER_INPUT, level_to_run + 2
    )

    file_path = get_workspace_path(workspace, OUTPUT_LOCATION)
    with open(file_path, "r") as file:
        content = file.read()
        for task_id in task_ids:
            assert task_id in content, f"Expected the file to contain {task_id}"


def create_instructions_files(
    workspace: Workspace,
    level: int,
    task_ids: list,
    base_filename: str = "instructions_",
) -> None:
    """
    Creates a series of instructions files for the memory challenge.
    Args:
        workspace (Workspace)
        level (int)
        task_ids (list)
        base_filename (str, optional)
    """
    for i in range(1, level + 1):
        content = generate_content(i, task_ids, base_filename, level)
        file_name = f"{base_filename}{i}.txt"
        file_path = get_workspace_path(workspace, file_name)

        with open(file_path, "w") as file:
            file.write(content)


def generate_content(index: int, task_ids: list, base_filename: str, level: int) -> str:
    """
    Args:
        index: int
        task_ids: list
        base_filename: str
        level: int

    Returns: str
    """
    task_id = task_ids[index - 1]
    noise = generate_noise(NOISE)
    if index != level:
        if level == 1:
            return f"{noise}\nThe current task_id is {task_id}.\n{noise}\nWrite all the task_ids into the file output.txt. The file has not been created yet. After that, use the task_complete command."
        return f"{noise}\nThe current task_id is {task_id}.\n{noise}\nRead the file {base_filename}{index + 1}.txt using the read_file command."
    return f"{noise}\nThis task_id is {task_id}\n{noise}\nWrite all the task_ids into the file output.txt. The file has not been created yet. After that, use the task_complete command.\n"
@@ -1,105 +0,0 @@
import pytest
from pytest_mock import MockerFixture

from autogpt.commands.file_operations import read_file
from autogpt.workspace import Workspace
from tests.challenges.challenge_decorator.challenge_decorator import challenge
from tests.challenges.utils import generate_noise, get_workspace_path, run_challenge

NOISE = 1200
OUTPUT_LOCATION = "output.txt"
USER_INPUT = "Use the command read_file to read the instructions_1.txt file\nFollow the instructions in the instructions_1.txt file"


@challenge()
def test_memory_challenge_c(
    patched_api_requestor: MockerFixture,
    monkeypatch: pytest.MonkeyPatch,
    level_to_run: int,
    challenge_name: str,
    workspace: Workspace,
    patched_make_workspace: pytest.fixture,
) -> None:
    """
    Instead of reading task ids from files as in the previous challenges, the agent must now remember
    phrases which may have semantically similar meanings, and write the phrases to a file
    after seeing several of them.

    Args:
        workspace (Workspace)
        patched_api_requestor (MockerFixture)
        monkeypatch (pytest.MonkeyPatch)
        level_to_run (int)
    """
    silly_phrases = [
        "The purple elephant danced on a rainbow while eating a taco",
        "The sneaky toaster stole my socks and ran away to Hawaii",
        "My pet rock sings better than Beyoncé on Tuesdays",
        "The giant hamster rode a unicycle through the crowded mall",
        "The talking tree gave me a high-five and then flew away",
        "I have a collection of invisible hats that I wear on special occasions",
        "The flying spaghetti monster stole my sandwich and left a note saying 'thanks for the snack'",
        "My imaginary friend is a dragon who loves to play video games",
        "I once saw a cloud shaped like a giant chicken eating a pizza",
        "The ninja unicorn disguised itself as a potted plant and infiltrated the office",
    ]

    level_silly_phrases = silly_phrases[:level_to_run]
    create_instructions_files(
        workspace,
        level_to_run,
        level_silly_phrases,
    )

    run_challenge(
        challenge_name, level_to_run, monkeypatch, USER_INPUT, level_to_run + 2
    )

    file_path = get_workspace_path(workspace, OUTPUT_LOCATION)
    content = read_file(file_path, agent=workspace)
    for phrase in level_silly_phrases:
        assert phrase in content, f"Expected the file to contain {phrase}"


def create_instructions_files(
    workspace: Workspace,
    level: int,
    task_ids: list,
    base_filename: str = "instructions_",
) -> None:
    """
    Creates a series of instructions files for the memory challenge.
    Args:
        workspace (Workspace)
        level (int)
        task_ids (list)
        base_filename (str, optional)
    """
    for i in range(1, level + 1):
        content = generate_content(i, task_ids, base_filename, level)
        file_name = f"{base_filename}{i}.txt"
        file_path = get_workspace_path(workspace, file_name)
        with open(file_path, "w") as file:
            file.write(content)


def generate_content(
    index: int, silly_phrases: list, base_filename: str, level: int
) -> str:
    """
    Args:
        index: int
        silly_phrases: list
        base_filename: str
        level: int

    Returns: str
    """
    phrase = silly_phrases[index - 1]
    noise = generate_noise(NOISE)
    if index != level:
        if level == 1:
            return f"{noise}\nThe current phrase to remember is '{phrase}'.\n{noise}\nWrite all the phrases into the file output.txt. The file has not been created yet. After that, use the task_complete command."
        return f"{noise}\nThe current phrase is '{phrase}'.\n{noise}\nRead the file {base_filename}{index + 1}.txt using the read_file command."
    return f"{noise}\nThis phrase is '{phrase}'\n{noise}\nWrite all the phrases into the file output.txt. The file has not been created yet. After that, use the task_complete command.\n"
@@ -1,239 +0,0 @@
import json
from typing import Dict

import pytest
from pytest_mock import MockerFixture

from autogpt.commands.file_operations import read_file
from autogpt.workspace import Workspace
from tests.challenges.challenge_decorator.challenge_decorator import challenge
from tests.challenges.utils import get_workspace_path, run_challenge

LEVEL_CURRENTLY_BEATEN = 1
MAX_LEVEL = 5
OUTPUT_LOCATION = "output.txt"
USER_INPUT = "Use the command read_file to read the instructions_1.txt file\nFollow the instructions in the instructions_1.txt file"


@challenge()
def test_memory_challenge_d(
    patched_api_requestor: MockerFixture,
    monkeypatch: pytest.MonkeyPatch,
    level_to_run: int,
    challenge_name: str,
    workspace: Workspace,
    patched_make_workspace: pytest.fixture,
) -> None:
    """
    The agent is given a series of events and must remember the respective beliefs of the characters.
    Args:
        workspace (Workspace)
        level_to_run (int)
    """
    sally_anne_test_phrases = [
        "Sally has a marble (marble A) and she puts it in her basket (basket S), then leaves the room. Anne moves marble A from Sally's basket (basket S) to her own basket (basket A).",
        "Sally gives a new marble (marble B) to Bob who is outside with her. Bob goes into the room and places marble B into Anne's basket (basket A). Anne tells Bob to tell Sally that he lost the marble b. Bob leaves the room and speaks to Sally about the marble B. Meanwhile, after Bob left the room, Anne moves marble A into the green box, but tells Charlie to tell Sally that marble A is under the sofa. Charlie leaves the room and speaks to Sally about the marble A as instructed by Anne.",
        "Sally gives a new marble (marble C) to Charlie who is outside with her. Charlie enters the room and exchanges marble C with marble B in Anne's basket (basket A). Anne tells Charlie to tell Sally that he put marble C into the red box. Charlie leaves the room and speak to Sally about marble C as instructed by Anne. Meanwhile, after Charlie leaves the room, Bob enters into the room and moves marble A from the green box to under the sofa, but tells Anne to tell Sally that marble A is in the green box. Anne leaves the room and speak to Sally about the marble A as instructed by Bob",
        "Sally gives a new marble (marble D) to Anne. Anne gives the marble to Charlie. Charlie enters the room and gives marble D to Bob. Bob tells Charlie to tell Sally that he put marble D under the sofa. Bob put marble D under the sofa Charlie leaves the room and speaks to Sally about marble D. Meanwhile, after Charlie leaves the room, Bob takes marble A from under the sofa and places it in the blue box.",
        "Sally gives a new marble (marble E) to Charlie who is outside with her. Charlie enters the room and places marble E in the red box. Anne, who is already in the room, takes marble E from the red box, and hides it under the sofa. Then Anne leaves the room and tells Sally that marble E is in the green box. Meanwhile, after Anne leaves the room, Charlie who re-enters the room takes marble D from under the sofa and places it in his own basket (basket C).",
    ]
    level_sally_anne_test_phrases = sally_anne_test_phrases[:level_to_run]
    create_instructions_files(workspace, level_to_run, level_sally_anne_test_phrases)
    run_challenge(
        challenge_name, level_to_run, monkeypatch, USER_INPUT, level_to_run + 2
    )

    file_path = get_workspace_path(workspace, OUTPUT_LOCATION)

    content = read_file(file_path, workspace)
    check_beliefs(content, level_to_run)


def check_beliefs(content: str, level: int) -> None:
    # Define the expected beliefs for each level
    expected_beliefs = {
        1: {
            "Sally": {
                "marble A": "basket S",
            },
            "Anne": {
                "marble A": "basket A",
            },
        },
        2: {
            "Sally": {
                "marble A": "sofa",  # Because Charlie told her
                "marble B": "lost",  # Because Bob told her
            },
            "Anne": {
                "marble A": "green box",  # Because she moved it there
                "marble B": "basket A",  # Because Bob put it there and she was in the room
            },
            "Bob": {
                "marble B": "basket A",  # Last place he put it
            },
            "Charlie": {
                "marble A": "sofa",  # Because Anne told him to tell Sally so
            },
        },
        3: {
            "Sally": {
                "marble A": "green box",  # Because Anne told her
                "marble C": "red box",  # Because Charlie told her
            },
            "Anne": {
                "marble A": "sofa",  # Because Bob moved it there and told her
                "marble B": "basket A",  # Because Charlie exchanged marble C with marble B in her basket
                "marble C": "basket A",  # Because Charlie exchanged marble C with marble B in her basket
            },
            "Bob": {
                "marble A": "sofa",  # Because he moved it there
                "marble B": "basket A",
                # Because Charlie exchanged marble C with marble B in Anne's basket, and he was in the room
                "marble C": "basket A",
                # Because Charlie exchanged marble C with marble B in Anne's basket, and he was in the room
            },
            "Charlie": {
                "marble A": "sofa",  # Last place he knew it was
                "marble B": "basket A",  # Because he exchanged marble C with marble B in Anne's basket
                "marble C": "red box",  # Because Anne told him to tell Sally so
            },
        },
        4: {
            "Sally": {
                "marble A": "green box",  # Because Anne told her in the last conversation
                "marble C": "red box",  # Because Charlie told her
                "marble D": "sofa",  # Because Charlie told her
            },
            "Anne": {
                "marble A": "blue box",  # Because Bob moved it there, and she was not in the room to see
                "marble B": "basket A",  # Last place she knew it was
                "marble C": "basket A",  # Last place she knew it was
                "marble D": "sofa",  # Because Bob moved it there, and she was in the room to see
            },
            "Bob": {
                "marble A": "blue box",  # Because he moved it there
                "marble B": "basket A",  # Last place he knew it was
                "marble C": "basket A",  # Last place he knew it was
                "marble D": "sofa",  # Because he moved it there
            },
            "Charlie": {
                "marble A": "sofa",  # Last place he knew it was
                "marble B": "basket A",  # Last place he knew it was
                "marble C": "red box",  # Last place he knew it was
                "marble D": "sofa",  # Because Bob told him to tell Sally so
            },
        },
        5: {
            "Sally": {
                "marble A": "green box",  # Because Anne told her in the last level
                "marble C": "red box",  # Because Charlie told her
                "marble D": "sofa",  # Because Charlie told her
                "marble E": "green box",  # Because Anne told her
            },
            "Anne": {
                "marble A": "blue box",  # Last place she knew it was
                "marble B": "basket A",  # Last place she knew it was
                "marble C": "basket A",  # Last place she knew it was
                "marble D": "basket C",  # Last place she knew it was
                "marble E": "sofa",  # Because she moved it there
            },
            "Charlie": {
                "marble A": "blue box",  # Last place he knew it was
                "marble B": "basket A",  # Last place he knew it was
                "marble C": "basket A",  # Last place he knew it was
                "marble D": "basket C",  # Because he moved it there
                "marble E": "red box",  # Last place he knew it was
            },
            "Bob": {
                "marble A": "blue box",  # Last place he knew it was
                "marble C": "red box",  # Last place he knew it was
                "marble D": "sofa",  # Last place he knew it was
            },
        },
    }

    # Extract the beliefs from the AI's response
    ai_beliefs = extract_beliefs(content)
    # Check the AI's beliefs against the expected beliefs
    for character, belief in expected_beliefs[level].items():
        for marble, location in belief.items():
            ai_belief = ai_beliefs.get(character, {}).get(marble, "")
            assert (
                location in ai_belief
            ), f"For {character}'s {marble}, expected '{location}' to be in '{ai_belief}'"


def extract_beliefs(content: str) -> Dict[str, Dict[str, str]]:
    """Extract the beliefs of each character from the AI's output."""
    # Parse the JSON content
    content_dict = json.loads(content)
    beliefs = content_dict.get("beliefs", {})
    return beliefs


def create_instructions_files(
    workspace: Workspace,
    level: int,
    test_phrases: list,
    base_filename: str = "instructions_",
) -> None:
    """
    Creates a series of instructions files for the memory challenge.
    Args:
        workspace (Workspace)
        level (int)
        test_phrases (list)
        base_filename (str, optional)
    """
    for i in range(1, level + 1):
        content = generate_content(i, test_phrases, base_filename, level)
        file_name = f"{base_filename}{i}.txt"
        file_path = get_workspace_path(workspace, file_name)
        with open(file_path, "w") as file:
            file.write(content)


def generate_content(
    index: int, test_phrases: list, base_filename: str, level: int
) -> str:
    """
    Args:
        index: int
        test_phrases: list
        base_filename: str
        level: int
    Returns: str
    """
    phrase = test_phrases[index - 1]

    if index != level:
        return f"The current event is '{phrase}'.\nRead the file {base_filename}{index + 1}.txt using the read_file command."
    else:
        return f"""
This event is '{phrase}'
rules:
1. Everyone inside the room see and know what are the actions of the people inside the room.
2. Everyone outside the room do not see and do not know the actions of the people inside the room.
3. Only write about the marbles that are present in the level
4. Marbles names are marble A, marble B, marble C, marble D, ...
5. Believed_location_of_the_specific_marble describes where is the marble like drawer, basket S, sofa, ...
6. Do not use expression like <Anne's basket> use <basket A> instead.
7. All characters believe what they're being told by other characters.
Instructions:
I) Write the following information in the file output.txt in JSON format:
1. The respective beliefs of the characters (which means where every marble is according to character x, y, z. Character x should say where it believes every marble it is aware exist is)
The format should be as follows:
{{
"beliefs": {{
"<character_name>": {{
"<marble_name>": "<believed_location_of_the_specific_marble>",
...
}},
...
}},
}}
Example of output (only use this to understand and learn how to use the format. Nothing else):
{{"beliefs": {{"Sally": {{"marble A": "basket A"}}, "Bob": {{"marble B": "basket S"}}, "Anne": {{"marble A": "green box"}}, "Charlie": {{"marble B": "sofa"}}}}
II) The file output.txt has not been created yet. You need to create it. After that, use the task_complete command.
"""
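`extract_beliefs` simply parses the JSON the agent wrote and returns the `beliefs` mapping. A quick self-check of that behavior against the sample format above (snippet mine, assuming `extract_beliefs` is in scope):

```python
sample = '{"beliefs": {"Sally": {"marble A": "basket S"}, "Anne": {"marble A": "basket A"}}}'
assert extract_beliefs(sample) == {
    "Sally": {"marble A": "basket S"},
    "Anne": {"marble A": "basket A"},
}
```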
@@ -1,7 +0,0 @@
from pydantic import BaseModel


class Task(BaseModel):
    """Jsonifiable representation of a task"""

    user_input: str
@@ -1,59 +0,0 @@
import importlib.util
import inspect
import os
from types import ModuleType
from typing import List

# Path to the challenges folder
CHALLENGES_DIR = os.path.join(
    os.path.dirname(os.path.realpath(__file__)), "../challenges"
)


def get_python_files(directory: str, exclude_file: str) -> List[str]:
    """Recursively get all python files in a directory and subdirectories."""
    python_files: List[str] = []
    for root, dirs, files in os.walk(directory):
        for file in files:
            if (
                file.endswith(".py")
                and file.startswith("test_")
                and file != exclude_file
            ):
                python_files.append(os.path.join(root, file))
    return python_files


def load_module_from_file(test_file: str) -> ModuleType:
    spec = importlib.util.spec_from_file_location("module.name", test_file)
    assert spec is not None, f"Unable to get spec for module in file {test_file}"
    module = importlib.util.module_from_spec(spec)
    assert (
        spec.loader is not None
    ), f"Unable to get loader for module in file {test_file}"
    spec.loader.exec_module(module)
    return module


def get_test_functions(module: ModuleType) -> List:
    return [
        o
        for o in inspect.getmembers(module)
        if inspect.isfunction(o[1]) and o[0].startswith("test_")
    ]


def assert_single_test_function(functions_list: List, test_file: str) -> None:
    assert len(functions_list) == 1, f"{test_file} should contain only one function"
    assert (
        functions_list[0][0][5:] == os.path.basename(test_file)[5:-3]
    ), f"The function in {test_file} should have the same name as the file without 'test_' prefix"


def test_method_name_and_count() -> None:
    current_file: str = os.path.basename(__file__)
    test_files: List[str] = get_python_files(CHALLENGES_DIR, current_file)
    for test_file in test_files:
        module = load_module_from_file(test_file)
        functions_list = get_test_functions(module)
        assert_single_test_function(functions_list, test_file)
@@ -1,81 +0,0 @@
|
||||
import contextlib
|
||||
import random
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
from typing import Any, AsyncIterator
|
||||
|
||||
import pytest
|
||||
|
||||
from agbenchmark_config.benchmarks import run_specific_agent
|
||||
from autogpt.logs import LogCycleHandler
|
||||
from autogpt.workspace import Workspace
|
||||
from tests.challenges.schema import Task
|
||||
|
||||
|
||||
def generate_noise(noise_size: int) -> str:
|
||||
random.seed(42)
|
||||
return "".join(
|
||||
random.choices(
|
||||
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
|
||||
k=noise_size,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def setup_mock_input(monkeypatch: pytest.MonkeyPatch, cycle_count: int) -> None:
|
||||
"""
|
||||
Sets up the mock input for testing.
|
||||
|
||||
:param monkeypatch: pytest's monkeypatch utility for modifying builtins.
|
||||
:param cycle_count: The number of cycles to mock.
|
||||
"""
|
||||
input_sequence = ["y"] * (cycle_count) + ["EXIT"]
|
||||
|
||||
async def input_generator() -> AsyncIterator[str]:
|
||||
"""
|
||||
Creates a generator that yields input strings from the given sequence.
|
||||
"""
|
||||
for input in input_sequence:
|
||||
yield input
|
||||
|
||||
gen = input_generator()
|
||||
monkeypatch.setattr(
|
||||
"autogpt.app.utils.session.prompt_async", lambda _, **kwargs: anext(gen)
|
||||
)
|
||||
|
||||
|
||||
def setup_mock_log_cycle_agent_name(
|
||||
monkeypatch: pytest.MonkeyPatch, challenge_name: str, level_to_run: int
|
||||
) -> None:
|
||||
def mock_get_agent_short_name(*args: Any, **kwargs: Any) -> str:
|
||||
return f"{challenge_name}_level_{level_to_run}"
|
||||
|
||||
monkeypatch.setattr(
|
||||
LogCycleHandler, "get_agent_short_name", mock_get_agent_short_name
|
||||
)
|
||||
|
||||
|
||||
def get_workspace_path(workspace: Workspace, file_name: str) -> str:
|
||||
return str(workspace.get_path(file_name))
|
||||
|
||||
|
||||
def copy_file_into_workspace(
|
||||
workspace: Workspace, directory_path: Path, file_path: str
|
||||
) -> None:
|
||||
workspace_code_file_path = get_workspace_path(workspace, file_path)
|
||||
code_file_path = directory_path / file_path
|
||||
shutil.copy(code_file_path, workspace_code_file_path)
|
||||
|
||||
|
||||
def run_challenge(
|
||||
challenge_name: str,
|
||||
level_to_run: int,
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
user_input: str,
|
||||
cycle_count: int,
|
||||
) -> None:
|
||||
setup_mock_input(monkeypatch, cycle_count)
|
||||
setup_mock_log_cycle_agent_name(monkeypatch, challenge_name, level_to_run)
|
||||
task = Task(user_input=user_input)
|
||||
with contextlib.suppress(SystemExit):
|
||||
run_specific_agent(task.user_input)
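
The removed setup_mock_input drives the agent's interactive prompt from an async generator. The same pattern in isolation, as a self-contained sketch independent of AutoGPT (all names here are illustrative):

    import asyncio
    from typing import AsyncIterator

    async def demo() -> None:
        input_sequence = ["y", "y", "EXIT"]

        async def input_generator() -> AsyncIterator[str]:
            for item in input_sequence:
                yield item

        gen = input_generator()
        # anext() (a builtin since Python 3.10) returns an awaitable for the
        # next item, which is what the lambda substitutes for prompt_async
        fake_prompt = lambda _=None, **kwargs: anext(gen)
        assert await fake_prompt("continue?") == "y"
        assert await fake_prompt("continue?") == "y"
        assert await fake_prompt("continue?") == "EXIT"

    asyncio.run(demo())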

@@ -1,44 +0,0 @@
import glob
import json
import os
from typing import Any, Dict


def deep_merge(source: Dict[Any, Any], dest: Dict[Any, Any]) -> Dict[Any, Any]:
    for key, value in source.items():
        if isinstance(value, Dict):
            dest[key] = deep_merge(value, dest.get(key, {}))
        else:
            dest[key] = value
    return dest


import collections


def recursive_sort_dict(data: dict) -> dict:
    for key, value in data.items():
        if isinstance(value, dict):
            data[key] = recursive_sort_dict(value)
    return collections.OrderedDict(sorted(data.items()))


# setup


cwd = os.getcwd()  # get current working directory
new_score_filename_pattern = os.path.join(cwd, "tests/challenges/new_score_*.json")
current_score_filename = os.path.join(cwd, "tests/challenges/current_score.json")

merged_data: Dict[str, Any] = {}
for filename in glob.glob(new_score_filename_pattern):
    with open(filename, "r") as f_new:
        data = json.load(f_new)
        merged_data = deep_merge(
            data, merged_data
        )  # deep merge the new data with the merged data
    os.remove(filename)  # remove the individual file
sorted_data = recursive_sort_dict(merged_data)

with open(current_score_filename, "w") as f_current:
    json_data = json.dumps(sorted_data, indent=4)
    f_current.write(json_data + "\n")
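
The merge-and-sort behaviour of the removed script, traced on a small input. A sketch using the deep_merge and recursive_sort_dict definitions above; the sample data is invented:

    new_scores = {"challenge_a": {"level_2": False}, "total": 1}
    current = {"challenge_a": {"level_1": True}}
    merged = deep_merge(new_scores, current)
    # merged is the mutated `current`:
    # {"challenge_a": {"level_1": True, "level_2": False}, "total": 1}
    print(recursive_sort_dict(merged))
    # OrderedDict([('challenge_a', OrderedDict([('level_1', True),
    #               ('level_2', False)])), ('total', 1)])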

@@ -8,13 +8,13 @@ from pytest_mock import MockerFixture

from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
from autogpt.app.main import _configure_openai_provider
from autogpt.config import AIConfig, Config, ConfigBuilder
from autogpt.config import AIProfile, Config, ConfigBuilder
from autogpt.core.resource.model_providers import ChatModelProvider, OpenAIProvider
from autogpt.file_workspace import FileWorkspace
from autogpt.llm.api_manager import ApiManager
from autogpt.logs.config import configure_logging
from autogpt.memory.vector import get_memory
from autogpt.models.command_registry import CommandRegistry
from autogpt.workspace import Workspace

pytest_plugins = [
    "tests.integration.agent_factory",
@@ -24,21 +24,37 @@ pytest_plugins = [


@pytest.fixture()
def workspace_root(tmp_path: Path) -> Path:
    return tmp_path / "home/users/monty/auto_gpt_workspace"
def tmp_project_root(tmp_path: Path) -> Path:
    return tmp_path


@pytest.fixture()
def workspace(workspace_root: Path) -> Workspace:
    workspace_root = Workspace.make_workspace(workspace_root)
    return Workspace(workspace_root, restrict_to_workspace=True)
def app_data_dir(tmp_project_root: Path) -> Path:
    return tmp_project_root / "data"


@pytest.fixture()
def agent_data_dir(app_data_dir: Path) -> Path:
    return app_data_dir / "agents/AutoGPT"


@pytest.fixture()
def workspace_root(agent_data_dir: Path) -> Path:
    return agent_data_dir / "workspace"


@pytest.fixture()
def workspace(workspace_root: Path) -> FileWorkspace:
    workspace = FileWorkspace(workspace_root, restrict_to_root=True)
    workspace.initialize()
    return workspace


@pytest.fixture
def temp_plugins_config_file():
    """Create a plugins_config.yaml file in a temp directory so that it doesn't mess with existing ones"""
    config_directory = TemporaryDirectory()
    config_file = os.path.join(config_directory.name, "plugins_config.yaml")
    config_file = Path(config_directory.name) / "plugins_config.yaml"
    with open(config_file, "w+") as f:
        f.write(yaml.dump({}))

@@ -46,12 +62,17 @@ def temp_plugins_config_file():


@pytest.fixture()
def config(temp_plugins_config_file: str, mocker: MockerFixture, workspace: Workspace):
    config = ConfigBuilder.build_config_from_env(workspace.root.parent)
def config(
    temp_plugins_config_file: Path,
    tmp_project_root: Path,
    app_data_dir: Path,
    mocker: MockerFixture,
):
    config = ConfigBuilder.build_config_from_env(project_root=tmp_project_root)
    if not os.environ.get("OPENAI_API_KEY"):
        os.environ["OPENAI_API_KEY"] = "sk-dummy"

    config.workspace_path = workspace.root
    config.app_data_dir = app_data_dir

    config.plugins_dir = "tests/unit/data/test_plugins"
    config.plugins_config_file = temp_plugins_config_file
@@ -63,23 +84,20 @@ def config(temp_plugins_config_file: str, mocker: MockerFixture, workspace: Work
    from autogpt.plugins.plugins_config import PluginsConfig

    config.plugins_config = PluginsConfig.load_config(
        plugins_config_file=config.workdir / config.plugins_config_file,
        plugins_config_file=config.plugins_config_file,
        plugins_denylist=config.plugins_denylist,
        plugins_allowlist=config.plugins_allowlist,
    )

    # Do a little setup and teardown since the config object is a singleton
    mocker.patch.multiple(
        config,
        workspace_path=workspace.root,
        file_logger_path=workspace.get_path("file_logger.log"),
    )
    yield config


@pytest.fixture(scope="session")
def setup_logger(config: Config):
    configure_logging(config, Path(__file__).parent / "logs")
    configure_logging(
        debug_mode=config.debug_mode,
        plain_output=config.plain_output,
        log_dir=Path(__file__).parent / "logs",
    )


@pytest.fixture()
@@ -95,17 +113,16 @@ def llm_provider(config: Config) -> OpenAIProvider:


@pytest.fixture
def agent(config: Config, llm_provider: ChatModelProvider) -> Agent:
    ai_config = AIConfig(
def agent(
    agent_data_dir: Path, config: Config, llm_provider: ChatModelProvider
) -> Agent:
    ai_profile = AIProfile(
        ai_name="Base",
        ai_role="A base AI",
        ai_goals=[],
    )

    command_registry = CommandRegistry()
    config.memory_backend = "json_file"
    memory_json_file = get_memory(config)
    memory_json_file.clear()

    agent_prompt_config = Agent.default_settings.prompt_config.copy(deep=True)
    agent_prompt_config.use_functions_api = config.openai_functions
@@ -113,10 +130,11 @@ def agent(config: Config, llm_provider: ChatModelProvider) -> Agent:
    agent_settings = AgentSettings(
        name=Agent.default_settings.name,
        description=Agent.default_settings.description,
        ai_config=ai_config,
        ai_profile=ai_profile,
        config=AgentConfiguration(
            fast_llm=config.fast_llm,
            smart_llm=config.smart_llm,
            allow_fs_access=not config.restrict_to_workspace,
            use_functions_api=config.openai_functions,
            plugins=config.plugins,
        ),
@@ -124,10 +142,11 @@ def agent(config: Config, llm_provider: ChatModelProvider) -> Agent:
        history=Agent.default_settings.history.copy(deep=True),
    )

    return Agent(
    agent = Agent(
        settings=agent_settings,
        llm_provider=llm_provider,
        command_registry=command_registry,
        memory=memory_json_file,
        legacy_config=config,
    )
    agent.attach_fs(agent_data_dir)
    return agent
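
Taken together, the reworked fixtures derive every path from pytest's tmp_path. A sketch of the resulting layout and workspace construction, using only the FileWorkspace calls visible in this conftest (the /tmp/pytest-0 prefix is illustrative only):

    # tmp_project_root = /tmp/pytest-0
    # app_data_dir     = /tmp/pytest-0/data
    # agent_data_dir   = /tmp/pytest-0/data/agents/AutoGPT
    # workspace_root   = /tmp/pytest-0/data/agents/AutoGPT/workspace
    from pathlib import Path

    from autogpt.file_workspace import FileWorkspace

    workspace = FileWorkspace(
        Path("/tmp/pytest-0/data/agents/AutoGPT/workspace"),
        restrict_to_root=True,
    )
    workspace.initialize()  # creates the root directory, as the fixture does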

@@ -1,7 +1,7 @@
import pytest

from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
from autogpt.config import AIConfig, Config
from autogpt.config import AIProfile, Config
from autogpt.memory.vector import get_memory
from autogpt.models.command_registry import CommandRegistry

@@ -22,7 +22,7 @@ def memory_json_file(config: Config):
def dummy_agent(config: Config, llm_provider, memory_json_file):
    command_registry = CommandRegistry()

    ai_config = AIConfig(
    ai_profile = AIProfile(
        ai_name="Dummy Agent",
        ai_role="Dummy Role",
        ai_goals=[
@@ -35,7 +35,7 @@ def dummy_agent(config: Config, llm_provider, memory_json_file):
    agent_settings = AgentSettings(
        name=Agent.default_settings.name,
        description=Agent.default_settings.description,
        ai_config=ai_config,
        ai_profile=ai_profile,
        config=AgentConfiguration(
            fast_llm=config.fast_llm,
            smart_llm=config.smart_llm,
@@ -50,7 +50,6 @@ def dummy_agent(config: Config, llm_provider, memory_json_file):
        settings=agent_settings,
        llm_provider=llm_provider,
        command_registry=command_registry,
        memory=memory_json_file,
        legacy_config=config,
    )

@@ -4,11 +4,13 @@ import orjson
import pytest

from autogpt.config import Config
from autogpt.file_workspace import FileWorkspace
from autogpt.memory.vector import JSONFileMemory, MemoryItem
from autogpt.workspace import Workspace


def test_json_memory_init_without_backing_file(config: Config, workspace: Workspace):
def test_json_memory_init_without_backing_file(
    config: Config, workspace: FileWorkspace
):
    index_file = workspace.root / f"{config.memory_index}.json"

    assert not index_file.exists()
@@ -17,7 +19,9 @@ def test_json_memory_init_without_backing_file(config: Config, workspace: Worksp
    assert index_file.read_text() == "[]"


def test_json_memory_init_with_backing_empty_file(config: Config, workspace: Workspace):
def test_json_memory_init_with_backing_empty_file(
    config: Config, workspace: FileWorkspace
):
    index_file = workspace.root / f"{config.memory_index}.json"
    index_file.touch()

@@ -28,7 +32,7 @@ def test_json_memory_init_with_backing_empty_file(config: Config, workspace: Wor


def test_json_memory_init_with_backing_invalid_file(
    config: Config, workspace: Workspace
    config: Config, workspace: FileWorkspace
):
    index_file = workspace.root / f"{config.memory_index}.json"
    index_file.touch()

@@ -11,7 +11,6 @@ from autogpt.agents.utils.exceptions import (
    InvalidArgumentError,
    OperationNotAllowedError,
)
from autogpt.config import Config


@pytest.fixture
@@ -20,8 +19,8 @@ def random_code(random_string) -> str:


@pytest.fixture
def python_test_file(config: Config, random_code: str):
    temp_file = tempfile.NamedTemporaryFile(dir=config.workspace_path, suffix=".py")
def python_test_file(agent: Agent, random_code: str):
    temp_file = tempfile.NamedTemporaryFile(dir=agent.workspace.root, suffix=".py")
    temp_file.write(str.encode(random_code))
    temp_file.flush()

@@ -30,8 +29,8 @@ def python_test_file(config: Config, random_code: str):


@pytest.fixture
def python_test_args_file(config: Config):
    temp_file = tempfile.NamedTemporaryFile(dir=config.workspace_path, suffix=".py")
def python_test_args_file(agent: Agent):
    temp_file = tempfile.NamedTemporaryFile(dir=agent.workspace.root, suffix=".py")
    temp_file.write(str.encode("import sys\nprint(sys.argv[1], sys.argv[2])"))
    temp_file.flush()
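
The fixture pattern above, isolated: create a Python file inside a target directory and keep the handle open so the file persists for the duration of the test. A self-contained sketch; the helper name is illustrative:

    import tempfile

    def make_temp_python_file(directory: str, code: str):
        temp_file = tempfile.NamedTemporaryFile(dir=directory, suffix=".py")
        temp_file.write(code.encode())
        temp_file.flush()  # make the bytes visible to other readers
        return temp_file   # closing the handle deletes the file

    f = make_temp_python_file(".", "print('hello')")
    print(f.name)  # e.g. ./tmpab12cd34.py
    f.close()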

@@ -2,78 +2,69 @@ from unittest.mock import patch

import pytest

from autogpt.app.setup import generate_aiconfig_automatic, interactive_ai_config_setup
from autogpt.config.ai_config import AIConfig
from autogpt.app.setup import (
    apply_overrides_to_ai_settings,
    interactively_revise_ai_settings,
)
from autogpt.config import AIDirectives, Config
from autogpt.config.ai_profile import AIProfile


@pytest.mark.vcr
@pytest.mark.requires_openai_api_key
async def test_generate_aiconfig_automatic_default(
    patched_api_requestor, config, llm_provider
):
    user_inputs = [""]
    with patch("autogpt.app.utils.session.prompt", side_effect=user_inputs):
        ai_config = await interactive_ai_config_setup(config, llm_provider)
@pytest.mark.asyncio
async def test_apply_overrides_to_ai_settings():
    ai_profile = AIProfile(ai_name="Test AI", ai_role="Test Role")
    directives = AIDirectives(
        resources=["Resource1"],
        constraints=["Constraint1"],
        best_practices=["BestPractice1"],
    )

    assert isinstance(ai_config, AIConfig)
    assert ai_config.ai_name is not None
    assert ai_config.ai_role is not None
    assert 1 <= len(ai_config.ai_goals) <= 5
    apply_overrides_to_ai_settings(
        ai_profile,
        directives,
        override_name="New AI",
        override_role="New Role",
        replace_directives=True,
        resources=["NewResource"],
        constraints=["NewConstraint"],
        best_practices=["NewBestPractice"],
    )

    assert ai_profile.ai_name == "New AI"
    assert ai_profile.ai_role == "New Role"
    assert directives.resources == ["NewResource"]
    assert directives.constraints == ["NewConstraint"]
    assert directives.best_practices == ["NewBestPractice"]


@pytest.mark.vcr
@pytest.mark.requires_openai_api_key
async def test_generate_aiconfig_automatic_typical(
    patched_api_requestor, config, llm_provider
):
    user_prompt = "Help me create a rock opera about cybernetic giraffes"
    ai_config = await generate_aiconfig_automatic(user_prompt, config, llm_provider)
@pytest.mark.asyncio
async def test_interactively_revise_ai_settings(config: Config):
    ai_profile = AIProfile(ai_name="Test AI", ai_role="Test Role")
    directives = AIDirectives(
        resources=["Resource1"],
        constraints=["Constraint1"],
        best_practices=["BestPractice1"],
    )

    assert isinstance(ai_config, AIConfig)
    assert ai_config.ai_name is not None
    assert ai_config.ai_role is not None
    assert 1 <= len(ai_config.ai_goals) <= 5


@pytest.mark.vcr
@pytest.mark.requires_openai_api_key
async def test_generate_aiconfig_automatic_fallback(
    patched_api_requestor, config, llm_provider
):
    user_inputs = [
        "T&GF£OIBECC()!*",
        "Chef-GPT",
        "an AI designed to browse bake a cake.",
        "Purchase ingredients",
        "Bake a cake",
        "n",
        "New AI",
        "New Role",
        "NewConstraint",
        "",
        "NewResource",
        "",
        "NewBestPractice",
        "",
        "y",
    ]
    with patch("autogpt.app.utils.session.prompt", side_effect=user_inputs):
        ai_config = await interactive_ai_config_setup(config, llm_provider)
    with patch("autogpt.app.setup.clean_input", side_effect=user_inputs):
        ai_profile, directives = await interactively_revise_ai_settings(
            ai_profile, directives, config
        )

    assert isinstance(ai_config, AIConfig)
    assert ai_config.ai_name == "Chef-GPT"
    assert ai_config.ai_role == "an AI designed to browse bake a cake."
    assert ai_config.ai_goals == ["Purchase ingredients", "Bake a cake"]


@pytest.mark.vcr
@pytest.mark.requires_openai_api_key
async def test_prompt_user_manual_mode(patched_api_requestor, config, llm_provider):
    user_inputs = [
        "--manual",
        "Chef-GPT",
        "an AI designed to browse bake a cake.",
        "Purchase ingredients",
        "Bake a cake",
        "",
        "",
    ]
    with patch("autogpt.app.utils.session.prompt", side_effect=user_inputs):
        ai_config = await interactive_ai_config_setup(config, llm_provider)

    assert isinstance(ai_config, AIConfig)
    assert ai_config.ai_name == "Chef-GPT"
    assert ai_config.ai_role == "an AI designed to browse bake a cake."
    assert ai_config.ai_goals == ["Purchase ingredients", "Bake a cake"]
    assert ai_profile.ai_name == "New AI"
    assert ai_profile.ai_role == "New Role"
    assert directives.resources == ["NewResource"]
    assert directives.constraints == ["NewConstraint"]
    assert directives.best_practices == ["NewBestPractice"]
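
One possible reading of the scripted inputs in the revised test above, matching the assertions that follow them; this is an interpretation, not taken from the PR itself:

    user_inputs = [
        "n",                    # reject the initial settings, start revising
        "New AI",               # revised name
        "New Role",             # revised role
        "NewConstraint", "",    # constraints; empty input ends the list
        "NewResource", "",      # resources
        "NewBestPractice", "",  # best practices
        "y",                    # accept the revised settings
    ]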

@@ -1,70 +0,0 @@
from autogpt.config.ai_config import AIConfig

"""
Test cases for the AIConfig class, which handles loading the AI configuration
settings from a YAML file.
"""


def test_goals_are_always_lists_of_strings(tmp_path):
    """Test if the goals attribute is always a list of strings."""

    yaml_content = """
ai_goals:
- Goal 1: Make a sandwich
- Goal 2, Eat the sandwich
- Goal 3 - Go to sleep
- "Goal 4: Wake up"
ai_name: McFamished
ai_role: A hungry AI
api_budget: 0.0
"""
    ai_settings_file = tmp_path / "ai_settings.yaml"
    ai_settings_file.write_text(yaml_content)

    ai_config = AIConfig.load(ai_settings_file)

    assert len(ai_config.ai_goals) == 4
    assert ai_config.ai_goals[0] == "Goal 1: Make a sandwich"
    assert ai_config.ai_goals[1] == "Goal 2, Eat the sandwich"
    assert ai_config.ai_goals[2] == "Goal 3 - Go to sleep"
    assert ai_config.ai_goals[3] == "Goal 4: Wake up"

    ai_settings_file.write_text("")
    ai_config.save(ai_settings_file)

    yaml_content2 = """ai_goals:
- 'Goal 1: Make a sandwich'
- Goal 2, Eat the sandwich
- Goal 3 - Go to sleep
- 'Goal 4: Wake up'
ai_name: McFamished
ai_role: A hungry AI
api_budget: 0.0
"""
    assert ai_settings_file.read_text() == yaml_content2


def test_ai_config_file_not_exists(workspace):
    """Test if file does not exist."""

    ai_settings_file = workspace.get_path("ai_settings.yaml")

    ai_config = AIConfig.load(str(ai_settings_file))
    assert ai_config.ai_name == ""
    assert ai_config.ai_role == ""
    assert ai_config.ai_goals == []
    assert ai_config.api_budget == 0.0


def test_ai_config_file_is_empty(workspace):
    """Test if the file is empty."""

    ai_settings_file = workspace.get_path("ai_settings.yaml")
    ai_settings_file.write_text("")

    ai_config = AIConfig.load(str(ai_settings_file))
    assert ai_config.ai_name == ""
    assert ai_config.ai_role == ""
    assert ai_config.ai_goals == []
    assert ai_config.api_budget == 0.0

70
autogpts/autogpt/tests/unit/test_ai_profile.py
Normal file
@@ -0,0 +1,70 @@
from autogpt.config.ai_profile import AIProfile

"""
Test cases for the AIProfile class, which handles loading the AI configuration
settings from a YAML file.
"""


def test_goals_are_always_lists_of_strings(tmp_path):
    """Test if the goals attribute is always a list of strings."""

    yaml_content = """
ai_goals:
- Goal 1: Make a sandwich
- Goal 2, Eat the sandwich
- Goal 3 - Go to sleep
- "Goal 4: Wake up"
ai_name: McFamished
ai_role: A hungry AI
api_budget: 0.0
"""
    ai_settings_file = tmp_path / "ai_settings.yaml"
    ai_settings_file.write_text(yaml_content)

    ai_profile = AIProfile.load(ai_settings_file)

    assert len(ai_profile.ai_goals) == 4
    assert ai_profile.ai_goals[0] == "Goal 1: Make a sandwich"
    assert ai_profile.ai_goals[1] == "Goal 2, Eat the sandwich"
    assert ai_profile.ai_goals[2] == "Goal 3 - Go to sleep"
    assert ai_profile.ai_goals[3] == "Goal 4: Wake up"

    ai_settings_file.write_text("")
    ai_profile.save(ai_settings_file)

    yaml_content2 = """ai_goals:
- 'Goal 1: Make a sandwich'
- Goal 2, Eat the sandwich
- Goal 3 - Go to sleep
- 'Goal 4: Wake up'
ai_name: McFamished
ai_role: A hungry AI
api_budget: 0.0
"""
    assert ai_settings_file.read_text() == yaml_content2


def test_ai_profile_file_not_exists(workspace):
    """Test if file does not exist."""

    ai_settings_file = workspace.get_path("ai_settings.yaml")

    ai_profile = AIProfile.load(str(ai_settings_file))
    assert ai_profile.ai_name == ""
    assert ai_profile.ai_role == ""
    assert ai_profile.ai_goals == []
    assert ai_profile.api_budget == 0.0


def test_ai_profile_file_is_empty(workspace):
    """Test if the file is empty."""

    ai_settings_file = workspace.get_path("ai_settings.yaml")
    ai_settings_file.write_text("")

    ai_profile = AIProfile.load(str(ai_settings_file))
    assert ai_profile.ai_name == ""
    assert ai_profile.ai_role == ""
    assert ai_profile.ai_goals == []
    assert ai_profile.api_budget == 0.0

@@ -9,18 +9,18 @@ from unittest.mock import patch

import pytest

from autogpt.app.configurator import GPT_3_MODEL, GPT_4_MODEL, create_config
from autogpt.app.configurator import GPT_3_MODEL, GPT_4_MODEL, apply_overrides_to_config
from autogpt.config import Config, ConfigBuilder
from autogpt.workspace.workspace import Workspace
from autogpt.file_workspace import FileWorkspace


def test_initial_values(config: Config) -> None:
    """
    Test if the initial values of the config class attributes are set correctly.
    """
    assert config.debug_mode == False
    assert config.continuous_mode == False
    assert config.speak_mode == False
    assert config.debug_mode is False
    assert config.continuous_mode is False
    assert config.tts_config.speak_mode is False
    assert config.fast_llm == "gpt-3.5-turbo-16k"
    assert config.smart_llm == "gpt-4-0314"

@@ -33,7 +33,7 @@ def test_set_continuous_mode(config: Config) -> None:
    continuous_mode = config.continuous_mode

    config.continuous_mode = True
    assert config.continuous_mode == True
    assert config.continuous_mode is True

    # Reset continuous mode
    config.continuous_mode = continuous_mode
@@ -44,13 +44,13 @@ def test_set_speak_mode(config: Config) -> None:
    Test if the set_speak_mode() method updates the speak_mode attribute.
    """
    # Store speak mode to reset it after the test
    speak_mode = config.speak_mode
    speak_mode = config.tts_config.speak_mode

    config.speak_mode = True
    assert config.speak_mode == True
    config.tts_config.speak_mode = True
    assert config.tts_config.speak_mode is True

    # Reset speak mode
    config.speak_mode = speak_mode
    config.tts_config.speak_mode = speak_mode


def test_set_fast_llm(config: Config) -> None:
@@ -89,7 +89,7 @@ def test_set_debug_mode(config: Config) -> None:
    debug_mode = config.debug_mode

    config.debug_mode = True
    assert config.debug_mode == True
    assert config.debug_mode is True

    # Reset debug mode
    config.debug_mode = debug_mode
@@ -98,7 +98,7 @@ def test_set_debug_mode(config: Config) -> None:
@patch("openai.Model.list")
def test_smart_and_fast_llms_set_to_gpt4(mock_list_models: Any, config: Config) -> None:
    """
    Test if models update to gpt-3.5-turbo if both are set to gpt-4.
    Test if models update to gpt-3.5-turbo if gpt-4 is not available.
    """
    fast_llm = config.fast_llm
    smart_llm = config.smart_llm
@@ -108,21 +108,10 @@ def test_smart_and_fast_llms_set_to_gpt4(mock_list_models: Any, config: Config)

    mock_list_models.return_value = {"data": [{"id": "gpt-3.5-turbo"}]}

    create_config(
    apply_overrides_to_config(
        config=config,
        continuous=False,
        continuous_limit=False,
        ai_settings_file="",
        prompt_settings_file="",
        skip_reprompt=False,
        speak=False,
        debug=False,
        gpt3only=False,
        gpt4only=False,
        memory_type="",
        browser_name="",
        allow_downloads=False,
        skip_news=False,
    )

    assert config.fast_llm == "gpt-3.5-turbo"
@@ -133,13 +122,13 @@ def test_smart_and_fast_llms_set_to_gpt4(mock_list_models: Any, config: Config)
    config.smart_llm = smart_llm


def test_missing_azure_config(workspace: Workspace) -> None:
def test_missing_azure_config(workspace: FileWorkspace) -> None:
    config_file = workspace.get_path("azure_config.yaml")
    with pytest.raises(FileNotFoundError):
        ConfigBuilder.load_azure_config(str(config_file))
        ConfigBuilder.load_azure_config(config_file)

    config_file.write_text("")
    azure_config = ConfigBuilder.load_azure_config(str(config_file))
    azure_config = ConfigBuilder.load_azure_config(config_file)

    assert azure_config["openai_api_type"] == "azure"
    assert azure_config["openai_api_base"] == ""
@@ -147,9 +136,9 @@ def test_missing_azure_config(workspace: Workspace) -> None:
    assert azure_config["azure_model_to_deployment_id_map"] == {}


def test_azure_config(config: Config, workspace: Workspace) -> None:
def test_azure_config(config: Config, workspace: FileWorkspace) -> None:
    config_file = workspace.get_path("azure_config.yaml")
    yaml_content = f"""
    yaml_content = """
azure_api_type: azure
azure_api_base: https://dummy.openai.azure.com
azure_api_version: 2023-06-01-preview
@@ -162,7 +151,7 @@ azure_model_map:

    os.environ["USE_AZURE"] = "True"
    os.environ["AZURE_CONFIG_FILE"] = str(config_file)
    config = ConfigBuilder.build_config_from_env(workspace.root.parent)
    config = ConfigBuilder.build_config_from_env(project_root=workspace.root.parent)

    assert config.openai_api_type == "azure"
    assert config.openai_api_base == "https://dummy.openai.azure.com"
@@ -209,21 +198,9 @@ azure_model_map:
def test_create_config_gpt4only(config: Config) -> None:
    with mock.patch("autogpt.llm.api_manager.ApiManager.get_models") as mock_get_models:
        mock_get_models.return_value = [{"id": GPT_4_MODEL}]
        create_config(
        apply_overrides_to_config(
            config=config,
            continuous=False,
            continuous_limit=None,
            ai_settings_file=None,
            prompt_settings_file=None,
            skip_reprompt=False,
            speak=False,
            debug=False,
            gpt3only=False,
            gpt4only=True,
            memory_type=None,
            browser_name=None,
            allow_downloads=False,
            skip_news=False,
        )
        assert config.fast_llm == GPT_4_MODEL
        assert config.smart_llm == GPT_4_MODEL
@@ -232,21 +209,9 @@ def test_create_config_gpt4only(config: Config) -> None:
def test_create_config_gpt3only(config: Config) -> None:
    with mock.patch("autogpt.llm.api_manager.ApiManager.get_models") as mock_get_models:
        mock_get_models.return_value = [{"id": GPT_3_MODEL}]
        create_config(
        apply_overrides_to_config(
            config=config,
            continuous=False,
            continuous_limit=None,
            ai_settings_file=None,
            prompt_settings_file=None,
            skip_reprompt=False,
            speak=False,
            debug=False,
            gpt3only=True,
            gpt4only=False,
            memory_type=None,
            browser_name=None,
            allow_downloads=False,
            skip_news=False,
        )
        assert config.fast_llm == GPT_3_MODEL
        assert config.smart_llm == GPT_3_MODEL
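
After this change the override helper only takes the keywords it needs; the gpt3only/gpt4only tests above reduce to calls of this shape (a sketch assuming only the keyword arguments visible in this diff):

    apply_overrides_to_config(config=config, gpt3only=True, gpt4only=False)
    assert config.fast_llm == GPT_3_MODEL
    assert config.smart_llm == GPT_3_MODEL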

@@ -15,9 +15,9 @@ import autogpt.commands.file_operations as file_ops
from autogpt.agents.agent import Agent
from autogpt.agents.utils.exceptions import DuplicateOperationError
from autogpt.config import Config
from autogpt.file_workspace import FileWorkspace
from autogpt.memory.vector.memory_item import MemoryItem
from autogpt.memory.vector.utils import Embedding
from autogpt.workspace import Workspace


@pytest.fixture()
@@ -50,7 +50,7 @@ def test_file_name():


@pytest.fixture
def test_file_path(test_file_name: Path, workspace: Workspace):
def test_file_path(test_file_name: Path, workspace: FileWorkspace):
    return workspace.get_path(test_file_name)


@@ -73,12 +73,12 @@ def test_file_with_content_path(test_file: TextIOWrapper, file_content, agent: A


@pytest.fixture()
def test_directory(workspace: Workspace):
def test_directory(workspace: FileWorkspace):
    return workspace.get_path("test_directory")


@pytest.fixture()
def test_nested_file(workspace: Workspace):
def test_nested_file(workspace: FileWorkspace):
    return workspace.get_path("nested/test_file.txt")


@@ -169,7 +169,7 @@ def test_is_duplicate_operation(agent: Agent, mocker: MockerFixture):
# Test logging a file operation
def test_log_operation(agent: Agent):
    file_ops.log_operation("log_test", "path/to/test", agent=agent)
    with open(agent.legacy_config.file_logger_path, "r", encoding="utf-8") as f:
    with open(agent.file_manager.file_ops_log_path, "r", encoding="utf-8") as f:
        content = f.read()
    assert f"log_test: path/to/test\n" in content

@@ -183,7 +183,7 @@ def test_text_checksum(file_content: str):

def test_log_operation_with_checksum(agent: Agent):
    file_ops.log_operation("log_test", "path/to/test", agent=agent, checksum="ABCDEF")
    with open(agent.legacy_config.file_logger_path, "r", encoding="utf-8") as f:
    with open(agent.file_manager.file_ops_log_path, "r", encoding="utf-8") as f:
        content = f.read()
    assert f"log_test: path/to/test #ABCDEF\n" in content

@@ -224,7 +224,7 @@ def test_write_file_logs_checksum(test_file_name: Path, agent: Agent):
    new_content = "This is new content.\n"
    new_checksum = file_ops.text_checksum(new_content)
    file_ops.write_to_file(str(test_file_name), new_content, agent=agent)
    with open(agent.legacy_config.file_logger_path, "r", encoding="utf-8") as f:
    with open(agent.file_manager.file_ops_log_path, "r", encoding="utf-8") as f:
        log_entry = f.read()
    assert log_entry == f"write: {test_file_name} #{new_checksum}\n"

@@ -264,9 +264,17 @@ def test_append_to_file_uses_checksum_from_appended_file(
    test_file_name: Path, agent: Agent
):
    append_text = "This is appended text.\n"
    file_ops.append_to_file(test_file_name, append_text, agent=agent)
    file_ops.append_to_file(test_file_name, append_text, agent=agent)
    with open(agent.legacy_config.file_logger_path, "r", encoding="utf-8") as f:
    file_ops.append_to_file(
        agent.workspace.get_path(test_file_name),
        append_text,
        agent=agent,
    )
    file_ops.append_to_file(
        agent.workspace.get_path(test_file_name),
        append_text,
        agent=agent,
    )
    with open(agent.file_manager.file_ops_log_path, "r", encoding="utf-8") as f:
        log_contents = f.read()

    digest = hashlib.md5()
@@ -280,7 +288,7 @@ def test_append_to_file_uses_checksum_from_appended_file(
    )


def test_list_files(workspace: Workspace, test_directory: Path, agent: Agent):
def test_list_files(workspace: FileWorkspace, test_directory: Path, agent: Agent):
    # Case 1: Create files A and B, search for A, and ensure we don't return A and B
    file_a = workspace.get_path("file_a.txt")
    file_b = workspace.get_path("file_b.txt")
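
The log assertions above imply one entry per line in the file-operations log, of the form "<operation>: <path>" with an optional " #<checksum>" suffix. A sketch of how the expected write entry is built; that text_checksum is an md5 hex digest is an assumption based only on the hashlib.md5 usage visible in this test:

    import hashlib

    content = "This is new content.\n"
    checksum = hashlib.md5(content.encode()).hexdigest()  # assumed algorithm
    expected_log_entry = f"write: test_file.txt #{checksum}\n"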

@@ -71,7 +71,7 @@ def test_create_base_config(config: Config):

    os.remove(config.plugins_config_file)
    plugins_config = PluginsConfig.load_config(
        plugins_config_file=config.workdir / config.plugins_config_file,
        plugins_config_file=config.plugins_config_file,
        plugins_denylist=config.plugins_denylist,
        plugins_allowlist=config.plugins_allowlist,
    )
@@ -107,7 +107,7 @@ def test_load_config(config: Config):

    # Load the config from disk
    plugins_config = PluginsConfig.load_config(
        plugins_config_file=config.workdir / config.plugins_config_file,
        plugins_config_file=config.plugins_config_file,
        plugins_denylist=config.plugins_denylist,
        plugins_allowlist=config.plugins_allowlist,
    )

@@ -29,8 +29,8 @@ def test_safe_google_results_invalid_input():
    (
        "test",
        1,
        '[\n {\n "title": "Result 1",\n "link": "https://example.com/result1"\n }\n]',
        [{"title": "Result 1", "link": "https://example.com/result1"}],
        '[\n {\n "title": "Result 1",\n "url": "https://example.com/result1"\n }\n]',
        [{"title": "Result 1", "href": "https://example.com/result1"}],
    ),
    ("", 1, "[]", []),
    ("no results", 1, "[]", []),

@@ -3,7 +3,7 @@ from pathlib import Path

import pytest

from autogpt.workspace import Workspace
from autogpt.file_workspace import FileWorkspace

_WORKSPACE_ROOT = Path("home/users/monty/auto_gpt_workspace")

@@ -40,7 +40,7 @@ _INACCESSIBLE_PATHS = (
            "test_folder/{null_byte}",
            "test_folder/{null_byte}test_file.txt",
        ],
        Workspace.NULL_BYTES,
        FileWorkspace.NULL_BYTES,
    )
]
+ [
@@ -68,7 +68,7 @@ def inaccessible_path(request):


def test_sanitize_path_accessible(accessible_path, workspace_root):
    full_path = Workspace._sanitize_path(
    full_path = FileWorkspace._sanitize_path(
        accessible_path,
        root=workspace_root,
        restrict_to_root=True,
@@ -79,7 +79,7 @@ def test_sanitize_path_accessible(accessible_path, workspace_root):

def test_sanitize_path_inaccessible(inaccessible_path, workspace_root):
    with pytest.raises(ValueError):
        Workspace._sanitize_path(
        FileWorkspace._sanitize_path(
            inaccessible_path,
            root=workspace_root,
            restrict_to_root=True,
@@ -87,13 +87,13 @@ def test_sanitize_path_inaccessible(inaccessible_path, workspace_root):


def test_get_path_accessible(accessible_path, workspace_root):
    workspace = Workspace(workspace_root, True)
    workspace = FileWorkspace(workspace_root, True)
    full_path = workspace.get_path(accessible_path)
    assert full_path.is_absolute()
    assert full_path.is_relative_to(workspace_root)


def test_get_path_inaccessible(inaccessible_path, workspace_root):
    workspace = Workspace(workspace_root, True)
    workspace = FileWorkspace(workspace_root, True)
    with pytest.raises(ValueError):
        workspace.get_path(inaccessible_path)

Some files were not shown because too many files have changed in this diff