mirror of
https://github.com/aljazceru/Auto-GPT.git
synced 2026-01-09 09:14:19 +01:00
Release v0.4.4 (#4906)
This commit is contained in:
@@ -58,15 +58,19 @@ OPENAI_API_KEY=your-openai-api-key
|
||||
## USE_AZURE - Use Azure OpenAI or not (Default: False)
|
||||
# USE_AZURE=False
|
||||
|
||||
## AZURE_CONFIG_FILE - The path to the azure.yaml file (Default: azure.yaml)
|
||||
# AZURE_CONFIG_FILE=azure.yaml
|
||||
|
||||
|
||||
################################################################################
|
||||
### LLM MODELS
|
||||
################################################################################
|
||||
|
||||
## SMART_LLM_MODEL - Smart language model (Default: gpt-3.5-turbo)
|
||||
# SMART_LLM_MODEL=gpt-3.5-turbo
|
||||
## SMART_LLM - Smart language model (Default: gpt-4)
|
||||
# SMART_LLM=gpt-4
|
||||
|
||||
## FAST_LLM_MODEL - Fast language model (Default: gpt-3.5-turbo)
|
||||
# FAST_LLM_MODEL=gpt-3.5-turbo
|
||||
## FAST_LLM - Fast language model (Default: gpt-3.5-turbo)
|
||||
# FAST_LLM=gpt-3.5-turbo
|
||||
|
||||
## EMBEDDING_MODEL - Model to use for creating embeddings
|
||||
# EMBEDDING_MODEL=text-embedding-ada-002
|
||||
|
||||
3
.github/CODEOWNERS
vendored
3
.github/CODEOWNERS
vendored
@@ -1 +1,2 @@
|
||||
.github/workflows/ @Significant-Gravitas/Auto-GPT-Source
|
||||
.github/workflows/ @Significant-Gravitas/maintainers
|
||||
autogpt/core @collijk
|
||||
|
||||
6
.github/ISSUE_TEMPLATE/1.bug.yml
vendored
6
.github/ISSUE_TEMPLATE/1.bug.yml
vendored
@@ -140,8 +140,8 @@ body:
|
||||
⚠️The following is OPTIONAL, please keep in mind that the log files may contain personal information such as credentials.⚠️
|
||||
|
||||
"The log files are located in the folder 'logs' inside the main auto-gpt folder."
|
||||
|
||||
- type: input
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Upload Activity Log Content
|
||||
description: |
|
||||
@@ -152,7 +152,7 @@ body:
|
||||
validations:
|
||||
required: false
|
||||
|
||||
- type: input
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Upload Error Log Content
|
||||
description: |
|
||||
|
||||
13
.github/workflows/ci.yml
vendored
13
.github/workflows/ci.yml
vendored
@@ -108,22 +108,27 @@ jobs:
|
||||
if: ${{ startsWith(github.event_name, 'pull_request') }}
|
||||
run: |
|
||||
cassette_branch="${{ github.event.pull_request.user.login }}-${{ github.event.pull_request.head.ref }}"
|
||||
cassette_base_branch="${{ github.event.pull_request.base.ref }}"
|
||||
cd tests/Auto-GPT-test-cassettes
|
||||
|
||||
if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then
|
||||
cassette_base_branch="master"
|
||||
fi
|
||||
|
||||
if git ls-remote --exit-code --heads origin $cassette_branch ; then
|
||||
git fetch origin $cassette_branch
|
||||
git fetch origin ${{ github.event.pull_request.base.ref }}
|
||||
git fetch origin $cassette_base_branch
|
||||
|
||||
git checkout $cassette_branch
|
||||
|
||||
# Pick non-conflicting cassette updates from the base branch
|
||||
git merge --no-commit --strategy-option=ours origin/${{ github.event.pull_request.base.ref }}
|
||||
git merge --no-commit --strategy-option=ours origin/$cassette_base_branch
|
||||
echo "Using cassettes from mirror branch '$cassette_branch'," \
|
||||
"synced to upstream branch '${{ github.event.pull_request.base.ref }}'."
|
||||
"synced to upstream branch '$cassette_base_branch'."
|
||||
else
|
||||
git checkout -b $cassette_branch
|
||||
echo "Branch '$cassette_branch' does not exist in cassette submodule." \
|
||||
"Using cassettes from '${{ github.event.pull_request.base.ref }}'."
|
||||
"Using cassettes from '$cassette_base_branch'."
|
||||
fi
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
|
||||
@@ -36,7 +36,7 @@ repos:
|
||||
types: [ python ]
|
||||
- id: pytest-check
|
||||
name: pytest-check
|
||||
entry: pytest --cov=autogpt --without-integration --without-slow-integration
|
||||
entry: pytest --cov=autogpt tests/unit
|
||||
language: system
|
||||
pass_filenames: false
|
||||
always_run: true
|
||||
|
||||
43
BULLETIN.md
43
BULLETIN.md
@@ -1,22 +1,29 @@
|
||||
# Website and Documentation Site 📰📖
|
||||
Check out *https://agpt.co*, the official news & updates site for Auto-GPT!
|
||||
The documentation also has a place here, at *https://docs.agpt.co*
|
||||
# QUICK LINKS 🔗
|
||||
# --------------
|
||||
🌎 *Official Website*: https://agpt.co.
|
||||
📖 *User Guide*: https://docs.agpt.co.
|
||||
👩 *Contributors Wiki*: https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing.
|
||||
|
||||
# For contributors 👷🏼
|
||||
Since releasing v0.3.0, whave been working on re-architecting the Auto-GPT core to make it more extensible and make room for structural performance-oriented R&D.
|
||||
# v0.4.4 RELEASE HIGHLIGHTS! 🚀
|
||||
# -----------------------------
|
||||
## GPT-4 is back!
|
||||
Following OpenAI's recent GPT-4 GA announcement, the SMART_LLM .env setting
|
||||
now defaults to GPT-4, and Auto-GPT will use GPT-4 by default in its main loop.
|
||||
|
||||
Check out the contribution guide on our wiki:
|
||||
https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing
|
||||
### !! High Costs Warning !! 💰💀🚨
|
||||
GPT-4 costs ~20x more than GPT-3.5-turbo.
|
||||
Please take note of this before using SMART_LLM. You can use `--gpt3only`
|
||||
or `--gpt4only` to force the use of GPT-3.5-turbo or GPT-4, respectively,
|
||||
at runtime.
|
||||
|
||||
# 🚀 v0.4.3 Release 🚀
|
||||
We're happy to announce the 0.4.3 maintenance release, which primarily focuses on refining the LLM command execution,
|
||||
extending support for OpenAI's latest models (including the powerful GPT-3 16k model), and laying the groundwork
|
||||
for future compatibility with OpenAI's function calling feature.
|
||||
## Re-arch v1 preview release!
|
||||
We've released a preview version of the re-arch code, under `autogpt/core`.
|
||||
This is a major milestone for us, and we're excited to continue working on it.
|
||||
We look forward to your feedback. Follow the process here:
|
||||
https://github.com/Significant-Gravitas/Auto-GPT/issues/4770.
|
||||
|
||||
Key Highlights:
|
||||
- OpenAI API Key Prompt: Auto-GPT will now courteously prompt users for their OpenAI API key, if it's not already provided.
|
||||
- Summarization Enhancements: We've optimized Auto-GPT's use of the LLM context window even further.
|
||||
- JSON Memory Reading: Support for reading memories from JSON files has been improved, resulting in enhanced task execution.
|
||||
- Deprecated commands, removed for a leaner, more performant LLM: analyze_code, write_tests, improve_code, audio_text, web_playwright, web_requests
|
||||
## Take a look at the Release Notes on Github for the full changelog!
|
||||
https://github.com/Significant-Gravitas/Auto-GPT/releases
|
||||
## Other highlights
|
||||
Other fixes include plugins regressions, Azure config and security patches.
|
||||
|
||||
Take a look at the Release Notes on Github for the full changelog!
|
||||
https://github.com/Significant-Gravitas/Auto-GPT/releases.
|
||||
|
||||
@@ -2,6 +2,7 @@ import json
|
||||
import signal
|
||||
import sys
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
from colorama import Fore, Style
|
||||
|
||||
@@ -64,7 +65,7 @@ class Agent:
|
||||
ai_config: AIConfig,
|
||||
system_prompt: str,
|
||||
triggering_prompt: str,
|
||||
workspace_directory: str,
|
||||
workspace_directory: str | Path,
|
||||
config: Config,
|
||||
):
|
||||
self.ai_name = ai_name
|
||||
@@ -80,13 +81,11 @@ class Agent:
|
||||
self.created_at = datetime.now().strftime("%Y%m%d_%H%M%S")
|
||||
self.cycle_count = 0
|
||||
self.log_cycle_handler = LogCycleHandler()
|
||||
self.fast_token_limit = OPEN_AI_CHAT_MODELS.get(
|
||||
config.fast_llm_model
|
||||
).max_tokens
|
||||
self.smart_token_limit = OPEN_AI_CHAT_MODELS.get(config.smart_llm).max_tokens
|
||||
|
||||
def start_interaction_loop(self):
|
||||
# Avoid circular imports
|
||||
from autogpt.app import execute_command, get_command
|
||||
from autogpt.app import execute_command, extract_command
|
||||
|
||||
# Interaction Loop
|
||||
self.cycle_count = 0
|
||||
@@ -137,8 +136,8 @@ class Agent:
|
||||
self,
|
||||
self.system_prompt,
|
||||
self.triggering_prompt,
|
||||
self.fast_token_limit,
|
||||
self.config.fast_llm_model,
|
||||
self.smart_token_limit,
|
||||
self.config.smart_llm,
|
||||
)
|
||||
|
||||
try:
|
||||
@@ -162,11 +161,11 @@ class Agent:
|
||||
print_assistant_thoughts(
|
||||
self.ai_name, assistant_reply_json, self.config
|
||||
)
|
||||
command_name, arguments = get_command(
|
||||
command_name, arguments = extract_command(
|
||||
assistant_reply_json, assistant_reply, self.config
|
||||
)
|
||||
if self.config.speak_mode:
|
||||
say_text(f"I want to execute {command_name}")
|
||||
say_text(f"I want to execute {command_name}", self.config)
|
||||
|
||||
arguments = self._resolve_pathlike_command_args(arguments)
|
||||
|
||||
@@ -195,8 +194,9 @@ class Agent:
|
||||
# to exit
|
||||
self.user_input = ""
|
||||
logger.info(
|
||||
"Enter 'y' to authorise command, 'y -N' to run N continuous commands, 's' to run self-feedback commands, "
|
||||
"'n' to exit program, or enter feedback for "
|
||||
f"Enter '{self.config.authorise_key}' to authorise command, "
|
||||
f"'{self.config.authorise_key} -N' to run N continuous commands, "
|
||||
f"'{self.config.exit_key}' to exit program, or enter feedback for "
|
||||
f"{self.ai_name}..."
|
||||
)
|
||||
while True:
|
||||
@@ -224,8 +224,8 @@ class Agent:
|
||||
user_input = "GENERATE NEXT COMMAND JSON"
|
||||
except ValueError:
|
||||
logger.warn(
|
||||
"Invalid input format. Please enter 'y -n' where n is"
|
||||
" the number of continuous tasks."
|
||||
f"Invalid input format. Please enter '{self.config.authorise_key} -n' "
|
||||
"where n is the number of continuous tasks."
|
||||
)
|
||||
continue
|
||||
break
|
||||
@@ -281,12 +281,12 @@ class Agent:
|
||||
result = f"Command {command_name} returned: " f"{command_result}"
|
||||
|
||||
result_tlength = count_string_tokens(
|
||||
str(command_result), self.config.fast_llm_model
|
||||
str(command_result), self.config.smart_llm
|
||||
)
|
||||
memory_tlength = count_string_tokens(
|
||||
str(self.history.summary_message()), self.config.fast_llm_model
|
||||
str(self.history.summary_message()), self.config.smart_llm
|
||||
)
|
||||
if result_tlength + memory_tlength + 600 > self.fast_token_limit:
|
||||
if result_tlength + memory_tlength + 600 > self.smart_token_limit:
|
||||
result = f"Failure: command {command_name} returned too much output. \
|
||||
Do not execute this command again with the same arguments."
|
||||
|
||||
|
||||
@@ -23,7 +23,7 @@ def is_valid_int(value: str) -> bool:
|
||||
return False
|
||||
|
||||
|
||||
def get_command(
|
||||
def extract_command(
|
||||
assistant_reply_json: Dict, assistant_reply: ChatModelResponse, config: Config
|
||||
):
|
||||
"""Parse the response and return the command name and arguments
|
||||
@@ -78,21 +78,6 @@ def get_command(
|
||||
return "Error:", str(e)
|
||||
|
||||
|
||||
def map_command_synonyms(command_name: str):
|
||||
"""Takes the original command name given by the AI, and checks if the
|
||||
string matches a list of common/known hallucinations
|
||||
"""
|
||||
synonyms = [
|
||||
("write_file", "write_to_file"),
|
||||
("create_file", "write_to_file"),
|
||||
("search", "google"),
|
||||
]
|
||||
for seen_command, actual_command_name in synonyms:
|
||||
if command_name == seen_command:
|
||||
return actual_command_name
|
||||
return command_name
|
||||
|
||||
|
||||
def execute_command(
|
||||
command_name: str,
|
||||
arguments: dict[str, str],
|
||||
@@ -109,28 +94,21 @@ def execute_command(
|
||||
str: The result of the command
|
||||
"""
|
||||
try:
|
||||
cmd = agent.command_registry.commands.get(command_name)
|
||||
# Execute a native command with the same name or alias, if it exists
|
||||
if command := agent.command_registry.get_command(command_name):
|
||||
return command(**arguments, agent=agent)
|
||||
|
||||
# If the command is found, call it with the provided arguments
|
||||
if cmd:
|
||||
return cmd(**arguments, agent=agent)
|
||||
|
||||
# TODO: Remove commands below after they are moved to the command registry.
|
||||
command_name = map_command_synonyms(command_name.lower())
|
||||
|
||||
# TODO: Change these to take in a file rather than pasted code, if
|
||||
# non-file is given, return instructions "Input should be a python
|
||||
# filepath, write your code to file and try again
|
||||
# Handle non-native commands (e.g. from plugins)
|
||||
for command in agent.ai_config.prompt_generator.commands:
|
||||
if (
|
||||
command_name == command["label"].lower()
|
||||
or command_name == command["name"].lower()
|
||||
):
|
||||
return command["function"](**arguments)
|
||||
return (
|
||||
f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'"
|
||||
" list for available commands and only respond in the specified JSON"
|
||||
" format."
|
||||
|
||||
raise RuntimeError(
|
||||
f"Cannot execute '{command_name}': unknown command."
|
||||
" Do not try to use this command again."
|
||||
)
|
||||
except Exception as e:
|
||||
return f"Error: {str(e)}"
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
"""Main script for the autogpt package."""
|
||||
from typing import Optional
|
||||
|
||||
import click
|
||||
|
||||
|
||||
@@ -65,6 +67,22 @@ import click
|
||||
is_flag=True,
|
||||
help="Installs external dependencies for 3rd party plugins.",
|
||||
)
|
||||
@click.option(
|
||||
"--ai-name",
|
||||
type=str,
|
||||
help="AI name override",
|
||||
)
|
||||
@click.option(
|
||||
"--ai-role",
|
||||
type=str,
|
||||
help="AI role override",
|
||||
)
|
||||
@click.option(
|
||||
"--ai-goal",
|
||||
type=str,
|
||||
multiple=True,
|
||||
help="AI goal override; may be used multiple times to pass multiple goals",
|
||||
)
|
||||
@click.pass_context
|
||||
def main(
|
||||
ctx: click.Context,
|
||||
@@ -83,6 +101,9 @@ def main(
|
||||
skip_news: bool,
|
||||
workspace_directory: str,
|
||||
install_plugin_deps: bool,
|
||||
ai_name: Optional[str],
|
||||
ai_role: Optional[str],
|
||||
ai_goal: tuple[str],
|
||||
) -> None:
|
||||
"""
|
||||
Welcome to AutoGPT an experimental open-source application showcasing the capabilities of the GPT-4 pushing the boundaries of AI.
|
||||
@@ -109,6 +130,9 @@ def main(
|
||||
skip_news,
|
||||
workspace_directory,
|
||||
install_plugin_deps,
|
||||
ai_name,
|
||||
ai_role,
|
||||
ai_goal,
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -20,6 +20,7 @@ def command(
|
||||
parameters: dict[str, CommandParameterSpec],
|
||||
enabled: bool | Callable[[Config], bool] = True,
|
||||
disabled_reason: Optional[str] = None,
|
||||
aliases: list[str] = [],
|
||||
) -> Callable[..., Any]:
|
||||
"""The command decorator is used to create Command objects from ordinary functions."""
|
||||
|
||||
@@ -40,6 +41,7 @@ def command(
|
||||
parameters=typed_parameters,
|
||||
enabled=enabled,
|
||||
disabled_reason=disabled_reason,
|
||||
aliases=aliases,
|
||||
)
|
||||
|
||||
@functools.wraps(func)
|
||||
|
||||
@@ -189,6 +189,7 @@ def ingest_file(
|
||||
"required": True,
|
||||
},
|
||||
},
|
||||
aliases=["write_file", "create_file"],
|
||||
)
|
||||
def write_to_file(filename: str, text: str, agent: Agent) -> str:
|
||||
"""Write text to a file
|
||||
|
||||
@@ -23,6 +23,7 @@ DUCKDUCKGO_MAX_ATTEMPTS = 3
|
||||
"required": True,
|
||||
}
|
||||
},
|
||||
aliases=["search"],
|
||||
)
|
||||
def web_search(query: str, agent: Agent, num_results: int = 8) -> str:
|
||||
"""Return the results of a Google search
|
||||
@@ -67,6 +68,7 @@ def web_search(query: str, agent: Agent, num_results: int = 8) -> str:
|
||||
lambda config: bool(config.google_api_key)
|
||||
and bool(config.google_custom_search_engine_id),
|
||||
"Configure google_api_key and custom_search_engine_id.",
|
||||
aliases=["search"],
|
||||
)
|
||||
def google(query: str, agent: Agent, num_results: int = 8) -> str | list[str]:
|
||||
"""Return the results of a Google search using the official Google API
|
||||
@@ -124,7 +126,7 @@ def google(query: str, agent: Agent, num_results: int = 8) -> str | list[str]:
|
||||
|
||||
def safe_google_results(results: str | list) -> str:
|
||||
"""
|
||||
Return the results of a google search in a safe format.
|
||||
Return the results of a Google search in a safe format.
|
||||
|
||||
Args:
|
||||
results (str | list): The search results.
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
"""
|
||||
This module contains the configuration classes for AutoGPT.
|
||||
"""
|
||||
from autogpt.config.ai_config import AIConfig
|
||||
from autogpt.config.config import Config, check_openai_api_key
|
||||
from .ai_config import AIConfig
|
||||
from .config import Config, ConfigBuilder, check_openai_api_key
|
||||
|
||||
__all__ = [
|
||||
"check_openai_api_key",
|
||||
"AIConfig",
|
||||
"Config",
|
||||
"ConfigBuilder",
|
||||
]
|
||||
|
||||
@@ -35,7 +35,7 @@ class AIConfig:
|
||||
self,
|
||||
ai_name: str = "",
|
||||
ai_role: str = "",
|
||||
ai_goals: list | None = None,
|
||||
ai_goals: list[str] = [],
|
||||
api_budget: float = 0.0,
|
||||
) -> None:
|
||||
"""
|
||||
@@ -49,8 +49,6 @@ class AIConfig:
|
||||
Returns:
|
||||
None
|
||||
"""
|
||||
if ai_goals is None:
|
||||
ai_goals = []
|
||||
self.ai_name = ai_name
|
||||
self.ai_role = ai_role
|
||||
self.ai_goals = ai_goals
|
||||
@@ -61,13 +59,12 @@ class AIConfig:
|
||||
@staticmethod
|
||||
def load(ai_settings_file: str = SAVE_FILE) -> "AIConfig":
|
||||
"""
|
||||
Returns class object with parameters (ai_name, ai_role, ai_goals, api_budget) loaded from
|
||||
yaml file if yaml file exists,
|
||||
else returns class with no parameters.
|
||||
Returns class object with parameters (ai_name, ai_role, ai_goals, api_budget)
|
||||
loaded from yaml file if yaml file exists, else returns class with no parameters.
|
||||
|
||||
Parameters:
|
||||
ai_settings_file (int): The path to the config yaml file.
|
||||
DEFAULT: "../ai_settings.yaml"
|
||||
ai_settings_file (int): The path to the config yaml file.
|
||||
DEFAULT: "../ai_settings.yaml"
|
||||
|
||||
Returns:
|
||||
cls (object): An instance of given cls object
|
||||
|
||||
@@ -1,28 +1,32 @@
|
||||
"""Configuration class to store the state of bools for different scripts access."""
|
||||
from __future__ import annotations
|
||||
|
||||
import contextlib
|
||||
import os
|
||||
import re
|
||||
from typing import Dict
|
||||
from typing import Dict, Optional, Union
|
||||
|
||||
import yaml
|
||||
from colorama import Fore
|
||||
|
||||
from autogpt.core.configuration.schema import Configurable, SystemSettings
|
||||
from autogpt.plugins.plugins_config import PluginsConfig
|
||||
|
||||
AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "../..", "azure.yaml")
|
||||
from typing import Optional
|
||||
GPT_4_MODEL = "gpt-4"
|
||||
GPT_3_MODEL = "gpt-3.5-turbo"
|
||||
|
||||
|
||||
class ConfigSettings(SystemSettings):
|
||||
fast_llm_model: str
|
||||
smart_llm_model: str
|
||||
class Config(SystemSettings):
|
||||
fast_llm: str
|
||||
smart_llm: str
|
||||
continuous_mode: bool
|
||||
skip_news: bool
|
||||
workspace_path: Optional[str]
|
||||
file_logger_path: Optional[str]
|
||||
workspace_path: Optional[str] = None
|
||||
file_logger_path: Optional[str] = None
|
||||
debug_mode: bool
|
||||
plugins_dir: str
|
||||
plugins_config: dict[str, str]
|
||||
plugins_config: PluginsConfig
|
||||
continuous_limit: int
|
||||
speak_mode: bool
|
||||
skip_reprompt: bool
|
||||
@@ -37,31 +41,33 @@ class ConfigSettings(SystemSettings):
|
||||
prompt_settings_file: str
|
||||
embedding_model: str
|
||||
browse_spacy_language_model: str
|
||||
openai_api_key: Optional[str]
|
||||
openai_organization: Optional[str]
|
||||
openai_api_key: Optional[str] = None
|
||||
openai_organization: Optional[str] = None
|
||||
temperature: float
|
||||
use_azure: bool
|
||||
azure_config_file: Optional[str] = None
|
||||
azure_model_to_deployment_id_map: Optional[Dict[str, str]] = None
|
||||
execute_local_commands: bool
|
||||
restrict_to_workspace: bool
|
||||
openai_api_type: Optional[str]
|
||||
openai_api_base: Optional[str]
|
||||
openai_api_version: Optional[str]
|
||||
openai_api_type: Optional[str] = None
|
||||
openai_api_base: Optional[str] = None
|
||||
openai_api_version: Optional[str] = None
|
||||
openai_functions: bool
|
||||
elevenlabs_api_key: Optional[str]
|
||||
elevenlabs_api_key: Optional[str] = None
|
||||
streamelements_voice: str
|
||||
text_to_speech_provider: str
|
||||
github_api_key: Optional[str]
|
||||
github_username: Optional[str]
|
||||
google_api_key: Optional[str]
|
||||
google_custom_search_engine_id: Optional[str]
|
||||
image_provider: Optional[str]
|
||||
github_api_key: Optional[str] = None
|
||||
github_username: Optional[str] = None
|
||||
google_api_key: Optional[str] = None
|
||||
google_custom_search_engine_id: Optional[str] = None
|
||||
image_provider: Optional[str] = None
|
||||
image_size: int
|
||||
huggingface_api_token: Optional[str]
|
||||
huggingface_api_token: Optional[str] = None
|
||||
huggingface_image_model: str
|
||||
audio_to_text_provider: str
|
||||
huggingface_audio_to_text_model: Optional[str]
|
||||
sd_webui_url: Optional[str]
|
||||
sd_webui_auth: Optional[str]
|
||||
huggingface_audio_to_text_model: Optional[str] = None
|
||||
sd_webui_url: Optional[str] = None
|
||||
sd_webui_auth: Optional[str] = None
|
||||
selenium_web_browser: str
|
||||
selenium_headless: bool
|
||||
user_agent: str
|
||||
@@ -76,12 +82,73 @@ class ConfigSettings(SystemSettings):
|
||||
plugins_openai: list[str]
|
||||
plugins_config_file: str
|
||||
chat_messages_enabled: bool
|
||||
elevenlabs_voice_id: Optional[str]
|
||||
elevenlabs_voice_id: Optional[str] = None
|
||||
plugins: list[str]
|
||||
authorise_key: str
|
||||
|
||||
def get_openai_credentials(self, model: str) -> dict[str, str]:
|
||||
credentials = {
|
||||
"api_key": self.openai_api_key,
|
||||
"api_base": self.openai_api_base,
|
||||
"organization": self.openai_organization,
|
||||
}
|
||||
if self.use_azure:
|
||||
azure_credentials = self.get_azure_credentials(model)
|
||||
credentials.update(azure_credentials)
|
||||
return credentials
|
||||
|
||||
class Config(Configurable):
|
||||
def get_azure_credentials(self, model: str) -> dict[str, str]:
|
||||
"""Get the kwargs for the Azure API."""
|
||||
|
||||
# Fix --gpt3only and --gpt4only in combination with Azure
|
||||
fast_llm = (
|
||||
self.fast_llm
|
||||
if not (
|
||||
self.fast_llm == self.smart_llm
|
||||
and self.fast_llm.startswith(GPT_4_MODEL)
|
||||
)
|
||||
else f"not_{self.fast_llm}"
|
||||
)
|
||||
smart_llm = (
|
||||
self.smart_llm
|
||||
if not (
|
||||
self.smart_llm == self.fast_llm
|
||||
and self.smart_llm.startswith(GPT_3_MODEL)
|
||||
)
|
||||
else f"not_{self.smart_llm}"
|
||||
)
|
||||
|
||||
deployment_id = {
|
||||
fast_llm: self.azure_model_to_deployment_id_map.get(
|
||||
"fast_llm_deployment_id",
|
||||
self.azure_model_to_deployment_id_map.get(
|
||||
"fast_llm_model_deployment_id" # backwards compatibility
|
||||
),
|
||||
),
|
||||
smart_llm: self.azure_model_to_deployment_id_map.get(
|
||||
"smart_llm_deployment_id",
|
||||
self.azure_model_to_deployment_id_map.get(
|
||||
"smart_llm_model_deployment_id" # backwards compatibility
|
||||
),
|
||||
),
|
||||
self.embedding_model: self.azure_model_to_deployment_id_map.get(
|
||||
"embedding_model_deployment_id"
|
||||
),
|
||||
}.get(model, None)
|
||||
|
||||
kwargs = {
|
||||
"api_type": self.openai_api_type,
|
||||
"api_base": self.openai_api_base,
|
||||
"api_version": self.openai_api_version,
|
||||
}
|
||||
if model == self.embedding_model:
|
||||
kwargs["engine"] = deployment_id
|
||||
else:
|
||||
kwargs["deployment_id"] = deployment_id
|
||||
return kwargs
|
||||
|
||||
|
||||
class ConfigBuilder(Configurable[Config]):
|
||||
default_plugins_config_file = os.path.join(
|
||||
os.path.dirname(os.path.abspath(__file__)), "..", "..", "plugins_config.yaml"
|
||||
)
|
||||
@@ -96,17 +163,17 @@ class Config(Configurable):
|
||||
else:
|
||||
default_tts_provider = "gtts"
|
||||
|
||||
defaults_settings = ConfigSettings(
|
||||
default_settings = Config(
|
||||
name="Default Server Config",
|
||||
description="This is a default server configuration",
|
||||
smart_llm_model="gpt-3.5-turbo",
|
||||
fast_llm_model="gpt-3.5-turbo",
|
||||
smart_llm="gpt-4",
|
||||
fast_llm="gpt-3.5-turbo",
|
||||
continuous_mode=False,
|
||||
continuous_limit=0,
|
||||
skip_news=False,
|
||||
debug_mode=False,
|
||||
plugins_dir="plugins",
|
||||
plugins_config={},
|
||||
plugins_config=PluginsConfig(plugins={}),
|
||||
speak_mode=False,
|
||||
skip_reprompt=False,
|
||||
allow_downloads=False,
|
||||
@@ -122,6 +189,7 @@ class Config(Configurable):
|
||||
browse_spacy_language_model="en_core_web_sm",
|
||||
temperature=0,
|
||||
use_azure=False,
|
||||
azure_config_file=AZURE_CONFIG_FILE,
|
||||
execute_local_commands=False,
|
||||
restrict_to_workspace=True,
|
||||
openai_functions=False,
|
||||
@@ -150,7 +218,7 @@ class Config(Configurable):
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def build_config_from_env(cls):
|
||||
def build_config_from_env(cls) -> Config:
|
||||
"""Initialize the Config class"""
|
||||
config_dict = {
|
||||
"authorise_key": os.getenv("AUTHORISE_COMMAND_KEY"),
|
||||
@@ -159,12 +227,13 @@ class Config(Configurable):
|
||||
"shell_command_control": os.getenv("SHELL_COMMAND_CONTROL"),
|
||||
"ai_settings_file": os.getenv("AI_SETTINGS_FILE"),
|
||||
"prompt_settings_file": os.getenv("PROMPT_SETTINGS_FILE"),
|
||||
"fast_llm_model": os.getenv("FAST_LLM_MODEL"),
|
||||
"smart_llm_model": os.getenv("SMART_LLM_MODEL"),
|
||||
"fast_llm": os.getenv("FAST_LLM", os.getenv("FAST_LLM_MODEL")),
|
||||
"smart_llm": os.getenv("SMART_LLM", os.getenv("SMART_LLM_MODEL")),
|
||||
"embedding_model": os.getenv("EMBEDDING_MODEL"),
|
||||
"browse_spacy_language_model": os.getenv("BROWSE_SPACY_LANGUAGE_MODEL"),
|
||||
"openai_api_key": os.getenv("OPENAI_API_KEY"),
|
||||
"use_azure": os.getenv("USE_AZURE") == "True",
|
||||
"azure_config_file": os.getenv("AZURE_CONFIG_FILE", AZURE_CONFIG_FILE),
|
||||
"execute_local_commands": os.getenv("EXECUTE_LOCAL_COMMANDS", "False")
|
||||
== "True",
|
||||
"restrict_to_workspace": os.getenv("RESTRICT_TO_WORKSPACE", "True")
|
||||
@@ -198,21 +267,16 @@ class Config(Configurable):
|
||||
"chat_messages_enabled": os.getenv("CHAT_MESSAGES_ENABLED") == "True",
|
||||
}
|
||||
|
||||
# Converting to a list from comma-separated string
|
||||
disabled_command_categories = os.getenv("DISABLED_COMMAND_CATEGORIES")
|
||||
if disabled_command_categories:
|
||||
config_dict[
|
||||
"disabled_command_categories"
|
||||
] = disabled_command_categories.split(",")
|
||||
config_dict["disabled_command_categories"] = _safe_split(
|
||||
os.getenv("DISABLED_COMMAND_CATEGORIES")
|
||||
)
|
||||
|
||||
# Converting to a list from comma-separated string
|
||||
shell_denylist = os.getenv("SHELL_DENYLIST", os.getenv("DENY_COMMANDS"))
|
||||
if shell_denylist:
|
||||
config_dict["shell_denylist"] = shell_denylist.split(",")
|
||||
|
||||
shell_allowlist = os.getenv("SHELL_ALLOWLIST", os.getenv("ALLOW_COMMANDS"))
|
||||
if shell_allowlist:
|
||||
config_dict["shell_allowlist"] = shell_allowlist.split(",")
|
||||
config_dict["shell_denylist"] = _safe_split(
|
||||
os.getenv("SHELL_DENYLIST", os.getenv("DENY_COMMANDS"))
|
||||
)
|
||||
config_dict["shell_allowlist"] = _safe_split(
|
||||
os.getenv("SHELL_ALLOWLIST", os.getenv("ALLOW_COMMANDS"))
|
||||
)
|
||||
|
||||
config_dict["google_custom_search_engine_id"] = os.getenv(
|
||||
"GOOGLE_CUSTOM_SEARCH_ENGINE_ID", os.getenv("CUSTOM_SEARCH_ENGINE_ID")
|
||||
@@ -222,13 +286,13 @@ class Config(Configurable):
|
||||
"ELEVENLABS_VOICE_ID", os.getenv("ELEVENLABS_VOICE_1_ID")
|
||||
)
|
||||
|
||||
plugins_allowlist = os.getenv("ALLOWLISTED_PLUGINS")
|
||||
if plugins_allowlist:
|
||||
config_dict["plugins_allowlist"] = plugins_allowlist.split(",")
|
||||
|
||||
plugins_denylist = os.getenv("DENYLISTED_PLUGINS")
|
||||
if plugins_denylist:
|
||||
config_dict["plugins_denylist"] = plugins_denylist.split(",")
|
||||
config_dict["plugins_allowlist"] = _safe_split(os.getenv("ALLOWLISTED_PLUGINS"))
|
||||
config_dict["plugins_denylist"] = _safe_split(os.getenv("DENYLISTED_PLUGINS"))
|
||||
config_dict["plugins_config"] = PluginsConfig.load_config(
|
||||
config_dict["plugins_config_file"],
|
||||
config_dict["plugins_denylist"],
|
||||
config_dict["plugins_allowlist"],
|
||||
)
|
||||
|
||||
with contextlib.suppress(TypeError):
|
||||
config_dict["image_size"] = int(os.getenv("IMAGE_SIZE"))
|
||||
@@ -238,12 +302,10 @@ class Config(Configurable):
|
||||
config_dict["temperature"] = float(os.getenv("TEMPERATURE"))
|
||||
|
||||
if config_dict["use_azure"]:
|
||||
azure_config = cls.load_azure_config()
|
||||
config_dict["openai_api_type"] = azure_config["openai_api_type"]
|
||||
config_dict["openai_api_base"] = azure_config["openai_api_base"]
|
||||
config_dict["openai_api_version"] = azure_config["openai_api_version"]
|
||||
azure_config = cls.load_azure_config(config_dict["azure_config_file"])
|
||||
config_dict.update(azure_config)
|
||||
|
||||
if os.getenv("OPENAI_API_BASE_URL"):
|
||||
elif os.getenv("OPENAI_API_BASE_URL"):
|
||||
config_dict["openai_api_base"] = os.getenv("OPENAI_API_BASE_URL")
|
||||
|
||||
openai_organization = os.getenv("OPENAI_ORGANIZATION")
|
||||
@@ -272,10 +334,11 @@ class Config(Configurable):
|
||||
config_params = yaml.load(file, Loader=yaml.FullLoader) or {}
|
||||
|
||||
return {
|
||||
"openai_api_type": config_params.get("azure_api_type") or "azure",
|
||||
"openai_api_base": config_params.get("azure_api_base") or "",
|
||||
"openai_api_version": config_params.get("azure_api_version")
|
||||
or "2023-03-15-preview",
|
||||
"openai_api_type": config_params.get("azure_api_type", "azure"),
|
||||
"openai_api_base": config_params.get("azure_api_base", ""),
|
||||
"openai_api_version": config_params.get(
|
||||
"azure_api_version", "2023-03-15-preview"
|
||||
),
|
||||
"azure_model_to_deployment_id_map": config_params.get(
|
||||
"azure_model_map", {}
|
||||
),
|
||||
@@ -310,3 +373,10 @@ def check_openai_api_key(config: Config) -> None:
|
||||
else:
|
||||
print("Invalid OpenAI API key!")
|
||||
exit(1)
|
||||
|
||||
|
||||
def _safe_split(s: Union[str, None], sep: str = ",") -> list[str]:
|
||||
"""Split a string by a separator. Return an empty list if the string is None."""
|
||||
if s is None:
|
||||
return []
|
||||
return s.split(sep)
|
||||
|
||||
@@ -7,6 +7,7 @@ import click
|
||||
from colorama import Back, Fore, Style
|
||||
|
||||
from autogpt import utils
|
||||
from autogpt.config.config import GPT_3_MODEL, GPT_4_MODEL
|
||||
from autogpt.llm.utils import check_model
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory.vector import get_supported_memory_backends
|
||||
@@ -14,9 +15,6 @@ from autogpt.memory.vector import get_supported_memory_backends
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
GPT_4_MODEL = "gpt-4"
|
||||
GPT_3_MODEL = "gpt-3.5-turbo"
|
||||
|
||||
|
||||
def create_config(
|
||||
config: Config,
|
||||
@@ -87,21 +85,21 @@ def create_config(
|
||||
# Set the default LLM models
|
||||
if gpt3only:
|
||||
logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
|
||||
# --gpt3only should always use gpt-3.5-turbo, despite user's FAST_LLM_MODEL config
|
||||
config.fast_llm_model = GPT_3_MODEL
|
||||
config.smart_llm_model = GPT_3_MODEL
|
||||
|
||||
# --gpt3only should always use gpt-3.5-turbo, despite user's FAST_LLM config
|
||||
config.fast_llm = GPT_3_MODEL
|
||||
config.smart_llm = GPT_3_MODEL
|
||||
elif (
|
||||
gpt4only
|
||||
and check_model(GPT_4_MODEL, model_type="smart_llm_model") == GPT_4_MODEL
|
||||
and check_model(GPT_4_MODEL, model_type="smart_llm", config=config)
|
||||
== GPT_4_MODEL
|
||||
):
|
||||
logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
|
||||
# --gpt4only should always use gpt-4, despite user's SMART_LLM_MODEL config
|
||||
config.fast_llm_model = GPT_4_MODEL
|
||||
config.smart_llm_model = GPT_4_MODEL
|
||||
# --gpt4only should always use gpt-4, despite user's SMART_LLM config
|
||||
config.fast_llm = GPT_4_MODEL
|
||||
config.smart_llm = GPT_4_MODEL
|
||||
else:
|
||||
config.fast_llm_model = check_model(config.fast_llm_model, "fast_llm_model")
|
||||
config.smart_llm_model = check_model(config.smart_llm_model, "smart_llm_model")
|
||||
config.fast_llm = check_model(config.fast_llm, "fast_llm", config=config)
|
||||
config.smart_llm = check_model(config.smart_llm, "smart_llm", config=config)
|
||||
|
||||
if memory_type:
|
||||
supported_memory = get_supported_memory_backends()
|
||||
|
||||
62
autogpt/core/README.md
Normal file
62
autogpt/core/README.md
Normal file
@@ -0,0 +1,62 @@
|
||||
# Run instructions
|
||||
|
||||
There are two client applications for Auto-GPT included.
|
||||
|
||||
## CLI Application
|
||||
|
||||
:star2: **This is the reference application I'm working with for now** :star2:
|
||||
|
||||
The first app is a straight CLI application. I have not done anything yet to port all the friendly display stuff from the `logger.typewriter_log` logic.
|
||||
|
||||
- [Entry Point](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_app/cli.py)
|
||||
- [Client Application](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_app/main.py)
|
||||
|
||||
Auto-GPT must be installed in your python environment to run this application. To do so, run
|
||||
|
||||
```
|
||||
pip install -e REPOSITORY_ROOT
|
||||
```
|
||||
|
||||
where `REPOSITORY_ROOT` is the root of the Auto-GPT repository on your machine.
|
||||
|
||||
You'll then need a settings file. Run
|
||||
|
||||
```
|
||||
python REPOSITORY_ROOT/autogpt/core/runner/cli_app/cli.py make-settings
|
||||
```
|
||||
|
||||
This will write a file called `default_agent_settings.yaml` with all the user-modifiable configuration keys to `~/auto-gpt/default_agent_settings.yml` and make the `auto-gpt` directory in your user directory if it doesn't exist). At a bare minimum, you'll need to set `openai.credentials.api_key` to your OpenAI API Key to run the model.
|
||||
|
||||
You can then run Auto-GPT with
|
||||
|
||||
```
|
||||
python REPOSITORY_ROOT/autogpt/core/runner/cli_app/cli.py run
|
||||
```
|
||||
|
||||
to launch the interaction loop.
|
||||
|
||||
## CLI Web App
|
||||
|
||||
The second app is still a CLI, but it sets up a local webserver that the client application talks to rather than invoking calls to the Agent library code directly. This application is essentially a sketch at this point as the folks who were driving it have had less time (and likely not enough clarity) to proceed.
|
||||
|
||||
- [Entry Point](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_web_app/cli.py)
|
||||
- [Client Application](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_web_app/client/client.py)
|
||||
- [Server API](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_web_app/server/api.py)
|
||||
|
||||
To run, you still need to generate a default configuration. You can do
|
||||
|
||||
```
|
||||
python REPOSITORY_ROOT/autogpt/core/runner/cli_web_app/cli.py make-settings
|
||||
```
|
||||
|
||||
It invokes the same command as the bare CLI app, so follow the instructions above about setting your API key.
|
||||
|
||||
To run, do
|
||||
|
||||
```
|
||||
python REPOSITORY_ROOT/autogpt/core/runner/cli_web_app/cli.py client
|
||||
```
|
||||
|
||||
This will launch a webserver and then start the client cli application to communicate with it.
|
||||
|
||||
:warning: I am not actively developing this application. It is a very good place to get involved if you have web application design experience and are looking to get involved in the re-arch.
|
||||
0
autogpt/core/__init__.py
Normal file
0
autogpt/core/__init__.py
Normal file
4
autogpt/core/ability/__init__.py
Normal file
4
autogpt/core/ability/__init__.py
Normal file
@@ -0,0 +1,4 @@
|
||||
"""The command system provides a way to extend the functionality of the AI agent."""
|
||||
from autogpt.core.ability.base import Ability, AbilityRegistry
|
||||
from autogpt.core.ability.schema import AbilityResult
|
||||
from autogpt.core.ability.simple import AbilityRegistrySettings, SimpleAbilityRegistry
|
||||
92
autogpt/core/ability/base.py
Normal file
92
autogpt/core/ability/base.py
Normal file
@@ -0,0 +1,92 @@
|
||||
import abc
|
||||
from pprint import pformat
|
||||
from typing import ClassVar
|
||||
|
||||
import inflection
|
||||
from pydantic import Field
|
||||
|
||||
from autogpt.core.ability.schema import AbilityResult
|
||||
from autogpt.core.configuration import SystemConfiguration
|
||||
from autogpt.core.planning.simple import LanguageModelConfiguration
|
||||
|
||||
|
||||
class AbilityConfiguration(SystemConfiguration):
|
||||
"""Struct for model configuration."""
|
||||
|
||||
from autogpt.core.plugin.base import PluginLocation
|
||||
|
||||
location: PluginLocation
|
||||
packages_required: list[str] = Field(default_factory=list)
|
||||
language_model_required: LanguageModelConfiguration = None
|
||||
memory_provider_required: bool = False
|
||||
workspace_required: bool = False
|
||||
|
||||
|
||||
class Ability(abc.ABC):
|
||||
"""A class representing an agent ability."""
|
||||
|
||||
default_configuration: ClassVar[AbilityConfiguration]
|
||||
|
||||
@classmethod
|
||||
def name(cls) -> str:
|
||||
"""The name of the ability."""
|
||||
return inflection.underscore(cls.__name__)
|
||||
|
||||
@classmethod
|
||||
@abc.abstractmethod
|
||||
def description(cls) -> str:
|
||||
"""A detailed description of what the ability does."""
|
||||
...
|
||||
|
||||
@classmethod
|
||||
@abc.abstractmethod
|
||||
def arguments(cls) -> dict:
|
||||
"""A dict of arguments in standard json schema format."""
|
||||
...
|
||||
|
||||
@classmethod
|
||||
def required_arguments(cls) -> list[str]:
|
||||
"""A list of required arguments."""
|
||||
return []
|
||||
|
||||
@abc.abstractmethod
|
||||
async def __call__(self, *args, **kwargs) -> AbilityResult:
|
||||
...
|
||||
|
||||
def __str__(self) -> str:
|
||||
return pformat(self.dump)
|
||||
|
||||
def dump(self) -> dict:
|
||||
return {
|
||||
"name": self.name(),
|
||||
"description": self.description(),
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": self.arguments(),
|
||||
"required": self.required_arguments(),
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
class AbilityRegistry(abc.ABC):
|
||||
@abc.abstractmethod
|
||||
def register_ability(
|
||||
self, ability_name: str, ability_configuration: AbilityConfiguration
|
||||
) -> None:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
def list_abilities(self) -> list[str]:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
def dump_abilities(self) -> list[dict]:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
def get_ability(self, ability_name: str) -> Ability:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
def perform(self, ability_name: str, **kwargs) -> AbilityResult:
|
||||
...
|
||||
6
autogpt/core/ability/builtins/__init__.py
Normal file
6
autogpt/core/ability/builtins/__init__.py
Normal file
@@ -0,0 +1,6 @@
|
||||
from autogpt.core.ability.builtins.create_new_ability import CreateNewAbility
|
||||
from autogpt.core.ability.builtins.query_language_model import QueryLanguageModel
|
||||
|
||||
BUILTIN_ABILITIES = {
|
||||
QueryLanguageModel.name(): QueryLanguageModel,
|
||||
}
|
||||
102
autogpt/core/ability/builtins/create_new_ability.py
Normal file
102
autogpt/core/ability/builtins/create_new_ability.py
Normal file
@@ -0,0 +1,102 @@
|
||||
import logging
|
||||
|
||||
from autogpt.core.ability.base import Ability, AbilityConfiguration
|
||||
from autogpt.core.ability.schema import AbilityResult
|
||||
from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat
|
||||
|
||||
|
||||
class CreateNewAbility(Ability):
|
||||
default_configuration = AbilityConfiguration(
|
||||
location=PluginLocation(
|
||||
storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
|
||||
storage_route="autogpt.core.ability.builtins.CreateNewAbility",
|
||||
),
|
||||
)
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
logger: logging.Logger,
|
||||
configuration: AbilityConfiguration,
|
||||
):
|
||||
self._logger = logger
|
||||
self._configuration = configuration
|
||||
|
||||
@classmethod
|
||||
def description(cls) -> str:
|
||||
return "Create a new ability by writing python code."
|
||||
|
||||
@classmethod
|
||||
def arguments(cls) -> dict:
|
||||
return {
|
||||
"ability_name": {
|
||||
"type": "string",
|
||||
"description": "A meaningful and concise name for the new ability.",
|
||||
},
|
||||
"description": {
|
||||
"type": "string",
|
||||
"description": "A detailed description of the ability and its uses, including any limitations.",
|
||||
},
|
||||
"arguments": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {
|
||||
"type": "string",
|
||||
"description": "The name of the argument.",
|
||||
},
|
||||
"type": {
|
||||
"type": "string",
|
||||
"description": "The type of the argument. Must be a standard json schema type.",
|
||||
},
|
||||
"description": {
|
||||
"type": "string",
|
||||
"description": "A detailed description of the argument and its uses.",
|
||||
},
|
||||
},
|
||||
},
|
||||
"description": "A list of arguments that the ability will accept.",
|
||||
},
|
||||
"required_arguments": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string",
|
||||
"description": "The names of the arguments that are required.",
|
||||
},
|
||||
"description": "A list of the names of the arguments that are required.",
|
||||
},
|
||||
"package_requirements": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string",
|
||||
"description": "The of the Python package that is required to execute the ability.",
|
||||
},
|
||||
"description": "A list of the names of the Python packages that are required to execute the ability.",
|
||||
},
|
||||
"code": {
|
||||
"type": "string",
|
||||
"description": "The Python code that will be executed when the ability is called.",
|
||||
},
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def required_arguments(cls) -> list[str]:
|
||||
return [
|
||||
"ability_name",
|
||||
"description",
|
||||
"arguments",
|
||||
"required_arguments",
|
||||
"package_requirements",
|
||||
"code",
|
||||
]
|
||||
|
||||
async def __call__(
|
||||
self,
|
||||
ability_name: str,
|
||||
description: str,
|
||||
arguments: list[dict],
|
||||
required_arguments: list[str],
|
||||
package_requirements: list[str],
|
||||
code: str,
|
||||
) -> AbilityResult:
|
||||
raise NotImplementedError
|
||||
167
autogpt/core/ability/builtins/file_operations.py
Normal file
167
autogpt/core/ability/builtins/file_operations.py
Normal file
@@ -0,0 +1,167 @@
|
||||
import logging
|
||||
import os
|
||||
|
||||
from autogpt.core.ability.base import Ability, AbilityConfiguration
|
||||
from autogpt.core.ability.schema import AbilityResult, ContentType, Knowledge
|
||||
from autogpt.core.workspace import Workspace
|
||||
|
||||
|
||||
class ReadFile(Ability):
|
||||
default_configuration = AbilityConfiguration(
|
||||
packages_required=["unstructured"],
|
||||
workspace_required=True,
|
||||
)
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
logger: logging.Logger,
|
||||
workspace: Workspace,
|
||||
):
|
||||
self._logger = logger
|
||||
self._workspace = workspace
|
||||
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return "Read and parse all text from a file."
|
||||
|
||||
@property
|
||||
def arguments(self) -> dict:
|
||||
return {
|
||||
"filename": {
|
||||
"type": "string",
|
||||
"description": "The name of the file to read.",
|
||||
},
|
||||
}
|
||||
|
||||
def _check_preconditions(self, filename: str) -> AbilityResult | None:
|
||||
message = ""
|
||||
try:
|
||||
pass
|
||||
except ImportError:
|
||||
message = "Package charset_normalizer is not installed."
|
||||
|
||||
try:
|
||||
file_path = self._workspace.get_path(filename)
|
||||
if not file_path.exists():
|
||||
message = f"File {filename} does not exist."
|
||||
if not file_path.is_file():
|
||||
message = f"{filename} is not a file."
|
||||
except ValueError as e:
|
||||
message = str(e)
|
||||
|
||||
if message:
|
||||
return AbilityResult(
|
||||
ability_name=self.name(),
|
||||
ability_args={"filename": filename},
|
||||
success=False,
|
||||
message=message,
|
||||
data=None,
|
||||
)
|
||||
|
||||
def __call__(self, filename: str) -> AbilityResult:
|
||||
if result := self._check_preconditions(filename):
|
||||
return result
|
||||
|
||||
from unstructured.partition.auto import partition
|
||||
|
||||
file_path = self._workspace.get_path(filename)
|
||||
try:
|
||||
elements = partition(str(file_path))
|
||||
# TODO: Lots of other potentially useful information is available
|
||||
# in the partitioned file. Consider returning more of it.
|
||||
new_knowledge = Knowledge(
|
||||
content="\n\n".join([element.text for element in elements]),
|
||||
content_type=ContentType.TEXT,
|
||||
content_metadata={"filename": filename},
|
||||
)
|
||||
success = True
|
||||
message = f"File {file_path} read successfully."
|
||||
except IOError as e:
|
||||
new_knowledge = None
|
||||
success = False
|
||||
message = str(e)
|
||||
|
||||
return AbilityResult(
|
||||
ability_name=self.name(),
|
||||
ability_args={"filename": filename},
|
||||
success=success,
|
||||
message=message,
|
||||
new_knowledge=new_knowledge,
|
||||
)
|
||||
|
||||
|
||||
class WriteFile(Ability):
|
||||
default_configuration = AbilityConfiguration(
|
||||
packages_required=["unstructured"],
|
||||
workspace_required=True,
|
||||
)
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
logger: logging.Logger,
|
||||
workspace: Workspace,
|
||||
):
|
||||
self._logger = logger
|
||||
self._workspace = workspace
|
||||
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return "Write text to a file."
|
||||
|
||||
@property
|
||||
def arguments(self) -> dict:
|
||||
return {
|
||||
"filename": {
|
||||
"type": "string",
|
||||
"description": "The name of the file to write.",
|
||||
},
|
||||
"contents": {
|
||||
"type": "string",
|
||||
"description": "The contents of the file to write.",
|
||||
},
|
||||
}
|
||||
|
||||
def _check_preconditions(
|
||||
self, filename: str, contents: str
|
||||
) -> AbilityResult | None:
|
||||
message = ""
|
||||
try:
|
||||
file_path = self._workspace.get_path(filename)
|
||||
if file_path.exists():
|
||||
message = f"File {filename} already exists."
|
||||
if len(contents):
|
||||
message = f"File {filename} was not given any content."
|
||||
except ValueError as e:
|
||||
message = str(e)
|
||||
|
||||
if message:
|
||||
return AbilityResult(
|
||||
ability_name=self.name(),
|
||||
ability_args={"filename": filename, "contents": contents},
|
||||
success=False,
|
||||
message=message,
|
||||
data=None,
|
||||
)
|
||||
|
||||
def __call__(self, filename: str, contents: str) -> AbilityResult:
|
||||
if result := self._check_preconditions(filename, contents):
|
||||
return result
|
||||
|
||||
file_path = self._workspace.get_path(filename)
|
||||
try:
|
||||
directory = os.path.dirname(file_path)
|
||||
os.makedirs(directory)
|
||||
with open(filename, "w", encoding="utf-8") as f:
|
||||
f.write(contents)
|
||||
success = True
|
||||
message = f"File {file_path} written successfully."
|
||||
except IOError as e:
|
||||
success = False
|
||||
message = str(e)
|
||||
|
||||
return AbilityResult(
|
||||
ability_name=self.name(),
|
||||
ability_args={"filename": filename},
|
||||
success=success,
|
||||
message=message,
|
||||
)
|
||||
78
autogpt/core/ability/builtins/query_language_model.py
Normal file
78
autogpt/core/ability/builtins/query_language_model.py
Normal file
@@ -0,0 +1,78 @@
|
||||
import logging
|
||||
|
||||
from autogpt.core.ability.base import Ability, AbilityConfiguration
|
||||
from autogpt.core.ability.schema import AbilityResult
|
||||
from autogpt.core.planning.simple import LanguageModelConfiguration
|
||||
from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat
|
||||
from autogpt.core.resource.model_providers import (
|
||||
LanguageModelMessage,
|
||||
LanguageModelProvider,
|
||||
MessageRole,
|
||||
ModelProviderName,
|
||||
OpenAIModelName,
|
||||
)
|
||||
|
||||
|
||||
class QueryLanguageModel(Ability):
|
||||
default_configuration = AbilityConfiguration(
|
||||
location=PluginLocation(
|
||||
storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
|
||||
storage_route="autogpt.core.ability.builtins.QueryLanguageModel",
|
||||
),
|
||||
language_model_required=LanguageModelConfiguration(
|
||||
model_name=OpenAIModelName.GPT3,
|
||||
provider_name=ModelProviderName.OPENAI,
|
||||
temperature=0.9,
|
||||
),
|
||||
)
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
logger: logging.Logger,
|
||||
configuration: AbilityConfiguration,
|
||||
language_model_provider: LanguageModelProvider,
|
||||
):
|
||||
self._logger = logger
|
||||
self._configuration = configuration
|
||||
self._language_model_provider = language_model_provider
|
||||
|
||||
@classmethod
|
||||
def description(cls) -> str:
|
||||
return "Query a language model. A query should be a question and any relevant context."
|
||||
|
||||
@classmethod
|
||||
def arguments(cls) -> dict:
|
||||
return {
|
||||
"query": {
|
||||
"type": "string",
|
||||
"description": "A query for a language model. A query should contain a question and any relevant context.",
|
||||
},
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def required_arguments(cls) -> list[str]:
|
||||
return ["query"]
|
||||
|
||||
async def __call__(self, query: str) -> AbilityResult:
|
||||
messages = [
|
||||
LanguageModelMessage(
|
||||
content=query,
|
||||
role=MessageRole.USER,
|
||||
),
|
||||
]
|
||||
model_response = await self._language_model_provider.create_language_completion(
|
||||
model_prompt=messages,
|
||||
functions=[],
|
||||
model_name=self._configuration.language_model_required.model_name,
|
||||
completion_parser=self._parse_response,
|
||||
)
|
||||
return AbilityResult(
|
||||
ability_name=self.name(),
|
||||
ability_args={"query": query},
|
||||
success=True,
|
||||
message=model_response.content["content"],
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _parse_response(response_content: dict) -> dict:
|
||||
return {"content": response_content["content"]}
|
||||
30
autogpt/core/ability/schema.py
Normal file
30
autogpt/core/ability/schema.py
Normal file
@@ -0,0 +1,30 @@
|
||||
import enum
|
||||
from typing import Any
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
|
||||
class ContentType(str, enum.Enum):
|
||||
# TBD what these actually are.
|
||||
TEXT = "text"
|
||||
CODE = "code"
|
||||
|
||||
|
||||
class Knowledge(BaseModel):
|
||||
content: str
|
||||
content_type: ContentType
|
||||
content_metadata: dict[str, Any]
|
||||
|
||||
|
||||
class AbilityResult(BaseModel):
|
||||
"""The AbilityResult is a standard response struct for an ability."""
|
||||
|
||||
ability_name: str
|
||||
ability_args: dict[str, str]
|
||||
success: bool
|
||||
message: str
|
||||
new_knowledge: Knowledge = None
|
||||
|
||||
def summary(self) -> str:
|
||||
kwargs = ", ".join(f"{k}={v}" for k, v in self.ability_args.items())
|
||||
return f"{self.ability_name}({kwargs}): {self.message}"
|
||||
96
autogpt/core/ability/simple.py
Normal file
96
autogpt/core/ability/simple.py
Normal file
@@ -0,0 +1,96 @@
|
||||
import logging
|
||||
|
||||
from autogpt.core.ability.base import Ability, AbilityConfiguration, AbilityRegistry
|
||||
from autogpt.core.ability.builtins import BUILTIN_ABILITIES
|
||||
from autogpt.core.ability.schema import AbilityResult
|
||||
from autogpt.core.configuration import Configurable, SystemConfiguration, SystemSettings
|
||||
from autogpt.core.memory.base import Memory
|
||||
from autogpt.core.plugin.simple import SimplePluginService
|
||||
from autogpt.core.resource.model_providers import (
|
||||
LanguageModelProvider,
|
||||
ModelProviderName,
|
||||
)
|
||||
from autogpt.core.workspace.base import Workspace
|
||||
|
||||
|
||||
class AbilityRegistryConfiguration(SystemConfiguration):
|
||||
"""Configuration for the AbilityRegistry subsystem."""
|
||||
|
||||
abilities: dict[str, AbilityConfiguration]
|
||||
|
||||
|
||||
class AbilityRegistrySettings(SystemSettings):
|
||||
configuration: AbilityRegistryConfiguration
|
||||
|
||||
|
||||
class SimpleAbilityRegistry(AbilityRegistry, Configurable):
|
||||
default_settings = AbilityRegistrySettings(
|
||||
name="simple_ability_registry",
|
||||
description="A simple ability registry.",
|
||||
configuration=AbilityRegistryConfiguration(
|
||||
abilities={
|
||||
ability_name: ability.default_configuration
|
||||
for ability_name, ability in BUILTIN_ABILITIES.items()
|
||||
},
|
||||
),
|
||||
)
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
settings: AbilityRegistrySettings,
|
||||
logger: logging.Logger,
|
||||
memory: Memory,
|
||||
workspace: Workspace,
|
||||
model_providers: dict[ModelProviderName, LanguageModelProvider],
|
||||
):
|
||||
self._configuration = settings.configuration
|
||||
self._logger = logger
|
||||
self._memory = memory
|
||||
self._workspace = workspace
|
||||
self._model_providers = model_providers
|
||||
self._abilities = []
|
||||
for (
|
||||
ability_name,
|
||||
ability_configuration,
|
||||
) in self._configuration.abilities.items():
|
||||
self.register_ability(ability_name, ability_configuration)
|
||||
|
||||
def register_ability(
|
||||
self, ability_name: str, ability_configuration: AbilityConfiguration
|
||||
) -> None:
|
||||
ability_class = SimplePluginService.get_plugin(ability_configuration.location)
|
||||
ability_args = {
|
||||
"logger": self._logger.getChild(ability_name),
|
||||
"configuration": ability_configuration,
|
||||
}
|
||||
if ability_configuration.packages_required:
|
||||
# TODO: Check packages are installed and maybe install them.
|
||||
pass
|
||||
if ability_configuration.memory_provider_required:
|
||||
ability_args["memory"] = self._memory
|
||||
if ability_configuration.workspace_required:
|
||||
ability_args["workspace"] = self._workspace
|
||||
if ability_configuration.language_model_required:
|
||||
ability_args["language_model_provider"] = self._model_providers[
|
||||
ability_configuration.language_model_required.provider_name
|
||||
]
|
||||
ability = ability_class(**ability_args)
|
||||
self._abilities.append(ability)
|
||||
|
||||
def list_abilities(self) -> list[str]:
|
||||
return [
|
||||
f"{ability.name()}: {ability.description()}" for ability in self._abilities
|
||||
]
|
||||
|
||||
def dump_abilities(self) -> list[dict]:
|
||||
return [ability.dump() for ability in self._abilities]
|
||||
|
||||
def get_ability(self, ability_name: str) -> Ability:
|
||||
for ability in self._abilities:
|
||||
if ability.name() == ability_name:
|
||||
return ability
|
||||
raise ValueError(f"Ability '{ability_name}' not found.")
|
||||
|
||||
async def perform(self, ability_name: str, **kwargs) -> AbilityResult:
|
||||
ability = self.get_ability(ability_name)
|
||||
return await ability(**kwargs)
|
||||
3
autogpt/core/agent/__init__.py
Normal file
3
autogpt/core/agent/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
"""The Agent is an autonomouos entity guided by a LLM provider."""
|
||||
from autogpt.core.agent.base import Agent
|
||||
from autogpt.core.agent.simple import AgentSettings, SimpleAgent
|
||||
26
autogpt/core/agent/base.py
Normal file
26
autogpt/core/agent/base.py
Normal file
@@ -0,0 +1,26 @@
|
||||
import abc
|
||||
import logging
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
class Agent(abc.ABC):
|
||||
@abc.abstractmethod
|
||||
def __init__(self, *args, **kwargs):
|
||||
...
|
||||
|
||||
@classmethod
|
||||
@abc.abstractmethod
|
||||
def from_workspace(
|
||||
cls,
|
||||
workspace_path: Path,
|
||||
logger: logging.Logger,
|
||||
) -> "Agent":
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
async def determine_next_ability(self, *args, **kwargs):
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
def __repr__(self):
|
||||
...
|
||||
391
autogpt/core/agent/simple.py
Normal file
391
autogpt/core/agent/simple.py
Normal file
@@ -0,0 +1,391 @@
|
||||
import logging
from datetime import datetime
from pathlib import Path
from typing import Any

from pydantic import BaseModel

from autogpt.core.ability import (
    AbilityRegistrySettings,
    AbilityResult,
    SimpleAbilityRegistry,
)
from autogpt.core.agent.base import Agent
from autogpt.core.configuration import Configurable, SystemConfiguration, SystemSettings
from autogpt.core.memory import MemorySettings, SimpleMemory
from autogpt.core.planning import PlannerSettings, SimplePlanner, Task, TaskStatus
from autogpt.core.plugin.simple import (
    PluginLocation,
    PluginStorageFormat,
    SimplePluginService,
)
from autogpt.core.resource.model_providers import OpenAIProvider, OpenAISettings
from autogpt.core.workspace.simple import SimpleWorkspace, WorkspaceSettings


class AgentSystems(SystemConfiguration):
    ability_registry: PluginLocation
    memory: PluginLocation
    openai_provider: PluginLocation
    planning: PluginLocation
    workspace: PluginLocation


class AgentConfiguration(SystemConfiguration):
    cycle_count: int
    max_task_cycle_count: int
    creation_time: str
    name: str
    role: str
    goals: list[str]
    systems: AgentSystems


class AgentSystemSettings(SystemSettings):
    configuration: AgentConfiguration


class AgentSettings(BaseModel):
    agent: AgentSystemSettings
    ability_registry: AbilityRegistrySettings
    memory: MemorySettings
    openai_provider: OpenAISettings
    planning: PlannerSettings
    workspace: WorkspaceSettings

    def update_agent_name_and_goals(self, agent_goals: dict) -> None:
        self.agent.configuration.name = agent_goals["agent_name"]
        self.agent.configuration.role = agent_goals["agent_role"]
        self.agent.configuration.goals = agent_goals["agent_goals"]


class SimpleAgent(Agent, Configurable):
    default_settings = AgentSystemSettings(
        name="simple_agent",
        description="A simple agent.",
        configuration=AgentConfiguration(
            name="Entrepreneur-GPT",
            role=(
                "An AI designed to autonomously develop and run businesses with "
                "the sole goal of increasing your net worth."
            ),
            goals=[
                "Increase net worth",
                "Grow Twitter Account",
                "Develop and manage multiple businesses autonomously",
            ],
            cycle_count=0,
            max_task_cycle_count=3,
            creation_time="",
            systems=AgentSystems(
                ability_registry=PluginLocation(
                    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
                    storage_route="autogpt.core.ability.SimpleAbilityRegistry",
                ),
                memory=PluginLocation(
                    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
                    storage_route="autogpt.core.memory.SimpleMemory",
                ),
                openai_provider=PluginLocation(
                    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
                    storage_route="autogpt.core.resource.model_providers.OpenAIProvider",
                ),
                planning=PluginLocation(
                    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
                    storage_route="autogpt.core.planning.SimplePlanner",
                ),
                workspace=PluginLocation(
                    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
                    storage_route="autogpt.core.workspace.SimpleWorkspace",
                ),
            ),
        ),
    )

    def __init__(
        self,
        settings: AgentSystemSettings,
        logger: logging.Logger,
        ability_registry: SimpleAbilityRegistry,
        memory: SimpleMemory,
        openai_provider: OpenAIProvider,
        planning: SimplePlanner,
        workspace: SimpleWorkspace,
    ):
        self._configuration = settings.configuration
        self._logger = logger
        self._ability_registry = ability_registry
        self._memory = memory
        # FIXME: Need some work to make this work as a dict of providers
        # Getting the construction of the config to work is a bit tricky
        self._openai_provider = openai_provider
        self._planning = planning
        self._workspace = workspace
        self._task_queue = []
        self._completed_tasks = []
        self._current_task = None
        self._next_ability = None

    @classmethod
    def from_workspace(
        cls,
        workspace_path: Path,
        logger: logging.Logger,
    ) -> "SimpleAgent":
        agent_settings = SimpleWorkspace.load_agent_settings(workspace_path)
        agent_args = {}

        agent_args["settings"] = agent_settings.agent
        agent_args["logger"] = logger
        agent_args["workspace"] = cls._get_system_instance(
            "workspace",
            agent_settings,
            logger,
        )
        agent_args["openai_provider"] = cls._get_system_instance(
            "openai_provider",
            agent_settings,
            logger,
        )
        agent_args["planning"] = cls._get_system_instance(
            "planning",
            agent_settings,
            logger,
            model_providers={"openai": agent_args["openai_provider"]},
        )
        agent_args["memory"] = cls._get_system_instance(
            "memory",
            agent_settings,
            logger,
            workspace=agent_args["workspace"],
        )

        agent_args["ability_registry"] = cls._get_system_instance(
            "ability_registry",
            agent_settings,
            logger,
            workspace=agent_args["workspace"],
            memory=agent_args["memory"],
            model_providers={"openai": agent_args["openai_provider"]},
        )

        return cls(**agent_args)

    async def build_initial_plan(self) -> dict:
        plan = await self._planning.make_initial_plan(
            agent_name=self._configuration.name,
            agent_role=self._configuration.role,
            agent_goals=self._configuration.goals,
            abilities=self._ability_registry.list_abilities(),
        )
        tasks = [Task.parse_obj(task) for task in plan.content["task_list"]]

        # TODO: Should probably do a step to evaluate the quality of the generated tasks,
        # and ensure that they have actionable ready and acceptance criteria

        self._task_queue.extend(tasks)
        self._task_queue.sort(key=lambda t: t.priority, reverse=True)
        self._task_queue[-1].context.status = TaskStatus.READY
        return plan.content

    async def determine_next_ability(self, *args, **kwargs):
        if not self._task_queue:
            return {"response": "I don't have any tasks to work on right now."}

        self._configuration.cycle_count += 1
        task = self._task_queue.pop()
        self._logger.info(f"Working on task: {task}")

        task = await self._evaluate_task_and_add_context(task)
        next_ability = await self._choose_next_ability(
            task,
            self._ability_registry.dump_abilities(),
        )
        self._current_task = task
        self._next_ability = next_ability.content
        return self._current_task, self._next_ability

    async def execute_next_ability(self, user_input: str, *args, **kwargs):
        if user_input == "y":
            ability = self._ability_registry.get_ability(
                self._next_ability["next_ability"]
            )
            ability_response = await ability(**self._next_ability["ability_arguments"])
            await self._update_tasks_and_memory(ability_response)
            if self._current_task.context.status == TaskStatus.DONE:
                self._completed_tasks.append(self._current_task)
            else:
                self._task_queue.append(self._current_task)
            self._current_task = None
            self._next_ability = None

            return ability_response.dict()
        else:
            raise NotImplementedError

    async def _evaluate_task_and_add_context(self, task: Task) -> Task:
        """Evaluate the task and add context to it."""
        if task.context.status == TaskStatus.IN_PROGRESS:
            # Nothing to do here
            return task
        else:
            self._logger.debug(f"Evaluating task {task} and adding relevant context.")
            # TODO: Look up relevant memories (need working memory system)
            # TODO: Evaluate whether there is enough information to start the task (language model call).
            task.context.enough_info = True
            task.context.status = TaskStatus.IN_PROGRESS
            return task

    async def _choose_next_ability(self, task: Task, ability_schema: list[dict]):
        """Choose the next ability to use for the task."""
        self._logger.debug(f"Choosing next ability for task {task}.")
        if task.context.cycle_count > self._configuration.max_task_cycle_count:
            # Don't hit the LLM, just set the next action as "breakdown_task" with an appropriate reason
            raise NotImplementedError
        elif not task.context.enough_info:
            # Don't ask the LLM, just set the next action as "breakdown_task" with an appropriate reason
            raise NotImplementedError
        else:
            next_ability = await self._planning.determine_next_ability(
                task, ability_schema
            )
            return next_ability

    async def _update_tasks_and_memory(self, ability_result: AbilityResult):
        self._current_task.context.cycle_count += 1
        self._current_task.context.prior_actions.append(ability_result)
        # TODO: Summarize new knowledge
        # TODO: store knowledge and summaries in memory and in relevant tasks
        # TODO: evaluate whether the task is complete

    def __repr__(self):
        return "SimpleAgent()"

    ################################################################
    # Factory interface for agent bootstrapping and initialization #
    ################################################################

    @classmethod
    def build_user_configuration(cls) -> dict[str, Any]:
        """Build the user's configuration."""
        configuration_dict = {
            "agent": cls.get_user_config(),
        }

        system_locations = configuration_dict["agent"]["configuration"]["systems"]
        for system_name, system_location in system_locations.items():
            system_class = SimplePluginService.get_plugin(system_location)
            configuration_dict[system_name] = system_class.get_user_config()
        configuration_dict = _prune_empty_dicts(configuration_dict)
        return configuration_dict

    @classmethod
    def compile_settings(
        cls, logger: logging.Logger, user_configuration: dict
    ) -> AgentSettings:
        """Compile the user's configuration with the defaults."""
        logger.debug("Processing agent system configuration.")
        configuration_dict = {
            "agent": cls.build_agent_configuration(
                user_configuration.get("agent", {})
            ).dict(),
        }

        system_locations = configuration_dict["agent"]["configuration"]["systems"]

        # Build up default configuration
        for system_name, system_location in system_locations.items():
            logger.debug(f"Compiling configuration for system {system_name}")
            system_class = SimplePluginService.get_plugin(system_location)
            configuration_dict[system_name] = system_class.build_agent_configuration(
                user_configuration.get(system_name, {})
            ).dict()

        return AgentSettings.parse_obj(configuration_dict)

    @classmethod
    async def determine_agent_name_and_goals(
        cls,
        user_objective: str,
        agent_settings: AgentSettings,
        logger: logging.Logger,
    ) -> dict:
        logger.debug("Loading OpenAI provider.")
        provider: OpenAIProvider = cls._get_system_instance(
            "openai_provider",
            agent_settings,
            logger=logger,
        )
        logger.debug("Loading agent planner.")
        agent_planner: SimplePlanner = cls._get_system_instance(
            "planning",
            agent_settings,
            logger=logger,
            model_providers={"openai": provider},
        )
        logger.debug("Determining agent name and goals.")
        model_response = await agent_planner.decide_name_and_goals(
            user_objective,
        )

        return model_response.content

    @classmethod
    def provision_agent(
        cls,
        agent_settings: AgentSettings,
        logger: logging.Logger,
    ):
        agent_settings.agent.configuration.creation_time = datetime.now().strftime(
            "%Y%m%d_%H%M%S"
        )
        workspace: SimpleWorkspace = cls._get_system_instance(
            "workspace",
            agent_settings,
            logger=logger,
        )
        return workspace.setup_workspace(agent_settings, logger)

    @classmethod
    def _get_system_instance(
        cls,
        system_name: str,
        agent_settings: AgentSettings,
        logger: logging.Logger,
        *args,
        **kwargs,
    ):
        system_locations = agent_settings.agent.configuration.systems.dict()

        system_settings = getattr(agent_settings, system_name)
        system_class = SimplePluginService.get_plugin(system_locations[system_name])
        system_instance = system_class(
            system_settings,
            *args,
            logger=logger.getChild(system_name),
            **kwargs,
        )
        return system_instance


def _prune_empty_dicts(d: dict) -> dict:
    """
    Prune branches from a nested dictionary if the branch only contains empty dictionaries at the leaves.

    Args:
        d: The dictionary to prune.

    Returns:
        The pruned dictionary.
    """
    pruned = {}
    for key, value in d.items():
        if isinstance(value, dict):
            pruned_value = _prune_empty_dicts(value)
            if pruned_value:
                # If the pruned dictionary is not empty, add it to the result.
                pruned[key] = pruned_value
        else:
            pruned[key] = value
    return pruned

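
Taken together, the factory interface above implies a bootstrapping sequence. The sketch below is a minimal, hypothetical driver; the CLI entry point it stands in for is not part of this diff, and `provision_agent` is assumed here to return the new workspace path.

# Hypothetical bootstrapping sketch (no entry point is shown in this diff).
import asyncio
import logging

logger = logging.getLogger("agent")
user_configuration = SimpleAgent.build_user_configuration()
agent_settings = SimpleAgent.compile_settings(logger, user_configuration)

name_and_goals = asyncio.run(
    SimpleAgent.determine_agent_name_and_goals(
        "Write a wikipedia style article about Auto-GPT", agent_settings, logger
    )
)
agent_settings.update_agent_name_and_goals(name_and_goals)

workspace_path = SimpleAgent.provision_agent(agent_settings, logger)  # assumed return value
agent = SimpleAgent.from_workspace(workspace_path, logger)
plan = asyncio.run(agent.build_initial_plan())
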
@@ -0,0 +1,7 @@
"""The configuration encapsulates settings for all Agent subsystems."""
from autogpt.core.configuration.schema import (
    Configurable,
    SystemConfiguration,
    SystemSettings,
    UserConfigurable,
)

@@ -1,9 +1,12 @@
import abc
import copy
import typing
from typing import Any
from typing import Any, Generic, TypeVar

from pydantic import BaseModel
from pydantic import BaseModel, Field


def UserConfigurable(*args, **kwargs):
    return Field(*args, **kwargs, user_configurable=True)


class SystemConfiguration(BaseModel):
@@ -15,42 +18,47 @@ class SystemConfiguration(BaseModel):
        use_enum_values = True


class SystemSettings(BaseModel, abc.ABC):
class SystemSettings(BaseModel):
    """A base class for all system settings."""

    name: str
    description: typing.Optional[str]
    description: str

    class Config:
        extra = "forbid"
        use_enum_values = True


class Configurable(abc.ABC):
S = TypeVar("S", bound=SystemSettings)


class Configurable(abc.ABC, Generic[S]):
    """A base class for all configurable objects."""

    prefix: str = ""
    defaults_settings: typing.ClassVar[SystemSettings]
    default_settings: typing.ClassVar[S]

    @classmethod
    def get_user_config(cls) -> dict[str, Any]:
        return _get_user_config_fields(cls.defaults_settings)
        return _get_user_config_fields(cls.default_settings)

    @classmethod
    def build_agent_configuration(cls, configuration: dict = {}) -> SystemSettings:
    def build_agent_configuration(cls, configuration: dict) -> S:
        """Process the configuration for this object."""

        defaults_settings = cls.defaults_settings.dict()
        final_configuration = deep_update(defaults_settings, configuration)
        defaults = cls.default_settings.dict()
        final_configuration = deep_update(defaults, configuration)

        return cls.defaults_settings.__class__.parse_obj(final_configuration)
        return cls.default_settings.__class__.parse_obj(final_configuration)


def _get_user_config_fields(instance: BaseModel) -> dict[str, Any]:
    """
    Get the user config fields of a Pydantic model instance.

    Args:
        instance: The Pydantic model instance.

    Returns:
        The user config fields of the instance.
    """
@@ -79,13 +87,14 @@ def _get_user_config_fields(instance: BaseModel) -> dict[str, Any]:
def deep_update(original_dict: dict, update_dict: dict) -> dict:
    """
    Recursively update a dictionary.

    Args:
        original_dict (dict): The dictionary to be updated.
        update_dict (dict): The dictionary to update with.

    Returns:
        dict: The updated dictionary.
    """
    original_dict = copy.deepcopy(original_dict)
    for key, value in update_dict.items():
        if (
            key in original_dict

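
The hunk above is cut off by the diff view, but `deep_update`'s documented behavior is a recursive merge, which is what lets user overrides in `build_agent_configuration` layer over the defaults. A small invented illustration:

# Minimal illustration of deep_update semantics (invented values).
defaults = {"models": {"fast": "gpt-3.5-turbo", "smart": "gpt-4"}, "retries": 3}
overrides = {"models": {"smart": "gpt-4-32k"}}
merged = deep_update(defaults, overrides)
# merged == {"models": {"fast": "gpt-3.5-turbo", "smart": "gpt-4-32k"}, "retries": 3}
# Note the original input is deep-copied, so `defaults` is left untouched.
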
3 autogpt/core/memory/__init__.py Normal file
@@ -0,0 +1,3 @@
"""The memory subsystem manages the Agent's long-term memory."""
|
||||
from autogpt.core.memory.base import Memory
|
||||
from autogpt.core.memory.simple import MemorySettings, SimpleMemory
|
||||
13 autogpt/core/memory/base.py Normal file
@@ -0,0 +1,13 @@
import abc


class Memory(abc.ABC):
    pass


class MemoryItem(abc.ABC):
    pass


class MessageHistory(abc.ABC):
    pass
47 autogpt/core/memory/simple.py Normal file
@@ -0,0 +1,47 @@
import json
import logging

from autogpt.core.configuration import Configurable, SystemConfiguration, SystemSettings
from autogpt.core.memory.base import Memory
from autogpt.core.workspace import Workspace


class MemoryConfiguration(SystemConfiguration):
    pass


class MemorySettings(SystemSettings):
    configuration: MemoryConfiguration


class MessageHistory:
    def __init__(self, previous_message_history: list[str]):
        self._message_history = previous_message_history


class SimpleMemory(Memory, Configurable):
    default_settings = MemorySettings(
        name="simple_memory",
        description="A simple memory.",
        configuration=MemoryConfiguration(),
    )

    def __init__(
        self,
        settings: MemorySettings,
        logger: logging.Logger,
        workspace: Workspace,
    ):
        self._configuration = settings.configuration
        self._logger = logger
        self._message_history = self._load_message_history(workspace)

    @staticmethod
    def _load_message_history(workspace: Workspace):
        message_history_path = workspace.get_path("message_history.json")
        if message_history_path.exists():
            with message_history_path.open("r") as f:
                message_history = json.load(f)
        else:
            message_history = []
        return MessageHistory(message_history)

10 autogpt/core/planning/__init__.py Normal file
@@ -0,0 +1,10 @@
"""The planning system organizes the Agent's activities."""
|
||||
from autogpt.core.planning.schema import (
|
||||
LanguageModelClassification,
|
||||
LanguageModelPrompt,
|
||||
LanguageModelResponse,
|
||||
Task,
|
||||
TaskStatus,
|
||||
TaskType,
|
||||
)
|
||||
from autogpt.core.planning.simple import PlannerSettings, SimplePlanner
|
||||
76 autogpt/core/planning/base.py Normal file
@@ -0,0 +1,76 @@
import abc

from autogpt.core.configuration import SystemConfiguration
from autogpt.core.planning.schema import (
    LanguageModelClassification,
    LanguageModelPrompt,
)

# class Planner(abc.ABC):
#     """Manages the agent's planning and goal-setting by constructing language model prompts."""
#
#     @staticmethod
#     @abc.abstractmethod
#     async def decide_name_and_goals(
#         user_objective: str,
#     ) -> LanguageModelResponse:
#         """Decide the name and goals of an Agent from a user-defined objective.
#
#         Args:
#             user_objective: The user-defined objective for the agent.
#
#         Returns:
#             The agent name and goals as a response from the language model.
#
#         """
#         ...
#
#     @abc.abstractmethod
#     async def plan(self, context: PlanningContext) -> LanguageModelResponse:
#         """Plan the next ability for the Agent.
#
#         Args:
#             context: A context object containing information about the agent's
#                 progress, result, memories, and feedback.
#
#
#         Returns:
#             The next ability the agent should take along with thoughts and reasoning.
#
#         """
#         ...
#
#     @abc.abstractmethod
#     def reflect(
#         self,
#         context: ReflectionContext,
#     ) -> LanguageModelResponse:
#         """Reflect on a planned ability and provide self-criticism.
#
#
#         Args:
#             context: A context object containing information about the agent's
#                 reasoning, plan, thoughts, and criticism.
#
#         Returns:
#             Self-criticism about the agent's plan.
#
#         """
#         ...


class PromptStrategy(abc.ABC):
    default_configuration: SystemConfiguration

    @property
    @abc.abstractmethod
    def model_classification(self) -> LanguageModelClassification:
        ...

    @abc.abstractmethod
    def build_prompt(self, *_, **kwargs) -> LanguageModelPrompt:
        ...

    @abc.abstractmethod
    def parse_response_content(self, response_content: dict) -> dict:
        ...
76 autogpt/core/planning/schema.py Normal file
@@ -0,0 +1,76 @@
import enum

from pydantic import BaseModel, Field

from autogpt.core.ability.schema import AbilityResult
from autogpt.core.resource.model_providers.schema import (
    LanguageModelFunction,
    LanguageModelMessage,
    LanguageModelProviderModelResponse,
)


class LanguageModelClassification(str, enum.Enum):
    """The LanguageModelClassification is a functional description of the model.

    This is used to determine what kind of model to use for a given prompt.
    Sometimes we prefer a faster or cheaper model to accomplish a task when
    possible.

    """

    FAST_MODEL: str = "fast_model"
    SMART_MODEL: str = "smart_model"


class LanguageModelPrompt(BaseModel):
    messages: list[LanguageModelMessage]
    functions: list[LanguageModelFunction] = Field(default_factory=list)

    def __str__(self):
        return "\n\n".join([f"{m.role.value}: {m.content}" for m in self.messages])


class LanguageModelResponse(LanguageModelProviderModelResponse):
    """Standard response struct for a response from a language model."""


class TaskType(str, enum.Enum):
    RESEARCH: str = "research"
    WRITE: str = "write"
    EDIT: str = "edit"
    CODE: str = "code"
    DESIGN: str = "design"
    TEST: str = "test"
    PLAN: str = "plan"


class TaskStatus(str, enum.Enum):
    BACKLOG: str = "backlog"
    READY: str = "ready"
    IN_PROGRESS: str = "in_progress"
    DONE: str = "done"


class TaskContext(BaseModel):
    cycle_count: int = 0
    status: TaskStatus = TaskStatus.BACKLOG
    parent: "Task" = None
    prior_actions: list[AbilityResult] = Field(default_factory=list)
    memories: list = Field(default_factory=list)
    user_input: list[str] = Field(default_factory=list)
    supplementary_info: list[str] = Field(default_factory=list)
    enough_info: bool = False


class Task(BaseModel):
    objective: str
    type: str  # TaskType  FIXME: gpt does not obey the enum parameter in its schema
    priority: int
    ready_criteria: list[str]
    acceptance_criteria: list[str]
    context: TaskContext = Field(default_factory=TaskContext)


# Need to resolve the circular dependency between Task and TaskContext once both models are defined.
TaskContext.update_forward_refs()

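
To make the Task/TaskContext shape concrete, here is a small sketch of constructing a task; the field values are invented for illustration.

# Constructing a Task via parse_obj (invented values); context fields come from TaskContext defaults.
task = Task.parse_obj(
    {
        "objective": "Draft an outline for the article",
        "type": "write",
        "priority": 8,
        "ready_criteria": ["Research notes are saved to a file"],
        "acceptance_criteria": ["An outline with at least five sections exists"],
    }
)
assert task.context.status == TaskStatus.BACKLOG  # default from TaskContext
task.context.status = TaskStatus.READY
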
182 autogpt/core/planning/simple.py Normal file
@@ -0,0 +1,182 @@
import logging
import platform
import time

import distro

from autogpt.core.configuration import (
    Configurable,
    SystemConfiguration,
    SystemSettings,
    UserConfigurable,
)
from autogpt.core.planning import strategies
from autogpt.core.planning.base import PromptStrategy
from autogpt.core.planning.schema import (
    LanguageModelClassification,
    LanguageModelResponse,
    Task,
)
from autogpt.core.resource.model_providers import (
    LanguageModelProvider,
    ModelProviderName,
    OpenAIModelName,
)
from autogpt.core.workspace import Workspace


class LanguageModelConfiguration(SystemConfiguration):
    """Struct for model configuration."""

    model_name: str = UserConfigurable()
    provider_name: ModelProviderName = UserConfigurable()
    temperature: float = UserConfigurable()


class PromptStrategiesConfiguration(SystemConfiguration):
    name_and_goals: strategies.NameAndGoalsConfiguration
    initial_plan: strategies.InitialPlanConfiguration
    next_ability: strategies.NextAbilityConfiguration


class PlannerConfiguration(SystemConfiguration):
    """Configuration for the Planner subsystem."""

    models: dict[LanguageModelClassification, LanguageModelConfiguration]
    prompt_strategies: PromptStrategiesConfiguration


class PlannerSettings(SystemSettings):
    """Settings for the Planner subsystem."""

    configuration: PlannerConfiguration


class SimplePlanner(Configurable):
    """Manages the agent's planning and goal-setting by constructing language model prompts."""

    default_settings = PlannerSettings(
        name="planner",
        description="Manages the agent's planning and goal-setting by constructing language model prompts.",
        configuration=PlannerConfiguration(
            models={
                LanguageModelClassification.FAST_MODEL: LanguageModelConfiguration(
                    model_name=OpenAIModelName.GPT3,
                    provider_name=ModelProviderName.OPENAI,
                    temperature=0.9,
                ),
                LanguageModelClassification.SMART_MODEL: LanguageModelConfiguration(
                    model_name=OpenAIModelName.GPT4,
                    provider_name=ModelProviderName.OPENAI,
                    temperature=0.9,
                ),
            },
            prompt_strategies=PromptStrategiesConfiguration(
                name_and_goals=strategies.NameAndGoals.default_configuration,
                initial_plan=strategies.InitialPlan.default_configuration,
                next_ability=strategies.NextAbility.default_configuration,
            ),
        ),
    )

    def __init__(
        self,
        settings: PlannerSettings,
        logger: logging.Logger,
        model_providers: dict[ModelProviderName, LanguageModelProvider],
        workspace: Workspace = None,  # Workspace is not available during bootstrapping.
    ) -> None:
        self._configuration = settings.configuration
        self._logger = logger
        self._workspace = workspace

        self._providers: dict[LanguageModelClassification, LanguageModelProvider] = {}
        for model, model_config in self._configuration.models.items():
            self._providers[model] = model_providers[model_config.provider_name]

        self._prompt_strategies = {
            "name_and_goals": strategies.NameAndGoals(
                **self._configuration.prompt_strategies.name_and_goals.dict()
            ),
            "initial_plan": strategies.InitialPlan(
                **self._configuration.prompt_strategies.initial_plan.dict()
            ),
            "next_ability": strategies.NextAbility(
                **self._configuration.prompt_strategies.next_ability.dict()
            ),
        }

    async def decide_name_and_goals(self, user_objective: str) -> LanguageModelResponse:
        return await self.chat_with_model(
            self._prompt_strategies["name_and_goals"],
            user_objective=user_objective,
        )

    async def make_initial_plan(
        self,
        agent_name: str,
        agent_role: str,
        agent_goals: list[str],
        abilities: list[str],
    ) -> LanguageModelResponse:
        return await self.chat_with_model(
            self._prompt_strategies["initial_plan"],
            agent_name=agent_name,
            agent_role=agent_role,
            agent_goals=agent_goals,
            abilities=abilities,
        )

    async def determine_next_ability(
        self,
        task: Task,
        ability_schema: list[dict],
    ):
        return await self.chat_with_model(
            self._prompt_strategies["next_ability"],
            task=task,
            ability_schema=ability_schema,
        )

    async def chat_with_model(
        self,
        prompt_strategy: PromptStrategy,
        **kwargs,
    ) -> LanguageModelResponse:
        model_classification = prompt_strategy.model_classification
        model_configuration = self._configuration.models[model_classification].dict()
        self._logger.debug(f"Using model configuration: {model_configuration}")
        del model_configuration["provider_name"]
        provider = self._providers[model_classification]

        template_kwargs = self._make_template_kwargs_for_strategy(prompt_strategy)
        template_kwargs.update(kwargs)
        prompt = prompt_strategy.build_prompt(**template_kwargs)

        self._logger.debug(f"Using prompt:\n{prompt}\n\n")
        response = await provider.create_language_completion(
            model_prompt=prompt.messages,
            functions=prompt.functions,
            **model_configuration,
            completion_parser=prompt_strategy.parse_response_content,
        )
        return LanguageModelResponse.parse_obj(response.dict())

    def _make_template_kwargs_for_strategy(self, strategy: PromptStrategy):
        provider = self._providers[strategy.model_classification]
        template_kwargs = {
            "os_info": get_os_info(),
            "api_budget": provider.get_remaining_budget(),
            "current_time": time.strftime("%c"),
        }
        return template_kwargs


def get_os_info() -> str:
    os_name = platform.system()
    os_info = (
        platform.platform(terse=True)
        if os_name != "Linux"
        else distro.name(pretty=True)
    )
    return os_info

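
A minimal sketch of driving SimplePlanner directly, assuming an already-configured OpenAIProvider instance (provider construction is not shown in this diff); the string key mirrors the one used in SimpleAgent.from_workspace.

# Hypothetical SimplePlanner usage (assumes a configured OpenAIProvider).
import asyncio
import logging

planner = SimplePlanner(
    settings=SimplePlanner.default_settings,
    logger=logging.getLogger("planner"),
    model_providers={"openai": openai_provider},  # assumed provider instance
)
response = asyncio.run(planner.decide_name_and_goals("Help me with marketing my business"))
print(response.content["agent_name"], response.content["agent_goals"])
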
12 autogpt/core/planning/strategies/__init__.py Normal file
@@ -0,0 +1,12 @@
from autogpt.core.planning.strategies.initial_plan import (
    InitialPlan,
    InitialPlanConfiguration,
)
from autogpt.core.planning.strategies.name_and_goals import (
    NameAndGoals,
    NameAndGoalsConfiguration,
)
from autogpt.core.planning.strategies.next_ability import (
    NextAbility,
    NextAbilityConfiguration,
)
190 autogpt/core/planning/strategies/initial_plan.py Normal file
@@ -0,0 +1,190 @@
from autogpt.core.configuration import SystemConfiguration, UserConfigurable
from autogpt.core.planning.base import PromptStrategy
from autogpt.core.planning.schema import (
    LanguageModelClassification,
    LanguageModelPrompt,
    Task,
    TaskType,
)
from autogpt.core.planning.strategies.utils import json_loads, to_numbered_list
from autogpt.core.resource.model_providers import (
    LanguageModelFunction,
    LanguageModelMessage,
    MessageRole,
)


class InitialPlanConfiguration(SystemConfiguration):
    model_classification: LanguageModelClassification = UserConfigurable()
    system_prompt_template: str = UserConfigurable()
    system_info: list[str] = UserConfigurable()
    user_prompt_template: str = UserConfigurable()
    create_plan_function: dict = UserConfigurable()


class InitialPlan(PromptStrategy):
    DEFAULT_SYSTEM_PROMPT_TEMPLATE = (
        "You are an expert project planner. Your responsibility is to create work plans for autonomous agents. "
        "You will be given a name, a role, and a set of goals for the agent to accomplish. Your job is to "
        "break down those goals into a set of tasks that the agent can accomplish to achieve those goals. "
        "Agents are resourceful, but require clear instructions. Each task you create should have clearly defined "
        "`ready_criteria` that the agent can check to see if the task is ready to be started. Each task should "
        "also have clearly defined `acceptance_criteria` that the agent can check to evaluate if the task is complete. "
        "You should create as many tasks as you think is necessary to accomplish the goals.\n\n"
        "System Info:\n{system_info}"
    )

    DEFAULT_SYSTEM_INFO = [
        "The OS you are running on is: {os_info}",
        "It takes money to let you run. Your API budget is ${api_budget:.3f}",
        "The current time and date is {current_time}",
    ]

    DEFAULT_USER_PROMPT_TEMPLATE = (
        "You are {agent_name}, {agent_role}\n" "Your goals are:\n" "{agent_goals}"
    )

    DEFAULT_CREATE_PLAN_FUNCTION = {
        "name": "create_initial_agent_plan",
        "description": "Creates a set of tasks that forms the initial plan for an autonomous agent.",
        "parameters": {
            "type": "object",
            "properties": {
                "task_list": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "objective": {
                                "type": "string",
                                "description": "An imperative verb phrase that succinctly describes the task.",
                            },
                            "type": {
                                "type": "string",
                                "description": "A categorization for the task.",
                                "enum": [t.value for t in TaskType],
                            },
                            "acceptance_criteria": {
                                "type": "array",
                                "items": {
                                    "type": "string",
                                    "description": "A list of measurable and testable criteria that must be met for the task to be considered complete.",
                                },
                            },
                            "priority": {
                                "type": "integer",
                                "description": "A number between 1 and 10 indicating the priority of the task relative to other generated tasks.",
                                "minimum": 1,
                                "maximum": 10,
                            },
                            "ready_criteria": {
                                "type": "array",
                                "items": {
                                    "type": "string",
                                    "description": "A list of measurable and testable criteria that must be met before the task can be started.",
                                },
                            },
                        },
                        "required": [
                            "objective",
                            "type",
                            "acceptance_criteria",
                            "priority",
                            "ready_criteria",
                        ],
                    },
                },
            },
        },
    }

    default_configuration = InitialPlanConfiguration(
        model_classification=LanguageModelClassification.SMART_MODEL,
        system_prompt_template=DEFAULT_SYSTEM_PROMPT_TEMPLATE,
        system_info=DEFAULT_SYSTEM_INFO,
        user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE,
        create_plan_function=DEFAULT_CREATE_PLAN_FUNCTION,
    )

    def __init__(
        self,
        model_classification: LanguageModelClassification,
        system_prompt_template: str,
        system_info: list[str],
        user_prompt_template: str,
        create_plan_function: dict,
    ):
        self._model_classification = model_classification
        self._system_prompt_template = system_prompt_template
        self._system_info = system_info
        self._user_prompt_template = user_prompt_template
        self._create_plan_function = create_plan_function

    @property
    def model_classification(self) -> LanguageModelClassification:
        return self._model_classification

    def build_prompt(
        self,
        agent_name: str,
        agent_role: str,
        agent_goals: list[str],
        abilities: list[str],
        os_info: str,
        api_budget: float,
        current_time: str,
        **kwargs,
    ) -> LanguageModelPrompt:
        template_kwargs = {
            "agent_name": agent_name,
            "agent_role": agent_role,
            "os_info": os_info,
            "api_budget": api_budget,
            "current_time": current_time,
            **kwargs,
        }
        template_kwargs["agent_goals"] = to_numbered_list(
            agent_goals, **template_kwargs
        )
        template_kwargs["abilities"] = to_numbered_list(abilities, **template_kwargs)
        template_kwargs["system_info"] = to_numbered_list(
            self._system_info, **template_kwargs
        )

        system_prompt = LanguageModelMessage(
            role=MessageRole.SYSTEM,
            content=self._system_prompt_template.format(**template_kwargs),
        )
        user_prompt = LanguageModelMessage(
            role=MessageRole.USER,
            content=self._user_prompt_template.format(**template_kwargs),
        )
        create_plan_function = LanguageModelFunction(
            json_schema=self._create_plan_function,
        )

        return LanguageModelPrompt(
            messages=[system_prompt, user_prompt],
            functions=[create_plan_function],
            # TODO:
            tokens_used=0,
        )

    def parse_response_content(
        self,
        response_content: dict,
    ) -> dict:
        """Parse the actual text response from the objective model.

        Args:
            response_content: The raw response content from the objective model.

        Returns:
            The parsed response.

        """
        parsed_response = json_loads(response_content["function_call"]["arguments"])
        parsed_response["task_list"] = [
            Task.parse_obj(task) for task in parsed_response["task_list"]
        ]
        return parsed_response

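
For reference, a function call conforming to DEFAULT_CREATE_PLAN_FUNCTION would carry arguments shaped like the invented example below, which parse_response_content then converts into Task objects.

# Invented example of the arguments the model is expected to return for create_initial_agent_plan.
example_arguments = {
    "task_list": [
        {
            "objective": "Research the Auto-GPT project",
            "type": "research",
            "priority": 9,
            "ready_criteria": ["Internet access is available"],
            "acceptance_criteria": ["Key facts are saved to notes.md"],
        }
    ]
}
# parse_response_content receives this wrapped as:
# {"function_call": {"name": "create_initial_agent_plan",
#                    "arguments": "<JSON string of example_arguments>"}}
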
139 autogpt/core/planning/strategies/name_and_goals.py Normal file
@@ -0,0 +1,139 @@
from autogpt.core.configuration import SystemConfiguration, UserConfigurable
from autogpt.core.planning.base import PromptStrategy
from autogpt.core.planning.schema import (
    LanguageModelClassification,
    LanguageModelPrompt,
)
from autogpt.core.planning.strategies.utils import json_loads
from autogpt.core.resource.model_providers import (
    LanguageModelFunction,
    LanguageModelMessage,
    MessageRole,
)


class NameAndGoalsConfiguration(SystemConfiguration):
    model_classification: LanguageModelClassification = UserConfigurable()
    system_prompt: str = UserConfigurable()
    user_prompt_template: str = UserConfigurable()
    create_agent_function: dict = UserConfigurable()


class NameAndGoals(PromptStrategy):
    DEFAULT_SYSTEM_PROMPT = (
        "Your job is to respond to a user-defined task by invoking the `create_agent` function "
        "to generate an autonomous agent to complete the task. You should supply a role-based "
        "name for the agent, an informative description for what the agent does, and 1 to 5 "
        "goals that are optimally aligned with the successful completion of its assigned task.\n\n"
        "Example Input:\n"
        "Help me with marketing my business\n\n"
        "Example Function Call:\n"
        "create_agent(name='CMOGPT', "
        "description='A professional digital marketer AI that assists Solopreneurs in "
        "growing their businesses by providing world-class expertise in solving "
        "marketing problems for SaaS, content products, agencies, and more.', "
        "goals=['Engage in effective problem-solving, prioritization, planning, and "
        "supporting execution to address your marketing needs as your virtual Chief "
        "Marketing Officer.', 'Provide specific, actionable, and concise advice to "
        "help you make informed decisions without the use of platitudes or overly "
        "wordy explanations.', 'Identify and prioritize quick wins and cost-effective "
        "campaigns that maximize results with minimal time and budget investment.', "
        "'Proactively take the lead in guiding you and offering suggestions when faced "
        "with unclear information or uncertainty to ensure your marketing strategy "
        "remains on track.'])"
    )

    DEFAULT_USER_PROMPT_TEMPLATE = "'{user_objective}'"

    DEFAULT_CREATE_AGENT_FUNCTION = {
        "name": "create_agent",
        "description": "Create a new autonomous AI agent to complete a given task.",
        "parameters": {
            "type": "object",
            "properties": {
                "agent_name": {
                    "type": "string",
                    "description": "A short role-based name for an autonomous agent.",
                },
                "agent_role": {
                    "type": "string",
                    "description": "An informative one sentence description of what the AI agent does.",
                },
                "agent_goals": {
                    "type": "array",
                    "minItems": 1,
                    "maxItems": 5,
                    "items": {
                        "type": "string",
                    },
                    "description": (
                        "One to five highly effective goals that are optimally aligned with the completion of a "
                        "specific task. The number and complexity of the goals should correspond to the "
                        "complexity of the agent's primary objective."
                    ),
                },
            },
            "required": ["agent_name", "agent_role", "agent_goals"],
        },
    }

    default_configuration = NameAndGoalsConfiguration(
        model_classification=LanguageModelClassification.SMART_MODEL,
        system_prompt=DEFAULT_SYSTEM_PROMPT,
        user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE,
        create_agent_function=DEFAULT_CREATE_AGENT_FUNCTION,
    )

    def __init__(
        self,
        model_classification: LanguageModelClassification,
        system_prompt: str,
        user_prompt_template: str,
        create_agent_function: dict,
    ):
        self._model_classification = model_classification
        self._system_prompt_message = system_prompt
        self._user_prompt_template = user_prompt_template
        self._create_agent_function = create_agent_function

    @property
    def model_classification(self) -> LanguageModelClassification:
        return self._model_classification

    def build_prompt(self, user_objective: str = "", **kwargs) -> LanguageModelPrompt:
        system_message = LanguageModelMessage(
            role=MessageRole.SYSTEM,
            content=self._system_prompt_message,
        )
        user_message = LanguageModelMessage(
            role=MessageRole.USER,
            content=self._user_prompt_template.format(
                user_objective=user_objective,
            ),
        )
        create_agent_function = LanguageModelFunction(
            json_schema=self._create_agent_function,
        )
        prompt = LanguageModelPrompt(
            messages=[system_message, user_message],
            functions=[create_agent_function],
            # TODO
            tokens_used=0,
        )
        return prompt

    def parse_response_content(
        self,
        response_content: dict,
    ) -> dict:
        """Parse the actual text response from the objective model.

        Args:
            response_content: The raw response content from the objective model.

        Returns:
            The parsed response.

        """
        parsed_response = json_loads(response_content["function_call"]["arguments"])
        return parsed_response

183 autogpt/core/planning/strategies/next_ability.py Normal file
@@ -0,0 +1,183 @@
from autogpt.core.configuration import SystemConfiguration, UserConfigurable
from autogpt.core.planning.base import PromptStrategy
from autogpt.core.planning.schema import (
    LanguageModelClassification,
    LanguageModelPrompt,
    Task,
)
from autogpt.core.planning.strategies.utils import json_loads, to_numbered_list
from autogpt.core.resource.model_providers import (
    LanguageModelFunction,
    LanguageModelMessage,
    MessageRole,
)


class NextAbilityConfiguration(SystemConfiguration):
    model_classification: LanguageModelClassification = UserConfigurable()
    system_prompt_template: str = UserConfigurable()
    system_info: list[str] = UserConfigurable()
    user_prompt_template: str = UserConfigurable()
    additional_ability_arguments: dict = UserConfigurable()


class NextAbility(PromptStrategy):
    DEFAULT_SYSTEM_PROMPT_TEMPLATE = "System Info:\n{system_info}"

    DEFAULT_SYSTEM_INFO = [
        "The OS you are running on is: {os_info}",
        "It takes money to let you run. Your API budget is ${api_budget:.3f}",
        "The current time and date is {current_time}",
    ]

    DEFAULT_USER_PROMPT_TEMPLATE = (
        "Your current task is {task_objective}.\n"
        "You have taken {cycle_count} actions on this task already. "
        "Here are the actions you have taken and their results:\n"
        "{action_history}\n\n"
        "Here is additional information that may be useful to you:\n"
        "{additional_info}\n\n"
        "Additionally, you should consider the following:\n"
        "{user_input}\n\n"
        "Your task of {task_objective} is complete when the following acceptance criteria have been met:\n"
        "{acceptance_criteria}\n\n"
        "Please choose one of the provided functions to accomplish this task. "
        "Some tasks may require multiple functions to accomplish. If that is the case, choose the function that "
        "you think is most appropriate for the current situation given your progress so far."
    )

    DEFAULT_ADDITIONAL_ABILITY_ARGUMENTS = {
        "motivation": {
            "type": "string",
            "description": "Your justification for choosing this function instead of a different one.",
        },
        "self_criticism": {
            "type": "string",
            "description": "Thoughtful self-criticism that explains why this function may not be the best choice.",
        },
        "reasoning": {
            "type": "string",
            "description": "Your reasoning for choosing this function taking into account the `motivation` and weighing the `self_criticism`.",
        },
    }

    default_configuration = NextAbilityConfiguration(
        model_classification=LanguageModelClassification.SMART_MODEL,
        system_prompt_template=DEFAULT_SYSTEM_PROMPT_TEMPLATE,
        system_info=DEFAULT_SYSTEM_INFO,
        user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE,
        additional_ability_arguments=DEFAULT_ADDITIONAL_ABILITY_ARGUMENTS,
    )

    def __init__(
        self,
        model_classification: LanguageModelClassification,
        system_prompt_template: str,
        system_info: list[str],
        user_prompt_template: str,
        additional_ability_arguments: dict,
    ):
        self._model_classification = model_classification
        self._system_prompt_template = system_prompt_template
        self._system_info = system_info
        self._user_prompt_template = user_prompt_template
        self._additional_ability_arguments = additional_ability_arguments

    @property
    def model_classification(self) -> LanguageModelClassification:
        return self._model_classification

    def build_prompt(
        self,
        task: Task,
        ability_schema: list[dict],
        os_info: str,
        api_budget: float,
        current_time: str,
        **kwargs,
    ) -> LanguageModelPrompt:
        template_kwargs = {
            "os_info": os_info,
            "api_budget": api_budget,
            "current_time": current_time,
            **kwargs,
        }

        for ability in ability_schema:
            ability["parameters"]["properties"].update(
                self._additional_ability_arguments
            )
            ability["parameters"]["required"] += list(
                self._additional_ability_arguments.keys()
            )

        template_kwargs["task_objective"] = task.objective
        template_kwargs["cycle_count"] = task.context.cycle_count
        template_kwargs["action_history"] = to_numbered_list(
            [action.summary() for action in task.context.prior_actions],
            no_items_response="You have not taken any actions yet.",
            **template_kwargs,
        )
        template_kwargs["additional_info"] = to_numbered_list(
            [memory.summary() for memory in task.context.memories]
            + [info for info in task.context.supplementary_info],
            no_items_response="There is no additional information available at this time.",
            **template_kwargs,
        )
        template_kwargs["user_input"] = to_numbered_list(
            [user_input for user_input in task.context.user_input],
            no_items_response="There are no additional considerations at this time.",
            **template_kwargs,
        )
        template_kwargs["acceptance_criteria"] = to_numbered_list(
            [acceptance_criteria for acceptance_criteria in task.acceptance_criteria],
            **template_kwargs,
        )

        template_kwargs["system_info"] = to_numbered_list(
            self._system_info,
            **template_kwargs,
        )

        system_prompt = LanguageModelMessage(
            role=MessageRole.SYSTEM,
            content=self._system_prompt_template.format(**template_kwargs),
        )
        user_prompt = LanguageModelMessage(
            role=MessageRole.USER,
            content=self._user_prompt_template.format(**template_kwargs),
        )
        functions = [
            LanguageModelFunction(json_schema=ability) for ability in ability_schema
        ]

        return LanguageModelPrompt(
            messages=[system_prompt, user_prompt],
            functions=functions,
            # TODO:
            tokens_used=0,
        )

    def parse_response_content(
        self,
        response_content: dict,
    ) -> dict:
        """Parse the actual text response from the objective model.

        Args:
            response_content: The raw response content from the objective model.

        Returns:
            The parsed response.

        """
        function_name = response_content["function_call"]["name"]
        function_arguments = json_loads(response_content["function_call"]["arguments"])
        parsed_response = {
            "motivation": function_arguments.pop("motivation"),
            "self_criticism": function_arguments.pop("self_criticism"),
            "reasoning": function_arguments.pop("reasoning"),
            "next_ability": function_name,
            "ability_arguments": function_arguments,
        }
        return parsed_response

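
To show what parse_response_content does here, the invented sketch below instantiates the strategy from its default configuration and splits the meta-arguments from the chosen ability's own arguments; the `write_file` ability name and argument values are hypothetical.

# Invented example: separating meta-arguments from the chosen ability's arguments.
strategy = NextAbility(**NextAbility.default_configuration.dict())
parsed = strategy.parse_response_content(
    {
        "function_call": {
            "name": "write_file",  # hypothetical ability name
            "arguments": '{"motivation": "m", "self_criticism": "s", '
            '"reasoning": "r", "filename": "notes.md"}',
        }
    }
)
# parsed == {"motivation": "m", "self_criticism": "s", "reasoning": "r",
#            "next_ability": "write_file", "ability_arguments": {"filename": "notes.md"}}
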
27 autogpt/core/planning/strategies/utils.py Normal file
@@ -0,0 +1,27 @@
import ast
import json


def to_numbered_list(
    items: list[str], no_items_response: str = "", **template_args
) -> str:
    if items:
        return "\n".join(
            f"{i+1}. {item.format(**template_args)}" for i, item in enumerate(items)
        )
    else:
        return no_items_response


def json_loads(json_str: str):
    # TODO: this is a hack function for now. Trying to see what errors show up in testing.
    # Can hopefully just replace with a call to ast.literal_eval (the function api still
    # sometimes returns json strings with minor issues like trailing commas).
    try:
        # Try strict JSON parsing first; fall back to literal eval on failure.
        return json.loads(json_str)
    except json.decoder.JSONDecodeError as e:
        try:
            print(f"json decode error {e}. trying literal eval")
            return ast.literal_eval(json_str)
        except Exception:
            breakpoint()

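
A quick illustration of to_numbered_list, including the template substitution it performs on each item (values invented):

# to_numbered_list numbers items and formats template args into them (invented values).
print(to_numbered_list(["Budget is ${api_budget:.3f}", "OS: {os_info}"],
                       api_budget=9.5, os_info="Linux"))
# 1. Budget is $9.500
# 2. OS: Linux
print(to_numbered_list([], no_items_response="Nothing yet."))
# Nothing yet.
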
102 autogpt/core/planning/templates.py Normal file
@@ -0,0 +1,102 @@
# Rules of thumb:
# - Templates don't add new lines at the end of the string. This is the
#   responsibility of the consuming template.

####################
# Planner defaults #
####################


USER_OBJECTIVE = (
    "Write a wikipedia style article about the project: "
    "https://github.com/significant-gravitas/Auto-GPT"
)


ABILITIES = (
    'analyze_code: Analyze Code, args: "code": "<full_code_string>"',
    'execute_python_file: Execute Python File, args: "filename": "<filename>"',
    'append_to_file: Append to file, args: "filename": "<filename>", "text": "<text>"',
    'delete_file: Delete file, args: "filename": "<filename>"',
    'list_files: List Files in Directory, args: "directory": "<directory>"',
    'read_file: Read a file, args: "filename": "<filename>"',
    'write_to_file: Write to file, args: "filename": "<filename>", "text": "<text>"',
    'google: Google Search, args: "query": "<query>"',
    'improve_code: Get Improved Code, args: "suggestions": "<list_of_suggestions>", "code": "<full_code_string>"',
    'browse_website: Browse Website, args: "url": "<url>", "question": "<what_you_want_to_find_on_website>"',
    'write_tests: Write Tests, args: "code": "<full_code_string>", "focus": "<list_of_focus_areas>"',
    'get_hyperlinks: Get hyperlinks, args: "url": "<url>"',
    'get_text_summary: Get text summary, args: "url": "<url>", "question": "<question>"',
    'task_complete: Task Complete (Shutdown), args: "reason": "<reason>"',
)


# Plan Prompt
# -----------


PLAN_PROMPT_CONSTRAINTS = (
    "~4000 word limit for short term memory. Your short term memory is short, so "
    "immediately save important information to files.",
    "If you are unsure how you previously did something or want to recall past "
    "events, thinking about similar events will help you remember.",
    "No user assistance",
    "Exclusively use the commands listed below e.g. command_name",
)

PLAN_PROMPT_RESOURCES = (
    "Internet access for searches and information gathering.",
    "Long-term memory management.",
    "File output.",
)

PLAN_PROMPT_PERFORMANCE_EVALUATIONS = (
    "Continuously review and analyze your actions to ensure you are performing to"
    " the best of your abilities.",
    "Constructively self-criticize your big-picture behavior constantly.",
    "Reflect on past decisions and strategies to refine your approach.",
    "Every command has a cost, so be smart and efficient. Aim to complete tasks in"
    " the least number of steps.",
    "Write all code to a file",
)


PLAN_PROMPT_RESPONSE_DICT = {
    "thoughts": {
        "text": "thought",
        "reasoning": "reasoning",
        "plan": "- short bulleted\n- list that conveys\n- long-term plan",
        "criticism": "constructive self-criticism",
        "speak": "thoughts summary to say to user",
    },
    "command": {"name": "command name", "args": {"arg name": "value"}},
}

PLAN_PROMPT_RESPONSE_FORMAT = (
    "You should only respond in JSON format as described below\n"
    "Response Format:\n"
    "{response_json_structure}\n"
    "Ensure the response can be parsed by Python json.loads"
)

PLAN_TRIGGERING_PROMPT = (
    "Determine which next command to use, and respond using the format specified above:"
)

PLAN_PROMPT_MAIN = (
    "{header}\n\n"
    "GOALS:\n\n{goals}\n\n"
    "Info:\n{info}\n\n"
    "Constraints:\n{constraints}\n\n"
    "Commands:\n{commands}\n\n"
    "Resources:\n{resources}\n\n"
    "Performance Evaluations:\n{performance_evaluations}\n\n"
    "You should only respond in JSON format as described below\n"
    "Response Format:\n{response_json_structure}\n"
    "Ensure the response can be parsed by Python json.loads"
)


###########################
# Parameterized templates #
###########################

2
autogpt/core/plugin/__init__.py
Normal file
@@ -0,0 +1,2 @@
"""The plugin system allows the Agent to be extended with new functionality."""
from autogpt.core.plugin.base import PluginService
155
autogpt/core/plugin/base.py
Normal file
@@ -0,0 +1,155 @@
import abc
import enum
from typing import TYPE_CHECKING, Type

from pydantic import BaseModel

from autogpt.core.configuration import SystemConfiguration, UserConfigurable

if TYPE_CHECKING:
    from autogpt.core.ability import Ability, AbilityRegistry
    from autogpt.core.memory import Memory
    from autogpt.core.resource.model_providers import (
        EmbeddingModelProvider,
        LanguageModelProvider,
    )

# Expand to other types as needed
PluginType = (
    Type["Ability"]  # Swappable now
    | Type["AbilityRegistry"]  # Swappable maybe never
    | Type["LanguageModelProvider"]  # Swappable soon
    | Type["EmbeddingModelProvider"]  # Swappable soon
    | Type["Memory"]  # Swappable now
    # | Type["Planner"]  # Swappable soon
)


class PluginStorageFormat(str, enum.Enum):
    """Supported plugin storage formats.

    Plugins can be stored at one of these supported locations.

    """

    INSTALLED_PACKAGE = "installed_package"  # Required now, loads system defaults
    WORKSPACE = "workspace"  # Required now
    # OPENAPI_URL = "open_api_url"  # Soon (requires some tooling we don't have yet).
    # OTHER_FILE_PATH = "other_file_path"  # Maybe later (maybe now)
    # GIT = "git"  # Maybe later (or soon)
    # PYPI = "pypi"  # Maybe later
    # AUTOGPT_PLUGIN_SERVICE = "autogpt_plugin_service"  # Long term solution, requires design
    # AUTO = "auto"  # Feature for later maybe, automatically find plugin.


# Installed package example
# PluginLocation(
#     storage_format='installed_package',
#     storage_route='autogpt_plugins.twitter.SendTwitterMessage'
# )
# Workspace example
# PluginLocation(
#     storage_format='workspace',
#     storage_route='relative/path/to/plugin.pkl'
#     OR
#     storage_route='relative/path/to/plugin.py'
# )
# Git
# PluginLocation(
#     storage_format='git',
#     # Exact format TBD.
#     storage_route='https://github.com/gravelBridge/AutoGPT-WolframAlpha/blob/main/autogpt-wolframalpha/wolfram_alpha.py'
# )
# PyPI
# PluginLocation(
#     storage_format='pypi',
#     storage_route='package_name'
# )


# A plugin storage route.
#
# This is a string that specifies where to load a plugin from
# (e.g. an import path or file path).
PluginStorageRoute = str


class PluginLocation(SystemConfiguration):
    """A plugin location.

    This is a combination of a plugin storage format and a plugin storage route.
    It is used by the PluginService to load plugins.

    """

    storage_format: PluginStorageFormat = UserConfigurable()
    storage_route: PluginStorageRoute = UserConfigurable()


class PluginMetadata(BaseModel):
    """Metadata about a plugin."""

    name: str
    description: str
    location: PluginLocation


class PluginService(abc.ABC):
    """Base class for plugin service.

    The plugin service should be stateless. This defines the interface for
    loading plugins from various storage formats.

    """

    @staticmethod
    @abc.abstractmethod
    def get_plugin(plugin_location: PluginLocation) -> "PluginType":
        """Get a plugin from a plugin location."""
        ...

    ####################################
    # Low-level storage format loaders #
    ####################################
    @staticmethod
    @abc.abstractmethod
    def load_from_file_path(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from a file path."""
        ...

    @staticmethod
    @abc.abstractmethod
    def load_from_import_path(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from an import path."""
        ...

    @staticmethod
    @abc.abstractmethod
    def resolve_name_to_path(
        plugin_route: PluginStorageRoute, path_type: str
    ) -> PluginStorageRoute:
        """Resolve a plugin name to a plugin path."""
        ...

    #####################################
    # High-level storage format loaders #
    #####################################

    @staticmethod
    @abc.abstractmethod
    def load_from_workspace(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from the workspace."""
        ...

    @staticmethod
    @abc.abstractmethod
    def load_from_installed_package(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from an installed package."""
        ...
74
autogpt/core/plugin/simple.py
Normal file
@@ -0,0 +1,74 @@
from importlib import import_module
from typing import TYPE_CHECKING

from autogpt.core.plugin.base import (
    PluginLocation,
    PluginService,
    PluginStorageFormat,
    PluginStorageRoute,
)

if TYPE_CHECKING:
    from autogpt.core.plugin.base import PluginType


class SimplePluginService(PluginService):
    @staticmethod
    def get_plugin(plugin_location: dict | PluginLocation) -> "PluginType":
        """Get a plugin from a plugin location."""
        if isinstance(plugin_location, dict):
            plugin_location = PluginLocation.parse_obj(plugin_location)
        if plugin_location.storage_format == PluginStorageFormat.WORKSPACE:
            return SimplePluginService.load_from_workspace(
                plugin_location.storage_route
            )
        elif plugin_location.storage_format == PluginStorageFormat.INSTALLED_PACKAGE:
            return SimplePluginService.load_from_installed_package(
                plugin_location.storage_route
            )
        else:
            raise NotImplementedError(
                f"Plugin storage format {plugin_location.storage_format} is not implemented."
            )

    ####################################
    # Low-level storage format loaders #
    ####################################
    @staticmethod
    def load_from_file_path(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from a file path."""
        # TODO: Define an on-disk storage format and implement this.
        #   Can pull from the existing zip file loading implementation.
        raise NotImplementedError("Loading from file path is not implemented.")

    @staticmethod
    def load_from_import_path(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from an import path."""
        module_path, _, class_name = plugin_route.rpartition(".")
        return getattr(import_module(module_path), class_name)

    @staticmethod
    def resolve_name_to_path(
        plugin_route: PluginStorageRoute, path_type: str
    ) -> PluginStorageRoute:
        """Resolve a plugin name to a plugin path."""
        # TODO: Implement a discovery system for finding plugins by name from known
        #   storage locations. E.g. if we know that path_type is a file path, we can
        #   search the workspace for it. If it's an import path, we can check the core
        #   system and the auto_gpt_plugins package.
        raise NotImplementedError("Resolving plugin name to path is not implemented.")

    #####################################
    # High-level storage format loaders #
    #####################################

    @staticmethod
    def load_from_workspace(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from the workspace."""
        plugin = SimplePluginService.load_from_file_path(plugin_route)
        return plugin

    @staticmethod
    def load_from_installed_package(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from an installed package."""
        plugin = SimplePluginService.load_from_import_path(plugin_route)
        return plugin
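A usage sketch for the service above. The plugin route reuses the illustrative Twitter example from the base-module comments and assumes such a package is actually installed:

from autogpt.core.plugin.base import PluginLocation, PluginStorageFormat
from autogpt.core.plugin.simple import SimplePluginService

# An installed-package route is just "module.path.ClassName"; the class is
# resolved with importlib and returned uninstantiated.
location = PluginLocation(
    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
    storage_route="autogpt_plugins.twitter.SendTwitterMessage",
)
plugin_class = SimplePluginService.get_plugin(location)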
7
autogpt/core/resource/__init__.py
Normal file
@@ -0,0 +1,7 @@
from autogpt.core.resource.schema import (
    ProviderBudget,
    ProviderCredentials,
    ProviderSettings,
    ProviderUsage,
    ResourceType,
)
44
autogpt/core/resource/model_providers/__init__.py
Normal file
@@ -0,0 +1,44 @@
from autogpt.core.resource.model_providers.openai import (
    OPEN_AI_MODELS,
    OpenAIModelName,
    OpenAIProvider,
    OpenAISettings,
)
from autogpt.core.resource.model_providers.schema import (
    Embedding,
    EmbeddingModelProvider,
    EmbeddingModelProviderModelInfo,
    EmbeddingModelProviderModelResponse,
    LanguageModelFunction,
    LanguageModelMessage,
    LanguageModelProvider,
    LanguageModelProviderModelInfo,
    LanguageModelProviderModelResponse,
    MessageRole,
    ModelProvider,
    ModelProviderBudget,
    ModelProviderCredentials,
    ModelProviderModelInfo,
    ModelProviderModelResponse,
    ModelProviderName,
    ModelProviderService,
    ModelProviderSettings,
    ModelProviderUsage,
)

__all__ = [
    "ModelProvider",
    "ModelProviderName",
    "ModelProviderSettings",
    "EmbeddingModelProvider",
    "EmbeddingModelProviderModelResponse",
    "LanguageModelProvider",
    "LanguageModelProviderModelResponse",
    "LanguageModelFunction",
    "LanguageModelMessage",
    "MessageRole",
    "OpenAIModelName",
    "OPEN_AI_MODELS",
    "OpenAIProvider",
    "OpenAISettings",
]
373
autogpt/core/resource/model_providers/openai.py
Normal file
@@ -0,0 +1,373 @@
import enum
import functools
import logging
import math
import time
from typing import Callable, ParamSpec, TypeVar

import openai
from openai.error import APIError, RateLimitError

from autogpt.core.configuration import (
    Configurable,
    SystemConfiguration,
    UserConfigurable,
)
from autogpt.core.resource.model_providers.schema import (
    Embedding,
    EmbeddingModelProvider,
    EmbeddingModelProviderModelInfo,
    EmbeddingModelProviderModelResponse,
    LanguageModelFunction,
    LanguageModelMessage,
    LanguageModelProvider,
    LanguageModelProviderModelInfo,
    LanguageModelProviderModelResponse,
    ModelProviderBudget,
    ModelProviderCredentials,
    ModelProviderName,
    ModelProviderService,
    ModelProviderSettings,
    ModelProviderUsage,
)

OpenAIEmbeddingParser = Callable[[Embedding], Embedding]
OpenAIChatParser = Callable[[str], dict]


class OpenAIModelName(str, enum.Enum):
    ADA = "text-embedding-ada-002"
    GPT3 = "gpt-3.5-turbo-0613"
    GPT3_16K = "gpt-3.5-turbo-16k-0613"
    GPT4 = "gpt-4-0613"
    GPT4_32K = "gpt-4-32k-0613"


OPEN_AI_EMBEDDING_MODELS = {
    OpenAIModelName.ADA: EmbeddingModelProviderModelInfo(
        name=OpenAIModelName.ADA,
        service=ModelProviderService.EMBEDDING,
        provider_name=ModelProviderName.OPENAI,
        prompt_token_cost=0.0004,
        completion_token_cost=0.0,
        max_tokens=8191,
        embedding_dimensions=1536,
    ),
}


OPEN_AI_LANGUAGE_MODELS = {
    OpenAIModelName.GPT3: LanguageModelProviderModelInfo(
        name=OpenAIModelName.GPT3,
        service=ModelProviderService.LANGUAGE,
        provider_name=ModelProviderName.OPENAI,
        prompt_token_cost=0.0015,
        completion_token_cost=0.002,
        max_tokens=4096,
    ),
    OpenAIModelName.GPT3_16K: LanguageModelProviderModelInfo(
        name=OpenAIModelName.GPT3_16K,
        service=ModelProviderService.LANGUAGE,
        provider_name=ModelProviderName.OPENAI,
        prompt_token_cost=0.003,
        completion_token_cost=0.002,
        max_tokens=16384,
    ),
    OpenAIModelName.GPT4: LanguageModelProviderModelInfo(
        name=OpenAIModelName.GPT4,
        service=ModelProviderService.LANGUAGE,
        provider_name=ModelProviderName.OPENAI,
        prompt_token_cost=0.03,
        completion_token_cost=0.06,
        max_tokens=8192,
    ),
    OpenAIModelName.GPT4_32K: LanguageModelProviderModelInfo(
        name=OpenAIModelName.GPT4_32K,
        service=ModelProviderService.LANGUAGE,
        provider_name=ModelProviderName.OPENAI,
        prompt_token_cost=0.06,
        completion_token_cost=0.12,
        max_tokens=32768,
    ),
}


OPEN_AI_MODELS = {
    **OPEN_AI_LANGUAGE_MODELS,
    **OPEN_AI_EMBEDDING_MODELS,
}


class OpenAIConfiguration(SystemConfiguration):
    retries_per_request: int = UserConfigurable()


class OpenAIModelProviderBudget(ModelProviderBudget):
    graceful_shutdown_threshold: float = UserConfigurable()
    warning_threshold: float = UserConfigurable()


class OpenAISettings(ModelProviderSettings):
    configuration: OpenAIConfiguration
    credentials: ModelProviderCredentials
    budget: OpenAIModelProviderBudget


class OpenAIProvider(
    Configurable,
    LanguageModelProvider,
    EmbeddingModelProvider,
):
    default_settings = OpenAISettings(
        name="openai_provider",
        description="Provides access to OpenAI's API.",
        configuration=OpenAIConfiguration(
            retries_per_request=10,
        ),
        credentials=ModelProviderCredentials(),
        budget=OpenAIModelProviderBudget(
            total_budget=math.inf,
            total_cost=0.0,
            remaining_budget=math.inf,
            usage=ModelProviderUsage(
                prompt_tokens=0,
                completion_tokens=0,
                total_tokens=0,
            ),
            graceful_shutdown_threshold=0.005,
            warning_threshold=0.01,
        ),
    )

    def __init__(
        self,
        settings: OpenAISettings,
        logger: logging.Logger,
    ):
        self._configuration = settings.configuration
        self._credentials = settings.credentials
        self._budget = settings.budget

        self._logger = logger

        retry_handler = _OpenAIRetryHandler(
            logger=self._logger,
            num_retries=self._configuration.retries_per_request,
        )

        self._create_completion = retry_handler(_create_completion)
        self._create_embedding = retry_handler(_create_embedding)

    def get_token_limit(self, model_name: str) -> int:
        """Get the token limit for a given model."""
        return OPEN_AI_MODELS[model_name].max_tokens

    def get_remaining_budget(self) -> float:
        """Get the remaining budget."""
        return self._budget.remaining_budget

    async def create_language_completion(
        self,
        model_prompt: list[LanguageModelMessage],
        functions: list[LanguageModelFunction],
        model_name: OpenAIModelName,
        completion_parser: Callable[[dict], dict],
        **kwargs,
    ) -> LanguageModelProviderModelResponse:
        """Create a completion using the OpenAI API."""
        completion_kwargs = self._get_completion_kwargs(model_name, functions, **kwargs)
        response = await self._create_completion(
            messages=model_prompt,
            **completion_kwargs,
        )
        response_args = {
            "model_info": OPEN_AI_LANGUAGE_MODELS[model_name],
            "prompt_tokens_used": response.usage.prompt_tokens,
            "completion_tokens_used": response.usage.completion_tokens,
        }

        parsed_response = completion_parser(
            response.choices[0].message.to_dict_recursive()
        )
        response = LanguageModelProviderModelResponse(
            content=parsed_response, **response_args
        )
        self._budget.update_usage_and_cost(response)
        return response

    async def create_embedding(
        self,
        text: str,
        model_name: OpenAIModelName,
        embedding_parser: Callable[[Embedding], Embedding],
        **kwargs,
    ) -> EmbeddingModelProviderModelResponse:
        """Create an embedding using the OpenAI API."""
        embedding_kwargs = self._get_embedding_kwargs(model_name, **kwargs)
        response = await self._create_embedding(text=text, **embedding_kwargs)

        response_args = {
            "model_info": OPEN_AI_EMBEDDING_MODELS[model_name],
            "prompt_tokens_used": response.usage.prompt_tokens,
            "completion_tokens_used": response.usage.completion_tokens,
        }
        response = EmbeddingModelProviderModelResponse(
            **response_args,
            embedding=embedding_parser(response.embeddings[0]),
        )
        self._budget.update_usage_and_cost(response)
        return response

    def _get_completion_kwargs(
        self,
        model_name: OpenAIModelName,
        functions: list[LanguageModelFunction],
        **kwargs,
    ) -> dict:
        """Get kwargs for completion API call.

        Args:
            model: The model to use.
            kwargs: Keyword arguments to override the default values.

        Returns:
            The kwargs for the chat API call.

        """
        completion_kwargs = {
            "model": model_name,
            **kwargs,
            **self._credentials.unmasked(),
        }
        if functions:
            completion_kwargs["functions"] = functions

        return completion_kwargs

    def _get_embedding_kwargs(
        self,
        model_name: OpenAIModelName,
        **kwargs,
    ) -> dict:
        """Get kwargs for embedding API call.

        Args:
            model: The model to use.
            kwargs: Keyword arguments to override the default values.

        Returns:
            The kwargs for the embedding API call.

        """
        embedding_kwargs = {
            "model": model_name,
            **kwargs,
            **self._credentials.unmasked(),
        }

        return embedding_kwargs

    def __repr__(self):
        return "OpenAIProvider()"


async def _create_embedding(text: str, *_, **kwargs) -> openai.Embedding:
    """Embed text using the OpenAI API.

    Args:
        text str: The text to embed.
        model_name str: The name of the model to use.

    Returns:
        str: The embedding.
    """
    return await openai.Embedding.acreate(
        input=[text],
        **kwargs,
    )


async def _create_completion(
    messages: list[LanguageModelMessage], *_, **kwargs
) -> openai.Completion:
    """Create a chat completion using the OpenAI API.

    Args:
        messages: The prompt to use.

    Returns:
        The completion.

    """
    messages = [message.dict() for message in messages]
    if "functions" in kwargs:
        kwargs["functions"] = [
            function.json_schema for function in kwargs["functions"]
        ]
    return await openai.ChatCompletion.acreate(
        messages=messages,
        **kwargs,
    )


_T = TypeVar("_T")
_P = ParamSpec("_P")


class _OpenAIRetryHandler:
    """Retry Handler for OpenAI API call.

    Args:
        num_retries int: Number of retries. Defaults to 10.
        backoff_base float: Base for exponential backoff. Defaults to 2.
        warn_user bool: Whether to warn the user. Defaults to True.
    """

    _retry_limit_msg = "Error: Reached rate limit, passing..."
    _api_key_error_msg = (
        "Please double check that you have setup a PAID OpenAI API Account. You can "
        "read more here: https://docs.agpt.co/setup/#getting-an-api-key"
    )
    _backoff_msg = "Error: API Bad gateway. Waiting {backoff} seconds..."

    def __init__(
        self,
        logger: logging.Logger,
        num_retries: int = 10,
        backoff_base: float = 2.0,
        warn_user: bool = True,
    ):
        self._logger = logger
        self._num_retries = num_retries
        self._backoff_base = backoff_base
        self._warn_user = warn_user

    def _log_rate_limit_error(self) -> None:
        self._logger.debug(self._retry_limit_msg)
        if self._warn_user:
            self._logger.warning(self._api_key_error_msg)
            self._warn_user = False

    def _backoff(self, attempt: int) -> None:
        backoff = self._backoff_base ** (attempt + 2)
        self._logger.debug(self._backoff_msg.format(backoff=backoff))
        time.sleep(backoff)

    def __call__(self, func: Callable[_P, _T]) -> Callable[_P, _T]:
        @functools.wraps(func)
        async def _wrapped(*args: _P.args, **kwargs: _P.kwargs) -> _T:
            num_attempts = self._num_retries + 1  # +1 for the first attempt
            for attempt in range(1, num_attempts + 1):
                try:
                    return await func(*args, **kwargs)

                except RateLimitError:
                    if attempt == num_attempts:
                        raise
                    self._log_rate_limit_error()

                except APIError as e:
                    if (e.http_status != 502) or (attempt == num_attempts):
                        raise

                self._backoff(attempt)

        return _wrapped
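A brief sketch of how the module-private retry handler composes with an arbitrary coroutine; the flaky_call name is illustrative. With the default backoff_base=2.0, the delay before retry n is 2 ** (n + 2) seconds, i.e. 8 s, 16 s, 32 s, and so on:

import logging

retry = _OpenAIRetryHandler(logger=logging.getLogger(__name__), num_retries=3)

@retry
async def flaky_call(**kwargs):
    # RateLimitError and 502 APIErrors are retried with exponential backoff;
    # any other exception (or exhausting the retries) propagates to the caller.
    return await openai.ChatCompletion.acreate(**kwargs)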
219
autogpt/core/resource/model_providers/schema.py
Normal file
@@ -0,0 +1,219 @@
import abc
import enum
from typing import Callable, ClassVar

from pydantic import BaseModel, Field, SecretStr, validator

from autogpt.core.configuration import UserConfigurable
from autogpt.core.resource.schema import (
    Embedding,
    ProviderBudget,
    ProviderCredentials,
    ProviderSettings,
    ProviderUsage,
    ResourceType,
)


class ModelProviderService(str, enum.Enum):
    """A ModelProviderService describes what kind of service the model provides."""

    EMBEDDING: str = "embedding"
    LANGUAGE: str = "language"
    TEXT: str = "text"


class ModelProviderName(str, enum.Enum):
    OPENAI: str = "openai"


class MessageRole(str, enum.Enum):
    USER = "user"
    SYSTEM = "system"
    ASSISTANT = "assistant"


class LanguageModelMessage(BaseModel):
    role: MessageRole
    content: str


class LanguageModelFunction(BaseModel):
    json_schema: dict


class ModelProviderModelInfo(BaseModel):
    """Struct for model information.

    Would be lovely to eventually get this directly from APIs, but needs to be
    scraped from websites for now.

    """

    name: str
    service: ModelProviderService
    provider_name: ModelProviderName
    prompt_token_cost: float = 0.0
    completion_token_cost: float = 0.0


class ModelProviderModelResponse(BaseModel):
    """Standard response struct for a response from a model."""

    prompt_tokens_used: int
    completion_tokens_used: int
    model_info: ModelProviderModelInfo


class ModelProviderCredentials(ProviderCredentials):
    """Credentials for a model provider."""

    api_key: SecretStr | None = UserConfigurable(default=None)
    api_type: SecretStr | None = UserConfigurable(default=None)
    api_base: SecretStr | None = UserConfigurable(default=None)
    api_version: SecretStr | None = UserConfigurable(default=None)
    deployment_id: SecretStr | None = UserConfigurable(default=None)

    def unmasked(self) -> dict:
        return unmask(self)

    class Config:
        extra = "ignore"


def unmask(model: BaseModel):
    unmasked_fields = {}
    for field_name, field in model.__fields__.items():
        value = getattr(model, field_name)
        if isinstance(value, SecretStr):
            unmasked_fields[field_name] = value.get_secret_value()
        else:
            unmasked_fields[field_name] = value
    return unmasked_fields


class ModelProviderUsage(ProviderUsage):
    """Usage for a particular model from a model provider."""

    completion_tokens: int = 0
    prompt_tokens: int = 0
    total_tokens: int = 0

    def update_usage(
        self,
        model_response: ModelProviderModelResponse,
    ) -> None:
        self.completion_tokens += model_response.completion_tokens_used
        self.prompt_tokens += model_response.prompt_tokens_used
        self.total_tokens += (
            model_response.completion_tokens_used + model_response.prompt_tokens_used
        )


class ModelProviderBudget(ProviderBudget):
    total_budget: float = UserConfigurable()
    total_cost: float
    remaining_budget: float
    usage: ModelProviderUsage

    def update_usage_and_cost(
        self,
        model_response: ModelProviderModelResponse,
    ) -> None:
        """Update the usage and cost of the provider."""
        model_info = model_response.model_info
        self.usage.update_usage(model_response)
        incremental_cost = (
            model_response.completion_tokens_used * model_info.completion_token_cost
            + model_response.prompt_tokens_used * model_info.prompt_token_cost
        ) / 1000.0
        self.total_cost += incremental_cost
        self.remaining_budget -= incremental_cost


class ModelProviderSettings(ProviderSettings):
    resource_type = ResourceType.MODEL
    credentials: ModelProviderCredentials
    budget: ModelProviderBudget


class ModelProvider(abc.ABC):
    """A ModelProvider abstracts the details of a particular provider of models."""

    defaults: ClassVar[ModelProviderSettings]

    @abc.abstractmethod
    def get_token_limit(self, model_name: str) -> int:
        ...

    @abc.abstractmethod
    def get_remaining_budget(self) -> float:
        ...


####################
# Embedding Models #
####################


class EmbeddingModelProviderModelInfo(ModelProviderModelInfo):
    """Struct for embedding model information."""

    model_service = ModelProviderService.EMBEDDING
    embedding_dimensions: int


class EmbeddingModelProviderModelResponse(ModelProviderModelResponse):
    """Standard response struct for a response from an embedding model."""

    embedding: Embedding = Field(default_factory=list)

    @validator("completion_tokens_used")
    def _verify_no_completion_tokens_used(cls, v):
        if v > 0:
            raise ValueError("Embeddings should not have completion tokens used.")
        return v


class EmbeddingModelProvider(ModelProvider):
    @abc.abstractmethod
    async def create_embedding(
        self,
        text: str,
        model_name: str,
        embedding_parser: Callable[[Embedding], Embedding],
        **kwargs,
    ) -> EmbeddingModelProviderModelResponse:
        ...


###################
# Language Models #
###################


class LanguageModelProviderModelInfo(ModelProviderModelInfo):
    """Struct for language model information."""

    model_service = ModelProviderService.LANGUAGE
    max_tokens: int


class LanguageModelProviderModelResponse(ModelProviderModelResponse):
    """Standard response struct for a response from a language model."""

    content: dict = None


class LanguageModelProvider(ModelProvider):
    @abc.abstractmethod
    async def create_language_completion(
        self,
        model_prompt: list[LanguageModelMessage],
        functions: list[LanguageModelFunction],
        model_name: str,
        completion_parser: Callable[[dict], dict],
        **kwargs,
    ) -> LanguageModelProviderModelResponse:
        ...
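To make update_usage_and_cost concrete: the token costs in the model-info structs are per 1,000 tokens, so a GPT-4 response (prompt $0.03/1k, completion $0.06/1k per the table in openai.py) that used 1,000 prompt tokens and 500 completion tokens is charged as follows:

prompt_tokens_used, completion_tokens_used = 1000, 500
incremental_cost = (
    completion_tokens_used * 0.06 + prompt_tokens_used * 0.03
) / 1000.0
assert incremental_cost == 0.06  # added to total_cost, deducted from remaining_budget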
57
autogpt/core/resource/schema.py
Normal file
@@ -0,0 +1,57 @@
import abc
import enum

from pydantic import SecretBytes, SecretField, SecretStr

from autogpt.core.configuration import (
    SystemConfiguration,
    SystemSettings,
    UserConfigurable,
)


class ResourceType(str, enum.Enum):
    """An enumeration of resource types."""

    MODEL = "model"
    MEMORY = "memory"


class ProviderUsage(SystemConfiguration, abc.ABC):
    @abc.abstractmethod
    def update_usage(self, *args, **kwargs) -> None:
        """Update the usage of the resource."""
        ...


class ProviderBudget(SystemConfiguration):
    total_budget: float = UserConfigurable()
    total_cost: float
    remaining_budget: float
    usage: ProviderUsage

    @abc.abstractmethod
    def update_usage_and_cost(self, *args, **kwargs) -> None:
        """Update the usage and cost of the resource."""
        ...


class ProviderCredentials(SystemConfiguration):
    """Struct for credentials."""

    class Config:
        json_encoders = {
            SecretStr: lambda v: v.get_secret_value() if v else None,
            SecretBytes: lambda v: v.get_secret_value() if v else None,
            SecretField: lambda v: v.get_secret_value() if v else None,
        }


class ProviderSettings(SystemSettings):
    resource_type: ResourceType
    credentials: ProviderCredentials | None = None
    budget: ProviderBudget | None = None


# Used both by model providers and memory providers
Embedding = list[float]
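A quick sketch of what the json_encoders config buys for credentials subclasses, assuming SystemConfiguration behaves like a standard pydantic BaseModel (the DemoCredentials class and its field are illustrative):

from pydantic import SecretStr

class DemoCredentials(ProviderCredentials):
    api_key: SecretStr | None = None

creds = DemoCredentials(api_key="sk-example")
print(creds)         # repr keeps the secret masked: api_key=SecretStr('**********')
print(creds.json())  # json_encoders unmask it for serialization: {"api_key": "sk-example"}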
3
autogpt/core/runner/__init__.py
Normal file
@@ -0,0 +1,3 @@
"""
This module contains the runner for the v2 agent server and client.
"""
0
autogpt/core/runner/cli_app/__init__.py
Normal file
47
autogpt/core/runner/cli_app/cli.py
Normal file
@@ -0,0 +1,47 @@
from pathlib import Path

import click
import yaml

from autogpt.core.runner.cli_app.main import run_auto_gpt
from autogpt.core.runner.client_lib.shared_click_commands import (
    DEFAULT_SETTINGS_FILE,
    make_settings,
)
from autogpt.core.runner.client_lib.utils import coroutine, handle_exceptions


@click.group()
def autogpt():
    """Temporary command group for v2 commands."""
    pass


autogpt.add_command(make_settings)


@autogpt.command()
@click.option(
    "--settings-file",
    type=click.Path(),
    default=DEFAULT_SETTINGS_FILE,
)
@click.option(
    "--pdb",
    is_flag=True,
    help="Drop into a debugger if an error is raised.",
)
@coroutine
async def run(settings_file: str, pdb: bool) -> None:
    """Run the Auto-GPT agent."""
    click.echo("Running Auto-GPT agent...")
    settings_file = Path(settings_file)
    settings = {}
    if settings_file.exists():
        settings = yaml.safe_load(settings_file.read_text())
    main = handle_exceptions(run_auto_gpt, with_debugger=pdb)
    await main(settings)


if __name__ == "__main__":
    autogpt()
110
autogpt/core/runner/cli_app/main.py
Normal file
@@ -0,0 +1,110 @@
import click

from autogpt.core.agent import AgentSettings, SimpleAgent
from autogpt.core.runner.client_lib.logging import get_client_logger


async def run_auto_gpt(user_configuration: dict):
    """Run the Auto-GPT CLI client."""

    client_logger = get_client_logger()
    client_logger.debug("Getting agent settings")

    agent_workspace = (
        user_configuration.get("workspace", {}).get("configuration", {}).get("root", "")
    )

    if not agent_workspace:  # We don't have an agent yet.
        #################
        # Bootstrapping #
        #################
        # Step 1. Collate the user's settings with the default system settings.
        agent_settings: AgentSettings = SimpleAgent.compile_settings(
            client_logger,
            user_configuration,
        )

        # Step 2. Get a name and goals for the agent.
        # First we need to figure out what the user wants to do with the agent.
        # We'll do this by asking the user for a prompt.
        user_objective = click.prompt("What do you want Auto-GPT to do?")
        # Ask a language model to determine a name and goals for a suitable agent.
        name_and_goals = await SimpleAgent.determine_agent_name_and_goals(
            user_objective,
            agent_settings,
            client_logger,
        )
        print(parse_agent_name_and_goals(name_and_goals))
        # Finally, update the agent settings with the name and goals.
        agent_settings.update_agent_name_and_goals(name_and_goals)

        # Step 3. Provision the agent.
        agent_workspace = SimpleAgent.provision_agent(agent_settings, client_logger)
        print("agent is provisioned")

    # launch agent interaction loop
    agent = SimpleAgent.from_workspace(
        agent_workspace,
        client_logger,
    )
    print("agent is loaded")

    plan = await agent.build_initial_plan()
    print(parse_agent_plan(plan))

    while True:
        current_task, next_ability = await agent.determine_next_ability(plan)
        print(parse_next_ability(current_task, next_ability))
        user_input = click.prompt(
            "Should the agent proceed with this ability?",
            default="y",
        )
        ability_result = await agent.execute_next_ability(user_input)
        print(parse_ability_result(ability_result))


def parse_agent_name_and_goals(name_and_goals: dict) -> str:
    parsed_response = f"Agent Name: {name_and_goals['agent_name']}\n"
    parsed_response += f"Agent Role: {name_and_goals['agent_role']}\n"
    parsed_response += "Agent Goals:\n"
    for i, goal in enumerate(name_and_goals["agent_goals"]):
        parsed_response += f"{i+1}. {goal}\n"
    return parsed_response


def parse_agent_plan(plan: dict) -> str:
    parsed_response = "Agent Plan:\n"
    for i, task in enumerate(plan["task_list"]):
        parsed_response += f"{i+1}. {task['objective']}\n"
        parsed_response += f"Task type: {task['type']} "
        parsed_response += f"Priority: {task['priority']}\n"
        parsed_response += "Ready Criteria:\n"
        for j, criteria in enumerate(task["ready_criteria"]):
            parsed_response += f"  {j+1}. {criteria}\n"
        parsed_response += "Acceptance Criteria:\n"
        for j, criteria in enumerate(task["acceptance_criteria"]):
            parsed_response += f"  {j+1}. {criteria}\n"
        parsed_response += "\n"

    return parsed_response


def parse_next_ability(current_task, next_ability: dict) -> str:
    parsed_response = f"Current Task: {current_task.objective}\n"
    ability_args = ", ".join(
        f"{k}={v}" for k, v in next_ability["ability_arguments"].items()
    )
    parsed_response += f"Next Ability: {next_ability['next_ability']}({ability_args})\n"
    parsed_response += f"Motivation: {next_ability['motivation']}\n"
    parsed_response += f"Self-criticism: {next_ability['self_criticism']}\n"
    parsed_response += f"Reasoning: {next_ability['reasoning']}\n"
    return parsed_response


def parse_ability_result(ability_result) -> str:
    parsed_response = f"Ability: {ability_result['ability_name']}\n"
    parsed_response += f"Ability Arguments: {ability_result['ability_args']}\n"
    parsed_response += f"Ability Result: {ability_result['success']}\n"
    parsed_response += f"Message: {ability_result['message']}\n"
    parsed_response += f"Data: {ability_result['new_knowledge']}\n"
    return parsed_response
0
autogpt/core/runner/cli_web_app/__init__.py
Normal file
101
autogpt/core/runner/cli_web_app/cli.py
Normal file
@@ -0,0 +1,101 @@
import contextlib
import pathlib
import shlex
import subprocess
import sys
import time

import click
import requests
import uvicorn
import yaml

from autogpt.core.runner.client_lib.shared_click_commands import (
    DEFAULT_SETTINGS_FILE,
    make_settings,
    status,
)
from autogpt.core.runner.client_lib.utils import coroutine


@click.group()
def autogpt():
    """Temporary command group for v2 commands."""
    pass


autogpt.add_command(make_settings)
autogpt.add_command(status)


@autogpt.command()
@click.option(
    "host",
    "--host",
    default="localhost",
    help="The host for the webserver.",
    type=click.STRING,
)
@click.option(
    "port",
    "--port",
    default=8080,
    help="The port of the webserver.",
    type=click.INT,
)
def server(host: str, port: int) -> None:
    """Run the Auto-GPT runner httpserver."""
    click.echo("Running Auto-GPT runner httpserver...")
    uvicorn.run(
        "autogpt.core.runner.cli_web_app.server.api:app",
        workers=1,
        host=host,
        port=port,
        reload=True,
    )


@autogpt.command()
@click.option(
    "--settings-file",
    type=click.Path(),
    default=DEFAULT_SETTINGS_FILE,
)
@coroutine
async def client(settings_file) -> None:
    """Run the Auto-GPT runner client."""
    settings_file = pathlib.Path(settings_file)
    settings = {}
    if settings_file.exists():
        settings = yaml.safe_load(settings_file.read_text())

    from autogpt.core.runner.cli_web_app.client.client import run

    with autogpt_server():
        run()


@contextlib.contextmanager
def autogpt_server():
    host = "localhost"
    port = 8080
    cmd = shlex.split(
        f"{sys.executable} autogpt/core/runner/cli_web_app/cli.py server --host {host} --port {port}"
    )
    server_process = subprocess.Popen(
        args=cmd,
    )
    started = False

    while not started:
        try:
            requests.get(f"http://{host}:{port}")
            started = True
        except requests.exceptions.ConnectionError:
            time.sleep(0.2)
    yield server_process
    server_process.terminate()


if __name__ == "__main__":
    autogpt()
0
autogpt/core/runner/cli_web_app/client/__init__.py
Normal file
16
autogpt/core/runner/cli_web_app/client/client.py
Normal file
@@ -0,0 +1,16 @@
import json

import requests


def run():
    body = json.dumps(
        {"ai_name": "HelloBot", "ai_role": "test", "ai_goals": ["goal1", "goal2"]}
    )

    header = {"Content-Type": "application/json", "openai_api_key": "asdf"}
    print("Sending: ", header, body)
    response = requests.post(
        "http://localhost:8080/api/v1/agents", data=body, headers=header
    )
    print(response.content.decode("utf-8"))
0
autogpt/core/runner/cli_web_app/server/__init__.py
Normal file
48
autogpt/core/runner/cli_web_app/server/api.py
Normal file
@@ -0,0 +1,48 @@
import uuid

from fastapi import APIRouter, FastAPI, Request

from autogpt.core.runner.cli_web_app.server.schema import InteractRequestBody

router = APIRouter()


@router.post("/agents")
async def create_agent(request: Request):
    """Create a new agent."""
    agent_id = uuid.uuid4().hex
    return {"agent_id": agent_id}


@router.post("/agents/{agent_id}")
async def interact(request: Request, agent_id: str, body: InteractRequestBody):
    """Interact with an agent."""

    # check headers

    # check if agent_id exists

    # get agent object from somewhere, e.g. a database/disk/global dict

    # continue agent interaction with user input

    return {
        "thoughts": {
            "thoughts": {
                "text": "text",
                "reasoning": "reasoning",
                "plan": "plan",
                "criticism": "criticism",
                "speak": "speak",
            },
            "commands": {
                "name": "name",
                "args": {"arg_1": "value_1", "arg_2": "value_2"},
            },
        },
        "messages": ["message1", agent_id],
    }


app = FastAPI()
app.include_router(router, prefix="/api/v1")
36
autogpt/core/runner/cli_web_app/server/schema.py
Normal file
@@ -0,0 +1,36 @@
from uuid import UUID

from pydantic import BaseModel, validator


class AgentInfo(BaseModel):
    id: UUID = None
    objective: str = ""
    name: str = ""
    role: str = ""
    goals: list[str] = []


class AgentConfiguration(BaseModel):
    """Configuration for creation of a new agent."""

    # We'll want to get this schema from the configuration, so it needs to be dynamic.
    user_configuration: dict
    agent_goals: AgentInfo

    @validator("agent_goals")
    def only_objective_or_name_role_goals(cls, agent_goals):
        goals_specification = [agent_goals.name, agent_goals.role, agent_goals.goals]
        if agent_goals.objective and any(goals_specification):
            raise ValueError("Cannot specify both objective and name, role, or goals")
        if not agent_goals.objective and not all(goals_specification):
            raise ValueError("Must specify either objective or name, role, and goals")
        return agent_goals


class InteractRequestBody(BaseModel):
    user_input: str = ""


class InteractResponseBody(BaseModel):
    thoughts: dict[str, str]  # TBD
    messages: list[str]  # for example
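A short sketch of what the agent_goals validator enforces (the values are illustrative):

# Accepted: an objective alone, or name + role + goals together.
AgentConfiguration(
    user_configuration={},
    agent_goals=AgentInfo(objective="Plan a trip"),
)

# Rejected: mixing an objective with explicit goals raises a ValidationError.
AgentConfiguration(
    user_configuration={},
    agent_goals=AgentInfo(objective="Plan a trip", goals=["book flights"]),
)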
20
autogpt/core/runner/cli_web_app/server/services/users.py
Normal file
@@ -0,0 +1,20 @@
import uuid

from fastapi import Request


class UserService:
    def __init__(self):
        self.users = {}

    def get_user_id(self, request: Request) -> uuid.UUID:
        # TODO: something real. I don't know how this works.
        hostname = request.client.host
        port = request.client.port
        user = f"{hostname}:{port}"
        if user not in self.users:
            self.users[user] = uuid.uuid4()
        return self.users[user]


USER_SERVICE = UserService()
0
autogpt/core/runner/client_lib/__init__.py
Normal file
20
autogpt/core/runner/client_lib/logging.py
Normal file
@@ -0,0 +1,20 @@
import logging


def get_client_logger():
    # Configure logging before we do anything else.
    # Application logs need a place to live.
    client_logger = logging.getLogger("autogpt_client_application")
    client_logger.setLevel(logging.DEBUG)

    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    )

    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(formatter)

    client_logger.addHandler(ch)

    return client_logger
14
autogpt/core/runner/client_lib/settings.py
Normal file
@@ -0,0 +1,14 @@
from pathlib import Path

import yaml

from autogpt.core.agent import SimpleAgent


def make_user_configuration(settings_file_path: Path):
    user_configuration = SimpleAgent.build_user_configuration()

    settings_file_path.parent.mkdir(parents=True, exist_ok=True)
    print("Writing settings to", settings_file_path)
    with settings_file_path.open("w") as f:
        yaml.safe_dump(user_configuration, f)
19
autogpt/core/runner/client_lib/shared_click_commands.py
Normal file
@@ -0,0 +1,19 @@
import pathlib

import click

DEFAULT_SETTINGS_FILE = str(
    pathlib.Path("~/auto-gpt/default_agent_settings.yml").expanduser()
)


@click.command()
@click.option(
    "--settings-file",
    type=click.Path(),
    default=DEFAULT_SETTINGS_FILE,
)
def make_settings(settings_file: str) -> None:
    from autogpt.core.runner.client_lib.settings import make_user_configuration

    make_user_configuration(pathlib.Path(settings_file))
61
autogpt/core/runner/client_lib/utils.py
Normal file
@@ -0,0 +1,61 @@
import asyncio
import functools
from bdb import BdbQuit
from typing import Callable, ParamSpec, TypeVar

import click

P = ParamSpec("P")
T = TypeVar("T")


def handle_exceptions(
    application_main: Callable[P, T],
    with_debugger: bool,
) -> Callable[P, T]:
    """Wrap a function so that it drops the user into a debugger if it raises an error.

    This is intended to wrap the main function of a CLI application. Exceptions that
    signal a deliberate exit (KeyboardInterrupt, click.Abort, BdbQuit) are re-raised
    unchanged. Any other exception is printed and, if with_debugger is True, opens a
    post-mortem debugger; otherwise it is re-raised.

    Parameters
    ----------
    application_main
        The function to wrap.
    with_debugger
        Whether to drop the user into a debugger if an error is raised.

    Returns
    -------
    Callable
        The wrapped function.

    """

    @functools.wraps(application_main)
    async def wrapped(*args: P.args, **kwargs: P.kwargs) -> T:
        try:
            return await application_main(*args, **kwargs)
        except (BdbQuit, KeyboardInterrupt, click.Abort):
            raise
        except Exception as e:
            if with_debugger:
                print(f"Uncaught exception {e}")
                import pdb

                pdb.post_mortem()
            else:
                raise

    return wrapped


def coroutine(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        return asyncio.run(f(*args, **kwargs))

    return wrapper
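A usage sketch mirroring how cli.py composes the two helpers; my_app_main is an illustrative name for your async entry point:

import click

@click.command()
@coroutine  # lets click invoke the async entry point through asyncio.run
async def run() -> None:
    main = handle_exceptions(my_app_main, with_debugger=True)
    await main()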
3
autogpt/core/workspace/__init__.py
Normal file
@@ -0,0 +1,3 @@
"""The workspace is the central hub for the Agent's on disk resources."""
from autogpt.core.workspace.base import Workspace
from autogpt.core.workspace.simple import SimpleWorkspace, WorkspaceSettings
70
autogpt/core/workspace/base.py
Normal file
@@ -0,0 +1,70 @@
from __future__ import annotations

import abc
import logging
import typing
from pathlib import Path

if typing.TYPE_CHECKING:
    from autogpt.core.configuration import AgentConfiguration


class Workspace(abc.ABC):
    """The workspace is the root directory for all generated files.

    The workspace is responsible for creating the root directory and
    providing a method for getting the full path to an item in the
    workspace.

    """

    @property
    @abc.abstractmethod
    def root(self) -> Path:
        """The root directory of the workspace."""
        ...

    @property
    @abc.abstractmethod
    def restrict_to_workspace(self) -> bool:
        """Whether to restrict generated paths to the workspace."""
        ...

    @staticmethod
    @abc.abstractmethod
    def setup_workspace(
        configuration: AgentConfiguration, logger: logging.Logger
    ) -> Path:
        """Create the workspace root directory and set up all initial content.

        Parameters
        ----------
        configuration
            The Agent's configuration.
        logger
            The Agent's logger.

        Returns
        -------
        Path
            The path to the workspace root directory.

        """
        ...

    @abc.abstractmethod
    def get_path(self, relative_path: str | Path) -> Path:
        """Get the full path for an item in the workspace.

        Parameters
        ----------
        relative_path
            The path to the item relative to the workspace root.

        Returns
        -------
        Path
            The full path to the item.

        """
        ...
193
autogpt/core/workspace/simple.py
Normal file
@@ -0,0 +1,193 @@
import json
import logging
import typing
from pathlib import Path

from pydantic import SecretField

from autogpt.core.configuration import (
    Configurable,
    SystemConfiguration,
    SystemSettings,
    UserConfigurable,
)
from autogpt.core.workspace.base import Workspace

if typing.TYPE_CHECKING:
    # Cyclic import
    from autogpt.core.agent.simple import AgentSettings


class WorkspaceConfiguration(SystemConfiguration):
    root: str
    parent: str = UserConfigurable()
    restrict_to_workspace: bool = UserConfigurable()


class WorkspaceSettings(SystemSettings):
    configuration: WorkspaceConfiguration


class SimpleWorkspace(Configurable, Workspace):
    default_settings = WorkspaceSettings(
        name="workspace",
        description="The workspace is the root directory for all agent activity.",
        configuration=WorkspaceConfiguration(
            root="",
            parent="~/auto-gpt/agents",
            restrict_to_workspace=True,
        ),
    )

    NULL_BYTES = ["\0", "\000", "\x00", "\u0000", "%00"]

    def __init__(
        self,
        settings: WorkspaceSettings,
        logger: logging.Logger,
    ):
        self._configuration = settings.configuration
        self._logger = logger.getChild("workspace")

    @property
    def root(self) -> Path:
        return Path(self._configuration.root)

    @property
    def debug_log_path(self) -> Path:
        return self.root / "logs" / "debug.log"

    @property
    def cycle_log_path(self) -> Path:
        return self.root / "logs" / "cycle.log"

    @property
    def configuration_path(self) -> Path:
        return self.root / "configuration.yml"

    @property
    def restrict_to_workspace(self) -> bool:
        return self._configuration.restrict_to_workspace

    def get_path(self, relative_path: str | Path) -> Path:
        """Get the full path for an item in the workspace.

        Parameters
        ----------
        relative_path
            The relative path to resolve in the workspace.

        Returns
        -------
        Path
            The resolved path relative to the workspace.

        """
        return self._sanitize_path(
            relative_path,
            root=self.root,
            restrict_to_root=self.restrict_to_workspace,
        )

    def _sanitize_path(
        self,
        relative_path: str | Path,
        root: str | Path | None = None,
        restrict_to_root: bool = True,
    ) -> Path:
        """Resolve the relative path within the given root if possible.

        Parameters
        ----------
        relative_path
            The relative path to resolve.
        root
            The root path to resolve the relative path within.
        restrict_to_root
            Whether to restrict the path to the root.

        Returns
        -------
        Path
            The resolved path.

        Raises
        ------
        ValueError
            If the path is absolute and a root is provided.
        ValueError
            If the path is outside the root and the root is restricted.

        """

        # Posix systems disallow null bytes in paths. Windows is agnostic about it.
        # Do an explicit check here for all sorts of null byte representations.
        for null_byte in self.NULL_BYTES:
            if null_byte in str(relative_path) or null_byte in str(root):
                raise ValueError("embedded null byte")

        if root is None:
            return Path(relative_path).resolve()

        self._logger.debug(f"Resolving path '{relative_path}' in workspace '{root}'")
        root, relative_path = Path(root).resolve(), Path(relative_path)
        self._logger.debug(f"Resolved root as '{root}'")

        if relative_path.is_absolute():
            raise ValueError(
                f"Attempted to access absolute path '{relative_path}' in workspace '{root}'."
            )
        full_path = root.joinpath(relative_path).resolve()

        self._logger.debug(f"Joined paths as '{full_path}'")

        if restrict_to_root and not full_path.is_relative_to(root):
            raise ValueError(
                f"Attempted to access path '{full_path}' outside of workspace '{root}'."
            )

        return full_path

    ###################################
    # Factory methods for agent setup #
    ###################################

    @staticmethod
    def setup_workspace(settings: "AgentSettings", logger: logging.Logger) -> Path:
        workspace_parent = settings.workspace.configuration.parent
        workspace_parent = Path(workspace_parent).expanduser().resolve()
        workspace_parent.mkdir(parents=True, exist_ok=True)

        agent_name = settings.agent.name

        workspace_root = workspace_parent / agent_name
        workspace_root.mkdir(parents=True, exist_ok=True)

        settings.workspace.configuration.root = str(workspace_root)

        with (workspace_root / "agent_settings.json").open("w") as f:
            settings_json = settings.json(
                encoder=lambda x: x.get_secret_value()
                if isinstance(x, SecretField)
                else x,
            )
            f.write(settings_json)

        # TODO: What are all the kinds of logs we want here?
        log_path = workspace_root / "logs"
        log_path.mkdir(parents=True, exist_ok=True)
        (log_path / "debug.log").touch()
        (log_path / "cycle.log").touch()

        return workspace_root

    @staticmethod
    def load_agent_settings(workspace_root: Path) -> "AgentSettings":
        # Cyclic import
        from autogpt.core.agent.simple import AgentSettings

        with (workspace_root / "agent_settings.json").open("r") as f:
            agent_settings = json.load(f)

        return AgentSettings.parse_obj(agent_settings)
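A containment sketch for get_path and _sanitize_path; the paths are illustrative and the settings copy assumes WorkspaceSettings behaves like a standard pydantic model:

import logging

settings = SimpleWorkspace.default_settings.copy(deep=True)
settings.configuration.root = "/home/user/auto-gpt/agents/DemoAgent"
workspace = SimpleWorkspace(settings, logging.getLogger(__name__))

workspace.get_path("output/notes.txt")     # -> <root>/output/notes.txt
workspace.get_path("../OtherAgent/x.txt")  # raises ValueError: outside of workspace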
@@ -95,7 +95,7 @@ class ApiManager(metaclass=Singleton):
        """
        return self.total_budget

    def get_models(self) -> List[Model]:
    def get_models(self, **openai_credentials) -> List[Model]:
        """
        Get list of available GPT models.

@@ -104,7 +104,7 @@ class ApiManager(metaclass=Singleton):

        """
        if self.models is None:
            all_models = openai.Model.list()["data"]
            all_models = openai.Model.list(**openai_credentials)["data"]
            self.models = [model for model in all_models if "gpt" in model["id"]]

        return self.models
|
||||
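The new `**openai_credentials` pass-through lets callers forward per-call credentials straight to `openai.Model.list`, which openai-python 0.x accepts as keyword arguments. A hedged sketch with a placeholder key:

api_manager = ApiManager()
models = api_manager.get_models(api_key="sk-placeholder")  # placeholder credentials
print([m["id"] for m in models])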
@@ -35,13 +35,13 @@ def chat_with_ai(
        system_prompt (str): The prompt explaining the rules to the AI.
        triggering_prompt (str): The input from the user.
        token_limit (int): The maximum number of tokens allowed in the API call.
        model (str, optional): The model to use. If None, the config.fast_llm_model will be used. Defaults to None.
        model (str, optional): The model to use. By default, the config.smart_llm will be used.

    Returns:
        str: The AI's response.
    """
    if model is None:
        model = config.fast_llm_model
        model = config.smart_llm

    # Reserve 1000 tokens for the response
    logger.debug(f"Token limit: {token_limit}")
@@ -73,10 +73,10 @@ OPEN_AI_CHAT_MODELS = {
}
# Set aliases for rolling model IDs
chat_model_mapping = {
    "gpt-3.5-turbo": "gpt-3.5-turbo-0301",
    "gpt-3.5-turbo": "gpt-3.5-turbo-0613",
    "gpt-3.5-turbo-16k": "gpt-3.5-turbo-16k-0613",
    "gpt-4": "gpt-4-0314",
    "gpt-4-32k": "gpt-4-32k-0314",
    "gpt-4": "gpt-4-0613",
    "gpt-4-32k": "gpt-4-32k-0613",
}
for alias, target in chat_model_mapping.items():
    alias_info = ChatModelInfo(**OPEN_AI_CHAT_MODELS[target].__dict__)
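The alias table follows a simple pattern: point each rolling model ID at a pinned snapshot so token limits stay accurate when OpenAI advances the alias. A standalone sketch of the same pattern (numbers illustrative):

from dataclasses import dataclass, replace

@dataclass
class ChatModelInfo:
    name: str
    max_tokens: int

MODELS = {
    "gpt-3.5-turbo-0613": ChatModelInfo("gpt-3.5-turbo-0613", 4096),
    "gpt-4-0613": ChatModelInfo("gpt-4-0613", 8192),
}
ALIASES = {"gpt-3.5-turbo": "gpt-3.5-turbo-0613", "gpt-4": "gpt-4-0613"}

# Register each alias with the pinned snapshot's metadata, under the alias name.
for alias, target in ALIASES.items():
    MODELS[alias] = replace(MODELS[target], name=alias)

print(MODELS["gpt-4"])  # ChatModelInfo(name='gpt-4', max_tokens=8192)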
@@ -1,12 +1,10 @@
from __future__ import annotations

from dataclasses import asdict
from typing import List, Literal, Optional

from colorama import Fore

from autogpt.config import Config
from autogpt.logs import logger

from ..api_manager import ApiManager
from ..base import ChatModelResponse, ChatSequence, Message
@@ -23,8 +21,8 @@ def call_ai_function(
    function: str,
    args: list,
    description: str,
    config: Config,
    model: Optional[str] = None,
    config: Optional[Config] = None,
) -> str:
    """Call an AI function

@@ -41,7 +39,7 @@ def call_ai_function(
        str: The response from the function
    """
    if model is None:
        model = config.smart_llm_model
        model = config.smart_llm
    # For each arg, if any are None, convert to "None":
    args = [str(arg) if arg is not None else "None" for arg in args]
    # parse args to comma separated string
@@ -69,21 +67,18 @@ def create_text_completion(
    max_output_tokens: Optional[int],
) -> str:
    if model is None:
        model = config.fast_llm_model
        model = config.fast_llm
    if temperature is None:
        temperature = config.temperature

    if config.use_azure:
        kwargs = {"deployment_id": config.get_azure_deployment_id_for_model(model)}
    else:
        kwargs = {"model": model}
    kwargs = {"model": model}
    kwargs.update(config.get_openai_credentials(model))

    response = iopenai.create_text_completion(
        prompt=prompt,
        **kwargs,
        temperature=temperature,
        max_tokens=max_output_tokens,
        api_key=config.openai_api_key,
    )
    logger.debug(f"Response: {response}")
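Collapsing the Azure branch works because `config.get_openai_credentials(model)` now supplies whatever extra request fields the active backend needs. Illustrative only: one plausible shape for that method, inferred from how its result is merged into the kwargs above (the real implementation lives in Config):

# Assumption: sketch of Config.get_openai_credentials, not the actual method.
def get_openai_credentials(self, model: str) -> dict:
    credentials = {}
    if self.use_azure:
        # Azure routes requests by deployment ID rather than by model name.
        credentials["deployment_id"] = self.get_azure_deployment_id_for_model(model)
    return credentials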
@@ -115,6 +110,8 @@ def create_chat_completion(
        model = prompt.model.name
    if temperature is None:
        temperature = config.temperature
    if max_tokens is None:
        max_tokens = OPEN_AI_CHAT_MODELS[model].max_tokens - prompt.token_length

    logger.debug(
        f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}"
@@ -137,11 +134,8 @@ def create_chat_completion(
    if message is not None:
        return message

    chat_completion_kwargs["api_key"] = config.openai_api_key
    if config.use_azure:
        chat_completion_kwargs[
            "deployment_id"
        ] = config.get_azure_deployment_id_for_model(model)
    chat_completion_kwargs.update(config.get_openai_credentials(model))

    if functions:
        chat_completion_kwargs["functions"] = [
            function.__dict__ for function in functions
@@ -175,11 +169,14 @@ def create_chat_completion(


def check_model(
    model_name: str, model_type: Literal["smart_llm_model", "fast_llm_model"]
    model_name: str,
    model_type: Literal["smart_llm", "fast_llm"],
    config: Config,
) -> str:
    """Check if model is available for use. If not, return gpt-3.5-turbo."""
    openai_credentials = config.get_openai_credentials(model_name)
    api_manager = ApiManager()
    models = api_manager.get_models()
    models = api_manager.get_models(**openai_credentials)

    if any(model_name in m["id"] for m in models):
        return model_name
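A hedged sketch of how the entry point can apply this check to both configured models (attribute names as used elsewhere in this diff):

# Validate both configured models against the account's available model list.
config.fast_llm = check_model(config.fast_llm, "fast_llm", config)
config.smart_llm = check_model(config.smart_llm, "smart_llm", config)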
@@ -1,18 +1,21 @@
"""Logging module for Auto-GPT."""
from __future__ import annotations

import logging
import os
import random
import re
import time
from logging import LogRecord
from typing import Any
from typing import TYPE_CHECKING, Any, Optional

from colorama import Fore, Style

from autogpt.config import Config
if TYPE_CHECKING:
    from autogpt.config import Config

from autogpt.log_cycle.json_handler import JsonFileHandler, JsonFormatter
from autogpt.singleton import Singleton
from autogpt.speech import say_text


class Logger(metaclass=Singleton):
@@ -82,14 +85,27 @@ class Logger(metaclass=Singleton):
        self.json_logger.addHandler(error_handler)
        self.json_logger.setLevel(logging.DEBUG)

        self.speak_mode = False
        self._config: Optional[Config] = None
        self.chat_plugins = []

    @property
    def config(self) -> Config | None:
        return self._config

    @config.setter
    def config(self, config: Config):
        self._config = config
        if config.plain_output:
            self.typing_logger.removeHandler(self.typing_console_handler)
            self.typing_logger.addHandler(self.console_handler)

    def typewriter_log(
        self, title="", title_color="", content="", speak_text=False, level=logging.INFO
    ):
        if speak_text and self.speak_mode:
            say_text(f"{title}. {content}")
        from autogpt.speech import say_text

        if speak_text and self.config and self.config.speak_mode:
            say_text(f"{title}. {content}", self.config)

        for plugin in self.chat_plugins:
            plugin.report(f"{title}. {content}")
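The property setter makes the logging singleton config-driven. A hedged sketch of the injection pattern, mirroring the run_auto_gpt hunk further down:

logger = Logger()
logger.config = config  # the setter also swaps handlers when config.plain_output is set
logger.typewriter_log("Ready", speak_text=True)  # speaks only if config.speak_mode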
@@ -261,13 +277,15 @@ def print_assistant_thoughts(
    assistant_reply_json_valid: object,
    config: Config,
) -> None:
    from autogpt.speech import say_text

    assistant_thoughts_reasoning = None
    assistant_thoughts_plan = None
    assistant_thoughts_speak = None
    assistant_thoughts_criticism = None

    assistant_thoughts = assistant_reply_json_valid.get("thoughts", {})
    assistant_thoughts_text = remove_ansi_escape(assistant_thoughts.get("text"))
    assistant_thoughts_text = remove_ansi_escape(assistant_thoughts.get("text", ""))
    if assistant_thoughts:
        assistant_thoughts_reasoning = remove_ansi_escape(
            assistant_thoughts.get("reasoning")
@@ -1,11 +1,13 @@
"""The application entry point. Can be invoked by a CLI or any other front end application."""
import logging
import sys
from pathlib import Path
from typing import Optional

from colorama import Fore, Style

from autogpt.agent import Agent
from autogpt.config.config import Config, check_openai_api_key
from autogpt.config.config import ConfigBuilder, check_openai_api_key
from autogpt.configurator import create_config
from autogpt.logs import logger
from autogpt.memory.vector import get_memory
@@ -45,14 +47,19 @@ def run_auto_gpt(
    browser_name: str,
    allow_downloads: bool,
    skip_news: bool,
    workspace_directory: str,
    workspace_directory: str | Path,
    install_plugin_deps: bool,
    ai_name: Optional[str] = None,
    ai_role: Optional[str] = None,
    ai_goals: tuple[str] = tuple(),
):
    # Configure logging before we do anything else.
    logger.set_level(logging.DEBUG if debug else logging.INFO)
    logger.speak_mode = speak

    config = Config.build_config_from_env()
    config = ConfigBuilder.build_config_from_env()
    # HACK: This is a hack to allow the config into the logger without having to pass it around everywhere
    # or import it directly.
    logger.config = config

    # TODO: fill in llm values here
    check_openai_api_key(config)
@@ -147,17 +154,20 @@ def run_auto_gpt(
            incompatible_commands.append(command)

    for command in incompatible_commands:
        command_registry.unregister(command.name)
        command_registry.unregister(command)
        logger.debug(
            f"Unregistering incompatible command: {command.name}, "
            f"reason - {command.disabled_reason or 'Disabled by current config.'}"
        )

    ai_name = ""
    ai_config = construct_main_ai_config(config)
    ai_config = construct_main_ai_config(
        config,
        name=ai_name,
        role=ai_role,
        goals=ai_goals,
    )
    ai_config.command_registry = command_registry
    if ai_config.ai_name:
        ai_name = ai_config.ai_name
    ai_name = ai_config.ai_name
    # print(prompt)
    # Initialize variables
    next_action_count = 0
@@ -171,14 +171,14 @@ class MessageHistory:
        # Assume an upper bound length for the summary prompt template, i.e. Your task is to create a concise running summary...., in summarize_batch func
        # TODO make this default dynamic
        prompt_template_length = 100
        max_tokens = OPEN_AI_CHAT_MODELS.get(config.fast_llm_model).max_tokens
        summary_tlength = count_string_tokens(str(self.summary), config.fast_llm_model)
        max_tokens = OPEN_AI_CHAT_MODELS.get(config.fast_llm).max_tokens
        summary_tlength = count_string_tokens(str(self.summary), config.fast_llm)
        batch = []
        batch_tlength = 0

        # TODO Can put a cap on length of total new events and drop some previous events to save API cost, but need to think thru more how to do it without losing the context
        for event in new_events:
            event_tlength = count_string_tokens(str(event), config.fast_llm_model)
            event_tlength = count_string_tokens(str(event), config.fast_llm)

            if (
                batch_tlength + event_tlength
@@ -187,7 +187,7 @@ class MessageHistory:
                # The batch is full. Summarize it and start a new one.
                self.summarize_batch(batch, config)
                summary_tlength = count_string_tokens(
                    str(self.summary), config.fast_llm_model
                    str(self.summary), config.fast_llm
                )
                batch = [event]
                batch_tlength = event_tlength
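The renamed `fast_llm` is used here to budget a greedy batching loop: fill a batch until the next event would overflow the remaining token budget, then summarize and start over. A standalone sketch of that loop (the token counter and budget are toy values, not the project's):

def batch_events(events, count_tokens, budget):
    # Greedily fill each batch up to the token budget, then yield and reset.
    batch, batch_tlength = [], 0
    for event in events:
        tlength = count_tokens(event)
        if batch_tlength + tlength > budget:
            yield batch
            batch, batch_tlength = [event], tlength
        else:
            batch.append(event)
            batch_tlength += tlength
    if batch:
        yield batch

for b in batch_events(["a" * 40, "b" * 40, "c" * 40], lambda s: len(s) // 4, 25):
    print(b)  # two batches: ['aaa...', 'bbb...'] then ['ccc...']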
@@ -217,9 +217,7 @@ Latest Development:
"""
'''

        prompt = ChatSequence.for_model(
            config.fast_llm_model, [Message("user", prompt)]
        )
        prompt = ChatSequence.for_model(config.fast_llm, [Message("user", prompt)])
        self.agent.log_cycle_handler.log_cycle(
            self.agent.ai_name,
            self.agent.created_at,
@@ -40,6 +40,21 @@ supported_memory = ["json_file", "no_memory"]


def get_memory(config: Config) -> VectorMemory:
    """Returns a memory object corresponding to the memory backend specified in the config.

    The type of memory object returned depends on the value of the `memory_backend`
    attribute in the configuration. E.g. if `memory_backend` is set to "pinecone", a
    `PineconeMemory` object is returned. If it is set to "redis", a `RedisMemory`
    object is returned.
    By default, a `JSONFileMemory` object is returned.

    Params:
        config: A configuration object that contains information about the memory backend
            to be used and other relevant parameters.

    Returns:
        VectorMemory: an instance of a memory object based on the configuration provided.
    """
    memory = None

    match config.memory_backend:
@@ -74,6 +74,7 @@ class MemoryItem:
            if len(chunks) == 1
            else summarize_text(
                "\n\n".join(chunk_summaries),
                config,
                instruction=how_to_summarize,
                question=question_for_summary,
            )[0]
@@ -41,21 +41,20 @@ def get_embedding(
        input = [text.replace("\n", " ") for text in input]

    model = config.embedding_model
    if config.use_azure:
        kwargs = {"engine": config.get_azure_deployment_id_for_model(model)}
    else:
        kwargs = {"model": model}
    kwargs = {"model": model}
    kwargs.update(config.get_openai_credentials(model))

    logger.debug(
        f"Getting embedding{f's for {len(input)} inputs' if multiple else ''}"
        f" with model '{model}'"
        + (f" via Azure deployment '{kwargs['engine']}'" if config.use_azure else "")
    )

    embeddings = iopenai.create_embedding(
        input,
        **kwargs,
        api_key=config.openai_api_key,
    ).data

    if not multiple:
@@ -22,6 +22,7 @@ class Command:
        parameters: list[CommandParameter],
        enabled: bool | Callable[[Config], bool] = True,
        disabled_reason: Optional[str] = None,
        aliases: list[str] = [],
    ):
        self.name = name
        self.description = description
@@ -29,6 +30,7 @@ class Command:
        self.parameters = parameters
        self.enabled = enabled
        self.disabled_reason = disabled_reason
        self.aliases = aliases

    def __call__(self, *args, **kwargs) -> Any:
        if hasattr(kwargs, "config") and callable(self.enabled):
@@ -1,6 +1,6 @@
import importlib
import inspect
from typing import Any, Callable
from typing import Any

from autogpt.command_decorator import AUTO_GPT_COMMAND_IDENTIFIER
from autogpt.logs import logger
@@ -15,10 +15,11 @@ class CommandRegistry:
    directory.
    """

    commands: dict[str, Command]
    commands: dict[str, Command] = {}
    commands_aliases: dict[str, Command] = {}

    def __init__(self):
        self.commands = {}
    def __contains__(self, command_name: str):
        return command_name in self.commands or command_name in self.commands_aliases

    def _import_module(self, module_name: str) -> Any:
        return importlib.import_module(module_name)
@@ -33,11 +34,21 @@ class CommandRegistry:
        )
        self.commands[cmd.name] = cmd

    def unregister(self, command_name: str):
        if command_name in self.commands:
            del self.commands[command_name]
        if cmd.name in self.commands_aliases:
            logger.warn(
                f"Command '{cmd.name}' will overwrite alias with the same name of "
                f"'{self.commands_aliases[cmd.name]}'!"
            )
        for alias in cmd.aliases:
            self.commands_aliases[alias] = cmd

    def unregister(self, command: Command) -> None:
        if command.name in self.commands:
            del self.commands[command.name]
            for alias in command.aliases:
                del self.commands_aliases[alias]
        else:
            raise KeyError(f"Command '{command_name}' not found in registry.")
            raise KeyError(f"Command '{command.name}' not found in registry.")

    def reload_commands(self) -> None:
        """Reloads all loaded command plugins."""
@@ -48,14 +59,17 @@ class CommandRegistry:
        if hasattr(reloaded_module, "register"):
            reloaded_module.register(self)

    def get_command(self, name: str) -> Callable[..., Any]:
        return self.commands[name]
    def get_command(self, name: str) -> Command | None:
        if name in self.commands:
            return self.commands[name]

        if name in self.commands_aliases:
            return self.commands_aliases[name]

    def call(self, command_name: str, **kwargs) -> Any:
        if command_name not in self.commands:
            raise KeyError(f"Command '{command_name}' not found in registry.")
        command = self.commands[command_name]
        return command(**kwargs)
        if command := self.get_command(command_name):
            return command(**kwargs)
        raise KeyError(f"Command '{command_name}' not found in registry")

    def command_prompt(self) -> str:
        """
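A hedged usage sketch of the alias plumbing, where `cmd` stands in for a Command registered with aliases=["create_file"]:

registry = CommandRegistry()
registry.register(cmd)                             # indexes cmd.name and each of cmd.aliases
assert registry.get_command("create_file") is cmd  # alias lookup returns the same Command
registry.unregister(cmd)                           # removes both the name and its aliases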
@@ -1,4 +1,5 @@
"""Handles loading of plugins."""
from __future__ import annotations

import importlib.util
import inspect
@@ -7,7 +8,7 @@ import os
import sys
import zipfile
from pathlib import Path
from typing import List
from typing import TYPE_CHECKING, List
from urllib.parse import urlparse
from zipimport import zipimporter

@@ -16,7 +17,9 @@ import requests
from auto_gpt_plugin_template import AutoGPTPluginTemplate
from openapi_python_client.config import Config as OpenAPIConfig

from autogpt.config.config import Config
if TYPE_CHECKING:
    from autogpt.config import Config

from autogpt.logs import logger
from autogpt.models.base_open_ai_plugin import BaseOpenAIPlugin

@@ -218,7 +221,7 @@ def scan_plugins(config: Config, debug: bool = False) -> List[AutoGPTPluginTempl
    """
    loaded_plugins = []
    # Generic plugins
    plugins_path_path = Path(config.plugins_dir)
    plugins_path = Path(config.plugins_dir)

    plugins_config = config.plugins_config
    # Directory-based plugins
@@ -235,7 +238,9 @@ def scan_plugins(config: Config, debug: bool = False) -> List[AutoGPTPluginTempl
        plugin = sys.modules[qualified_module_name]

        if not plugins_config.is_enabled(plugin_module_name):
            logger.warn(f"Plugin {plugin_module_name} found but not configured")
            logger.warn(
                f"Plugin folder {plugin_module_name} found but not configured. If this is a legitimate plugin, please add it to plugins_config.yaml (key: {plugin_module_name})."
            )
            continue

        for _, class_obj in inspect.getmembers(plugin):
@@ -246,23 +251,25 @@ def scan_plugins(config: Config, debug: bool = False) -> List[AutoGPTPluginTempl
                loaded_plugins.append(class_obj())

    # Zip-based plugins
    for plugin in plugins_path_path.glob("*.zip"):
    for plugin in plugins_path.glob("*.zip"):
        if moduleList := inspect_zip_for_modules(str(plugin), debug):
            for module in moduleList:
                plugin = Path(plugin)
                module = Path(module)
                logger.debug(f"Plugin: {plugin} Module: {module}")
                logger.debug(f"Zipped Plugin: {plugin}, Module: {module}")
                zipped_package = zipimporter(str(plugin))
                zipped_module = zipped_package.load_module(str(module.parent))

                for key in dir(zipped_module):
                    if key.startswith("__"):
                        continue

                    a_module = getattr(zipped_module, key)
                    if not inspect.isclass(a_module):
                        continue

                    if (
                        inspect.isclass(a_module)
                        and issubclass(a_module, AutoGPTPluginTemplate)
                        issubclass(a_module, AutoGPTPluginTemplate)
                        and a_module.__name__ != "AutoGPTPluginTemplate"
                    ):
                        plugin_name = a_module.__name__
@@ -271,24 +278,23 @@ def scan_plugins(config: Config, debug: bool = False) -> List[AutoGPTPluginTempl

                        if plugin_configured and plugin_enabled:
                            logger.debug(
                                f"Loading plugin {plugin_name} as it was enabled in config."
                                f"Loading plugin {plugin_name}. Enabled in plugins_config.yaml."
                            )
                            loaded_plugins.append(a_module())
                        elif plugin_configured and not plugin_enabled:
                            logger.debug(
                                f"Not loading plugin {plugin_name} as it was disabled in config."
                                f"Not loading plugin {plugin_name}. Disabled in plugins_config.yaml."
                            )
                        elif not plugin_configured:
                            logger.warn(
                                f"Not loading plugin {plugin_name} as it was not found in config. "
                                f"Please check your config. Starting with 0.4.1, plugins will not be loaded unless "
                                f"they are enabled in plugins_config.yaml. Zipped plugins should use the class "
                                f"name ({plugin_name}) as the key."
                                f"Not loading plugin {plugin_name}. Key '{plugin_name}' was not found in plugins_config.yaml. "
                                f"Zipped plugins should use the class name ({plugin_name}) as the key."
                            )
                    else:
                        logger.debug(
                            f"Skipping {key}: {a_module.__name__} because it doesn't subclass AutoGPTPluginTemplate."
                        )
                    if a_module.__name__ != "AutoGPTPluginTemplate":
                        logger.debug(
                            f"Skipping '{key}' because it doesn't subclass AutoGPTPluginTemplate."
                        )

    # OpenAI plugins
    if config.plugins_openai:
@@ -299,7 +305,9 @@ def scan_plugins(config: Config, debug: bool = False) -> List[AutoGPTPluginTempl
        )
        for url, openai_plugin_meta in manifests_specs_clients.items():
            if not plugins_config.is_enabled(url):
                logger.warn(f"Plugin {plugin_module_name} found but not configured")
                logger.warn(
                    f"OpenAI Plugin {plugin_module_name} found but not configured"
                )
                continue

            plugin = BaseOpenAIPlugin(openai_plugin_meta)
@@ -1,14 +1,11 @@
from typing import Any

from pydantic import BaseModel

class PluginConfig:

class PluginConfig(BaseModel):
    """Class for holding configuration of a single plugin"""

    def __init__(self, name: str, enabled: bool = False, config: dict[str, Any] = None):
        self.name = name
        self.enabled = enabled
        # Arbitrary config options for this plugin. API keys or plugin-specific options live here.
        self.config = config or {}

    def __repr__(self):
        return f"PluginConfig('{self.name}', {self.enabled}, {str(self.config)}"
    name: str
    enabled: bool = False
    config: dict[str, Any] = None
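The class is now a plain pydantic model, so construction, validation, and serialization come from BaseModel instead of a hand-written __init__ and __repr__. A minimal standalone sketch (pydantic v1 API, field values illustrative):

from typing import Any, Optional
from pydantic import BaseModel

class PluginConfigSketch(BaseModel):
    name: str
    enabled: bool = False
    config: Optional[dict[str, Any]] = None

pc = PluginConfigSketch(name="example_plugin", enabled=True, config={"api_key": "placeholder"})
print(pc.dict())  # pydantic v1 serialization replaces the hand-written __repr__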
@@ -1,29 +1,19 @@
from __future__ import annotations

import os
from typing import Any, Union
from typing import Union

import yaml
from pydantic import BaseModel

from autogpt.config.config import Config
from autogpt.logs import logger
from autogpt.plugins.plugin_config import PluginConfig


class PluginsConfig:
class PluginsConfig(BaseModel):
    """Class for holding configuration of all plugins"""

    def __init__(self, plugins_config: dict[str, Any]):
        self.plugins = {}
        for name, plugin in plugins_config.items():
            if type(plugin) == dict:
                self.plugins[name] = PluginConfig(
                    name,
                    plugin.get("enabled", False),
                    plugin.get("config", {}),
                )
            elif type(plugin) == PluginConfig:
                self.plugins[name] = plugin
            else:
                raise ValueError(f"Invalid plugin config data type: {type(plugin)}")
    plugins: dict[str, PluginConfig]

    def __repr__(self):
        return f"PluginsConfig({self.plugins})"
@@ -33,20 +23,29 @@ class PluginsConfig:

    def is_enabled(self, name) -> bool:
        plugin_config = self.plugins.get(name)
        return plugin_config and plugin_config.enabled
        return plugin_config is not None and plugin_config.enabled

    @classmethod
    def load_config(cls, global_config: Config) -> "PluginsConfig":
        empty_config = cls({})
    def load_config(
        cls,
        plugins_config_file: str,
        plugins_denylist: list[str],
        plugins_allowlist: list[str],
    ) -> "PluginsConfig":
        empty_config = cls(plugins={})

        try:
            config_data = cls.deserialize_config_file(global_config=global_config)
            config_data = cls.deserialize_config_file(
                plugins_config_file,
                plugins_denylist,
                plugins_allowlist,
            )
            if type(config_data) != dict:
                logger.error(
                    f"Expected plugins config to be a dict, got {type(config_data)}, continuing without plugins"
                )
                return empty_config
            return cls(config_data)
            return cls(plugins=config_data)

        except BaseException as e:
            logger.error(
@@ -55,27 +54,59 @@ class PluginsConfig:
            return empty_config

    @classmethod
    def deserialize_config_file(cls, global_config: Config) -> dict[str, Any]:
        plugins_config_path = global_config.plugins_config_file
        if not os.path.exists(plugins_config_path):
    def deserialize_config_file(
        cls,
        plugins_config_file: str,
        plugins_denylist: list[str],
        plugins_allowlist: list[str],
    ) -> dict[str, PluginConfig]:
        if not os.path.exists(plugins_config_file):
            logger.warn("plugins_config.yaml does not exist, creating base config.")
            cls.create_empty_plugins_config(global_config=global_config)
            cls.create_empty_plugins_config(
                plugins_config_file,
                plugins_denylist,
                plugins_allowlist,
            )

        with open(plugins_config_path, "r") as f:
            return yaml.load(f, Loader=yaml.FullLoader)
        with open(plugins_config_file, "r") as f:
            plugins_config = yaml.load(f, Loader=yaml.FullLoader)

        plugins = {}
        for name, plugin in plugins_config.items():
            if type(plugin) == dict:
                plugins[name] = PluginConfig(
                    name=name,
                    enabled=plugin.get("enabled", False),
                    config=plugin.get("config", {}),
                )
            elif type(plugin) == PluginConfig:
                plugins[name] = plugin
            else:
                raise ValueError(f"Invalid plugin config data type: {type(plugin)}")
        return plugins

    @staticmethod
    def create_empty_plugins_config(global_config: Config):
    def create_empty_plugins_config(
        plugins_config_file: str,
        plugins_denylist: list[str],
        plugins_allowlist: list[str],
    ):
        """Create an empty plugins_config.yaml file. Fill it with values from old env variables."""
        base_config = {}

        logger.debug(f"Legacy plugin denylist: {plugins_denylist}")
        logger.debug(f"Legacy plugin allowlist: {plugins_allowlist}")

        # Backwards-compatibility shim
        for plugin_name in global_config.plugins_denylist:
        for plugin_name in plugins_denylist:
            base_config[plugin_name] = {"enabled": False, "config": {}}

        for plugin_name in global_config.plugins_allowlist:
        for plugin_name in plugins_allowlist:
            base_config[plugin_name] = {"enabled": True, "config": {}}

        with open(global_config.plugins_config_file, "w+") as f:
        logger.debug(f"Constructed base plugins config: {base_config}")

        logger.debug(f"Creating plugin config file {plugins_config_file}")
        with open(plugins_config_file, "w+") as f:
            f.write(yaml.dump(base_config))
        return base_config
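Passing plain values instead of the whole Config object unpicks the circular import between the config and plugins modules. A hedged sketch of the expected call shape (attribute names as used elsewhere in this diff):

plugins_config = PluginsConfig.load_config(
    plugins_config_file=config.plugins_config_file,
    plugins_denylist=config.plugins_denylist,
    plugins_allowlist=config.plugins_allowlist,
)
print(plugins_config.is_enabled("example_plugin"))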
@@ -82,7 +82,7 @@ def summarize_text(
    if instruction and question:
        raise ValueError("Parameters 'question' and 'instructions' cannot both be set")

    model = config.fast_llm_model
    model = config.fast_llm

    if question:
        instruction = (
@@ -136,7 +136,7 @@ def summarize_text(

    logger.info(f"Summarized {len(chunks)} chunks")

    summary, _ = summarize_text("\n\n".join(summaries), config, instruction)
    summary, _ = summarize_text("\n\n".join(summaries), config)
    return summary.strip(), [
        (summaries[i], chunks[i][0]) for i in range(0, len(chunks))
    ]
@@ -1,6 +1,8 @@
""" A module for generating custom prompt strings."""
from __future__ import annotations

import json
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, TypedDict

from autogpt.config import Config
from autogpt.json_utils.utilities import llm_response_schema
@@ -15,19 +17,33 @@ class PromptGenerator:
    resources, and performance evaluations.
    """

    def __init__(self) -> None:
        """
        Initialize the PromptGenerator object with empty lists of constraints,
        commands, resources, and performance evaluations.
        """
    class Command(TypedDict):
        label: str
        name: str
        params: dict[str, str]
        function: Optional[Callable]

    constraints: list[str]
    commands: list[Command]
    resources: list[str]
    performance_evaluation: list[str]
    command_registry: CommandRegistry | None

    # TODO: replace with AIConfig
    name: str
    role: str
    goals: list[str]

    def __init__(self):
        self.constraints = []
        self.commands = []
        self.resources = []
        self.performance_evaluation = []
        self.goals = []
        self.command_registry: CommandRegistry | None = None
        self.command_registry = None

        self.name = "Bob"
        self.role = "AI"
        self.goals = []
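A standalone sketch of what the new TypedDict buys: type checkers can now verify the shape of each command entry (values illustrative):

from typing import Callable, Optional, TypedDict

class Command(TypedDict):
    label: str
    name: str
    params: dict[str, str]
    function: Optional[Callable]

cmd: Command = {
    "label": "Write File",
    "name": "write_file",
    "params": {"filename": "str", "text": "str"},
    "function": None,
}
print(f'{cmd["label"]}: "{cmd["name"]}", params: {cmd["params"]}')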
    def add_constraint(self, constraint: str) -> None:
        """
@@ -42,29 +58,29 @@ class PromptGenerator:
        self,
        command_label: str,
        command_name: str,
        args=None,
        params: dict[str, str] = {},
        function: Optional[Callable] = None,
    ) -> None:
        """
        Add a command to the commands list with a label, name, and optional arguments.

        *Should only be used by plugins.* Native commands should be added
        directly to the CommandRegistry.

        Args:
            command_label (str): The label of the command.
            command_name (str): The name of the command.
            args (dict, optional): A dictionary containing argument names and their
            params (dict, optional): A dictionary containing argument names and their
                values. Defaults to None.
            function (callable, optional): A callable function to be called when
                the command is executed. Defaults to None.
        """
        if args is None:
            args = {}
        command_params = {name: type for name, type in params.items()}

        command_args = {arg_key: arg_value for arg_key, arg_value in args.items()}

        command = {
        command: PromptGenerator.Command = {
            "label": command_label,
            "name": command_name,
            "args": command_args,
            "params": command_params,
            "function": function,
        }

@@ -80,10 +96,10 @@ class PromptGenerator:
        Returns:
            str: The formatted command string.
        """
        args_string = ", ".join(
            f'"{key}": "{value}"' for key, value in command["args"].items()
        params_string = ", ".join(
            f'"{key}": "{value}"' for key, value in command["params"].items()
        )
        return f'{command["label"]}: "{command["name"]}", args: {args_string}'
        return f'{command["label"]}: "{command["name"]}", params: {params_string}'

    def add_resource(self, resource: str) -> None:
        """
@@ -1,3 +1,5 @@
from typing import Optional

from colorama import Fore

from autogpt.config.ai_config import AIConfig
@@ -42,14 +44,32 @@ def build_default_prompt_generator(config: Config) -> PromptGenerator:
    return prompt_generator


def construct_main_ai_config(config: Config) -> AIConfig:
def construct_main_ai_config(
    config: Config,
    name: Optional[str] = None,
    role: Optional[str] = None,
    goals: tuple[str] = tuple(),
) -> AIConfig:
    """Construct the prompt for the AI to respond to

    Returns:
        str: The prompt string
    """
    ai_config = AIConfig.load(config.ai_settings_file)
    if config.skip_reprompt and ai_config.ai_name:

    # Apply overrides
    if name:
        ai_config.ai_name = name
    if role:
        ai_config.ai_role = role
    if goals:
        ai_config.ai_goals = list(goals)

    if (
        all([name, role, goals])
        or config.skip_reprompt
        and all([ai_config.ai_name, ai_config.ai_role, ai_config.ai_goals])
    ):
        logger.typewriter_log("Name :", Fore.GREEN, ai_config.ai_name)
        logger.typewriter_log("Role :", Fore.GREEN, ai_config.ai_role)
        logger.typewriter_log("Goals:", Fore.GREEN, f"{ai_config.ai_goals}")
@@ -58,7 +78,7 @@ def construct_main_ai_config(config: Config) -> AIConfig:
        Fore.GREEN,
        "infinite" if ai_config.api_budget <= 0 else f"${ai_config.api_budget}",
    )
    elif ai_config.ai_name:
    elif all([ai_config.ai_name, ai_config.ai_role, ai_config.ai_goals]):
        logger.typewriter_log(
            "Welcome back! ",
            Fore.GREEN,
@@ -77,7 +97,7 @@ Continue ({config.authorise_key}/{config.exit_key}): """,
    if should_continue.lower() == config.exit_key:
        ai_config = AIConfig()

    if not ai_config.ai_name:
    if any([not ai_config.ai_name, not ai_config.ai_role, not ai_config.ai_goals]):
        ai_config = prompt_user(config)
        ai_config.save(config.ai_settings_file)
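With the new override parameters, a front end can pre-seed the whole AI profile and bypass the interactive prompts. A hedged sketch with illustrative values:

ai_config = construct_main_ai_config(
    config,
    name="Entrepreneur-GPT",
    role="an AI designed to autonomously develop and run businesses",
    goals=("Increase net worth", "Develop and manage multiple businesses"),
)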
123 autogpt/setup.py
@@ -1,5 +1,6 @@
"""Set up the AI and its goals"""
import re
from typing import Optional

from colorama import Fore, Style
from jinja2 import Template
@@ -17,14 +18,18 @@ from autogpt.prompts.default_prompts import (
)


def prompt_user(config: Config) -> AIConfig:
def prompt_user(
    config: Config, ai_config_template: Optional[AIConfig] = None
) -> AIConfig:
    """Prompt the user for input

    Params:
        config (Config): The Config object
        ai_config_template (AIConfig): The AIConfig object to use as a template

    Returns:
        AIConfig: The AIConfig object tailored to the user's input
    """
    ai_name = ""
    ai_config = None

    # Construct the prompt
    logger.typewriter_log(
@@ -34,29 +39,39 @@ def prompt_user(config: Config) -> AIConfig:
        speak_text=True,
    )

    # Get user desire
    logger.typewriter_log(
        "Create an AI-Assistant:",
        Fore.GREEN,
        "input '--manual' to enter manual mode.",
        speak_text=True,
    ai_config_template_provided = ai_config_template is not None and any(
        [
            ai_config_template.ai_goals,
            ai_config_template.ai_name,
            ai_config_template.ai_role,
        ]
    )

    user_desire = utils.clean_input(
        config, f"{Fore.LIGHTBLUE_EX}I want Auto-GPT to{Style.RESET_ALL}: "
    )
    user_desire = ""
    if not ai_config_template_provided:
        # Get user desire if command line overrides have not been passed in
        logger.typewriter_log(
            "Create an AI-Assistant:",
            Fore.GREEN,
            "input '--manual' to enter manual mode.",
            speak_text=True,
        )

    if user_desire == "":
        user_desire = utils.clean_input(
            config, f"{Fore.LIGHTBLUE_EX}I want Auto-GPT to{Style.RESET_ALL}: "
        )

    if user_desire.strip() == "":
        user_desire = DEFAULT_USER_DESIRE_PROMPT  # Default prompt

    # If user desire contains "--manual"
    if "--manual" in user_desire:
    # If user desire contains "--manual" or we have overridden any of the AI configuration
    if "--manual" in user_desire or ai_config_template_provided:
        logger.typewriter_log(
            "Manual Mode Selected",
            Fore.GREEN,
            speak_text=True,
        )
        return generate_aiconfig_manual(config)
        return generate_aiconfig_manual(config, ai_config_template)

    else:
        try:
@@ -72,7 +87,9 @@ def prompt_user(config: Config) -> AIConfig:
            return generate_aiconfig_manual(config)


def generate_aiconfig_manual(config: Config) -> AIConfig:
def generate_aiconfig_manual(
    config: Config, ai_config_template: Optional[AIConfig] = None
) -> AIConfig:
    """
    Interactively create an AI configuration by prompting the user to provide the name, role, and goals of the AI.

@@ -80,6 +97,10 @@ def generate_aiconfig_manual(config: Config) -> AIConfig:
    an AIConfig object. The user will be asked to provide a name and role for the AI, as well as up to five
    goals. If the user does not provide a value for any of the fields, default values will be used.

    Params:
        config (Config): The Config object
        ai_config_template (AIConfig): The AIConfig object to use as a template

    Returns:
        AIConfig: An AIConfig object containing the user-defined or default AI name, role, and goals.
    """
@@ -93,11 +114,15 @@ def generate_aiconfig_manual(config: Config) -> AIConfig:
        speak_text=True,
    )

    # Get AI Name from User
    logger.typewriter_log(
        "Name your AI: ", Fore.GREEN, "For example, 'Entrepreneur-GPT'"
    )
    ai_name = utils.clean_input(config, "AI Name: ")
    if ai_config_template and ai_config_template.ai_name:
        ai_name = ai_config_template.ai_name
    else:
        ai_name = ""
        # Get AI Name from User
        logger.typewriter_log(
            "Name your AI: ", Fore.GREEN, "For example, 'Entrepreneur-GPT'"
        )
        ai_name = utils.clean_input(config, "AI Name: ")
    if ai_name == "":
        ai_name = "Entrepreneur-GPT"

@@ -105,34 +130,40 @@ def generate_aiconfig_manual(config: Config) -> AIConfig:
        f"{ai_name} here!", Fore.LIGHTBLUE_EX, "I am at your service.", speak_text=True
    )

    # Get AI Role from User
    logger.typewriter_log(
        "Describe your AI's role: ",
        Fore.GREEN,
        "For example, 'an AI designed to autonomously develop and run businesses with"
        " the sole goal of increasing your net worth.'",
    )
    ai_role = utils.clean_input(config, f"{ai_name} is: ")
    if ai_config_template and ai_config_template.ai_role:
        ai_role = ai_config_template.ai_role
    else:
        # Get AI Role from User
        logger.typewriter_log(
            "Describe your AI's role: ",
            Fore.GREEN,
            "For example, 'an AI designed to autonomously develop and run businesses with"
            " the sole goal of increasing your net worth.'",
        )
        ai_role = utils.clean_input(config, f"{ai_name} is: ")
    if ai_role == "":
        ai_role = "an AI designed to autonomously develop and run businesses with the"
        " sole goal of increasing your net worth."

    # Enter up to 5 goals for the AI
    logger.typewriter_log(
        "Enter up to 5 goals for your AI: ",
        Fore.GREEN,
        "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage"
        " multiple businesses autonomously'",
    )
    logger.info("Enter nothing to load defaults, enter nothing when finished.")
    ai_goals = []
    for i in range(5):
        ai_goal = utils.clean_input(
            config, f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: "
    if ai_config_template and ai_config_template.ai_goals:
        ai_goals = ai_config_template.ai_goals
    else:
        # Enter up to 5 goals for the AI
        logger.typewriter_log(
            "Enter up to 5 goals for your AI: ",
            Fore.GREEN,
            "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage"
            " multiple businesses autonomously'",
        )
        if ai_goal == "":
            break
        ai_goals.append(ai_goal)
        logger.info("Enter nothing to load defaults, enter nothing when finished.")
        ai_goals = []
        for i in range(5):
            ai_goal = utils.clean_input(
                config, f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: "
            )
            if ai_goal == "":
                break
            ai_goals.append(ai_goal)
    if not ai_goals:
        ai_goals = [
            "Increase net worth",
@@ -178,7 +209,7 @@ def generate_aiconfig_automatic(user_prompt: str, config: Config) -> AIConfig:
    # Call LLM with the string as user input
    output = create_chat_completion(
        ChatSequence.for_model(
            config.fast_llm_model,
            config.fast_llm,
            [
                Message("system", system_prompt),
                Message("user", prompt_ai_config_automatic),
@@ -1,9 +1,14 @@
"""Base class for all voice classes."""
from __future__ import annotations

import abc
import re
from threading import Lock
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from autogpt.config import Config

from autogpt.config import Config
from autogpt.singleton import AbstractSingleton


@@ -40,7 +45,7 @@ class VoiceBase(AbstractSingleton):
        return self._speech(text, voice_index)

    @abc.abstractmethod
    def _setup(self) -> None:
    def _setup(self, config: Config) -> None:
        """
        Setup the voices, API key, etc.
        """
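Every VoiceBase subclass now receives the Config during setup instead of reading globals. A minimal illustrative subclass (not part of the codebase; the config field is hypothetical):

class EchoVoice(VoiceBase):
    def _setup(self, config: Config) -> None:
        self._prefix = getattr(config, "tts_prefix", "")  # hypothetical setting

    def _speech(self, text: str, voice_index: int = 0) -> bool:
        print(f"{self._prefix}{text}")
        return True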
@@ -1,11 +1,15 @@
"""ElevenLabs speech module"""
from __future__ import annotations

import os
from typing import TYPE_CHECKING

import requests
from playsound import playsound

from autogpt.config.config import Config
from autogpt.speech.base import VoiceBase
if TYPE_CHECKING:
    from autogpt.config import Config

from .base import VoiceBase

PLACEHOLDERS = {"your-voice-id"}
@@ -4,13 +4,14 @@ import os
import gtts
from playsound import playsound

from autogpt.config import Config
from autogpt.speech.base import VoiceBase


class GTTSVoice(VoiceBase):
    """GTTS Voice."""

    def _setup(self) -> None:
    def _setup(self, config: Config) -> None:
        pass

    def _speech(self, text: str, _: int = 0) -> bool:

@@ -1,13 +1,14 @@
""" MacOS TTS Voice. """
import os

from autogpt.config import Config
from autogpt.speech.base import VoiceBase


class MacOSTTS(VoiceBase):
    """MacOS TTS Voice."""

    def _setup(self) -> None:
    def _setup(self, config: Config) -> None:
        pass

    def _speech(self, text: str, voice_index: int = 0) -> bool:
@@ -1,13 +1,18 @@
""" Text to speech module """
from __future__ import annotations

import threading
from threading import Semaphore
from typing import TYPE_CHECKING

from autogpt.config.config import Config
from autogpt.speech.base import VoiceBase
from autogpt.speech.eleven_labs import ElevenLabsSpeech
from autogpt.speech.gtts import GTTSVoice
from autogpt.speech.macos_tts import MacOSTTS
from autogpt.speech.stream_elements_speech import StreamElementsSpeech
if TYPE_CHECKING:
    from autogpt.config import Config

from .base import VoiceBase
from .eleven_labs import ElevenLabsSpeech
from .gtts import GTTSVoice
from .macos_tts import MacOSTTS
from .stream_elements_speech import StreamElementsSpeech

_QUEUE_SEMAPHORE = Semaphore(
    1
@@ -36,10 +41,10 @@ def _get_voice_engine(config: Config) -> tuple[VoiceBase, VoiceBase]:
    if tts_provider == "elevenlabs":
        voice_engine = ElevenLabsSpeech(config)
    elif tts_provider == "macos":
        voice_engine = MacOSTTS()
        voice_engine = MacOSTTS(config)
    elif tts_provider == "streamelements":
        voice_engine = StreamElementsSpeech()
        voice_engine = StreamElementsSpeech(config)
    else:
        voice_engine = GTTSVoice()
        voice_engine = GTTSVoice(config)

    return GTTSVoice(), voice_engine
    return GTTSVoice(config), voice_engine
@@ -4,13 +4,14 @@ import os
import requests
from playsound import playsound

from autogpt.config import Config
from autogpt.speech.base import VoiceBase


class StreamElementsSpeech(VoiceBase):
    """Streamelements speech module for autogpt"""

    def _setup(self) -> None:
    def _setup(self, config: Config) -> None:
        """Setup the voices, API key, etc."""

    def _speech(self, text: str, voice: str, _: int = 0) -> bool:
@@ -10,6 +10,7 @@ agent.
from __future__ import annotations

from pathlib import Path
from typing import Optional

from autogpt.config import Config
from autogpt.logs import logger
@@ -77,7 +78,7 @@ class Workspace:
    @staticmethod
    def _sanitize_path(
        relative_path: str | Path,
        root: str | Path = None,
        root: Optional[str | Path] = None,
        restrict_to_root: bool = True,
    ) -> Path:
        """Resolve the relative path within the given root if possible.
@@ -139,7 +140,7 @@ class Workspace:
        return full_path

    @staticmethod
    def build_file_logger_path(config, workspace_directory):
    def build_file_logger_path(config: Config, workspace_directory: Path):
        file_logger_path = workspace_directory / "file_logger.txt"
        if not file_logger_path.exists():
            with file_logger_path.open(mode="w", encoding="utf-8") as f:
@@ -147,10 +148,12 @@ class Workspace:
        config.file_logger_path = str(file_logger_path)

    @staticmethod
    def get_workspace_directory(config: Config, workspace_directory: str = None):
    def get_workspace_directory(
        config: Config, workspace_directory: Optional[str | Path] = None
    ):
        if workspace_directory is None:
            workspace_directory = Path(__file__).parent / "auto_gpt_workspace"
        else:
        elif type(workspace_directory) == str:
            workspace_directory = Path(workspace_directory)
        # TODO: pass in the ai_settings file and the env file and have them cloned into
        # the workspace directory so we can bind them to the agent.
@@ -2,6 +2,6 @@ azure_api_type: azure
azure_api_base: your-base-url-for-azure
azure_api_version: api-version-for-azure
azure_model_map:
    fast_llm_model_deployment_id: gpt35-deployment-id-for-azure
    smart_llm_model_deployment_id: gpt4-deployment-id-for-azure
    fast_llm_deployment_id: gpt35-deployment-id-for-azure
    smart_llm_deployment_id: gpt4-deployment-id-for-azure
    embedding_model_deployment_id: embedding-deployment-id-for-azure
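The template keys follow the FAST_LLM/SMART_LLM rename. A hedged sketch of reading the map (plain YAML loading only; the real lookup lives in Config):

# Assumes an azure.yaml matching the template above.
import yaml

with open("azure.yaml") as f:
    azure_config = yaml.safe_load(f)

deployment_map = azure_config["azure_model_map"]
print(deployment_map["smart_llm_deployment_id"])  # gpt4-deployment-id-for-azure
print(deployment_map["fast_llm_deployment_id"])   # gpt35-deployment-id-for-azure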
Some files were not shown because too many files have changed in this diff