Sync release v0.4.6 with patches back into master (#5065)

Authored by Luke on 2023-07-28 14:43:21 +02:00, committed by GitHub
15 changed files with 111 additions and 42 deletions


@@ -16,13 +16,13 @@ OPENAI_API_KEY=your-openai-api-key
## USER_AGENT - Define the user-agent used by the requests library to browse website (string)
# USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
## AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml)
## AI_SETTINGS_FILE - Specifies which AI Settings file to use, relative to the Auto-GPT root directory. (defaults to ai_settings.yaml)
# AI_SETTINGS_FILE=ai_settings.yaml
## PLUGINS_CONFIG_FILE - The path to the plugins_config.yaml file (Default plugins_config.yaml)
## PLUGINS_CONFIG_FILE - The path to the plugins_config.yaml file, relative to the Auto-GPT root directory. (Default plugins_config.yaml)
# PLUGINS_CONFIG_FILE=plugins_config.yaml
## PROMPT_SETTINGS_FILE - Specifies which Prompt Settings file to use (defaults to prompt_settings.yaml)
## PROMPT_SETTINGS_FILE - Specifies which Prompt Settings file to use, relative to the Auto-GPT root directory. (defaults to prompt_settings.yaml)
# PROMPT_SETTINGS_FILE=prompt_settings.yaml
## OPENAI_API_BASE_URL - Custom url for the OpenAI API, useful for connecting to custom backends. No effect if USE_AZURE is true, leave blank to keep the default url
@@ -58,7 +58,7 @@ OPENAI_API_KEY=your-openai-api-key
## USE_AZURE - Use Azure OpenAI or not (Default: False)
# USE_AZURE=False
## AZURE_CONFIG_FILE - The path to the azure.yaml file (Default: azure.yaml)
## AZURE_CONFIG_FILE - The path to the azure.yaml file, relative to the Auto-GPT root directory. (Default: azure.yaml)
# AZURE_CONFIG_FILE=azure.yaml
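The practical effect of these comment changes is that relative values for AI_SETTINGS_FILE, PROMPT_SETTINGS_FILE, PLUGINS_CONFIG_FILE and AZURE_CONFIG_FILE are now interpreted against the Auto-GPT root directory rather than the process's current working directory. A minimal sketch of that kind of resolution, using a hypothetical helper (the actual lookup lives in the Config and Workspace changes further down):

import os
from pathlib import Path

def resolve_against_root(root_dir: Path, value: str) -> Path:
    # Hypothetical illustration: a relative settings path is joined onto the
    # Auto-GPT root directory; an absolute path is left untouched.
    path = Path(value)
    return path if path.is_absolute() else root_dir / path

resolve_against_root(Path("/opt/Auto-GPT"), os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml"))
# -> /opt/Auto-GPT/ai_settings.yaml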


@@ -4,23 +4,24 @@
📖 *User Guide*: https://docs.agpt.co.
👩 *Contributors Wiki*: https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing.
# v0.4.5 RELEASE HIGHLIGHTS! 🚀
# v0.4.6 RELEASE HIGHLIGHTS! 🚀
# -----------------------------
This release includes under-the-hood improvements and bug fixes, such as more
accurate token counts for OpenAI functions, faster CI builds, improved plugin
handling, and refactoring of the Config class for better maintainability.
This release includes under-the-hood improvements and bug fixes, including better UTF-8
special character support, workspace write access for sandboxed Python execution,
more robust path resolution for config files and the workspace, and a full restructure
of the Agent class, the "brain" of Auto-GPT, to make it more extensible.
We have also released some documentation updates, including:
- *How to share your system logs*
Visit [docs/share-your-logs.md] to learn how to share logs with us
via a log analyzer graciously contributed by https://www.e2b.dev/
- *Auto-GPT re-architecture documentation*
You can learn more about the inner-workings of the Auto-GPT re-architecture
released last cycle, via these links:
* [autogpt/core/README.md]
* [autogpt/core/ARCHITECTURE_NOTES.md]
Take a look at the Release Notes on GitHub for the full changelog!
https://github.com/Significant-Gravitas/Auto-GPT/releases.


@@ -27,7 +27,7 @@ def bootstrap_agent(task, continuous_mode) -> Agent:
config.plain_output = True
command_registry = get_command_registry(config)
config.memory_backend = "no_memory"
config.workspace_path = Workspace.set_workspace_directory(config)
config.workspace_path = Workspace.init_workspace_directory(config)
config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path)
ai_config = AIConfig(
ai_name="Auto-GPT",


@@ -16,7 +16,10 @@ import click
@click.option(
"--ai-settings",
"-C",
help="Specifies which ai_settings.yaml file to use, will also automatically skip the re-prompt.",
help=(
"Specifies which ai_settings.yaml file to use, relative to the Auto-GPT"
" root directory. Will also automatically skip the re-prompt."
),
)
@click.option(
"--prompt-settings",
@@ -129,7 +132,9 @@ def main(
browser_name=browser_name,
allow_downloads=allow_downloads,
skip_news=skip_news,
working_directory=Path(__file__).parent.parent, # TODO: make this an option
working_directory=Path(
__file__
).parent.parent.parent, # TODO: make this an option
workspace_directory=workspace_directory,
install_plugin_deps=install_plugin_deps,
ai_name=ai_name,
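The extra `.parent` here means the default working directory is now resolved three levels above this module instead of two; each `.parent` climbs one directory, as the small illustration below shows (the concrete file location is an assumption made only for the example):

from pathlib import Path

module_path = Path("/repo/autogpt/app/cli.py")  # assumed location, for illustration only
print(module_path.parent)                # /repo/autogpt/app
print(module_path.parent.parent)         # /repo/autogpt
print(module_path.parent.parent.parent)  # /repo  <- value used as working_directory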


@@ -126,7 +126,7 @@ def run_auto_gpt(
# TODO: have this directory live outside the repository (e.g. in a user's
# home directory) and have it come in as a command line argument or part of
# the env file.
config.workspace_path = Workspace.set_workspace_directory(
config.workspace_path = Workspace.init_workspace_directory(
config, workspace_directory
)
@@ -511,7 +511,7 @@ Continue ({config.authorise_key}/{config.exit_key}): """,
if any([not ai_config.ai_name, not ai_config.ai_role, not ai_config.ai_goals]):
ai_config = prompt_user(config)
ai_config.save(config.ai_settings_file)
ai_config.save(config.workdir / config.ai_settings_file)
if config.restrict_to_workspace:
logger.typewriter_log(
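Saving to config.workdir / config.ai_settings_file makes the destination root-relative; with pathlib, joining leaves an absolute right-hand path unchanged, so absolute settings paths keep working. A short sketch of that join behaviour:

from pathlib import Path

workdir = Path("/opt/Auto-GPT")                   # stand-in for config.workdir
print(workdir / "ai_settings.yaml")               # /opt/Auto-GPT/ai_settings.yaml
print(workdir / "/etc/autogpt/ai_settings.yaml")  # /etc/autogpt/ai_settings.yaml (absolute path wins)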


@@ -25,7 +25,7 @@ def text_checksum(text: str) -> str:
def operations_from_log(
log_path: str,
log_path: str | Path,
) -> Generator[tuple[Operation, str, str | None], None, None]:
"""Parse the file operations log and return a tuple containing the log entries"""
try:
@@ -52,7 +52,7 @@ def operations_from_log(
log.close()
def file_operations_state(log_path: str) -> dict[str, str]:
def file_operations_state(log_path: str | Path) -> dict[str, str]:
"""Iterates over the operations log and returns the expected state.
Parses a log file at config.file_logger_path to construct a dictionary that maps


@@ -15,8 +15,11 @@ from pydantic import Field, validator
from autogpt.core.configuration.schema import Configurable, SystemSettings
from autogpt.plugins.plugins_config import PluginsConfig
AI_SETTINGS_FILE = "ai_settings.yaml"
AZURE_CONFIG_FILE = "azure.yaml"
PLUGINS_CONFIG_FILE = "plugins_config.yaml"
PROMPT_SETTINGS_FILE = "prompt_settings.yaml"
GPT_4_MODEL = "gpt-4"
GPT_3_MODEL = "gpt-3.5-turbo"
@@ -44,11 +47,11 @@ class Config(SystemSettings, arbitrary_types_allowed=True):
# Agent Control Settings #
##########################
# Paths
ai_settings_file: str = "ai_settings.yaml"
prompt_settings_file: str = "prompt_settings.yaml"
ai_settings_file: str = AI_SETTINGS_FILE
prompt_settings_file: str = PROMPT_SETTINGS_FILE
workdir: Path = None
workspace_path: Optional[Path] = None
file_logger_path: Optional[str] = None
file_logger_path: Optional[Path] = None
# Model configuration
fast_llm: str = "gpt-3.5-turbo"
smart_llm: str = "gpt-4"
@@ -218,8 +221,10 @@ class ConfigBuilder(Configurable[Config]):
"exit_key": os.getenv("EXIT_KEY"),
"plain_output": os.getenv("PLAIN_OUTPUT", "False") == "True",
"shell_command_control": os.getenv("SHELL_COMMAND_CONTROL"),
"ai_settings_file": os.getenv("AI_SETTINGS_FILE"),
"prompt_settings_file": os.getenv("PROMPT_SETTINGS_FILE"),
"ai_settings_file": os.getenv("AI_SETTINGS_FILE", AI_SETTINGS_FILE),
"prompt_settings_file": os.getenv(
"PROMPT_SETTINGS_FILE", PROMPT_SETTINGS_FILE
),
"fast_llm": os.getenv("FAST_LLM", os.getenv("FAST_LLM_MODEL")),
"smart_llm": os.getenv("SMART_LLM", os.getenv("SMART_LLM_MODEL")),
"embedding_model": os.getenv("EMBEDDING_MODEL"),
@@ -256,7 +261,9 @@ class ConfigBuilder(Configurable[Config]):
"redis_password": os.getenv("REDIS_PASSWORD"),
"wipe_redis_on_start": os.getenv("WIPE_REDIS_ON_START", "True") == "True",
"plugins_dir": os.getenv("PLUGINS_DIR"),
"plugins_config_file": os.getenv("PLUGINS_CONFIG_FILE"),
"plugins_config_file": os.getenv(
"PLUGINS_CONFIG_FILE", PLUGINS_CONFIG_FILE
),
"chat_messages_enabled": os.getenv("CHAT_MESSAGES_ENABLED") == "True",
}
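Because the new module-level filename constants double as os.getenv defaults, these Config fields can no longer come back as None when the corresponding environment variable is unset. A minimal illustration of the difference:

import os

AI_SETTINGS_FILE = "ai_settings.yaml"      # module-level default, as introduced above
os.environ.pop("AI_SETTINGS_FILE", None)   # simulate an unset variable

print(os.getenv("AI_SETTINGS_FILE"))                     # None (old behaviour: field left unset)
print(os.getenv("AI_SETTINGS_FILE", AI_SETTINGS_FILE))   # 'ai_settings.yaml' (new behaviour)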


@@ -87,7 +87,7 @@ def bootstrap_agent(task, continuous_mode) -> Agent:
config.plain_output = True
command_registry = get_command_registry(config)
config.memory_backend = "no_memory"
config.workspace_path = Workspace.set_workspace_directory(config)
config.workspace_path = Workspace.init_workspace_directory(config)
config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path)
ai_config = AIConfig(
ai_name="Auto-GPT",


@@ -144,21 +144,24 @@ class Workspace:
return full_path
@staticmethod
def build_file_logger_path(workspace_directory: Path) -> str:
def build_file_logger_path(workspace_directory: Path) -> Path:
file_logger_path = workspace_directory / "file_logger.txt"
if not file_logger_path.exists():
with file_logger_path.open(mode="w", encoding="utf-8") as f:
f.write("File Operation Logger ")
return str(file_logger_path)
return file_logger_path
@staticmethod
def set_workspace_directory(
config: Config, workspace_directory: Optional[str | Path] = None
def init_workspace_directory(
config: Config, override_workspace_path: Optional[str | Path] = None
) -> Path:
if workspace_directory is None:
workspace_directory = config.workdir / "auto_gpt_workspace"
elif type(workspace_directory) == str:
workspace_directory = Path(workspace_directory)
if override_workspace_path is None:
workspace_path = config.workdir / "auto_gpt_workspace"
elif type(override_workspace_path) == str:
workspace_path = Path(override_workspace_path)
else:
workspace_path = override_workspace_path
# TODO: pass in the ai_settings file and the env file and have them cloned into
# the workspace directory so we can bind them to the agent.
return Workspace.make_workspace(workspace_directory)
return Workspace.make_workspace(workspace_path)
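Callers use the renamed helper exactly as shown in the bootstrap_agent hunks above; a minimal sketch, assuming the environment provides a valid configuration:

from pathlib import Path
from autogpt.config import ConfigBuilder
from autogpt.workspace import Workspace

# Mirrors the updated callers above; not additional API.
config = ConfigBuilder.build_config_from_env(workdir=Path().resolve())
config.workspace_path = Workspace.init_workspace_directory(config)
# -> <workdir>/auto_gpt_workspace when no override path is passed
config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path)
# -> Path to <workspace>/file_logger.txt, created with a header line if missing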

benchmarks.py (new file)

@@ -0,0 +1,51 @@
from pathlib import Path
from autogpt.agents import Agent
from autogpt.app.main import run_interaction_loop
from autogpt.commands import COMMAND_CATEGORIES
from autogpt.config import AIConfig, Config, ConfigBuilder
from autogpt.memory.vector import get_memory
from autogpt.models.command_registry import CommandRegistry
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
from autogpt.workspace import Workspace
PROJECT_DIR = Path().resolve()
def run_task(task) -> None:
agent = bootstrap_agent(task)
run_interaction_loop(agent)
def bootstrap_agent(task):
config = ConfigBuilder.build_config_from_env(workdir=PROJECT_DIR)
config.continuous_mode = False
config.temperature = 0
config.plain_output = True
command_registry = get_command_registry(config)
config.memory_backend = "no_memory"
config.workspace_path = Workspace.init_workspace_directory(config)
config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path)
ai_config = AIConfig(
ai_name="Auto-GPT",
ai_role="a multi-purpose AI assistant.",
ai_goals=[task.user_input],
)
ai_config.command_registry = command_registry
return Agent(
memory=get_memory(config),
command_registry=command_registry,
ai_config=ai_config,
config=config,
triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
)
def get_command_registry(config: Config):
command_registry = CommandRegistry()
enabled_command_categories = [
x for x in COMMAND_CATEGORIES if x not in config.disabled_command_categories
]
for command_category in enabled_command_categories:
command_registry.import_commands(command_category)
return command_registry
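As written, run_task only needs an object exposing a user_input attribute, which becomes the agent's single goal; a minimal, hypothetical invocation:

from types import SimpleNamespace

# Hypothetical task object; bootstrap_agent above reads only task.user_input.
run_task(SimpleNamespace(user_input="Write 'hello world' to output.txt"))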


@@ -186,7 +186,7 @@ View memory usage by using the `--debug` flag :)
## 🧠 Memory pre-seeding
!!! warning
Data ingestion is broken in v0.4.5 and possibly earlier versions. This is a known issue that will be addressed in future releases. Follow these issues for updates.
Data ingestion is broken in v0.4.6 and possibly earlier versions. This is a known issue that will be addressed in future releases. Follow these issues for updates.
[Issue 4435](https://github.com/Significant-Gravitas/Auto-GPT/issues/4435)
[Issue 4024](https://github.com/Significant-Gravitas/Auto-GPT/issues/4024)
[Issue 2076](https://github.com/Significant-Gravitas/Auto-GPT/issues/2076)


@@ -4,9 +4,10 @@ Configuration is controlled through the `Config` object. You can set configurati
## Environment Variables
- `AI_SETTINGS_FILE`: Location of AI Settings file. Default: ai_settings.yaml
- `AI_SETTINGS_FILE`: Location of the AI Settings file relative to the Auto-GPT root directory. Default: ai_settings.yaml
- `AUDIO_TO_TEXT_PROVIDER`: Audio To Text Provider. Only option currently is `huggingface`. Default: huggingface
- `AUTHORISE_COMMAND_KEY`: Key response accepted when authorising commands. Default: y
- `AZURE_CONFIG_FILE`: Location of the Azure Config file relative to the Auto-GPT root directory. Default: azure.yaml
- `BROWSE_CHUNK_MAX_LENGTH`: When browsing website, define the length of chunks to summarize. Default: 3000
- `BROWSE_SPACY_LANGUAGE_MODEL`: [spaCy language model](https://spacy.io/usage/models) to use when creating chunks. Default: en_core_web_sm
- `CHAT_MESSAGES_ENABLED`: Enable chat messages. Optional
@@ -32,8 +33,8 @@ Configuration is controlled through the `Config` object. You can set configurati
- `OPENAI_API_KEY`: *REQUIRED*- Your [OpenAI API Key](https://platform.openai.com/account/api-keys).
- `OPENAI_ORGANIZATION`: Organization ID in OpenAI. Optional.
- `PLAIN_OUTPUT`: Plain output, which disables the spinner. Default: False
- `PLUGINS_CONFIG_FILE`: Path of plugins_config.yaml file. Default: plugins_config.yaml
- `PROMPT_SETTINGS_FILE`: Location of Prompt Settings file. Default: prompt_settings.yaml
- `PLUGINS_CONFIG_FILE`: Path of the Plugins Config file relative to the Auto-GPT root directory. Default: plugins_config.yaml
- `PROMPT_SETTINGS_FILE`: Location of the Prompt Settings file relative to the Auto-GPT root directory. Default: prompt_settings.yaml
- `REDIS_HOST`: Redis Host. Default: localhost
- `REDIS_PASSWORD`: Redis Password. Optional. Default:
- `REDIS_PORT`: Redis Port. Default: 6379
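These variables are read once when the Config object is built; a short sketch of exercising the root-relative settings programmatically, following the pattern used by the new benchmarks.py (paths and values here are examples only):

import os
from pathlib import Path
from autogpt.config import ConfigBuilder

os.environ["AI_SETTINGS_FILE"] = "configs/my_ai_settings.yaml"        # relative to the Auto-GPT root
os.environ["PROMPT_SETTINGS_FILE"] = "configs/my_prompt_settings.yaml"

config = ConfigBuilder.build_config_from_env(workdir=Path().resolve())
print(config.ai_settings_file)   # configs/my_ai_settings.yaml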


@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
[project]
name = "agpt"
version = "0.4.5"
version = "0.4.6"
authors = [
{ name="Torantulino", email="support@agpt.co" },
]


@@ -52,6 +52,8 @@ def config(
if not os.environ.get("OPENAI_API_KEY"):
os.environ["OPENAI_API_KEY"] = "sk-dummy"
config.workspace_path = workspace.root
# HACK: this is necessary to ensure PLAIN_OUTPUT takes effect
logger.config = config
@@ -84,7 +86,7 @@ def api_manager() -> ApiManager:
@pytest.fixture
def agent(config: Config, workspace: Workspace) -> Agent:
def agent(config: Config) -> Agent:
ai_config = AIConfig(
ai_name="Base",
ai_role="A base AI",


@@ -4,7 +4,6 @@ from autogpt.agents import Agent
from autogpt.config import AIConfig, Config
from autogpt.memory.vector import get_memory
from autogpt.models.command_registry import CommandRegistry
from autogpt.workspace import Workspace
@pytest.fixture
@@ -20,7 +19,7 @@ def memory_json_file(config: Config):
@pytest.fixture
def dummy_agent(config: Config, memory_json_file, workspace: Workspace):
def dummy_agent(config: Config, memory_json_file):
command_registry = CommandRegistry()
ai_config = AIConfig(