deleted swifty agent

This commit is contained in:
SwiftyOS
2023-09-16 18:24:04 +02:00
parent dd2078a6f5
commit 03abb32cca
46 changed files with 0 additions and 7323 deletions

View File

@@ -1,11 +0,0 @@
# Your OpenAI API Key. If GPT-4 is available it will use that, otherwise will use 3.5-turbo
OPENAI_API_KEY=abc
# If you want to enable Helicone proxy and caching
HELICONE_KEY=abc
OPENAI_API_BASE=https://oai.hconeai.com/v1
# Control log level
LOG_LEVEL=INFO
DATABASE_STRING="sqlite:///agent.db"
PORT=8000
AGENT_WORKSPACE="agbenchmark_config/workspace"

View File

@@ -1,15 +0,0 @@
[flake8]
max-line-length = 88
# Fix: the value was quoted ("E303, ...") — configparser keeps the quotes,
# so flake8 saw the literal code '"E303' and the selection was broken.
# Codes are given bare and comma-separated.
select = E303,W293,W292,E305,E231,E302
# Fix: '.env' was missing its trailing comma, which merged it with the
# following 'venv*/*' entry into one broken exclude pattern.
exclude =
    .tox,
    __pycache__,
    *.pyc,
    .env,
    venv*/*,
    .venv/*,
    reports/*,
    dist/*,
    agent/*,
    code,
    agbenchmark/challenges/*

View File

@@ -1,174 +0,0 @@
## Original ignores
autogpt/keys.py
autogpt/*.json
**/auto_gpt_workspace/*
*.mpeg
.env
azure.yaml
ai_settings.yaml
last_run_ai_settings.yaml
.vscode
.idea/*
auto-gpt.json
log.txt
log-ingestion.txt
logs
*.log
*.mp3
mem.sqlite3
venvAutoGPT
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
plugins/
plugins_config.yaml
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
site/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.direnv/
.env
.venv
env/
venv*/
ENV/
env.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
llama-*
vicuna-*
# mac
.DS_Store
openai/
# news
CURRENT_BULLETIN.md
agbenchmark_config/workspace
agbenchmark_config/reports
*.sqlite
.agbench
.agbenchmark
.benchmarks
.mypy_cache
.pytest_cache
.vscode
ig_*
agent.db

View File

@@ -1,43 +0,0 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
- id: check-added-large-files
args: ['--maxkb=500']
- id: check-byte-order-marker
- id: check-case-conflict
- id: check-merge-conflict
- id: check-symlinks
- id: debug-statements
- repo: https://github.com/pycqa/isort
rev: 5.12.0
hooks:
- id: isort
language_version: python3.11
- repo: https://github.com/psf/black
rev: 23.3.0
hooks:
- id: black
language_version: python3.11
# - repo: https://github.com/pre-commit/mirrors-mypy
# rev: 'v1.3.0'
# hooks:
# - id: mypy
- repo: local
hooks:
- id: autoflake
name: autoflake
entry: autoflake --in-place --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring forge/autogpt
language: python
types: [ python ]
# The mono repo has broken this. TODO: fix
# - id: pytest-check
# name: pytest-check
# entry: pytest
# language: system
# pass_filenames: false
# always_run: true

View File

@@ -1,40 +0,0 @@
# Use an official Python runtime as a parent image
FROM python:3.11-slim-buster as base

# Set work directory in the container
WORKDIR /app

# Install system dependencies
RUN apt-get update \
    && apt-get install -y build-essential curl ffmpeg \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Install Poetry - respects $POETRY_VERSION & $POETRY_HOME
ENV POETRY_VERSION=1.1.8 \
    POETRY_HOME="/opt/poetry" \
    POETRY_NO_INTERACTION=1 \
    POETRY_VIRTUALENVS_CREATE=false \
    PATH="$POETRY_HOME/bin:$PATH"
# Fix: Poetry was previously installed unpinned, silently ignoring
# POETRY_VERSION declared above.
RUN pip3 install "poetry==$POETRY_VERSION"

COPY pyproject.toml poetry.lock* /app/

# Project initialization.
# Fix: --no-root is required here — the project source has not been copied
# into the image yet, so only third-party dependencies can be installed at
# this layer (this also improves build caching).
RUN poetry install --no-interaction --no-ansi --no-root

ENV PYTHONPATH="/app:$PYTHONPATH"

FROM base as dependencies

# Copy project
COPY . /app

# Make port 8000 available to the world outside this container
# (the comment previously said port 80, contradicting the EXPOSE line).
EXPOSE 8000

# Run the application when the container launches
CMD ["poetry", "run", "python", "autogpt/__main__.py"]

View File

@@ -1,66 +0,0 @@
# 🚀 **Auto-GPT-Forge**: Build Your Own Auto-GPT Agent! 🧠
## (Release date: very soon)
### 🌌 Dive into the Universe of Auto-GPT Creation! 🌌
Ever dreamt of becoming the genius behind an AI agent? Dive into the *Forge*, where **you** become the creator!
---
### 🛠️ **Why Auto-GPT-Forge?**
- 💤 **No More Boilerplate!** Don't let the mundane tasks stop you. Fork and build without the headache of starting from scratch!
- 🧠 **Brain-centric Development!** All the tools you need so you can spend 100% of your time on what matters - crafting the brain of your AI!
- 🛠️ **Tooling ecosystem!** We work with the best in class tools to bring you the best experience possible!
---
### 🚀 **Get Started!**
Initial setup:
1. **[Fork the Project](https://github.com/Significant-Gravitas/Auto-GPT)**
2. Clone your repo
3. run `create_new_agent.sh name` changing name to the name you want to give your agent
4. `cd autogpts/name` where name is the name you entered above
5. Install [Poetry](https://python-poetry.org/docs/#installation) if you haven't already
6. Run `poetry install` to install the project dependencies
7. Activate the virtual environment with `poetry shell`
---
### 🏃‍♂️ **Running Your Agent**
1. Make sure you're in the poetry shell. If not, activate it with `poetry shell`.
2. Copy the example environment file with `cp .env.example .env`.
3. Open the `.env` file and add your OpenAI API key. You can get it from [OpenAI API](https://platform.openai.com/docs/developer-quickstart/).
4. Run your agent with `./run`. This command runs the server and watches for changes.
### 📊 **Benchmarking**
To run the benchmark, use the `agbenchmark start` command. Here are some options you can use with this command:
- `--backend`: If it's being run from the cli
- `-c, --category TEXT`: Specific category to run
- `-s, --skip-category TEXT`: Skips preventing the tests from this category from running
- `--test TEXT`: Specific test to run
- `--maintain`: Runs only regression tests
- `--improve`: Run only non-regression tests
- `--explore`: Only attempt challenges that have never been beaten
- `--mock`: Run with mock
- `--no_dep`: Run without dependencies
- `--nc`: Run without cutoff
- `--keep-answers`: Keep answers
- `--cutoff TEXT`: Set or override tests cutoff (seconds)
- `--help`: Show this message and exit.
For example, if you want to run a specific test, you can use the `--test` option like this:
`agbenchmark start --test your_test_name`
If you want to run the benchmark without dependencies, you can use the `--no_dep` option like this:
`agbenchmark start --no_dep`
You can combine multiple options as well. For example, to run a specific test without dependencies, you can do:
`agbenchmark start --test your_test_name --no_dep`
Remember to replace `your_test_name` with the name of the test you want to run.

View File

@@ -1,4 +0,0 @@
{
"workspace": {"input": "agbenchmark_config/workspace", "output": "agbenchmark_config/workspace"},
"host": "http://localhost:8000"
}

View File

@@ -1,53 +0,0 @@
import os

from dotenv import load_dotenv

# Environment variables must be loaded before any forge module reads them.
load_dotenv()

import forge.sdk.forge_log

forge.sdk.forge_log.setup_logger()

LOG = forge.sdk.forge_log.ForgeLogger(__name__)

# NOTE(review): the ASCII-art alignment below appears to have been lost in
# this copy of the file (leading whitespace stripped); it is reproduced as
# seen. Cosmetic only — the string is just printed at startup.
logo = """\n\n
d8888 888 .d8888b. 8888888b. 88888888888
d88888 888 d88P Y88b 888 Y88b 888
d88P888 888 888 888 888 888 888
d88P 888 888 888 888888 .d88b. 888 888 d88P 888
d88P 888 888 888 888 d88""88b 888 88888 8888888P" 888
d88P 888 888 888 888 888 888 888 888 888 888
d8888888888 Y88b 888 Y88b. Y88..88P Y88b d88P 888 888
d88P 888 "Y88888 "Y888 "Y88P" "Y8888P88 888 888
8888888888
888
888
8888888 .d88b. 888d888 .d88b. .d88b.
888 d88""88b 888P" d88P"88b d8P Y8b
888 888 888 888 888 888 88888888
888 Y88..88P 888 Y88b 888 Y8b.
888 "Y88P" 888 "Y88888 "Y8888
888
Y8b d88P
"Y88P" v0.1.0
\n"""

if __name__ == "__main__":
    # Entry point: wire the database and workspace together from the
    # environment and start the agent server.
    # These imports are deferred so that logging is configured first.
    import forge.agent
    import forge.sdk.db
    from forge.sdk.workspace import LocalWorkspace

    print(logo)

    db_string = os.getenv("DATABASE_STRING")
    workspace = LocalWorkspace(os.getenv("AGENT_WORKSPACE"))
    # NOTE(review): PORT arrives as a string from the environment; the
    # server formats it into a bind address, so that works — confirm if a
    # numeric port is ever required.
    port = os.getenv("PORT")

    database = forge.sdk.db.AgentDB(db_string, debug_enabled=True)
    agent = forge.agent.ForgeAgent(database=database, workspace=workspace)
    agent.start(port=port)

View File

@@ -1,106 +0,0 @@
from forge.sdk import Agent, AgentDB, Step, StepRequestBody, Workspace


class ForgeAgent(Agent):
    """
    The Forge handles the protocol boilerplate so you can focus on agent
    design.

    A good survey of the agent landscape: https://arxiv.org/abs/2308.11432
    — recommended reading to understand the possibilities.

    Anatomy of an agent:
    - Profile: agents perform better when prompted in a specific role
      (teacher, coder, planner, ...); see https://arxiv.org/abs/2305.14688.
      The profile can also drive which LLM is used, chosen dynamically per
      task.
    - Memory: lets the agent accumulate experience and behave consistently.
      Consider separate short-term (working) and long-term stores, and
      memory reflection (re-evaluating and condensing memories).
    - Planning: breaking a complex task into subtasks, with or without
      feedback, makes the agent more capable and reliable.
    - Action: translates decisions into concrete outcomes (e.g. writing a
      file).

    The Forge ships a basic module for each area; you are free to replace
    any of them. This class is just a starting point.
    """

    def __init__(self, database: AgentDB, workspace: Workspace):
        """
        Wire the agent to its stores.

        The database holds tasks, steps and artifact metadata; the
        workspace (a directory on the file system) holds artifact content.
        Subclass either to swap in your own storage.
        """
        super().__init__(database, workspace)

    async def execute_step(self, task_id: str, step_request: StepRequestBody) -> Step:
        """
        Execute one step of a task (the core of the agent protocol).

        A task carries an input string (for the benchmarks, the problem to
        solve) plus an arbitrary additional-input dict; fetch it with
        ``task = await self.db.get_task(task_id)`` if needed. The step
        request has the same shape. Return a completed Step — either do
        everything in one step, or return intermediate steps and let the
        caller decide whether to continue.

        This placeholder implementation always writes a fixed answer file
        and finishes in a single step.
        """
        # Example only: emit a canned answer as the step's artifact.
        self.workspace.write(task_id=task_id, path="output.txt", data=b"Washington D.C")

        step = await self.db.create_step(
            task_id=task_id, input=step_request, is_last=True
        )

        # Record artifact metadata so the protocol can list/serve the file.
        await self.db.create_artifact(
            task_id=task_id,
            step_id=step.step_id,
            file_name="output.txt",
            relative_path="",
            agent_created=True,
        )

        step.output = "Washington D.C"
        return step

View File

@@ -1,9 +0,0 @@
{% extends "techniques/expert.j2" %}
{% block expert %}Human Resources{% endblock %}
{% block prompt %}
Generate a profile for an expert who can help with the task '{{ task }}'. Please provide the following details:
Name: Enter the expert's name
Expertise: Specify the area in which the expert specializes
Goals: List 4 goals that the expert aims to achieve in order to help with the task
Assessment: Describe how the expert will assess whether they have successfully completed the task
{% endblock %}

View File

@@ -1,2 +0,0 @@
{% block prompt %} {% endblock %}
Let's work this out in a step by step way to be sure we have the right answer.

View File

@@ -1 +0,0 @@
Answer as an expert in {% block expert %} {% endblock %}. {% block prompt %}{% endblock %}

View File

@@ -1,5 +0,0 @@
{% block prompt %} {% endblock %}
Examples:
{% for example in examples %}
- {{ example }}
{% endfor %}

View File

@@ -1,25 +0,0 @@
"""
The Forge SDK. This is the core of the Forge. It contains the agent protocol, which is the
core of the Forge.
"""
from .agent import Agent
from .db import AgentDB
from .forge_log import ForgeLogger
from .prompting import PromptEngine
from .schema import (
Artifact,
ArtifactUpload,
Pagination,
Status,
Step,
StepInput,
StepOutput,
StepRequestBody,
Task,
TaskArtifactsListResponse,
TaskInput,
TaskListResponse,
TaskRequestBody,
TaskStepsListResponse,
)
from .workspace import LocalWorkspace, Workspace

View File

@@ -1,35 +0,0 @@
from typing import List

from ..registry import ability


@ability(
    name="list_files",
    description="List files in a directory",
    parameters=[
        {
            "name": "path",
            "description": "Path to the directory",
            "type": "string",
            "required": True,
        },
        {
            "name": "recursive",
            "description": "Recursively list files",
            "type": "boolean",
            "required": False,
        },
    ],
    output_type="list[str]",
)
def list_files(agent, path: str, recursive: bool = False) -> List[str]:
    """
    List directory entries under ``path``.

    Non-recursive mode returns bare entry names (``os.listdir``); recursive
    mode returns glob paths under ``path`` — note the two modes differ in
    path format, matching the original behaviour.
    """
    import glob
    import os

    if not recursive:
        return os.listdir(path)
    return glob.glob(os.path.join(path, "**"), recursive=True)

View File

@@ -1,187 +0,0 @@
import glob
import importlib
import inspect
import os
from typing import Any, Callable, List
import pydantic
class AbilityParameter(pydantic.BaseModel):
    """
    Declarative description of one parameter of an ability.

    Attributes:
        name: Parameter name as exposed to the agent.
        description: Short human-readable explanation of the parameter.
        type: Parameter type, expressed as a string (e.g. "string").
        required: Whether callers must supply this parameter.
    """

    name: str
    description: str
    type: str
    required: bool
class Ability(pydantic.BaseModel):
    """
    A callable ability registered with the system.

    Attributes:
        name (str): The name of the ability.
        description (str): A brief description of what the ability does.
        method (Callable): The function that implements the ability.
        parameters (List[AbilityParameter]): Parameters the ability accepts.
        output_type (str): Declared type of the ability's return value.
        category (str | None): Grouping assigned at registration time.
    """

    name: str
    description: str
    method: Callable
    parameters: List[AbilityParameter]
    output_type: str
    category: str | None = None

    def __call__(self, *args: Any, **kwds: Any) -> Any:
        """Invoke the underlying method, forwarding all arguments."""
        return self.method(*args, **kwds)

    def __str__(self) -> str:
        """
        Render a compact signature-style summary, e.g.
        ``name(param: type) -> output. Usage: description,``.

        Fix: the original accumulated ``name(`` then sliced two characters
        off the end, which corrupted the name whenever ``parameters`` was
        empty. Joining the parameter list preserves the exact output for
        non-empty parameter lists and handles the empty case correctly.
        """
        params = ", ".join(f"{param.name}: {param.type}" for param in self.parameters)
        return f"{self.name}({params}) -> {self.output_type}. Usage: {self.description},"
def ability(
    name: str, description: str, parameters: List[AbilityParameter], output_type: str
):
    """
    Decorator that attaches an :class:`Ability` record to a function.

    The decorated function must accept exactly the declared parameters plus
    an ``agent`` argument; a mismatch raises ``ValueError`` at decoration
    time so broken annotations fail fast.

    Args:
        name: Ability name used for registration and lookup.
        description: Human-readable summary shown to the agent.
        parameters: Declared parameters (dicts validated via
            ``AbilityParameter.parse_obj``).
        output_type: Declared return type, as a string.
    """

    def decorator(func):
        func_params = inspect.signature(func).parameters
        param_names = set(
            AbilityParameter.parse_obj(param).name for param in parameters
        )
        # Every ability implicitly receives the agent as an argument.
        param_names.add("agent")
        func_param_names = set(func_params.keys())
        if param_names != func_param_names:
            # Fix: corrected "acatually" -> "actually" in the error text.
            raise ValueError(
                f"Mismatch in parameter names. Ability Annotation includes {param_names}, but function actually takes {func_param_names} in function {func.__name__} signature"
            )
        func.ability = Ability(
            name=name,
            description=description,
            parameters=parameters,
            method=func,
            output_type=output_type,
        )
        return func

    return decorator
class AbilityRegister:
    """
    Discovers and registers ability functions from this package.

    Every ``*.py`` module in this directory tree (except ``__init__.py``
    and ``registry.py``) is imported; any attribute carrying an
    ``.ability`` record (added by the ``@ability`` decorator) is indexed
    by name in ``self.abilities``.
    """

    def __init__(self) -> None:
        self.abilities = {}
        self.register_abilities()

    def register_abilities(self) -> None:
        """Import ability modules and index their Ability records by name.

        Fixes: removed a leftover debug ``print`` of the glob pattern, and
        the path-to-module conversion now splits on ``os.sep`` instead of a
        hard-coded ``"/"`` so discovery also works on Windows.
        """
        for ability_path in glob.glob(
            os.path.join(os.path.dirname(__file__), "**/*.py"), recursive=True
        ):
            if os.path.basename(ability_path) in ("__init__.py", "registry.py"):
                continue
            # Relative file path -> dotted module path (still ends ".py").
            ability = os.path.relpath(
                ability_path, os.path.dirname(__file__)
            ).replace(os.sep, ".")
            try:
                # NOTE(review): the package anchor "autogpt.sdk.abilities"
                # looks stale for a module living in the forge SDK —
                # confirm it matches this file's actual package.
                module = importlib.import_module(
                    f".{ability[:-3]}", package="autogpt.sdk.abilities"
                )
                for attr in dir(module):
                    func = getattr(module, attr)
                    if hasattr(func, "ability"):
                        ab = func.ability
                        # Category = first dotted component of the module
                        # path (the sub-package name for nested abilities).
                        ab.category = (
                            ability.split(".")[0].lower().replace("_", " ")
                            if len(ability.split(".")) > 1
                            else "general"
                        )
                        self.abilities[func.ability.name] = func.ability
            except Exception as e:
                # Best-effort: a broken ability module must not prevent the
                # rest from registering.
                print(f"Error occurred while registering abilities: {str(e)}")

    def list_abilities(self) -> dict:
        """Return the name -> Ability mapping.

        Fix: the annotation claimed ``List[Ability]`` but the method has
        always returned the dict; the annotation now matches the behaviour
        callers actually depend on.
        """
        return self.abilities

    def abilities_description(self) -> str:
        """Build a human-readable, category-grouped summary of abilities."""
        abilities_by_category = {}
        for ability in self.abilities.values():
            if ability.category not in abilities_by_category:
                abilities_by_category[ability.category] = []
            abilities_by_category[ability.category].append(str(ability))
        abilities_description = ""
        for category, abilities in abilities_by_category.items():
            if abilities_description != "":
                abilities_description += "\n"
            abilities_description += f"{category}:"
            for ability in abilities:
                abilities_description += f" {ability}"
        return abilities_description

    def run_ability(self, agent, ability_name: str, *args: Any, **kwds: Any) -> Any:
        """
        Run a registered ability, passing ``agent`` as its first argument
        so the ability can inspect and mutate agent state.

        Args:
            agent: The agent instance (may be None for stateless abilities).
            ability_name: Name the ability was registered under.
            *args: Positional arguments forwarded to the ability.
            **kwds: Keyword arguments forwarded to the ability.

        Returns:
            Any: The ability's return value.

        Raises:
            KeyError: If ``ability_name`` is not registered.
            Exception: Whatever the ability itself raises (the original
                try/except only re-raised, so propagation is unchanged).
        """
        ability = self.abilities[ability_name]
        return ability(agent, *args, **kwds)
# Ad-hoc manual smoke test for the registry.
# NOTE(review): the hard-coded developer-machine path below only works on
# the original author's laptop — consider removing this block or taking the
# path from argv/env before release.
if __name__ == "__main__":
import sys
sys.path.append("/Users/swifty/dev/forge/forge")
register = AbilityRegister()
print(register.abilities_description())
print(register.run_ability(None, "list_files", "/Users/swifty/dev/forge/forge"))

View File

@@ -1,191 +0,0 @@
import asyncio
import os
from uuid import uuid4
from fastapi import APIRouter, FastAPI, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse
from hypercorn.asyncio import serve
from hypercorn.config import Config
from .db import AgentDB
from .errors import NotFoundError
from .forge_log import ForgeLogger
from .middlewares import AgentMiddleware
from .routes.agent_protocol import base_router
from .schema import *
from .workspace import Workspace
LOG = ForgeLogger(__name__)
# Base protocol server: owns the DB + workspace and exposes the Agent
# Protocol HTTP API. Subclasses override execute_step() with real logic.
class Agent:
def __init__(self, database: AgentDB, workspace: Workspace):
self.db = database
self.workspace = workspace
# Builds the FastAPI app (CORS + agent middleware + protocol routes) and
# serves it with hypercorn. Blocks until the server exits.
def start(self, port: int = 8000, router: APIRouter = base_router):
"""
Start the agent server.
"""
config = Config()
# NOTE(review): this bind is overwritten with 0.0.0.0 a few lines below;
# the localhost value is dead code — confirm which bind is intended.
config.bind = [f"localhost:{port}"]
app = FastAPI(
title="Auto-GPT Forge",
description="Modified version of The Agent Protocol.",
version="v0.4",
)
# Add CORS middleware
origins = [
"http://localhost:5000",
"http://127.0.0.1:5000",
# Add any other origins you want to whitelist
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(router)
# AgentMiddleware injects this agent instance into request handling.
app.add_middleware(AgentMiddleware, agent=self)
config.loglevel = "ERROR"
config.bind = [f"0.0.0.0:{port}"]
LOG.info(f"Agent server starting on http://{config.bind[0]}")
asyncio.run(serve(app, config))
# The try/except blocks below only re-raise; `e` is unused. Kept as-is.
async def create_task(self, task_request: TaskRequestBody) -> Task:
"""
Create a task for the agent.
"""
try:
task = await self.db.create_task(
input=task_request.input,
additional_input=task_request.additional_input,
)
return task
except Exception as e:
raise
async def list_tasks(self, page: int = 1, pageSize: int = 10) -> TaskListResponse:
"""
List all tasks that the agent has created.
"""
try:
tasks, pagination = await self.db.list_tasks(page, pageSize)
response = TaskListResponse(tasks=tasks, pagination=pagination)
return response
except Exception as e:
raise
async def get_task(self, task_id: str) -> Task:
"""
Get a task by ID.
"""
try:
task = await self.db.get_task(task_id)
except Exception as e:
raise
return task
async def list_steps(
self, task_id: str, page: int = 1, pageSize: int = 10
) -> TaskStepsListResponse:
"""
List the IDs of all steps that the task has created.
"""
try:
steps, pagination = await self.db.list_steps(task_id, page, pageSize)
response = TaskStepsListResponse(steps=steps, pagination=pagination)
return response
except Exception as e:
raise
# Abstract in practice: subclasses (e.g. ForgeAgent) must implement this.
async def execute_step(self, task_id: str, step_request: StepRequestBody) -> Step:
"""
Create a step for the task.
"""
raise NotImplementedError
async def get_step(self, task_id: str, step_id: str) -> Step:
"""
Get a step by ID.
"""
try:
step = await self.db.get_step(task_id, step_id)
return step
except Exception as e:
raise
async def list_artifacts(
self, task_id: str, page: int = 1, pageSize: int = 10
) -> TaskArtifactsListResponse:
"""
List the artifacts that the task has created.
"""
try:
artifacts, pagination = await self.db.list_artifacts(
task_id, page, pageSize
)
return TaskArtifactsListResponse(artifacts=artifacts, pagination=pagination)
except Exception as e:
raise
# Streams an uploaded file into the workspace and records its metadata.
async def create_artifact(
self, task_id: str, file: UploadFile, relative_path: str
) -> Artifact:
"""
Create an artifact for the task.
"""
data = None
file_name = file.filename or str(uuid4())
try:
data = b""
# Read the upload in 1 MiB chunks to bound memory per read.
while contents := file.file.read(1024 * 1024):
data += contents
# Check if relative path ends with filename
if relative_path.endswith(file_name):
file_path = relative_path
else:
file_path = os.path.join(relative_path, file_name)
self.workspace.write(task_id, file_path, data)
# NOTE(review): step_id is not passed here, unlike agent-created
# artifacts elsewhere — confirm uploaded artifacts need no step link.
artifact = await self.db.create_artifact(
task_id=task_id,
file_name=file_name,
relative_path=relative_path,
agent_created=False,
)
except Exception as e:
raise
return artifact
# Copies the artifact out of the workspace to a local file, then serves it.
# NOTE(review): despite the `-> Artifact` annotation this returns a
# FileResponse; also the temp copy in the CWD is never cleaned up — verify.
async def get_artifact(self, task_id: str, artifact_id: str) -> Artifact:
"""
Get an artifact by ID.
"""
try:
artifact = await self.db.get_artifact(artifact_id)
file_path = os.path.join(artifact.relative_path, artifact.file_name)
retrieved_artifact = self.workspace.read(task_id=task_id, path=file_path)
path = artifact.file_name
with open(path, "wb") as f:
f.write(retrieved_artifact)
except NotFoundError as e:
raise
except FileNotFoundError as e:
raise
except Exception as e:
raise
return FileResponse(
# Note: mimetype is guessed in the FileResponse constructor
path=path,
filename=artifact.file_name,
)

View File

@@ -1,107 +0,0 @@
# Protocol-server smoke tests. Every test below is @pytest.mark.skip-ped;
# see the NOTE(review) comments for why several would fail if re-enabled.
import pytest
from .agent import Agent
from .db import AgentDB
from .schema import StepRequestBody, Task, TaskListResponse, TaskRequestBody
from .workspace import LocalWorkspace
# Fresh Agent wired to a throwaway sqlite DB and a local test workspace.
@pytest.fixture
def agent():
db = AgentDB("sqlite:///test.db")
workspace = LocalWorkspace("./test_workspace")
return Agent(db, workspace)
@pytest.mark.skip
@pytest.mark.asyncio
async def test_create_task(agent):
task_request = TaskRequestBody(
input="test_input", additional_input={"input": "additional_test_input"}
)
task: Task = await agent.create_task(task_request)
assert task.input == "test_input"
@pytest.mark.skip
@pytest.mark.asyncio
async def test_list_tasks(agent):
task_request = TaskRequestBody(
input="test_input", additional_input={"input": "additional_test_input"}
)
task = await agent.create_task(task_request)
tasks = await agent.list_tasks()
assert isinstance(tasks, TaskListResponse)
@pytest.mark.skip
@pytest.mark.asyncio
async def test_get_task(agent):
task_request = TaskRequestBody(
input="test_input", additional_input={"input": "additional_test_input"}
)
task = await agent.create_task(task_request)
retrieved_task = await agent.get_task(task.task_id)
assert retrieved_task.task_id == task.task_id
# NOTE(review): `create_and_execute_step` is not visible on Agent (which
# defines `execute_step`) — confirm this method exists before un-skipping.
@pytest.mark.skip
@pytest.mark.asyncio
async def test_create_and_execute_step(agent):
task_request = TaskRequestBody(
input="test_input", additional_input={"input": "additional_test_input"}
)
task = await agent.create_task(task_request)
step_request = StepRequestBody(
input="step_input", additional_input={"input": "additional_test_input"}
)
step = await agent.create_and_execute_step(task.task_id, step_request)
assert step.input == "step_input"
assert step.additional_input == {"input": "additional_test_input"}
@pytest.mark.skip
@pytest.mark.asyncio
async def test_get_step(agent):
task_request = TaskRequestBody(
input="test_input", additional_input={"input": "additional_test_input"}
)
task = await agent.create_task(task_request)
step_request = StepRequestBody(
input="step_input", additional_input={"input": "additional_test_input"}
)
step = await agent.create_and_execute_step(task.task_id, step_request)
retrieved_step = await agent.get_step(task.task_id, step.step_id)
assert retrieved_step.step_id == step.step_id
# NOTE(review): list_artifacts requires a task_id argument on Agent; this
# call passes none — would TypeError if un-skipped.
@pytest.mark.skip
@pytest.mark.asyncio
async def test_list_artifacts(agent):
artifacts = await agent.list_artifacts()
assert isinstance(artifacts, list)
# NOTE(review): `ArtifactRequestBody` is neither imported nor defined in
# this module — these two tests would NameError if un-skipped.
@pytest.mark.skip
@pytest.mark.asyncio
async def test_create_artifact(agent):
task_request = TaskRequestBody(
input="test_input", additional_input={"input": "additional_test_input"}
)
task = await agent.create_task(task_request)
artifact_request = ArtifactRequestBody(file=None, uri="test_uri")
artifact = await agent.create_artifact(task.task_id, artifact_request)
assert artifact.uri == "test_uri"
@pytest.mark.skip
@pytest.mark.asyncio
async def test_get_artifact(agent):
task_request = TaskRequestBody(
input="test_input", additional_input={"input": "additional_test_input"}
)
task = await agent.create_task(task_request)
artifact_request = ArtifactRequestBody(file=None, uri="test_uri")
artifact = await agent.create_artifact(task.task_id, artifact_request)
retrieved_artifact = await agent.get_artifact(task.task_id, artifact.artifact_id)
assert retrieved_artifact.artifact_id == artifact.artifact_id

View File

@@ -1,25 +0,0 @@
"""
PROFILE CONCEPT:
The profile generator is used to intiliase and configure an ai agent.
It came from the obsivation that if an llm is provided with a profile such as:
```
Expert:
```
Then it's performance at a task can impove. Here we use the profile to generate
a system prompt for the agent to use. However, it can be used to configure other
aspects of the agent such as memory, planning, and actions available.
The possibilities are limited just by your imagination.
"""
from forge.sdk import PromptEngine
class ProfileGenerator:
def __init__(self, task: str, PromptEngine: PromptEngine):
"""
Initialize the profile generator with the task to be performed.
"""
self.task = task

View File

@@ -1,468 +0,0 @@
"""
This is an example implementation of the Agent Protocol DB for development Purposes
It uses SQLite as the database and file store backend.
IT IS NOT ADVISED TO USE THIS IN PRODUCTION!
"""
import datetime
import math
import uuid
from typing import Any, Dict, List, Optional, Tuple
from sqlalchemy import (
JSON,
Boolean,
Column,
DateTime,
ForeignKey,
String,
create_engine,
)
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm import DeclarativeBase, joinedload, relationship, sessionmaker
from .errors import NotFoundError
from .forge_log import ForgeLogger
from .schema import Artifact, Pagination, Status, Step, StepRequestBody, Task, TaskInput
LOG = ForgeLogger(__name__)
class Base(DeclarativeBase):
pass
class TaskModel(Base):
    """ORM row for a protocol task; owns its artifacts one-to-many."""

    __tablename__ = "tasks"

    task_id = Column(String, primary_key=True, index=True)
    input = Column(String)
    additional_input = Column(JSON)
    # Timestamps are naive UTC (datetime.utcnow), matching the other models.
    created_at = Column(DateTime, default=datetime.datetime.utcnow)
    modified_at = Column(
        DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow
    )

    artifacts = relationship("ArtifactModel", back_populates="task")
class StepModel(Base):
    """ORM row for one step of a task; owns its artifacts one-to-many."""

    __tablename__ = "steps"

    step_id = Column(String, primary_key=True, index=True)
    task_id = Column(String, ForeignKey("tasks.task_id"))
    name = Column(String)
    input = Column(String)
    # Stored as a plain string (e.g. "created"/"completed"); mapped to the
    # Status enum in convert_to_step().
    status = Column(String)
    is_last = Column(Boolean, default=False)
    created_at = Column(DateTime, default=datetime.datetime.utcnow)
    modified_at = Column(
        DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow
    )
    additional_input = Column(JSON)

    artifacts = relationship("ArtifactModel", back_populates="step")
class ArtifactModel(Base):
    """ORM row for artifact metadata, linked to both its task and its step."""

    __tablename__ = "artifacts"

    artifact_id = Column(String, primary_key=True, index=True)
    task_id = Column(String, ForeignKey("tasks.task_id"))
    step_id = Column(String, ForeignKey("steps.step_id"))
    # True when the agent produced the file (vs. an uploaded artifact).
    agent_created = Column(Boolean, default=False)
    file_name = Column(String)
    relative_path = Column(String)
    created_at = Column(DateTime, default=datetime.datetime.utcnow)
    modified_at = Column(
        DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow
    )

    step = relationship("StepModel", back_populates="artifacts")
    task = relationship("TaskModel", back_populates="artifacts")
def convert_to_task(task_obj: TaskModel, debug_enabled: bool = False) -> Task:
    """Map a TaskModel ORM row to the protocol's Task schema object."""
    if debug_enabled:
        LOG.debug(f"Converting TaskModel to Task for task_id: {task_obj.task_id}")
    converted_artifacts = [
        convert_to_artifact(artifact) for artifact in task_obj.artifacts
    ]
    return Task(
        task_id=task_obj.task_id,
        created_at=task_obj.created_at,
        modified_at=task_obj.modified_at,
        input=task_obj.input,
        additional_input=task_obj.additional_input,
        artifacts=converted_artifacts,
    )
def convert_to_step(step_model: StepModel, debug_enabled: bool = False) -> Step:
    """Map a StepModel ORM row to the protocol's Step schema object."""
    if debug_enabled:
        LOG.debug(f"Converting StepModel to Step for step_id: {step_model.step_id}")
    converted_artifacts = [
        convert_to_artifact(artifact) for artifact in step_model.artifacts
    ]
    # Any stored status other than the literal "completed" maps to created.
    if step_model.status == "completed":
        status = Status.completed
    else:
        status = Status.created
    return Step(
        task_id=step_model.task_id,
        step_id=step_model.step_id,
        created_at=step_model.created_at,
        modified_at=step_model.modified_at,
        name=step_model.name,
        input=step_model.input,
        status=status,
        artifacts=converted_artifacts,
        # Column may come back as int from SQLite; normalise to bool.
        is_last=step_model.is_last == 1,
        additional_input=step_model.additional_input,
    )
def convert_to_artifact(artifact_model: ArtifactModel) -> Artifact:
    """Map an ``ArtifactModel`` ORM row onto the ``Artifact`` schema object."""
    fields = {
        "artifact_id": artifact_model.artifact_id,
        "created_at": artifact_model.created_at,
        "modified_at": artifact_model.modified_at,
        "agent_created": artifact_model.agent_created,
        "relative_path": artifact_model.relative_path,
        "file_name": artifact_model.file_name,
    }
    return Artifact(**fields)
# sqlite:///{database_name}
class AgentDB:
    """Persistence layer for the agent protocol.

    Wraps a synchronous SQLAlchemy engine/session behind CRUD helpers for
    tasks, steps and artifacts.  The methods are declared ``async`` only to
    match the protocol interface; all database work happens synchronously
    on the calling thread.

    Fixes over the previous revision:
    - mutable ``{}`` default arguments replaced with ``None`` (normalised
      to ``{}`` internally, so stored values are unchanged);
    - copy/paste log messages corrected (create_artifact logged
      "creating step", update_step logged "getting step",
      get_artifact said "with and artifact_id");
    - ``get_task``/``get_step`` annotations corrected from ``int`` to
      ``str`` (ids are string UUIDs, see the String columns above);
    - redundant ``session.close()`` inside a ``with`` block removed.
    """

    def __init__(self, database_string, debug_enabled: bool = False) -> None:
        """Connect to the database and create any missing tables.

        Args:
            database_string: SQLAlchemy URL, e.g. ``sqlite:///agent.db``.
            debug_enabled: Emit verbose debug logging when True.
        """
        super().__init__()
        self.debug_enabled = debug_enabled
        if self.debug_enabled:
            LOG.debug(f"Initializing AgentDB with database_string: {database_string}")
        self.engine = create_engine(database_string)
        # Materialise the schema up front so first use cannot fail.
        Base.metadata.create_all(self.engine)
        self.Session = sessionmaker(bind=self.engine)

    async def create_task(
        self, input: Optional[str], additional_input: Optional[TaskInput] = None
    ) -> Task:
        """Insert a new task and return it as a ``Task`` schema object.

        Args:
            input: Free-form task input text.
            additional_input: Optional object exposing ``.json()``
                (pydantic-style — TODO confirm); its JSON form is stored.

        Raises:
            SQLAlchemyError: On database failure (logged and re-raised).
        """
        if self.debug_enabled:
            LOG.debug("Creating new task")
        try:
            with self.Session() as session:
                new_task = TaskModel(
                    task_id=str(uuid.uuid4()),
                    input=input,
                    # Falsy (None or empty) additional_input stores {}.
                    additional_input=additional_input.json()
                    if additional_input
                    else {},
                )
                session.add(new_task)
                session.commit()
                session.refresh(new_task)
                if self.debug_enabled:
                    LOG.debug(f"Created new task with task_id: {new_task.task_id}")
                return convert_to_task(new_task, self.debug_enabled)
        except SQLAlchemyError as e:
            LOG.error(f"SQLAlchemy error while creating task: {e}")
            raise
        except NotFoundError:
            # Propagate untouched so it is not re-logged as "unexpected".
            raise
        except Exception as e:
            LOG.error(f"Unexpected error while creating task: {e}")
            raise

    async def create_step(
        self,
        task_id: str,
        input: StepRequestBody,
        is_last: bool = False,
        additional_input: Optional[Dict[str, Any]] = None,
    ) -> Step:
        """Insert a new step for ``task_id`` and return it as a ``Step``.

        Args:
            task_id: Owning task id.
            input: Request body; its ``.input`` text is used as both the
                step name and the step input.
            is_last: Whether this is the final step of the task.
            additional_input: Optional extra payload; ``None`` is stored
                as ``{}`` (matching the old ``{}`` default).
        """
        if self.debug_enabled:
            LOG.debug(f"Creating new step for task_id: {task_id}")
        try:
            with self.Session() as session:
                new_step = StepModel(
                    task_id=task_id,
                    step_id=str(uuid.uuid4()),
                    name=input.input,
                    input=input.input,
                    status="created",
                    is_last=is_last,
                    additional_input=additional_input
                    if additional_input is not None
                    else {},
                )
                session.add(new_step)
                session.commit()
                session.refresh(new_step)
                if self.debug_enabled:
                    LOG.debug(f"Created new step with step_id: {new_step.step_id}")
                return convert_to_step(new_step, self.debug_enabled)
        except SQLAlchemyError as e:
            LOG.error(f"SQLAlchemy error while creating step: {e}")
            raise
        except NotFoundError:
            raise
        except Exception as e:
            LOG.error(f"Unexpected error while creating step: {e}")
            raise

    async def create_artifact(
        self,
        task_id: str,
        file_name: str,
        relative_path: str,
        agent_created: bool = False,
        step_id: str | None = None,
    ) -> Artifact:
        """Insert an artifact row, or return the existing matching one.

        Idempotent on ``(task_id, file_name, relative_path)``: if a row
        with those values exists it is returned instead of duplicated.
        """
        if self.debug_enabled:
            LOG.debug(f"Creating new artifact for task_id: {task_id}")
        try:
            with self.Session() as session:
                if (
                    existing_artifact := session.query(ArtifactModel)
                    .filter_by(
                        task_id=task_id,
                        file_name=file_name,
                        relative_path=relative_path,
                    )
                    .first()
                ):
                    # Note: no explicit session.close() needed — the
                    # context manager closes the session on exit.
                    if self.debug_enabled:
                        LOG.debug(
                            f"Artifact already exists with relative_path: {relative_path}"
                        )
                    return convert_to_artifact(existing_artifact)
                new_artifact = ArtifactModel(
                    artifact_id=str(uuid.uuid4()),
                    task_id=task_id,
                    step_id=step_id,
                    agent_created=agent_created,
                    file_name=file_name,
                    relative_path=relative_path,
                )
                session.add(new_artifact)
                session.commit()
                session.refresh(new_artifact)
                if self.debug_enabled:
                    LOG.debug(
                        f"Created new artifact with artifact_id: {new_artifact.artifact_id}"
                    )
                return convert_to_artifact(new_artifact)
        except SQLAlchemyError as e:
            # Fixed copy/paste: these previously said "creating step".
            LOG.error(f"SQLAlchemy error while creating artifact: {e}")
            raise
        except NotFoundError:
            raise
        except Exception as e:
            LOG.error(f"Unexpected error while creating artifact: {e}")
            raise

    async def get_task(self, task_id: str) -> Task:
        """Get a task by its id.

        Raises:
            NotFoundError: If no task with ``task_id`` exists.
        """
        if self.debug_enabled:
            LOG.debug(f"Getting task with task_id: {task_id}")
        try:
            with self.Session() as session:
                if task_obj := (
                    session.query(TaskModel)
                    # Eager-load artifacts so conversion works after the
                    # session is closed.
                    .options(joinedload(TaskModel.artifacts))
                    .filter_by(task_id=task_id)
                    .first()
                ):
                    return convert_to_task(task_obj, self.debug_enabled)
                else:
                    LOG.error(f"Task not found with task_id: {task_id}")
                    raise NotFoundError("Task not found")
        except SQLAlchemyError as e:
            LOG.error(f"SQLAlchemy error while getting task: {e}")
            raise
        except NotFoundError:
            raise
        except Exception as e:
            LOG.error(f"Unexpected error while getting task: {e}")
            raise

    async def get_step(self, task_id: str, step_id: str) -> Step:
        """Get a step by its id.

        ``task_id`` is only used for logging; lookup filters on
        ``step_id`` alone (preserved from the original implementation).

        Raises:
            NotFoundError: If no step with ``step_id`` exists.
        """
        if self.debug_enabled:
            LOG.debug(f"Getting step with task_id: {task_id} and step_id: {step_id}")
        try:
            with self.Session() as session:
                if step := (
                    session.query(StepModel)
                    .options(joinedload(StepModel.artifacts))
                    .filter(StepModel.step_id == step_id)
                    .first()
                ):
                    return convert_to_step(step, self.debug_enabled)
                else:
                    LOG.error(
                        f"Step not found with task_id: {task_id} and step_id: {step_id}"
                    )
                    raise NotFoundError("Step not found")
        except SQLAlchemyError as e:
            LOG.error(f"SQLAlchemy error while getting step: {e}")
            raise
        except NotFoundError:
            raise
        except Exception as e:
            LOG.error(f"Unexpected error while getting step: {e}")
            raise

    async def update_step(
        self,
        task_id: str,
        step_id: str,
        status: str,
        additional_input: Optional[Dict[str, Any]] = None,
    ) -> Step:
        """Update a step's status and additional_input, return the fresh Step.

        ``additional_input`` defaults to ``None`` and is stored as ``{}``
        when not provided (matching the old ``{}`` default).

        Raises:
            NotFoundError: If no step matches ``task_id`` and ``step_id``.
        """
        if self.debug_enabled:
            LOG.debug(f"Updating step with task_id: {task_id} and step_id: {step_id}")
        try:
            with self.Session() as session:
                if (
                    step := session.query(StepModel)
                    .filter_by(task_id=task_id, step_id=step_id)
                    .first()
                ):
                    step.status = status
                    step.additional_input = (
                        additional_input if additional_input is not None else {}
                    )
                    session.commit()
                    return await self.get_step(task_id, step_id)
                else:
                    LOG.error(
                        f"Step not found for update with task_id: {task_id} and step_id: {step_id}"
                    )
                    raise NotFoundError("Step not found")
        except SQLAlchemyError as e:
            # Fixed copy/paste: these previously said "getting step".
            LOG.error(f"SQLAlchemy error while updating step: {e}")
            raise
        except NotFoundError:
            raise
        except Exception as e:
            LOG.error(f"Unexpected error while updating step: {e}")
            raise

    async def get_artifact(self, artifact_id: str) -> Artifact:
        """Get an artifact by its id.

        Raises:
            NotFoundError: If no artifact with ``artifact_id`` exists.
        """
        if self.debug_enabled:
            LOG.debug(f"Getting artifact with artifact_id: {artifact_id}")
        try:
            with self.Session() as session:
                if (
                    artifact_model := session.query(ArtifactModel)
                    .filter_by(artifact_id=artifact_id)
                    .first()
                ):
                    return convert_to_artifact(artifact_model)
                else:
                    LOG.error(f"Artifact not found with artifact_id: {artifact_id}")
                    raise NotFoundError("Artifact not found")
        except SQLAlchemyError as e:
            LOG.error(f"SQLAlchemy error while getting artifact: {e}")
            raise
        except NotFoundError:
            raise
        except Exception as e:
            LOG.error(f"Unexpected error while getting artifact: {e}")
            raise

    async def list_tasks(
        self, page: int = 1, per_page: int = 10
    ) -> Tuple[List[Task], Pagination]:
        """Return one page of tasks plus pagination metadata (1-based page)."""
        if self.debug_enabled:
            LOG.debug("Listing tasks")
        try:
            with self.Session() as session:
                tasks = (
                    session.query(TaskModel)
                    .offset((page - 1) * per_page)
                    .limit(per_page)
                    .all()
                )
                total = session.query(TaskModel).count()
                pages = math.ceil(total / per_page)
                pagination = Pagination(
                    total_items=total,
                    total_pages=pages,
                    current_page=page,
                    page_size=per_page,
                )
                return [
                    convert_to_task(task, self.debug_enabled) for task in tasks
                ], pagination
        except SQLAlchemyError as e:
            LOG.error(f"SQLAlchemy error while listing tasks: {e}")
            raise
        except NotFoundError:
            raise
        except Exception as e:
            LOG.error(f"Unexpected error while listing tasks: {e}")
            raise

    async def list_steps(
        self, task_id: str, page: int = 1, per_page: int = 10
    ) -> Tuple[List[Step], Pagination]:
        """Return one page of steps for ``task_id`` plus pagination metadata."""
        if self.debug_enabled:
            LOG.debug(f"Listing steps for task_id: {task_id}")
        try:
            with self.Session() as session:
                steps = (
                    session.query(StepModel)
                    .filter_by(task_id=task_id)
                    .offset((page - 1) * per_page)
                    .limit(per_page)
                    .all()
                )
                total = session.query(StepModel).filter_by(task_id=task_id).count()
                pages = math.ceil(total / per_page)
                pagination = Pagination(
                    total_items=total,
                    total_pages=pages,
                    current_page=page,
                    page_size=per_page,
                )
                return [
                    convert_to_step(step, self.debug_enabled) for step in steps
                ], pagination
        except SQLAlchemyError as e:
            LOG.error(f"SQLAlchemy error while listing steps: {e}")
            raise
        except NotFoundError:
            raise
        except Exception as e:
            LOG.error(f"Unexpected error while listing steps: {e}")
            raise

    async def list_artifacts(
        self, task_id: str, page: int = 1, per_page: int = 10
    ) -> Tuple[List[Artifact], Pagination]:
        """Return one page of artifacts for ``task_id`` plus pagination metadata."""
        if self.debug_enabled:
            LOG.debug(f"Listing artifacts for task_id: {task_id}")
        try:
            with self.Session() as session:
                artifacts = (
                    session.query(ArtifactModel)
                    .filter_by(task_id=task_id)
                    .offset((page - 1) * per_page)
                    .limit(per_page)
                    .all()
                )
                total = session.query(ArtifactModel).filter_by(task_id=task_id).count()
                pages = math.ceil(total / per_page)
                pagination = Pagination(
                    total_items=total,
                    total_pages=pages,
                    current_page=page,
                    page_size=per_page,
                )
                return [
                    convert_to_artifact(artifact) for artifact in artifacts
                ], pagination
        except SQLAlchemyError as e:
            LOG.error(f"SQLAlchemy error while listing artifacts: {e}")
            raise
        except NotFoundError:
            raise
        except Exception as e:
            LOG.error(f"Unexpected error while listing artifacts: {e}")
            raise

View File

@@ -1,325 +0,0 @@
import os
import sqlite3
from datetime import datetime
import pytest
from forge.sdk.db import (
AgentDB,
ArtifactModel,
StepModel,
TaskModel,
convert_to_artifact,
convert_to_step,
convert_to_task,
)
from forge.sdk.errors import NotFoundError as DataNotFoundError
from forge.sdk.schema import *
def test_table_creation():
    """Creating an AgentDB must materialise the tasks/steps/artifacts tables.

    Fixes: dropped the bogus ``@pytest.mark.asyncio`` marker (the function
    is synchronous, so pytest-asyncio would reject it); the sqlite
    connection is now closed before removing the file (required on
    Windows); the three near-identical queries are collapsed into a loop.
    """
    db_name = "sqlite:///test_db.sqlite3"
    agent_db = AgentDB(db_name)  # instantiation creates the schema
    conn = sqlite3.connect("test_db.sqlite3")
    try:
        cursor = conn.cursor()
        for table in ("tasks", "steps", "artifacts"):
            cursor.execute(
                "SELECT name FROM sqlite_master WHERE type='table' AND name=?",
                (table,),
            )
            assert cursor.fetchone() is not None, f"missing table: {table}"
    finally:
        conn.close()
        os.remove(db_name.split("///")[1])
@pytest.mark.asyncio
async def test_task_schema():
    """Task schema accepts nested artifacts and round-trips its fields."""
    now = datetime.now()
    task = Task(
        task_id="50da533e-3904-4401-8a07-c49adf88b5eb",
        input="Write the words you receive to the file 'output.txt'.",
        created_at=now,
        modified_at=now,
        artifacts=[
            Artifact(
                artifact_id="b225e278-8b4c-4f99-a696-8facf19f0e56",
                agent_created=True,
                file_name="main.py",
                relative_path="python/code/",
                created_at=now,
                modified_at=now,
            )
        ],
    )
    assert task.task_id == "50da533e-3904-4401-8a07-c49adf88b5eb"
    assert task.input == "Write the words you receive to the file 'output.txt'."
    assert len(task.artifacts) == 1
    assert task.artifacts[0].artifact_id == "b225e278-8b4c-4f99-a696-8facf19f0e56"
@pytest.mark.asyncio
async def test_step_schema():
    """Step schema round-trips all fields including output and artifacts."""
    now = datetime.now()
    step = Step(
        task_id="50da533e-3904-4401-8a07-c49adf88b5eb",
        step_id="6bb1801a-fd80-45e8-899a-4dd723cc602e",
        created_at=now,
        modified_at=now,
        name="Write to file",
        input="Write the words you receive to the file 'output.txt'.",
        status=Status.created,
        output="I am going to use the write_to_file command and write Washington to a file called output.txt <write_to_file('output.txt', 'Washington')>",
        artifacts=[
            Artifact(
                artifact_id="b225e278-8b4c-4f99-a696-8facf19f0e56",
                file_name="main.py",
                relative_path="python/code/",
                created_at=now,
                modified_at=now,
                agent_created=True,
            )
        ],
        is_last=False,
    )
    assert step.task_id == "50da533e-3904-4401-8a07-c49adf88b5eb"
    assert step.step_id == "6bb1801a-fd80-45e8-899a-4dd723cc602e"
    assert step.name == "Write to file"
    assert step.status == Status.created
    assert (
        step.output
        == "I am going to use the write_to_file command and write Washington to a file called output.txt <write_to_file('output.txt', 'Washington')>"
    )
    assert len(step.artifacts) == 1
    assert step.artifacts[0].artifact_id == "b225e278-8b4c-4f99-a696-8facf19f0e56"
    assert step.is_last == False
@pytest.mark.asyncio
async def test_convert_to_task():
    """convert_to_task maps a TaskModel row (with artifacts) onto the Task schema."""
    now = datetime.now()
    task_model = TaskModel(
        task_id="50da533e-3904-4401-8a07-c49adf88b5eb",
        created_at=now,
        modified_at=now,
        input="Write the words you receive to the file 'output.txt'.",
        artifacts=[
            ArtifactModel(
                artifact_id="b225e278-8b4c-4f99-a696-8facf19f0e56",
                created_at=now,
                modified_at=now,
                relative_path="file:///path/to/main.py",
                agent_created=True,
                file_name="main.py",
            )
        ],
    )
    task = convert_to_task(task_model)
    assert task.task_id == "50da533e-3904-4401-8a07-c49adf88b5eb"
    assert task.input == "Write the words you receive to the file 'output.txt'."
    assert len(task.artifacts) == 1
    assert task.artifacts[0].artifact_id == "b225e278-8b4c-4f99-a696-8facf19f0e56"
@pytest.mark.asyncio
async def test_convert_to_step():
    """convert_to_step maps a StepModel row onto the Step schema (status "created")."""
    now = datetime.now()
    step_model = StepModel(
        task_id="50da533e-3904-4401-8a07-c49adf88b5eb",
        step_id="6bb1801a-fd80-45e8-899a-4dd723cc602e",
        created_at=now,
        modified_at=now,
        name="Write to file",
        status="created",
        input="Write the words you receive to the file 'output.txt'.",
        artifacts=[
            ArtifactModel(
                artifact_id="b225e278-8b4c-4f99-a696-8facf19f0e56",
                created_at=now,
                modified_at=now,
                relative_path="file:///path/to/main.py",
                agent_created=True,
                file_name="main.py",
            )
        ],
        is_last=False,
    )
    step = convert_to_step(step_model)
    assert step.task_id == "50da533e-3904-4401-8a07-c49adf88b5eb"
    assert step.step_id == "6bb1801a-fd80-45e8-899a-4dd723cc602e"
    assert step.name == "Write to file"
    assert step.status == Status.created
    assert len(step.artifacts) == 1
    assert step.artifacts[0].artifact_id == "b225e278-8b4c-4f99-a696-8facf19f0e56"
    assert step.is_last == False
@pytest.mark.asyncio
async def test_convert_to_artifact():
    """convert_to_artifact maps an ArtifactModel row onto the Artifact schema."""
    now = datetime.now()
    artifact_model = ArtifactModel(
        artifact_id="b225e278-8b4c-4f99-a696-8facf19f0e56",
        created_at=now,
        modified_at=now,
        relative_path="file:///path/to/main.py",
        agent_created=True,
        file_name="main.py",
    )
    artifact = convert_to_artifact(artifact_model)
    assert artifact.artifact_id == "b225e278-8b4c-4f99-a696-8facf19f0e56"
    assert artifact.relative_path == "file:///path/to/main.py"
    assert artifact.agent_created == True
@pytest.mark.asyncio
async def test_create_task():
    """AgentDB.create_task persists the given input text."""
    # Having issues with pytest fixture so added setup and teardown in each test as a rapid workaround
    # TODO: Fix this!
    db_name = "sqlite:///test_db.sqlite3"
    agent_db = AgentDB(db_name)
    task = await agent_db.create_task("task_input")
    assert task.input == "task_input"
    os.remove(db_name.split("///")[1])
@pytest.mark.asyncio
async def test_create_and_get_task():
    """A created task can be fetched back by its id with the same input."""
    db_name = "sqlite:///test_db.sqlite3"
    agent_db = AgentDB(db_name)
    task = await agent_db.create_task("test_input")
    fetched_task = await agent_db.get_task(task.task_id)
    assert fetched_task.input == "test_input"
    os.remove(db_name.split("///")[1])
@pytest.mark.asyncio
async def test_get_task_not_found():
    """get_task raises NotFoundError for an id that does not exist."""
    db_name = "sqlite:///test_db.sqlite3"
    agent_db = AgentDB(db_name)
    with pytest.raises(DataNotFoundError):
        await agent_db.get_task(9999)
    os.remove(db_name.split("///")[1])
@pytest.mark.asyncio
async def test_create_and_get_step():
    """A created step can be fetched back by task_id/step_id with the same input."""
    db_name = "sqlite:///test_db.sqlite3"
    agent_db = AgentDB(db_name)
    task = await agent_db.create_task("task_input")
    step_input = StepInput(type="python/code")
    request = StepRequestBody(input="test_input debug", additional_input=step_input)
    step = await agent_db.create_step(task.task_id, request)
    step = await agent_db.get_step(task.task_id, step.step_id)
    assert step.input == "test_input debug"
    os.remove(db_name.split("///")[1])
@pytest.mark.asyncio
async def test_updating_step():
    """update_step persists a new status that is visible on re-fetch."""
    db_name = "sqlite:///test_db.sqlite3"
    agent_db = AgentDB(db_name)
    created_task = await agent_db.create_task("task_input")
    step_input = StepInput(type="python/code")
    request = StepRequestBody(input="test_input debug", additional_input=step_input)
    created_step = await agent_db.create_step(created_task.task_id, request)
    await agent_db.update_step(created_task.task_id, created_step.step_id, "completed")
    step = await agent_db.get_step(created_task.task_id, created_step.step_id)
    assert step.status.value == "completed"
    os.remove(db_name.split("///")[1])
@pytest.mark.asyncio
async def test_get_step_not_found():
    """get_step raises NotFoundError for ids that do not exist."""
    db_name = "sqlite:///test_db.sqlite3"
    agent_db = AgentDB(db_name)
    with pytest.raises(DataNotFoundError):
        await agent_db.get_step(9999, 9999)
    os.remove(db_name.split("///")[1])
@pytest.mark.asyncio
async def test_get_artifact():
    """A created artifact can be fetched back by its id with matching fields."""
    db_name = "sqlite:///test_db.sqlite3"
    db = AgentDB(db_name)
    # Given: A task and its corresponding artifact
    task = await db.create_task("test_input debug")
    step_input = StepInput(type="python/code")
    requst = StepRequestBody(input="test_input debug", additional_input=step_input)
    step = await db.create_step(task.task_id, requst)
    # Create an artifact
    artifact = await db.create_artifact(
        task_id=task.task_id,
        file_name="test_get_artifact_sample_file.txt",
        relative_path="file:///path/to/test_get_artifact_sample_file.txt",
        agent_created=True,
        step_id=step.step_id,
    )
    # When: The artifact is fetched by its ID
    fetched_artifact = await db.get_artifact(artifact.artifact_id)
    # Then: The fetched artifact matches the original
    assert fetched_artifact.artifact_id == artifact.artifact_id
    assert (
        fetched_artifact.relative_path
        == "file:///path/to/test_get_artifact_sample_file.txt"
    )
    os.remove(db_name.split("///")[1])
@pytest.mark.asyncio
async def test_list_tasks():
    """list_tasks returns every previously created task (first page)."""
    db_name = "sqlite:///test_db.sqlite3"
    db = AgentDB(db_name)
    # Given: Multiple tasks in the database
    task1 = await db.create_task("test_input_1")
    task2 = await db.create_task("test_input_2")
    # When: All tasks are fetched
    fetched_tasks, pagination = await db.list_tasks()
    # Then: The fetched tasks list includes the created tasks
    task_ids = [task.task_id for task in fetched_tasks]
    assert task1.task_id in task_ids
    assert task2.task_id in task_ids
    os.remove(db_name.split("///")[1])
@pytest.mark.asyncio
async def test_list_steps():
    """list_steps returns every step created for a given task (first page)."""
    db_name = "sqlite:///test_db.sqlite3"
    db = AgentDB(db_name)
    step_input = StepInput(type="python/code")
    requst = StepRequestBody(input="test_input debug", additional_input=step_input)
    # Given: A task and multiple steps for that task
    task = await db.create_task("test_input")
    step1 = await db.create_step(task.task_id, requst)
    requst = StepRequestBody(input="step two", additional_input=step_input)
    step2 = await db.create_step(task.task_id, requst)
    # When: All steps for the task are fetched
    fetched_steps, pagination = await db.list_steps(task.task_id)
    # Then: The fetched steps list includes the created steps
    step_ids = [step.step_id for step in fetched_steps]
    assert step1.step_id in step_ids
    assert step2.step_id in step_ids
    os.remove(db_name.split("///")[1])

View File

@@ -1,2 +0,0 @@
class NotFoundError(Exception):
    """Raised when a requested record (task, step or artifact) does not exist."""
    pass

View File

@@ -1,203 +0,0 @@
import json
import logging
import logging.config
import logging.handlers
import os
import queue
# Emit console records as JSON objects when env var JSON_LOGGING=true.
JSON_LOGGING = os.environ.get("JSON_LOGGING", "false").lower() == "true"
# Custom log level between INFO (20) and WARNING (30), used for chat traffic.
CHAT = 29
logging.addLevelName(CHAT, "CHAT")
# ANSI escape sequences used to colour and style console output.
RESET_SEQ: str = "\033[0m"
COLOR_SEQ: str = "\033[1;%dm"
BOLD_SEQ: str = "\033[1m"
UNDERLINE_SEQ: str = "\033[04m"
ORANGE: str = "\033[33m"
YELLOW: str = "\033[93m"
WHITE: str = "\33[37m"
BLUE: str = "\033[34m"
LIGHT_BLUE: str = "\033[94m"
RED: str = "\033[91m"
GREY: str = "\33[90m"
GREEN: str = "\033[92m"
# Emoji prefix per level name, consumed by ConsoleFormatter.
# NOTE(review): the "ERROR" entry is an empty string — it looks like an
# emoji was lost in an encoding round-trip; confirm the intended glyph.
EMOJIS: dict[str, str] = {
    "DEBUG": "🐛",
    "INFO": "📝",
    "CHAT": "💬",
    "WARNING": "⚠️",
    "ERROR": "",
    "CRITICAL": "💥",
}
# Colour per level name, consumed by ConsoleFormatter.
KEYWORD_COLORS: dict[str, str] = {
    "DEBUG": WHITE,
    "INFO": LIGHT_BLUE,
    "CHAT": GREEN,
    "WARNING": YELLOW,
    "ERROR": ORANGE,
    "CRITICAL": RED,
}
class JsonFormatter(logging.Formatter):
    """Formatter that serialises the entire LogRecord as one JSON object."""

    def format(self, record):
        """Return ``record.__dict__`` rendered as JSON.

        Uses ``default=str`` so records carrying non-JSON-serialisable
        attributes (e.g. ``exc_info`` tuples or arbitrary ``extra``
        values) degrade to their string form instead of raising
        ``TypeError`` and losing the log line.
        """
        return json.dumps(record.__dict__, default=str)
def formatter_message(message: str, use_color: bool = True) -> str:
    """Substitute the $RESET / $BOLD placeholders in ``message``.

    When ``use_color`` is False the placeholders are simply stripped.
    """
    reset, bold = (RESET_SEQ, BOLD_SEQ) if use_color else ("", "")
    return message.replace("$RESET", reset).replace("$BOLD", bold)
def format_word(
    message: str, word: str, color_seq: str, bold: bool = False, underline: bool = False
) -> str:
    """Wrap every occurrence of ``word`` in ``message`` with ANSI sequences.

    The word is coloured with ``color_seq`` and reset afterwards; optional
    underline and bold prefixes are prepended in that order.
    """
    decorated = f"{color_seq}{word}{RESET_SEQ}"
    if underline:
        decorated = UNDERLINE_SEQ + decorated
    if bold:
        decorated = BOLD_SEQ + decorated
    return message.replace(word, decorated)
class ConsoleFormatter(logging.Formatter):
    """
    Formatter that colours in the levelname (e.g. 'INFO', 'DEBUG') and
    prefixes the message with the level's emoji.
    """
    def __init__(
        self, fmt: str, datefmt: str = None, style: str = "%", use_color: bool = True
    ):
        super().__init__(fmt, datefmt, style)
        # When False, records pass through uncoloured.
        self.use_color = use_color
    def format(self, record: logging.LogRecord) -> str:
        """
        Format and highlight certain keywords
        """
        rec = record
        levelname = rec.levelname
        if self.use_color and levelname in KEYWORD_COLORS:
            levelname_color = KEYWORD_COLORS[levelname] + levelname + RESET_SEQ
            # NOTE(review): the record is mutated in place, so any other
            # handler formatting the same record will see the colourised
            # levelname/name/msg — confirm this is intended.
            rec.levelname = levelname_color
        rec.name = f"{GREY}{rec.name:<15}{RESET_SEQ}"
        rec.msg = (
            KEYWORD_COLORS[levelname] + EMOJIS[levelname] + " " + rec.msg + RESET_SEQ
        )
        return logging.Formatter.format(self, rec)
class ForgeLogger(logging.Logger):
    """
    Logger with an extra ``chat`` level/method for conversation traffic.
    Installs two handlers on construction: a QueueHandler with JSON
    formatting and a console StreamHandler (JSON or coloured, depending
    on the JSON_LOGGING env flag).
    """
    CONSOLE_FORMAT: str = (
        "[%(asctime)s] [$BOLD%(name)-15s$RESET] [%(levelname)-8s]\t%(message)s"
    )
    FORMAT: str = "%(asctime)s %(name)-15s %(levelname)-8s %(message)s"
    COLOR_FORMAT: str = formatter_message(CONSOLE_FORMAT, True)
    JSON_FORMAT: str = '{"time": "%(asctime)s", "name": "%(name)s", "level": "%(levelname)s", "message": "%(message)s"}'
    def __init__(self, name: str, logLevel: str = "DEBUG"):
        logging.Logger.__init__(self, name, logLevel)
        # Queue Handler: buffers records in an unbounded in-memory queue.
        queue_handler = logging.handlers.QueueHandler(queue.Queue(-1))
        json_formatter = logging.Formatter(self.JSON_FORMAT)
        queue_handler.setFormatter(json_formatter)
        self.addHandler(queue_handler)
        if JSON_LOGGING:
            console_formatter = JsonFormatter()
        else:
            console_formatter = ConsoleFormatter(self.COLOR_FORMAT)
        console = logging.StreamHandler()
        console.setFormatter(console_formatter)
        self.addHandler(console)
    def chat(self, role: str, openai_repsonse: dict, messages=None, *args, **kws):
        """
        Parse the content, log the message and extract the usage into prometheus metrics
        """
        # NOTE(review): parameter name "openai_repsonse" is a typo kept for
        # call-site compatibility.  It is annotated as dict but passed to
        # json.loads(), which requires a str/bytes — confirm what callers
        # actually pass.
        role_emojis = {
            "system": "🖥️",
            "user": "👤",
            "assistant": "🤖",
            "function": "⚙️",
        }
        if self.isEnabledFor(CHAT):
            if messages:
                for message in messages:
                    self._log(
                        CHAT,
                        f"{role_emojis.get(message['role'], '🔵')}: {message['content']}",
                    )
            else:
                response = json.loads(openai_repsonse)
                self._log(
                    CHAT,
                    f"{role_emojis.get(role, '🔵')}: {response['choices'][0]['message']['content']}",
                )
class QueueLogger(logging.Logger):
    """Logger that ships every record into an unbounded in-memory queue."""

    def __init__(self, name: str, level: int = logging.NOTSET):
        super().__init__(name, level)
        # Unbounded queue (-1) so logging never blocks the caller.
        buffer = queue.Queue(-1)
        self.addHandler(logging.handlers.QueueHandler(buffer))
# dictConfig-style configuration: one coloured console handler attached to
# both the root logger and the "autogpt" logger, everything at DEBUG.
logging_config: dict = dict(
    version=1,
    formatters={
        "console": {
            "()": ConsoleFormatter,
            "format": ForgeLogger.COLOR_FORMAT,
        },
    },
    handlers={
        "h": {
            "class": "logging.StreamHandler",
            "formatter": "console",
            "level": logging.DEBUG,
        },
    },
    root={
        "handlers": ["h"],
        "level": logging.DEBUG,
    },
    loggers={
        "autogpt": {
            "handlers": ["h"],
            "level": logging.DEBUG,
            # Do not bubble up to root (the handler is attached directly).
            "propagate": False,
        },
    },
)
def setup_logger():
    """
    Install the module-level ``logging_config`` via logging.config.dictConfig.
    """
    logging.config.dictConfig(logging_config)

View File

@@ -1,306 +0,0 @@
import abc
import hashlib
import chromadb
from chromadb.config import Settings
class MemStore(abc.ABC):
    """
    An abstract class that represents a Memory Store.

    Subclasses implement the primitive collection operations (``add``,
    ``query``, ``get``, ``update``, ``delete``); the ``*_task_memory``
    helpers are concrete template methods that delegate to them with the
    task id as the collection name.

    Fix: the ``*_task_memory`` methods were previously decorated with
    ``@abc.abstractmethod`` even though they carry real delegating
    bodies, which made every subclass that only implements the primitive
    operations (such as ChromaMemStore) impossible to instantiate.
    """

    @abc.abstractmethod
    def __init__(self, store_path: str):
        """
        Initialize the MemStore with a given store path.

        Args:
            store_path (str): The path to the store.
        """
        pass

    def add_task_memory(self, task_id: str, document: str, metadatas: dict) -> None:
        """
        Add a document to the current task's MemStore.
        This function calls the base version with the task_id as the collection_name.

        Args:
            task_id (str): The ID of the task.
            document (str): The document to be added.
            metadatas (dict): The metadata of the document.
        """
        self.add(collection_name=task_id, document=document, metadatas=metadatas)

    def query_task_memory(
        self,
        task_id: str,
        query: str,
        filters: dict = None,
        document_search: dict = None,
    ) -> dict:
        """
        Query the current task's MemStore.
        This function calls the base version with the task_id as the collection_name.

        Args:
            task_id (str): The ID of the task.
            query (str): The query string.
            filters (dict, optional): The filters to be applied. Defaults to None.
            document_search (dict, optional): The search string. Defaults to None.

        Returns:
            dict: The query results.
        """
        return self.query(
            collection_name=task_id,
            query=query,
            filters=filters,
            document_search=document_search,
        )

    def get_task_memory(
        self, task_id: str, doc_ids: list = None, filters: dict = None
    ) -> dict:
        """
        Get documents from the current task's MemStore.
        This function calls the base version with the task_id as the collection_name.

        Args:
            task_id (str): The ID of the task.
            doc_ids (list, optional): The IDs of the documents to be retrieved. Defaults to None.
            filters (dict, optional): The filters to be applied. Defaults to None.

        Returns:
            dict: The retrieved documents.
        """
        return self.get(collection_name=task_id, doc_ids=doc_ids, filters=filters)

    def update_task_memory(
        self, task_id: str, doc_ids: list, documents: list, metadatas: list
    ):
        """
        Update documents in the current task's MemStore.
        This function calls the base version with the task_id as the collection_name.

        Args:
            task_id (str): The ID of the task.
            doc_ids (list): The IDs of the documents to be updated.
            documents (list): The updated documents.
            metadatas (list): The updated metadata.
        """
        self.update(
            collection_name=task_id,
            doc_ids=doc_ids,
            documents=documents,
            metadatas=metadatas,
        )

    def delete_task_memory(self, task_id: str, doc_id: str):
        """
        Delete a document from the current task's MemStore.
        This function calls the base version with the task_id as the collection_name.

        Args:
            task_id (str): The ID of the task.
            doc_id (str): The ID of the document to be deleted.
        """
        self.delete(collection_name=task_id, doc_id=doc_id)

    @abc.abstractmethod
    def add(self, collection_name: str, document: str, metadatas: dict) -> None:
        """
        Add a document to the given collection.

        Args:
            collection_name (str): The name of the collection.
            document (str): The document to be added.
            metadatas (dict): The metadata of the document.
        """
        pass

    @abc.abstractmethod
    def query(
        self,
        collection_name: str,
        query: str,
        filters: dict = None,
        document_search: dict = None,
    ) -> dict:
        """Query a collection; see ``query_task_memory`` for argument semantics."""
        pass

    @abc.abstractmethod
    def get(
        self, collection_name: str, doc_ids: list = None, filters: dict = None
    ) -> dict:
        """Fetch documents from a collection by ids and/or filters."""
        pass

    @abc.abstractmethod
    def update(
        self, collection_name: str, doc_ids: list, documents: list, metadatas: list
    ):
        """Update documents (and their metadata) in a collection."""
        pass

    @abc.abstractmethod
    def delete(self, collection_name: str, doc_id: str):
        """Delete a single document from a collection."""
        pass
class ChromaMemStore(MemStore):
    """
    MemStore backed by a persistent ChromaDB client; each task id maps to
    its own Chroma collection.
    NOTE(review): the overridden primitive ops name their first parameter
    ``task_id`` while the base template methods call them with the keyword
    ``collection_name=`` — calling e.g. ``add_task_memory`` on this class
    would raise TypeError; confirm and align the parameter names.
    """
    def __init__(self, store_path: str):
        """
        Initialize the MemStore with a given store path.
        Args:
            store_path (str): The path to the store.
        """
        self.client = chromadb.PersistentClient(
            path=store_path, settings=Settings(anonymized_telemetry=False)
        )
    def add(self, task_id: str, document: str, metadatas: dict) -> None:
        """
        Add a document to the MemStore.
        Args:
            task_id (str): The ID of the task.
            document (str): The document to be added.
            metadatas (dict): The metadata of the document.
        """
        # Document ids are content-derived (first 20 hex chars of SHA-256),
        # so adding identical text twice targets the same id.
        doc_id = hashlib.sha256(document.encode()).hexdigest()[:20]
        collection = self.client.get_or_create_collection(task_id)
        collection.add(documents=[document], metadatas=[metadatas], ids=[doc_id])
    def query(
        self,
        task_id: str,
        query: str,
        filters: dict = None,
        document_search: dict = None,
    ) -> dict:
        """
        Query the MemStore.
        Args:
            task_id (str): The ID of the task.
            query (str): The query string.
            filters (dict, optional): The filters to be applied. Defaults to None.
            document_search (dict, optional): The document search clause. Defaults to None.
        Returns:
            dict: The query results.
        """
        collection = self.client.get_or_create_collection(task_id)
        kwargs = {
            "query_texts": [query],
            "n_results": 10,
        }
        if filters:
            kwargs["where"] = filters
        if document_search:
            kwargs["where_document"] = document_search
        return collection.query(**kwargs)
    def get(self, task_id: str, doc_ids: list = None, filters: dict = None) -> dict:
        """
        Get documents from the MemStore.
        Args:
            task_id (str): The ID of the task.
            doc_ids (list, optional): The IDs of the documents to be retrieved. Defaults to None.
            filters (dict, optional): The filters to be applied. Defaults to None.
        Returns:
            dict: The retrieved documents.
        """
        collection = self.client.get_or_create_collection(task_id)
        kwargs = {}
        if doc_ids:
            kwargs["ids"] = doc_ids
        if filters:
            kwargs["where"] = filters
        return collection.get(**kwargs)
    def update(self, task_id: str, doc_ids: list, documents: list, metadatas: list):
        """
        Update documents in the MemStore.
        Args:
            task_id (str): The ID of the task.
            doc_ids (list): The IDs of the documents to be updated.
            documents (list): The updated documents.
            metadatas (list): The updated metadata.
        """
        collection = self.client.get_or_create_collection(task_id)
        collection.update(ids=doc_ids, documents=documents, metadatas=metadatas)
    def delete(self, task_id: str, doc_id: str):
        """
        Delete a document from the MemStore.
        Args:
            task_id (str): The ID of the task.
            doc_id (str): The ID of the document to be deleted.
        """
        collection = self.client.get_or_create_collection(task_id)
        collection.delete(ids=[doc_id])
if __name__ == "__main__":
    # Manual smoke test for the memory store.
    print("#############################################")
    # Initialize MemStore.
    # Fix: MemStore is abstract and cannot be instantiated; use the
    # concrete ChromaMemStore implementation instead.
    mem = ChromaMemStore(".agent_mem_store")
    # Test add function
    task_id = "test_task"
    document = "This is a another new test document."
    metadatas = {"metadata": "test_metadata"}
    mem.add(task_id, document, metadatas)
    task_id = "test_task"
    document = "The quick brown fox jumps over the lazy dog."
    metadatas = {"metadata": "test_metadata"}
    mem.add(task_id, document, metadatas)
    task_id = "test_task"
    document = "AI is a new technology that will change the world."
    metadatas = {"timestamp": 1623936000}
    mem.add(task_id, document, metadatas)
    # Ids are content-derived (see ChromaMemStore.add).
    doc_id = hashlib.sha256(document.encode()).hexdigest()[:20]
    # Test query function
    query = "test"
    filters = {"metadata": {"$eq": "test"}}
    search_string = {"$contains": "test"}
    doc_ids = [doc_id]
    documents = ["This is an updated test document."]
    updated_metadatas = {"metadata": "updated_test_metadata"}
    print("Query:")
    print(mem.query(task_id, query))
    # Test get function
    print("Get:")
    print(mem.get(task_id))
    # Test update function
    print("Update:")
    print(mem.update(task_id, doc_ids, documents, updated_metadatas))
    print("Delete:")
    # Test delete function
    print(mem.delete(task_id, doc_ids[0]))

View File

@@ -1,58 +0,0 @@
import hashlib
import shutil
import pytest
from forge.sdk.memory.memstore import ChromaMemStore
@pytest.fixture
def memstore():
    # Fresh on-disk store per test; removed again after the test finishes.
    mem = ChromaMemStore(".test_mem_store")
    yield mem
    shutil.rmtree(".test_mem_store")
def test_add(memstore):
    """Adding one document results in exactly one row in the task collection."""
    task_id = "test_task"
    document = "This is a test document."
    metadatas = {"metadata": "test_metadata"}
    memstore.add(task_id, document, metadatas)
    # doc_id is unused here; kept for parity with the other tests.
    doc_id = hashlib.sha256(document.encode()).hexdigest()[:20]
    assert memstore.client.get_or_create_collection(task_id).count() == 1
def test_query(memstore):
    """Querying a one-document store returns a single result set."""
    task_id = "test_task"
    document = "This is a test document."
    metadatas = {"metadata": "test_metadata"}
    memstore.add(task_id, document, metadatas)
    query = "test"
    assert len(memstore.query(task_id, query)["documents"]) == 1
def test_update(memstore):
    """update replaces both the document text and its metadata for a doc id."""
    task_id = "test_task"
    document = "This is a test document."
    metadatas = {"metadata": "test_metadata"}
    memstore.add(task_id, document, metadatas)
    # Content-derived id of the ORIGINAL document (ids are stable across update).
    doc_id = hashlib.sha256(document.encode()).hexdigest()[:20]
    updated_document = "This is an updated test document."
    updated_metadatas = {"metadata": "updated_test_metadata"}
    memstore.update(task_id, [doc_id], [updated_document], [updated_metadatas])
    assert memstore.get(task_id, [doc_id]) == {
        "documents": [updated_document],
        "metadatas": [updated_metadatas],
        "embeddings": None,
        "ids": [doc_id],
    }
def test_delete(memstore):
    """delete removes the document, leaving the collection empty."""
    task_id = "test_task"
    document = "This is a test document."
    metadatas = {"metadata": "test_metadata"}
    memstore.add(task_id, document, metadatas)
    doc_id = hashlib.sha256(document.encode()).hexdigest()[:20]
    memstore.delete(task_id, doc_id)
    assert memstore.client.get_or_create_collection(task_id).count() == 0

View File

@@ -1,34 +0,0 @@
from fastapi import FastAPI
class AgentMiddleware:
    """
    ASGI middleware that injects the agent instance into the request scope.

    Downstream request handlers can then retrieve the agent via
    ``request["agent"]``.
    """

    def __init__(self, app: FastAPI, agent: "Agent"):
        """
        Args:
            app: The FastAPI app - automatically injected by FastAPI.
            agent: The agent instance to inject into the request scope.
        Examples:
            >>> from fastapi import FastAPI, Request
            >>> from agent_protocol.agent import Agent
            >>> from agent_protocol.middlewares import AgentMiddleware
            >>> app = FastAPI()
            >>> @app.get("/")
            >>> async def root(request: Request):
            >>>     agent = request["agent"]
            >>>     task = agent.db.create_task("Do something.")
            >>>     return {"task_id": task.task_id}
            >>> agent = Agent()
            >>> app.add_middleware(AgentMiddleware, agent=agent)
        """
        self.app = app
        self.agent = agent

    async def __call__(self, scope, receive, send):
        # Attach the agent to every incoming ASGI scope before delegating
        # to the wrapped application.
        scope["agent"] = self.agent
        await self.app(scope, receive, send)

View File

@@ -1,115 +0,0 @@
"""
Relative to this file I will have a prompt directory its located ../prompts
In this directory there will be a techniques directory and a directory for each model - gpt-3.5-turbo gpt-4, llama-2-70B, code-llama-7B etc
Each directory will have jinga2 templates for the prompts.
prompts in the model directories can use the techniques in the techniques directory.
Write the code I'd need to load and populate the templates.
I want the following functions:
class PromptEngine:
def __init__(self, model):
pass
def load_prompt(model, prompt_name, prompt_ags) -> str:
pass
"""
import glob
import os
from difflib import get_close_matches
from typing import List
from jinja2 import Environment, FileSystemLoader
from .forge_log import ForgeLogger
LOG = ForgeLogger(__name__)
class PromptEngine:
    """
    Loads and renders Jinja2 prompt templates for a given model.

    On construction the engine scans the ``../prompts`` directory for
    per-model template directories and picks the one whose name most closely
    matches the requested model, so e.g. "gpt-4-0613" can fall back to the
    "gpt-4" templates.
    """

    def __init__(self, model: str, debug_enabled: bool = False):
        """
        Initialize the PromptEngine for a model.

        Args:
            model (str): The model to load prompts for; fuzzy-matched against
                the available model template directories.
            debug_enabled (bool): Enable or disable debug logging.

        Raises:
            Exception: If the prompts directory cannot be scanned or the
                Jinja2 environment cannot be created.
        """
        self.model = model
        self.debug_enabled = debug_enabled
        if self.debug_enabled:
            LOG.debug(f"Initializing PromptEngine for model: {model}")
        try:
            # Collect the per-model template directories, excluding the
            # shared "techniques" directory.
            models_dir = os.path.abspath(
                os.path.join(os.path.dirname(__file__), "../prompts")
            )
            model_names = [
                os.path.basename(os.path.normpath(d))
                for d in glob.glob(os.path.join(models_dir, "*/"))
                if os.path.isdir(d) and "techniques" not in d
            ]
            self.model = self.get_closest_match(self.model, model_names)
            if self.debug_enabled:
                LOG.debug(f"Using the closest match model for prompts: {self.model}")
            self.env = Environment(loader=FileSystemLoader(models_dir))
        except Exception as e:
            LOG.error(f"Error initializing Environment: {e}")
            raise

    @staticmethod
    def get_closest_match(target: str, model_dirs: List[str]) -> str:
        """
        Find the closest match to the target in the list of model directories.

        Args:
            target (str): The target model name.
            model_dirs (list): The available model directory names.

        Returns:
            str: The closest match to the target.

        Raises:
            ValueError: If no directory name is similar enough to the target.
        """
        matches = get_close_matches(target, model_dirs, n=1, cutoff=0.6)
        if not matches:
            # Surface a clear error instead of the bare IndexError the
            # previous implementation raised on an empty match list.
            LOG.error(f"No prompt directory matches model: {target}")
            raise ValueError(f"No prompt directory matches model: {target}")
        return matches[0]

    def load_prompt(self, template: str, **kwargs) -> str:
        """
        Load and populate the specified template.

        Args:
            template (str): The name of the template to load (without the
                ``.j2`` extension), relative to the model directory.
            **kwargs: The arguments to populate the template with.

        Returns:
            str: The populated template.

        Raises:
            Exception: If the template cannot be found or rendered.
        """
        try:
            template = os.path.join(self.model, template)
            if self.debug_enabled:
                LOG.debug(f"Loading template: {template}")
            template = self.env.get_template(f"{template}.j2")
            if self.debug_enabled:
                LOG.debug(f"Rendering template: {template} with args: {kwargs}")
            return template.render(**kwargs)
        except Exception as e:
            LOG.error(f"Error loading or rendering template: {e}")
            raise

View File

@@ -1,611 +0,0 @@
"""
Routes for the Agent Service.
This module defines the API routes for the Agent service. While there are multiple endpoints provided by the service,
the ones that require special attention due to their complexity are:
1. `execute_agent_task_step`:
This route is significant because this is where the agent actually performs the work. The function handles
executing the next step for a task based on its current state, and it requires careful implementation to ensure
all scenarios (like the presence or absence of steps or a step marked as `last_step`) are handled correctly.
2. `upload_agent_task_artifacts`:
This route allows for the upload of artifacts, supporting various URI types (e.g., s3, gcs, ftp, http).
The support for different URI types makes it a bit more complex, and it's important to ensure that all
supported URI types are correctly managed. NOTE: The Auto-GPT team will eventually handle the most common
uri types for you.
3. `create_agent_task`:
While this is a simpler route, it plays a crucial role in the workflow, as it's responsible for the creation
of a new task.
Developers and contributors should be especially careful when making modifications to these routes to ensure
consistency and correctness in the system's behavior.
"""
import json
from typing import Optional
from fastapi import APIRouter, Query, Request, Response, UploadFile
from fastapi.responses import FileResponse
from forge.sdk.errors import *
from forge.sdk.forge_log import ForgeLogger
from forge.sdk.schema import *
base_router = APIRouter()
LOG = ForgeLogger(__name__)
@base_router.get("/", tags=["root"])
async def root():
"""
Root endpoint that returns a welcome message.
"""
return Response(content="Welcome to the Auto-GPT Forge")
@base_router.get("/heartbeat", tags=["server"])
async def check_server_status():
"""
Check if the server is running.
"""
return Response(content="Server is running.", status_code=200)
@base_router.get("/", tags=["root"])
async def root():
"""
Root endpoint that returns a welcome message.
"""
return Response(content="Welcome to the Auto-GPT Forge")
@base_router.get("/heartbeat", tags=["server"])
async def check_server_status():
"""
Check if the server is running.
"""
return Response(content="Server is running.", status_code=200)
@base_router.post("/agent/tasks", tags=["agent"], response_model=Task)
async def create_agent_task(request: Request, task_request: TaskRequestBody) -> Task:
"""
Creates a new task using the provided TaskRequestBody and returns a Task.
Args:
request (Request): FastAPI request object.
task (TaskRequestBody): The task request containing input and additional input data.
Returns:
Task: A new task with task_id, input, additional_input, and empty lists for artifacts and steps.
Example:
Request (TaskRequestBody defined in schema.py):
{
"input": "Write the words you receive to the file 'output.txt'.",
"additional_input": "python/code"
}
Response (Task defined in schema.py):
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"input": "Write the word 'Washington' to a .txt file",
"additional_input": "python/code",
"artifacts": [],
}
"""
agent = request["agent"]
try:
task_request = await agent.create_task(task_request)
return Response(
content=task_request.json(),
status_code=200,
media_type="application/json",
)
except Exception:
LOG.exception(f"Error whilst trying to create a task: {task_request}")
return Response(
content=json.dumps({"error": "Internal server error"}),
status_code=500,
media_type="application/json",
)
@base_router.get("/agent/tasks", tags=["agent"], response_model=TaskListResponse)
async def list_agent_tasks(
request: Request,
page: Optional[int] = Query(1, ge=1),
page_size: Optional[int] = Query(10, ge=1),
) -> TaskListResponse:
"""
Retrieves a paginated list of all tasks.
Args:
request (Request): FastAPI request object.
page (int, optional): The page number for pagination. Defaults to 1.
page_size (int, optional): The number of tasks per page for pagination. Defaults to 10.
Returns:
TaskListResponse: A response object containing a list of tasks and pagination details.
Example:
Request:
GET /agent/tasks?page=1&pageSize=10
Response (TaskListResponse defined in schema.py):
{
"items": [
{
"input": "Write the word 'Washington' to a .txt file",
"additional_input": null,
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"artifacts": [],
"steps": []
},
...
],
"pagination": {
"total": 100,
"pages": 10,
"current": 1,
"pageSize": 10
}
}
"""
agent = request["agent"]
try:
tasks = await agent.list_tasks(page, page_size)
return Response(
content=tasks.json(),
status_code=200,
media_type="application/json",
)
except NotFoundError:
LOG.exception("Error whilst trying to list tasks")
return Response(
content=json.dumps({"error": "Tasks not found"}),
status_code=404,
media_type="application/json",
)
except Exception:
LOG.exception("Error whilst trying to list tasks")
return Response(
content=json.dumps({"error": "Internal server error"}),
status_code=500,
media_type="application/json",
)
@base_router.get("/agent/tasks/{task_id}", tags=["agent"], response_model=Task)
async def get_agent_task(request: Request, task_id: str) -> Task:
"""
Gets the details of a task by ID.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
Returns:
Task: The task with the given ID.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb
Response (Task defined in schema.py):
{
"input": "Write the word 'Washington' to a .txt file",
"additional_input": null,
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"artifacts": [
{
"artifact_id": "7a49f31c-f9c6-4346-a22c-e32bc5af4d8e",
"file_name": "output.txt",
"agent_created": true,
"uri": "file://50da533e-3904-4401-8a07-c49adf88b5eb/output.txt"
}
],
"steps": [
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"step_id": "6bb1801a-fd80-45e8-899a-4dd723cc602e",
"input": "Write the word 'Washington' to a .txt file",
"additional_input": "challenge:write_to_file",
"name": "Write to file",
"status": "completed",
"output": "I am going to use the write_to_file command and write Washington to a file called output.txt <write_to_file('output.txt', 'Washington')>",
"additional_output": "Do you want me to continue?",
"artifacts": [
{
"artifact_id": "7a49f31c-f9c6-4346-a22c-e32bc5af4d8e",
"file_name": "output.txt",
"agent_created": true,
"uri": "file://50da533e-3904-4401-8a07-c49adf88b5eb/output.txt"
}
],
"is_last": true
}
]
}
"""
agent = request["agent"]
try:
task = await agent.get_task(task_id)
return Response(
content=task.json(),
status_code=200,
media_type="application/json",
)
except NotFoundError:
LOG.exception(f"Error whilst trying to get task: {task_id}")
return Response(
content=json.dumps({"error": "Task not found"}),
status_code=404,
media_type="application/json",
)
except Exception:
LOG.exception(f"Error whilst trying to get task: {task_id}")
return Response(
content=json.dumps({"error": "Internal server error"}),
status_code=500,
media_type="application/json",
)
@base_router.get(
    "/agent/tasks/{task_id}/steps", tags=["agent"], response_model=TaskStepsListResponse
)
async def list_agent_task_steps(
    request: Request,
    task_id: str,
    page: Optional[int] = Query(1, ge=1),
    page_size: Optional[int] = Query(10, ge=1, alias="pageSize"),
) -> TaskStepsListResponse:
    """
    Retrieves a paginated list of steps associated with a specific task.

    Args:
        request (Request): FastAPI request object.
        task_id (str): The ID of the task.
        page (int, optional): The page number for pagination. Defaults to 1.
        page_size (int, optional): The number of steps per page for pagination. Defaults to 10.

    Returns:
        TaskStepsListResponse: A response object containing a list of steps and pagination details.

    Example:
        Request:
            GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/steps?page=1&pageSize=10
        Response (TaskStepsListResponse defined in schema.py):
            {
                "items": [
                    {
                        "task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
                        "step_id": "step1_id",
                        ...
                    },
                    ...
                ],
                "pagination": {
                    "total": 100,
                    "pages": 10,
                    "current": 1,
                    "pageSize": 10
                }
            }
    """
    # The agent instance is placed in the request scope by AgentMiddleware.
    agent = request["agent"]
    try:
        steps = await agent.list_steps(task_id, page, page_size)
        return Response(
            content=steps.json(),
            status_code=200,
            media_type="application/json",
        )
    except NotFoundError:
        LOG.exception("Error whilst trying to list steps")
        return Response(
            content=json.dumps({"error": "Steps not found"}),
            status_code=404,
            media_type="application/json",
        )
    except Exception:
        LOG.exception("Error whilst trying to list steps")
        return Response(
            content=json.dumps({"error": "Internal server error"}),
            status_code=500,
            media_type="application/json",
        )
@base_router.post("/agent/tasks/{task_id}/steps", tags=["agent"], response_model=Step)
async def execute_agent_task_step(
request: Request, task_id: str, step: Optional[StepRequestBody] = None
) -> Step:
"""
Executes the next step for a specified task based on the current task status and returns the
executed step with additional feedback fields.
Depending on the current state of the task, the following scenarios are supported:
1. No steps exist for the task.
2. There is at least one step already for the task, and the task does not have a completed step marked as `last_step`.
3. There is a completed step marked as `last_step` already on the task.
In each of these scenarios, a step object will be returned with two additional fields: `output` and `additional_output`.
- `output`: Provides the primary response or feedback to the user.
- `additional_output`: Supplementary information or data. Its specific content is not strictly defined and can vary based on the step or agent's implementation.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
step (StepRequestBody): The details for executing the step.
Returns:
Step: Details of the executed step with additional feedback.
Example:
Request:
POST /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/steps
{
"input": "Step input details...",
...
}
Response:
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"step_id": "step1_id",
"output": "Primary feedback...",
"additional_output": "Supplementary details...",
...
}
"""
agent = request["agent"]
try:
# An empty step request represents a yes to continue command
if not step:
step = StepRequestBody(input="y")
step = await agent.execute_step(task_id, step)
return Response(
content=step.json(),
status_code=200,
media_type="application/json",
)
except NotFoundError:
LOG.exception(f"Error whilst trying to execute a task step: {task_id}")
return Response(
content=json.dumps({"error": f"Task not found {task_id}"}),
status_code=404,
media_type="application/json",
)
except Exception as e:
LOG.exception(f"Error whilst trying to execute a task step: {task_id}")
return Response(
content=json.dumps({"error": "Internal server error"}),
status_code=500,
media_type="application/json",
)
@base_router.get(
    "/agent/tasks/{task_id}/steps/{step_id}", tags=["agent"], response_model=Step
)
async def get_agent_task_step(request: Request, task_id: str, step_id: str) -> Step:
    """
    Retrieves the details of a specific step for a given task.

    Args:
        request (Request): FastAPI request object.
        task_id (str): The ID of the task.
        step_id (str): The ID of the step.

    Returns:
        Step: Details of the specific step.

    Example:
        Request:
            GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/steps/step1_id
        Response:
            {
                "task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
                "step_id": "step1_id",
                ...
            }
    """
    # The agent instance is placed in the request scope by AgentMiddleware.
    agent = request["agent"]
    try:
        step = await agent.get_step(task_id, step_id)
        # Fix: declare the JSON media type on the success response, matching
        # every sibling endpoint in this module.
        return Response(
            content=step.json(),
            status_code=200,
            media_type="application/json",
        )
    except NotFoundError:
        LOG.exception(f"Error whilst trying to get step: {step_id}")
        return Response(
            content=json.dumps({"error": "Step not found"}),
            status_code=404,
            media_type="application/json",
        )
    except Exception:
        LOG.exception(f"Error whilst trying to get step: {step_id}")
        return Response(
            content=json.dumps({"error": "Internal server error"}),
            status_code=500,
            media_type="application/json",
        )
@base_router.get(
    "/agent/tasks/{task_id}/artifacts",
    tags=["agent"],
    response_model=TaskArtifactsListResponse,
)
async def list_agent_task_artifacts(
    request: Request,
    task_id: str,
    page: Optional[int] = Query(1, ge=1),
    page_size: Optional[int] = Query(10, ge=1, alias="pageSize"),
) -> TaskArtifactsListResponse:
    """
    Retrieves a paginated list of artifacts associated with a specific task.

    Args:
        request (Request): FastAPI request object.
        task_id (str): The ID of the task.
        page (int, optional): The page number for pagination. Defaults to 1.
        page_size (int, optional): The number of items per page for pagination. Defaults to 10.

    Returns:
        TaskArtifactsListResponse: A response object containing a list of artifacts and pagination details.

    Example:
        Request:
            GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/artifacts?page=1&pageSize=10
        Response (TaskArtifactsListResponse defined in schema.py):
            {
                "items": [
                    {"artifact_id": "artifact1_id", ...},
                    {"artifact_id": "artifact2_id", ...},
                    ...
                ],
                "pagination": {
                    "total": 100,
                    "pages": 10,
                    "current": 1,
                    "pageSize": 10
                }
            }
    """
    # The agent instance is placed in the request scope by AgentMiddleware.
    agent = request["agent"]
    try:
        artifacts: TaskArtifactsListResponse = await agent.list_artifacts(
            task_id, page, page_size
        )
        # NOTE(review): logs the full artifact list at INFO and returns the
        # model directly (relying on response_model serialization) instead of
        # building a Response like the sibling endpoints — confirm both are
        # intentional.
        LOG.info(f"Artifacts: {artifacts.json()}")
        return artifacts
    except NotFoundError:
        LOG.exception("Error whilst trying to list artifacts")
        return Response(
            content=json.dumps({"error": "Artifacts not found for task_id"}),
            status_code=404,
            media_type="application/json",
        )
    except Exception:
        LOG.exception("Error whilst trying to list artifacts")
        return Response(
            content=json.dumps({"error": "Internal server error"}),
            status_code=500,
            media_type="application/json",
        )
@base_router.post(
    "/agent/tasks/{task_id}/artifacts", tags=["agent"], response_model=Artifact
)
async def upload_agent_task_artifacts(
    request: Request, task_id: str, file: UploadFile, relative_path: Optional[str] = ""
) -> Artifact:
    """
    This endpoint is used to upload an artifact associated with a specific task. The artifact is provided as a file.

    Args:
        request (Request): The FastAPI request object.
        task_id (str): The unique identifier of the task for which the artifact is being uploaded.
        file (UploadFile): The file being uploaded as an artifact.
        relative_path (str): The relative path for the file. This is a query parameter.

    Returns:
        Artifact: An object containing metadata of the uploaded artifact, including its unique identifier.

    Example:
        Request:
            POST /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/artifacts?relative_path=my_folder/my_other_folder
            File: <uploaded_file>
        Response:
            {
                "artifact_id": "b225e278-8b4c-4f99-a696-8facf19f0e56",
                "created_at": "2023-01-01T00:00:00Z",
                "modified_at": "2023-01-01T00:00:00Z",
                "agent_created": false,
                "relative_path": "/my_folder/my_other_folder/",
                "file_name": "main.py"
            }
    """
    # The agent instance is placed in the request scope by AgentMiddleware.
    agent = request["agent"]

    # NOTE(review): a missing file is answered with 404; HTTP convention for a
    # malformed request would be 400 — confirm against the agent-protocol spec
    # before changing, as clients may rely on the current code.
    if file is None:
        return Response(
            content=json.dumps({"error": "File must be specified"}),
            status_code=404,
            media_type="application/json",
        )
    try:
        artifact = await agent.create_artifact(task_id, file, relative_path)
        return Response(
            content=artifact.json(),
            status_code=200,
            media_type="application/json",
        )
    except Exception:
        LOG.exception(f"Error whilst trying to upload artifact: {task_id}")
        return Response(
            content=json.dumps({"error": "Internal server error"}),
            status_code=500,
            media_type="application/json",
        )
@base_router.get(
    "/agent/tasks/{task_id}/artifacts/{artifact_id}", tags=["agent"], response_model=str
)
async def download_agent_task_artifact(
    request: Request, task_id: str, artifact_id: str
) -> FileResponse:
    """
    Downloads an artifact associated with a specific task.

    Args:
        request (Request): FastAPI request object.
        task_id (str): The ID of the task.
        artifact_id (str): The ID of the artifact.

    Returns:
        FileResponse: The downloaded artifact file.

    Example:
        Request:
            GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/artifacts/artifact1_id
        Response:
            <file_content_of_artifact>
    """
    # The agent instance is placed in the request scope by AgentMiddleware.
    agent = request["agent"]
    try:
        # The agent returns a FileResponse directly; no re-serialization needed.
        return await agent.get_artifact(task_id, artifact_id)
    except NotFoundError:
        LOG.exception(f"Error whilst trying to download artifact: {task_id}")
        return Response(
            content=json.dumps(
                {
                    "error": f"Artifact not found - task_id: {task_id}, artifact_id: {artifact_id}"
                }
            ),
            status_code=404,
            media_type="application/json",
        )
    except Exception:
        LOG.exception(f"Error whilst trying to download artifact: {task_id}")
        return Response(
            content=json.dumps(
                {
                    "error": f"Internal server error - task_id: {task_id}, artifact_id: {artifact_id}"
                }
            ),
            status_code=500,
            media_type="application/json",
        )

View File

@@ -1,189 +0,0 @@
# generated by fastapi-codegen:
# filename: ../../postman/schemas/openapi.yaml
# timestamp: 2023-08-25T10:36:11+00:00
from __future__ import annotations
from datetime import datetime
from enum import Enum
from typing import List, Optional
from pydantic import BaseModel, Field
class ArtifactUpload(BaseModel):
    # Request body for uploading an artifact file to a task workspace.
    file: str = Field(..., description="File to upload.", format="binary")
    relative_path: str = Field(
        ...,
        description="Relative path of the artifact in the agent's workspace.",
        example="python/code",
    )
class Pagination(BaseModel):
    # Pagination metadata attached to every list response.
    total_items: int = Field(..., description="Total number of items.", example=42)
    total_pages: int = Field(..., description="Total number of pages.", example=97)
    # Fix: description previously read "Current_page page number."
    current_page: int = Field(..., description="Current page number.", example=1)
    page_size: int = Field(..., description="Number of items per page.", example=25)
class TaskInput(BaseModel):
    # Free-form, agent-specific task input; intentionally schemaless.
    pass
class Artifact(BaseModel):
    # A file produced by (or uploaded for) a task, addressed inside the
    # task's workspace by relative_path + file_name.
    created_at: datetime = Field(
        ...,
        description="The creation datetime of the task.",
        example="2023-01-01T00:00:00Z",
        json_encoders={datetime: lambda v: v.isoformat()},
    )
    modified_at: datetime = Field(
        ...,
        description="The modification datetime of the task.",
        example="2023-01-01T00:00:00Z",
        json_encoders={datetime: lambda v: v.isoformat()},
    )
    artifact_id: str = Field(
        ...,
        description="ID of the artifact.",
        example="b225e278-8b4c-4f99-a696-8facf19f0e56",
    )
    agent_created: bool = Field(
        ...,
        description="Whether the artifact has been created by the agent.",
        example=False,
    )
    relative_path: str = Field(
        ...,
        description="Relative path of the artifact in the agents workspace.",
        example="/my_folder/my_other_folder/",
    )
    file_name: str = Field(
        ...,
        description="Filename of the artifact.",
        example="main.py",
    )
class StepInput(BaseModel):
    # Free-form, agent-specific step input; intentionally schemaless.
    pass
class StepOutput(BaseModel):
    # Free-form, agent-specific step output; intentionally schemaless.
    pass
class TaskRequestBody(BaseModel):
    # Request payload for creating a task.
    input: str = Field(
        ...,
        min_length=1,
        description="Input prompt for the task.",
        example="Write the words you receive to the file 'output.txt'.",
    )
    # NOTE(review): `{}` as the field default — pydantic copies field defaults
    # per instance, so this is safe, but `None` would better match Optional.
    additional_input: Optional[TaskInput] = {}
class Task(TaskRequestBody):
    # A created task: the request fields plus server-assigned identity,
    # timestamps, and the artifacts produced so far.
    created_at: datetime = Field(
        ...,
        description="The creation datetime of the task.",
        example="2023-01-01T00:00:00Z",
        json_encoders={datetime: lambda v: v.isoformat()},
    )
    modified_at: datetime = Field(
        ...,
        description="The modification datetime of the task.",
        example="2023-01-01T00:00:00Z",
        json_encoders={datetime: lambda v: v.isoformat()},
    )
    task_id: str = Field(
        ...,
        description="The ID of the task.",
        example="50da533e-3904-4401-8a07-c49adf88b5eb",
    )
    artifacts: Optional[List[Artifact]] = Field(
        [],
        description="A list of artifacts that the task has produced.",
        example=[
            "7a49f31c-f9c6-4346-a22c-e32bc5af4d8e",
            "ab7b4091-2560-4692-a4fe-d831ea3ca7d6",
        ],
    )
class StepRequestBody(BaseModel):
    # Request payload for executing a step of a task.
    name: Optional[str] = Field(
        None, description="The name of the task step.", example="Write to file"
    )
    input: Optional[str] = Field(
        None,
        min_length=1,
        description="Input prompt for the step.",
        example="Washington",
    )
    additional_input: Optional[StepInput] = {}
class Status(Enum):
    # Lifecycle of a task step.
    created = "created"
    running = "running"
    completed = "completed"
class Step(StepRequestBody):
    # An executed (or executing) step: request fields plus server-assigned
    # identity, timestamps, status, output and produced artifacts.
    created_at: datetime = Field(
        ...,
        description="The creation datetime of the task.",
        example="2023-01-01T00:00:00Z",
        json_encoders={datetime: lambda v: v.isoformat()},
    )
    modified_at: datetime = Field(
        ...,
        description="The modification datetime of the task.",
        example="2023-01-01T00:00:00Z",
        json_encoders={datetime: lambda v: v.isoformat()},
    )
    task_id: str = Field(
        ...,
        description="The ID of the task this step belongs to.",
        example="50da533e-3904-4401-8a07-c49adf88b5eb",
    )
    step_id: str = Field(
        ...,
        description="The ID of the task step.",
        example="6bb1801a-fd80-45e8-899a-4dd723cc602e",
    )
    # NOTE(review): re-declares `name` with the same definition already
    # inherited from StepRequestBody — redundant; confirm before removing.
    name: Optional[str] = Field(
        None, description="The name of the task step.", example="Write to file"
    )
    status: Status = Field(
        ..., description="The status of the task step.", example="created"
    )
    output: Optional[str] = Field(
        None,
        description="Output of the task step.",
        example="I am going to use the write_to_file command and write Washington to a file called output.txt <write_to_file('output.txt', 'Washington')",
    )
    additional_output: Optional[StepOutput] = {}
    artifacts: Optional[List[Artifact]] = Field(
        [], description="A list of artifacts that the step has produced."
    )
    is_last: bool = Field(
        ..., description="Whether this is the last step in the task.", example=True
    )
class TaskListResponse(BaseModel):
    # Paginated list of tasks.
    # NOTE(review): serialized key is "tasks", but the route docstrings show
    # "items" in examples — confirm which the protocol expects.
    tasks: Optional[List[Task]] = None
    pagination: Optional[Pagination] = None
class TaskStepsListResponse(BaseModel):
    # Paginated list of a task's steps.
    steps: Optional[List[Step]] = None
    pagination: Optional[Pagination] = None
class TaskArtifactsListResponse(BaseModel):
    # Paginated list of a task's artifacts.
    artifacts: Optional[List[Artifact]] = None
    pagination: Optional[Pagination] = None

View File

@@ -1,76 +0,0 @@
import abc
import os
import typing
from pathlib import Path
class Workspace(abc.ABC):
    """
    Abstract interface for a task-scoped file workspace.

    Every operation is namespaced by ``task_id``: implementations must keep
    each task's files isolated under its own directory.
    """

    # Fix: the original used `abc.abstractclassmethod`, which is deprecated
    # and wrong here — these are plain instance methods, not classmethods.
    @abc.abstractmethod
    def __init__(self, base_path: str) -> None:
        # Root directory under which each task gets its own subdirectory.
        self.base_path = base_path

    @abc.abstractmethod
    def read(self, task_id: str, path: str) -> bytes:
        """Return the raw contents of ``path`` inside the task's workspace."""

    @abc.abstractmethod
    def write(self, task_id: str, path: str, data: bytes) -> None:
        """Write ``data`` to ``path`` inside the task's workspace."""

    @abc.abstractmethod
    def delete(
        self, task_id: str, path: str, directory: bool = False, recursive: bool = False
    ) -> None:
        """Delete a file or directory; ``recursive`` removes directory contents."""

    @abc.abstractmethod
    def exists(self, task_id: str, path: str) -> bool:
        """Return True if ``path`` exists inside the task's workspace."""

    @abc.abstractmethod
    def list(self, task_id: str, path: str) -> typing.List[str]:
        """List entries under ``path`` relative to the task's workspace."""
class LocalWorkspace(Workspace):
    """
    Workspace backed by a directory on the local filesystem.

    Each task's files live under ``<base_path>/<task_id>/``; every path is
    validated to prevent directory traversal out of the base directory.
    """

    def __init__(self, base_path: str):
        self.base_path = Path(base_path).resolve()

    def _resolve_path(self, task_id: str, path: str) -> Path:
        """Resolve ``path`` inside the task directory, rejecting traversal.

        Raises:
            ValueError: If the resolved path escapes the workspace base.
        """
        abs_path = (self.base_path / task_id / path).resolve()
        if not str(abs_path).startswith(str(self.base_path)):
            # Fix: dropped the stray debug print("Error") before raising.
            raise ValueError("Directory traversal is not allowed!")
        return abs_path

    def read(self, task_id: str, path: str) -> bytes:
        with open(self._resolve_path(task_id, path), "rb") as f:
            return f.read()

    def write(self, task_id: str, path: str, data: bytes) -> None:
        file_path = self._resolve_path(task_id, path)
        # Create intermediate directories here (and only here) so callers can
        # write nested paths; read/exists/list no longer create directories as
        # a side effect, as they did when _resolve_path always mkdir'd.
        file_path.parent.mkdir(parents=True, exist_ok=True)
        with open(file_path, "wb") as f:
            f.write(data)

    def delete(
        self, task_id: str, path: str, directory: bool = False, recursive: bool = False
    ) -> None:
        resolved_path = self._resolve_path(task_id, path)
        if directory:
            if recursive:
                # Fix: the original called os.rmdir for the recursive case
                # (fails on non-empty dirs) and os.removedirs otherwise
                # (removes empty *parents* too). Recursive delete now removes
                # the tree; non-recursive removes a single empty directory.
                import shutil

                shutil.rmtree(resolved_path)
            else:
                os.rmdir(resolved_path)
        else:
            os.remove(resolved_path)

    def exists(self, task_id: str, path: str) -> bool:
        return self._resolve_path(task_id, path).exists()

    def list(self, task_id: str, path: str) -> typing.List[str]:
        base = self._resolve_path(task_id, path)
        return [str(p.relative_to(self.base_path / task_id)) for p in base.iterdir()]

View File

@@ -1,47 +0,0 @@
import os
import shutil

import pytest

# Assuming the classes are defined in a file named workspace.py
from .workspace import LocalWorkspace
# Constants
TEST_BASE_PATH = "/tmp/test_workspace"
TEST_FILE_CONTENT = b"Hello World"
TEST_TASK_ID = "1234"
# Setup and Teardown for LocalWorkspace
@pytest.fixture
def setup_local_workspace():
    """Create the test workspace directory and remove it after the test."""
    os.makedirs(TEST_BASE_PATH, exist_ok=True)
    yield
    # Fix: use shutil.rmtree instead of shelling out to `rm -rf` — portable
    # (no POSIX shell required) and not silently ignoring failures the way an
    # unchecked os.system exit status was.
    shutil.rmtree(TEST_BASE_PATH, ignore_errors=True)
def test_local_read_write_delete_exists(setup_local_workspace):
    """Round-trip a file through write/exists/read, then delete it."""
    ws = LocalWorkspace(TEST_BASE_PATH)

    ws.write(TEST_TASK_ID, "test_file.txt", TEST_FILE_CONTENT)
    assert ws.exists(TEST_TASK_ID, "test_file.txt")
    assert ws.read(TEST_TASK_ID, "test_file.txt") == TEST_FILE_CONTENT

    ws.delete(TEST_TASK_ID, "test_file.txt")
    assert not ws.exists(TEST_TASK_ID, "test_file.txt")
def test_local_list(setup_local_workspace):
    """Listing the task root should show exactly the files written."""
    ws = LocalWorkspace(TEST_BASE_PATH)
    for name in ("test1.txt", "test2.txt"):
        ws.write(TEST_TASK_ID, name, TEST_FILE_CONTENT)
    assert set(ws.list(TEST_TASK_ID, ".")) == {"test1.txt", "test2.txt"}

View File

@@ -1,5 +0,0 @@
Have you developed a tool that could help people build agents?
Fork this repository, integrate your tool into the forge, and send us the link to your fork in the AutoGPT Discord: https://discord.gg/autogpt (ping the maintainers).
PS: make sure the way you integrate your tool allows for easy rebases from upstream.

View File

@@ -1,13 +0,0 @@
# Global strictness: untyped defs are disallowed; generated, benchmark and
# vendored agent code is excluded from checking below.
[mypy]
namespace_packages = True
follow_imports = skip
check_untyped_defs = True
disallow_untyped_defs = True
exclude = ^(agbenchmark/challenges/|agent/|venv|venv-dev)
ignore_missing_imports = True

# Suppress all errors in the data-type helpers.
[mypy-agbenchmark.utils.data_types.*]
ignore_errors = True

# NOTE(review): all numpy-related errors are suppressed wholesale — confirm
# this is still needed rather than ignoring specific error codes.
[mypy-numpy.*]
ignore_errors = True

File diff suppressed because it is too large Load Diff

View File

@@ -1,57 +0,0 @@
[tool.poetry]
name = "Auto-GPT-Forge"
version = "0.1.0"
description = ""
authors = ["Craig Swift <craigswift13@gmail.com>"]
license = "MIT"
readme = "README.md"

[tool.poetry.dependencies]
python = "^3.10"
python-dotenv = "^1.0.0"
openai = "^0.27.8"
tenacity = "^8.2.2"
sqlalchemy = "^2.0.19"
aiohttp = "^3.8.5"
colorlog = "^6.7.0"
chromadb = "^0.4.10"
# Local path dependency on the in-repo benchmark package.
agbenchmark = { path = "../../benchmark" }
hypercorn = "^0.14.4"
python-multipart = "^0.0.6"
toml = "^0.10.2"

[tool.poetry.group.dev.dependencies]
isort = "^5.12.0"
black = "^23.3.0"
pre-commit = "^3.3.3"
mypy = "^1.4.1"
flake8 = "^6.0.0"
types-requests = "^2.31.0.2"
pytest = "^7.4.0"
pytest-asyncio = "^0.21.1"
watchdog = "^3.0.0"
mock = "^5.1.0"
autoflake = "^2.2.0"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

[tool.black]
line-length = 88
target-version = ['py310']
include = '\.pyi?$'
# NOTE(review): black's documented config options do not include `packages` —
# confirm this key is intentional and not silently ignored.
packages = ["autogpt"]
extend-exclude = '(/dist|/.venv|/venv|/build|/agent|agbenchmark/challenges)/'

[tool.isort]
profile = "black"
multi_line_output = 3
include_trailing_comma = true
force_grid_wrap = 0
use_parentheses = true
ensure_newline_before_comments = true
line_length = 88
sections = ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "LOCALFOLDER"]
skip_glob = [".tox", "__pycache__", "*.pyc", "venv*/*", "reports", "venv", "env", "node_modules", ".env", ".venv", "dist", "agent/*", "agbenchmark/challenges/*"]

View File

@@ -1,11 +0,0 @@
#!/bin/bash

# Free the ports used by the agent (8000) and the benchmark server (8080).
# Fix: when nothing is listening, `kill` with no arguments printed a usage
# error; suppress it and carry on.
kill $(lsof -t -i :8000) 2>/dev/null
kill $(lsof -t -i :8080) 2>/dev/null

poetry install

# Create a .env from the template on first run so the user knows to add keys.
if [ ! -f .env ]; then
    cp .env.example .env
    echo "Please add your api keys to the .env file."
fi

# Start the agent and the benchmark server in the background.
poetry run python -m forge &
agbenchmark serve &

View File

@@ -1,10 +0,0 @@
#!/bin/bash

# Stop anything already listening on the agent port.
kill $(lsof -t -i :8000)

poetry install

# Replace the released agbenchmark with the local development copy.
poetry run pip3 uninstall agbenchmark --yes
poetry run pip3 install -e ../../benchmark

# Run the agent in the background, then launch the benchmark against it,
# forwarding any CLI arguments.
poetry run python3 -m forge &
export PYTHONPATH=$PYTHONPATH:../../benchmark/agbenchmark
poetry run python3 -m agbenchmark start "$@"