Re-Organised project and added benchmark code

This commit is contained in:
SwiftyOS
2023-09-04 16:21:09 +02:00
parent bc211827ad
commit e5cef441e2
1979 changed files with 566014 additions and 263 deletions

forge/.env.example Normal file

@@ -0,0 +1,11 @@
# Your OpenAI API key. If GPT-4 is available it will be used; otherwise GPT-3.5-turbo will be used
OPENAI_API_KEY=abc
# If you want to enable Helicone proxy and caching
HELICONE_KEY=abc
OPENAI_API_BASE=https://oai.hconeai.com/v1
# Control log level
LOG_LEVEL=INFO
DATABASE_STRING="sqlite:///agent.db"
PORT=8000
AGENT_WORKSPACE="agbenchmark/workspace"
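A minimal sketch of how these settings are consumed at startup, mirroring the wiring in `autogpt/__main__.py` later in this commit (python-dotenv loads the file, then values are read from the environment):

```
import os

from dotenv import load_dotenv

load_dotenv()  # reads .env from the current working directory

api_key = os.getenv("OPENAI_API_KEY")           # used for LLM calls
database_string = os.getenv("DATABASE_STRING")  # e.g. sqlite:///agent.db
port = int(os.getenv("PORT", 8000))             # agent server port
workspace_root = os.getenv("AGENT_WORKSPACE")   # where artifacts are written
```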

forge/.flake8 Normal file

@@ -0,0 +1,15 @@
[flake8]
max-line-length = 88
select = "E303, W293, W292, E305, E231, E302"
exclude =
.tox,
__pycache__,
*.pyc,
.env
venv*/*,
.venv/*,
reports/*,
dist/*,
agent/*,
code,
agbenchmark/challenges/*

forge/.gitignore vendored Normal file

@@ -0,0 +1,172 @@
## Original ignores
autogpt/keys.py
autogpt/*.json
**/auto_gpt_workspace/*
*.mpeg
.env
azure.yaml
ai_settings.yaml
last_run_ai_settings.yaml
.vscode
.idea/*
auto-gpt.json
log.txt
log-ingestion.txt
logs
*.log
*.mp3
mem.sqlite3
venvAutoGPT
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
plugins/
plugins_config.yaml
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
site/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.direnv/
.env
.venv
env/
venv*/
ENV/
env.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
llama-*
vicuna-*
# mac
.DS_Store
openai/
# news
CURRENT_BULLETIN.md
agbenchmark
*.sqlite
.agbench
.agbenchmark
.benchmarks
.mypy_cache
.pytest_cache
.vscode
ig_*

forge/.pre-commit-config.yaml Normal file

@@ -0,0 +1,42 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
- id: check-added-large-files
args: ['--maxkb=500']
- id: check-byte-order-marker
- id: check-case-conflict
- id: check-merge-conflict
- id: check-symlinks
- id: debug-statements
- repo: https://github.com/pycqa/isort
rev: 5.12.0
hooks:
- id: isort
language_version: python3.11
- repo: https://github.com/psf/black
rev: 23.3.0
hooks:
- id: black
language_version: python3.11
# - repo: https://github.com/pre-commit/mirrors-mypy
# rev: 'v1.3.0'
# hooks:
# - id: mypy
- repo: local
hooks:
- id: autoflake
name: autoflake
entry: autoflake --in-place --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt
language: python
types: [ python ]
- id: pytest-check
name: pytest-check
entry: pytest
language: system
pass_filenames: false
always_run: true

forge/Dockerfile Normal file

@@ -0,0 +1,40 @@
# Use an official Python runtime as a parent image
FROM python:3.11-slim-buster as base
# Set work directory in the container
WORKDIR /app
# Install system dependencies
RUN apt-get update \
&& apt-get install -y build-essential curl ffmpeg \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# Install Poetry - respects $POETRY_VERSION & $POETRY_HOME
ENV POETRY_VERSION=1.1.8 \
POETRY_HOME="/opt/poetry" \
POETRY_NO_INTERACTION=1 \
POETRY_VIRTUALENVS_CREATE=false \
PATH="$POETRY_HOME/bin:$PATH"
RUN pip3 install "poetry==$POETRY_VERSION"
COPY pyproject.toml poetry.lock* /app/
# Project initialization:
RUN poetry install --no-interaction --no-ansi
ENV PYTHONPATH="/app:$PYTHONPATH"
FROM base as dependencies
# Copy project
COPY . /app
# Make port 8000 available to the world outside this container
EXPOSE 8000
# Run the application when the container launches
CMD ["poetry", "run", "python", "autogpt/__main__.py"]

forge/Makefile Normal file

@@ -0,0 +1,22 @@
.PHONY: update-protocol
update-protocol:
@if [ -d "../agent-protocol/sdk/python/agent_protocol" ]; then \
cp -r ../agent-protocol/sdk/python/agent_protocol autogpt; \
rm -Rf autogpt/agent_protocol/utils; \
rm -Rf autogpt/agent_protocol/cli.py; \
echo "Protocol updated successfully!"; \
else \
echo "Error: Source directory ../agent-protocol/sdk/python/agent_protocol does not exist."; \
exit 1; \
fi
change-protocol:
@if [ -d "autogpt/agent_protocol" ]; then \
cp -r autogpt/agent_protocol ../agent-protocol/sdk/python; \
rm ../agent-protocol/sdk/python/agent_protocol/README.md; \
echo "Protocol reversed successfully!"; \
else \
echo "Error: Target directory autogpt/agent_protocol does not exist."; \
exit 1; \
fi

forge/README.md Normal file

@@ -0,0 +1,44 @@
# 🚀 **Auto-GPT-Forge**: Build Your Own Auto-GPT Agent! 🧠
## (Release date: very soon)
### 🌌 Dive into the Universe of Auto-GPT Creation! 🌌
Ever dreamt of becoming the genius behind an AI agent? Dive into the *Auto-GPT-Forge*, where **you** become the creator!
---
### 🛠️ **Why Auto-GPT-Forge?**
- 💤 **No More Boilerplate!** Don't let the mundane tasks stop you. Fork and build without the headache of starting from scratch!
- 🧠 **Brain-centric Development!** All the tools you need so you can spend 100% of your time on what matters - crafting the brain of your AI!
- 🛠️ **Tooling ecosystem!** We work with best-in-class tools to bring you the best experience possible!
---
### 🎁 **Features & Toolset**:
1. **[autopack](https://github.com/AutoPackAI/autopack)**: Equip your agent with the best tools without the fuss!
2. **[agbenchmark](https://github.com/AutoPackAI/Auto-GPT-Benchmarks)**: Test your agent against basic tasks to see how it performs!
3. **[Helicone](https://www.helicone.ai/)**: Log your LLM interactions effortlessly, so you know what your agent is doing.
4. **[Agent communication protocol](https://github.com/e2b-dev/sdk)** from e2b's SDK: deploy your agent easily.
5. **More building blocks coming soon!**: We constantly work with partners to bring best-in-class tools to kickstart agent creation!
---
### 🚀 **Get Started!**
1. **[Fork the Project](https://github.com/Significant-Gravitas/Auto-GPT-Forge)**
2. Clone your repo
3. Add your API keys
4. Let the magic happen! Create your specialized or general AI!
---
### 🔗 **Connect & Contribute!**
**Show Your Support!** Star the repo and let us know you're interested!
💡 **Stay Updated**: This is just the beginning. Keep an eye on our repository for more!


forge/autogpt/__main__.py Normal file

@@ -0,0 +1,31 @@
import os
from dotenv import load_dotenv
load_dotenv()
import autogpt.sdk.forge_log
autogpt.sdk.forge_log.setup_logger()
LOG = autogpt.sdk.forge_log.ForgeLogger(__name__)
if __name__ == "__main__":
"""Runs the agent server"""
# modules are imported here so that logging is setup first
import autogpt.agent
import autogpt.sdk.db
from autogpt.benchmark_integration import add_benchmark_routes
from autogpt.sdk.workspace import LocalWorkspace
router = add_benchmark_routes()
database_name = os.getenv("DATABASE_STRING")
workspace = LocalWorkspace(os.getenv("AGENT_WORKSPACE"))
port = int(os.getenv("PORT", 8000))
database = autogpt.sdk.db.AgentDB(database_name, debug_enabled=True)
agent = autogpt.agent.AutoGPTAgent(database=database, workspace=workspace)
agent.start(port=port, router=router)
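With the server running, tasks are created over the agent protocol's HTTP API. A hedged client sketch using `requests` against the `/agent/tasks` routes defined in `routes/agent_protocol.py` below; the payload field names come from `TaskRequestBody`, and the base URL assumes the default `PORT=8000` from `.env.example`:

```
import requests

BASE_URL = "http://localhost:8000"

# Create a task; TaskRequestBody takes an input string and optional additional_input.
resp = requests.post(
    f"{BASE_URL}/agent/tasks",
    json={"input": "Write the word 'Washington' to a .txt file"},
)
resp.raise_for_status()
task = resp.json()
print("created task:", task["task_id"])

# Paginated task listing, as served by list_agent_tasks.
tasks = requests.get(
    f"{BASE_URL}/agent/tasks", params={"page": 1, "page_size": 10}
).json()
```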

forge/autogpt/agent.py Normal file

@@ -0,0 +1,106 @@
from autogpt.sdk import Agent, AgentDB, Step, StepRequestBody, Workspace
class AutoGPTAgent(Agent):
"""
The goal of the Forge is to take care of the boilerplate code so you can focus on
agent design.
There is a great paper surveying the agent landscape: https://arxiv.org/abs/2308.11432,
which I would highly recommend reading as it will help you understand the possibilities.
Here is a summary of the key components of an agent:
Anatomy of an agent:
- Profile
- Memory
- Planning
- Action
Profile:
Agents typically perform a task by assuming specific roles. For example, a teacher,
a coder, a planner etc. Using the profile in the LLM prompt has been shown to
improve the quality of the output. https://arxiv.org/abs/2305.14688
Additionally, based on the profile selected, the agent could be configured to use a
different LLM. The possibilities are endless and the profile can be selected
dynamically based on the task at hand.
Memory:
Memory is critical for the agent to accumulate experiences, self-evolve, and behave
in a more consistent, reasonable, and effective manner. There are many approaches to
memory. However, some thoughts: there is long term and short term or working memory.
You may want different approaches for each. There has also been work exploring the
idea of memory reflection, which is the ability to assess its memories and re-evaluate
them. For example, condensing short-term memories into long-term memories.
Planning:
When humans face a complex task, they first break it down into simple subtasks and then
solve each subtask one by one. The planning module empowers LLM-based agents with the ability
to think and plan for solving complex tasks, which makes the agent more comprehensive,
powerful, and reliable. The two key methods to consider are: Planning with feedback and planning
without feedback.
Action:
Actions translate the agent's decisions into specific outcomes. For example, if the agent
decides to write a file, the action would be to write the file. There are many ways you
could implement actions.
The Forge has a basic module for each of these areas. However, you are free to implement your own.
This is just a starting point.
"""
def __init__(self, database: AgentDB, workspace: Workspace):
"""
The database is used to store tasks, steps and artifact metadata. The workspace is used to
store artifacts. The workspace is a directory on the file system.
Feel free to create subclasses of the database and workspace to implement your own storage
"""
super().__init__(database, workspace)
async def execute_step(self, task_id: str, step_request: StepRequestBody) -> Step:
"""
The agent protocol, which is the core of the Forge, works by creating a task and then
executing steps for that task. This method is called when the agent is asked to execute
a step.
The task that is created contains an input string, for the benchmarks this is the task
the agent has been asked to solve and additional input, which is a dictionary and
could contain anything.
If you want to get the task use:
```
task = await self.db.get_task(task_id)
```
The step request body is essentially the same as the task request and contains an input
string, for the benchmarks this is the task the agent has been asked to solve and
additional input, which is a dictionary and could contain anything.
You need to implement logic that will take in this step input and output the completed step
as a step object. You can do everything in a single step or you can break it down into
multiple steps. Returning a request to continue in the step output, the user can then decide
if they want the agent to continue or not.
"""
# An example implementation: write a file to the workspace and record it as an artifact
self.workspace.write(task_id=task_id, path="output.txt", data=b"Washington D.C")
step = await self.db.create_step(
task_id=task_id, input=step_request, is_last=True
)
artifact = await self.db.create_artifact(
task_id=task_id,
step_id=step.step_id,
file_name="output.txt",
relative_path="",
agent_created=True,
)
step.output = "Washington D.C"
return step
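The example above completes the whole task in a single, final step. As the docstring notes, an agent can instead return a non-final step so the caller decides whether to continue; a hedged sketch of that variant, using only the `db` API shown in this commit:

```
async def execute_step(self, task_id: str, step_request: StepRequestBody) -> Step:
    task = await self.db.get_task(task_id)  # the task this step belongs to
    # ... perform one unit of work towards the task here ...
    step = await self.db.create_step(
        task_id=task_id,
        input=step_request,
        is_last=False,  # more steps remain; the caller may request another
    )
    step.output = "Planned the work; request another step to execute it."
    return step
```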

forge/autogpt/benchmark_integration.py Normal file

@@ -0,0 +1,27 @@
from agbenchmark.app import get_artifact, get_skill_tree
from fastapi import APIRouter
from fastapi import (
HTTPException as FastAPIHTTPException, # Import HTTPException from FastAPI
)
from fastapi.responses import FileResponse
from autogpt.sdk.routes.agent_protocol import base_router
def add_benchmark_routes():
new_router = APIRouter()
@new_router.get("/skill_tree")
async def get_skill_tree_endpoint() -> dict: # Renamed to avoid a clash with the function import
return get_skill_tree()
@new_router.get("/agent/challenges/{challenge_id}/artifacts/{artifact_id}")
async def get_artifact_endpoint(
challenge_id: str, artifact_id: str
) -> FileResponse: # Added return type annotation
return get_artifact(challenge_id, artifact_id)
# Include the new router in the base router
base_router.include_router(new_router)
return base_router
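Once `add_benchmark_routes()` is mounted (as in `__main__.py` above), the benchmark endpoints can be queried directly. A small hedged sketch, assuming the server runs on the default port 8000:

```
import requests

# Endpoint registered by add_benchmark_routes() above.
skill_tree = requests.get("http://localhost:8000/skill_tree").json()
```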


@@ -0,0 +1,9 @@
{% extends "techniques/expert.j2" %}
{% block expert %}Human Resources{% endblock %}
{% block prompt %}
Generate a profile for an expert who can help with the task '{{ task }}'. Please provide the following details:
Name: Enter the expert's name
Expertise: Specify the area in which the expert specializes
Goals: List 4 goals that the expert aims to achieve in order to help with the task
Assessment: Describe how the expert will assess whether they have successfully completed the task
{% endblock %}

forge/autogpt/prompts/techniques/expert.j2 Normal file

@@ -0,0 +1 @@
Answer as an expert in {% block expert %} {% endblock %}. {% block prompt %}{% endblock %}

forge/autogpt/sdk/__init__.py Normal file

@@ -0,0 +1,25 @@
"""
The Forge SDK. This is the core of the Forge. It contains the agent protocol, which is the
core of the Forge.
"""
from .agent import Agent
from .db import AgentDB
from .forge_log import ForgeLogger
from .prompting import PromptEngine
from .schema import (
Artifact,
ArtifactUpload,
Pagination,
Status,
Step,
StepInput,
StepOutput,
StepRequestBody,
Task,
TaskArtifactsListResponse,
TaskInput,
TaskListResponse,
TaskRequestBody,
TaskStepsListResponse,
)
from .workspace import LocalWorkspace, Workspace

forge/autogpt/sdk/agent.py Normal file

@@ -0,0 +1,191 @@
import asyncio
import os
from uuid import uuid4
from fastapi import APIRouter, FastAPI, UploadFile
from fastapi.responses import FileResponse
from fastapi.middleware.cors import CORSMiddleware
from hypercorn.asyncio import serve
from hypercorn.config import Config
from .db import AgentDB
from .errors import NotFoundError
from .forge_log import ForgeLogger
from .middlewares import AgentMiddleware
from .routes.agent_protocol import base_router
from .schema import *
from .workspace import Workspace
LOG = ForgeLogger(__name__)
class Agent:
def __init__(self, database: AgentDB, workspace: Workspace):
self.db = database
self.workspace = workspace
def start(self, port: int = 8000, router: APIRouter = base_router):
"""
Start the agent server.
"""
config = Config()
app = FastAPI(
title="Auto-GPT Forge",
description="Modified version of The Agent Protocol.",
version="v0.4",
)
# Add CORS middleware
origins = [
"http://localhost:5000",
"http://127.0.0.1:5000",
# Add any other origins you want to whitelist
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(router)
app.add_middleware(AgentMiddleware, agent=self)
config.loglevel = "ERROR"
config.bind = [f"0.0.0.0:{port}"]
LOG.info(f"Agent server starting on http://{config.bind[0]}")
asyncio.run(serve(app, config))
async def create_task(self, task_request: TaskRequestBody) -> Task:
"""
Create a task for the agent.
"""
try:
task = await self.db.create_task(
input=task_request.input,
additional_input=task_request.additional_input,
)
return task
except Exception as e:
raise
async def list_tasks(self, page: int = 1, pageSize: int = 10) -> TaskListResponse:
"""
List all tasks that the agent has created.
"""
try:
tasks, pagination = await self.db.list_tasks(page, pageSize)
response = TaskListResponse(tasks=tasks, pagination=pagination)
return response
except Exception as e:
raise
async def get_task(self, task_id: str) -> Task:
"""
Get a task by ID.
"""
try:
task = await self.db.get_task(task_id)
except Exception as e:
raise
return task
async def list_steps(
self, task_id: str, page: int = 1, pageSize: int = 10
) -> TaskStepsListResponse:
"""
List the IDs of all steps that the task has created.
"""
try:
steps, pagination = await self.db.list_steps(task_id, page, pageSize)
response = TaskStepsListResponse(steps=steps, pagination=pagination)
return response
except Exception as e:
raise
async def execute_step(self, task_id: str, step_request: StepRequestBody) -> Step:
"""
Create a step for the task.
"""
raise NotImplementedError
async def get_step(self, task_id: str, step_id: str) -> Step:
"""
Get a step by ID.
"""
try:
step = await self.db.get_step(task_id, step_id)
return step
except Exception as e:
raise
async def list_artifacts(
self, task_id: str, page: int = 1, pageSize: int = 10
) -> TaskArtifactsListResponse:
"""
List the artifacts that the task has created.
"""
try:
artifacts, pagination = await self.db.list_artifacts(
task_id, page, pageSize
)
return TaskArtifactsListResponse(artifacts=artifacts, pagination=pagination)
except Exception as e:
raise
async def create_artifact(
self, task_id: str, file: UploadFile, relative_path: str
) -> Artifact:
"""
Create an artifact for the task.
"""
data = None
file_name = file.filename or str(uuid4())
try:
data = b""
while contents := file.file.read(1024 * 1024):
data += contents
# Check if relative path ends with filename
if relative_path.endswith(file_name):
file_path = relative_path
else:
file_path = os.path.join(relative_path, file_name)
self.workspace.write(task_id, file_path, data)
artifact = await self.db.create_artifact(
task_id=task_id,
file_name=file_name,
relative_path=relative_path,
agent_created=False,
)
except Exception as e:
raise
return artifact
async def get_artifact(self, task_id: str, artifact_id: str) -> Artifact:
"""
Get an artifact by ID.
"""
try:
artifact = await self.db.get_artifact(artifact_id)
file_path = os.path.join(artifact.relative_path, artifact.file_name)
retrieved_artifact = self.workspace.read(task_id=task_id, path=file_path)
path = artifact.file_name
with open(path, "wb") as f:
f.write(retrieved_artifact)
except NotFoundError as e:
raise
except FileNotFoundError as e:
raise
except Exception as e:
raise
return FileResponse(
# Note: mimetype is guessed in the FileResponse constructor
path=path,
filename=artifact.file_name,
)
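For reference, the base class can be wired up directly, the same way `__main__.py` wires `AutoGPTAgent`. A minimal sketch; note that `execute_step` raises `NotImplementedError` until overridden in a subclass:

```
from autogpt.sdk import Agent, AgentDB, LocalWorkspace

db = AgentDB("sqlite:///agent.db")
workspace = LocalWorkspace("agbenchmark/workspace")

agent = Agent(db, workspace)
agent.start(port=8000)  # serves the agent protocol on 0.0.0.0:8000
```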


@@ -0,0 +1,107 @@
import pytest
from .agent import Agent
from .db import AgentDB
from .schema import StepRequestBody, Task, TaskListResponse, TaskRequestBody
from .workspace import LocalWorkspace
@pytest.fixture
def agent():
db = AgentDB("sqlite:///test.db")
workspace = LocalWorkspace("./test_workspace")
return Agent(db, workspace)
@pytest.mark.skip
@pytest.mark.asyncio
async def test_create_task(agent):
task_request = TaskRequestBody(
input="test_input", additional_input={"input": "additional_test_input"}
)
task: Task = await agent.create_task(task_request)
assert task.input == "test_input"
@pytest.mark.skip
@pytest.mark.asyncio
async def test_list_tasks(agent):
task_request = TaskRequestBody(
input="test_input", additional_input={"input": "additional_test_input"}
)
task = await agent.create_task(task_request)
tasks = await agent.list_tasks()
assert isinstance(tasks, TaskListResponse)
@pytest.mark.skip
@pytest.mark.asyncio
async def test_get_task(agent):
task_request = TaskRequestBody(
input="test_input", additional_input={"input": "additional_test_input"}
)
task = await agent.create_task(task_request)
retrieved_task = await agent.get_task(task.task_id)
assert retrieved_task.task_id == task.task_id
@pytest.mark.skip
@pytest.mark.asyncio
async def test_create_and_execute_step(agent):
task_request = TaskRequestBody(
input="test_input", additional_input={"input": "additional_test_input"}
)
task = await agent.create_task(task_request)
step_request = StepRequestBody(
input="step_input", additional_input={"input": "additional_test_input"}
)
step = await agent.create_and_execute_step(task.task_id, step_request)
assert step.input == "step_input"
assert step.additional_input == {"input": "additional_test_input"}
@pytest.mark.skip
@pytest.mark.asyncio
async def test_get_step(agent):
task_request = TaskRequestBody(
input="test_input", additional_input={"input": "additional_test_input"}
)
task = await agent.create_task(task_request)
step_request = StepRequestBody(
input="step_input", additional_input={"input": "additional_test_input"}
)
step = await agent.create_and_execute_step(task.task_id, step_request)
retrieved_step = await agent.get_step(task.task_id, step.step_id)
assert retrieved_step.step_id == step.step_id
@pytest.mark.skip
@pytest.mark.asyncio
async def test_list_artifacts(agent):
artifacts = await agent.list_artifacts()
assert isinstance(artifacts, list)
@pytest.mark.skip
@pytest.mark.asyncio
async def test_create_artifact(agent):
task_request = TaskRequestBody(
input="test_input", additional_input={"input": "additional_test_input"}
)
task = await agent.create_task(task_request)
artifact_request = ArtifactRequestBody(file=None, uri="test_uri")
artifact = await agent.create_artifact(task.task_id, artifact_request)
assert artifact.uri == "test_uri"
@pytest.mark.skip
@pytest.mark.asyncio
async def test_get_artifact(agent):
task_request = TaskRequestBody(
input="test_input", additional_input={"input": "additional_test_input"}
)
task = await agent.create_task(task_request)
artifact_request = ArtifactRequestBody(file=None, uri="test_uri")
artifact = await agent.create_artifact(task.task_id, artifact_request)
retrieved_artifact = await agent.get_artifact(task.task_id, artifact.artifact_id)
assert retrieved_artifact.artifact_id == artifact.artifact_id


@@ -0,0 +1,25 @@
"""
PROFILE CONCEPT:
The profile generator is used to initialise and configure an AI agent.
It came from the observation that if an LLM is provided with a profile such as:
```
Expert:
```
then its performance at a task can improve. Here we use the profile to generate
a system prompt for the agent to use. However, it can be used to configure other
aspects of the agent such as memory, planning, and actions available.
The possibilities are limited just by your imagination.
"""
from autogpt.sdk import PromptEngine
class ProfileGenerator:
def __init__(self, task: str, prompt_engine: PromptEngine):
"""
Initialize the profile generator with the task to be performed and the
prompt engine used to render profile templates.
"""
self.task = task
self.prompt_engine = prompt_engine
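A hedged usage sketch for the generator above. The `expert-profile` template name is an assumption, since prompt file names are elided in this diff:

```
from autogpt.sdk import PromptEngine

engine = PromptEngine("gpt-3.5-turbo")
generator = ProfileGenerator(
    task="Write the word 'Washington' to a .txt file",
    prompt_engine=engine,
)
# "expert-profile" is a hypothetical name for the expert profile
# template shown earlier in this commit.
system_prompt = engine.load_prompt("expert-profile", task=generator.task)
```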

forge/autogpt/sdk/db.py Normal file

@@ -0,0 +1,468 @@
"""
This is an example implementation of the Agent Protocol DB for development purposes.
It uses SQLite as the database and file store backend.
IT IS NOT ADVISED TO USE THIS IN PRODUCTION!
"""
import datetime
import math
import uuid
from typing import Any, Dict, List, Optional, Tuple
from sqlalchemy import (
JSON,
Boolean,
Column,
DateTime,
ForeignKey,
String,
create_engine,
)
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm import DeclarativeBase, joinedload, relationship, sessionmaker
from .errors import NotFoundError
from .forge_log import ForgeLogger
from .schema import Artifact, Pagination, Status, Step, StepRequestBody, Task, TaskInput
LOG = ForgeLogger(__name__)
class Base(DeclarativeBase):
pass
class TaskModel(Base):
__tablename__ = "tasks"
task_id = Column(String, primary_key=True, index=True)
input = Column(String)
additional_input = Column(JSON)
created_at = Column(DateTime, default=datetime.datetime.utcnow)
modified_at = Column(
DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow
)
artifacts = relationship("ArtifactModel", back_populates="task")
class StepModel(Base):
__tablename__ = "steps"
step_id = Column(String, primary_key=True, index=True)
task_id = Column(String, ForeignKey("tasks.task_id"))
name = Column(String)
input = Column(String)
status = Column(String)
is_last = Column(Boolean, default=False)
created_at = Column(DateTime, default=datetime.datetime.utcnow)
modified_at = Column(
DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow
)
additional_input = Column(JSON)
artifacts = relationship("ArtifactModel", back_populates="step")
class ArtifactModel(Base):
__tablename__ = "artifacts"
artifact_id = Column(String, primary_key=True, index=True)
task_id = Column(String, ForeignKey("tasks.task_id"))
step_id = Column(String, ForeignKey("steps.step_id"))
agent_created = Column(Boolean, default=False)
file_name = Column(String)
relative_path = Column(String)
created_at = Column(DateTime, default=datetime.datetime.utcnow)
modified_at = Column(
DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow
)
step = relationship("StepModel", back_populates="artifacts")
task = relationship("TaskModel", back_populates="artifacts")
def convert_to_task(task_obj: TaskModel, debug_enabled: bool = False) -> Task:
if debug_enabled:
LOG.debug(f"Converting TaskModel to Task for task_id: {task_obj.task_id}")
task_artifacts = [convert_to_artifact(artifact) for artifact in task_obj.artifacts]
return Task(
task_id=task_obj.task_id,
created_at=task_obj.created_at,
modified_at=task_obj.modified_at,
input=task_obj.input,
additional_input=task_obj.additional_input,
artifacts=task_artifacts,
)
def convert_to_step(step_model: StepModel, debug_enabled: bool = False) -> Step:
if debug_enabled:
LOG.debug(f"Converting StepModel to Step for step_id: {step_model.step_id}")
step_artifacts = [
convert_to_artifact(artifact) for artifact in step_model.artifacts
]
status = Status.completed if step_model.status == "completed" else Status.created
return Step(
task_id=step_model.task_id,
step_id=step_model.step_id,
created_at=step_model.created_at,
modified_at=step_model.modified_at,
name=step_model.name,
input=step_model.input,
status=status,
artifacts=step_artifacts,
is_last=step_model.is_last == 1,
additional_input=step_model.additional_input,
)
def convert_to_artifact(artifact_model: ArtifactModel) -> Artifact:
return Artifact(
artifact_id=artifact_model.artifact_id,
created_at=artifact_model.created_at,
modified_at=artifact_model.modified_at,
agent_created=artifact_model.agent_created,
relative_path=artifact_model.relative_path,
file_name=artifact_model.file_name,
)
# Example database_string: sqlite:///{database_name}
class AgentDB:
def __init__(self, database_string, debug_enabled: bool = False) -> None:
super().__init__()
self.debug_enabled = debug_enabled
if self.debug_enabled:
LOG.debug(f"Initializing AgentDB with database_string: {database_string}")
self.engine = create_engine(database_string)
Base.metadata.create_all(self.engine)
self.Session = sessionmaker(bind=self.engine)
async def create_task(
self, input: Optional[str], additional_input: Optional[TaskInput] = None
) -> Task:
if self.debug_enabled:
LOG.debug("Creating new task")
try:
with self.Session() as session:
new_task = TaskModel(
task_id=str(uuid.uuid4()),
input=input,
additional_input=additional_input.json()
if additional_input
else {},
)
session.add(new_task)
session.commit()
session.refresh(new_task)
if self.debug_enabled:
LOG.debug(f"Created new task with task_id: {new_task.task_id}")
return convert_to_task(new_task, self.debug_enabled)
except SQLAlchemyError as e:
LOG.error(f"SQLAlchemy error while creating task: {e}")
raise
except NotFoundError as e:
raise
except Exception as e:
LOG.error(f"Unexpected error while creating task: {e}")
raise
async def create_step(
self,
task_id: str,
input: StepRequestBody,
is_last: bool = False,
additional_input: Optional[Dict[str, Any]] = {},
) -> Step:
if self.debug_enabled:
LOG.debug(f"Creating new step for task_id: {task_id}")
try:
with self.Session() as session:
new_step = StepModel(
task_id=task_id,
step_id=str(uuid.uuid4()),
name=input.input,
input=input.input,
status="created",
is_last=is_last,
additional_input=additional_input,
)
session.add(new_step)
session.commit()
session.refresh(new_step)
if self.debug_enabled:
LOG.debug(f"Created new step with step_id: {new_step.step_id}")
return convert_to_step(new_step, self.debug_enabled)
except SQLAlchemyError as e:
LOG.error(f"SQLAlchemy error while creating step: {e}")
raise
except NotFoundError as e:
raise
except Exception as e:
LOG.error(f"Unexpected error while creating step: {e}")
raise
async def create_artifact(
self,
task_id: str,
file_name: str,
relative_path: str,
agent_created: bool = False,
step_id: str | None = None,
) -> Artifact:
if self.debug_enabled:
LOG.debug(f"Creating new artifact for task_id: {task_id}")
try:
with self.Session() as session:
if (
existing_artifact := session.query(ArtifactModel)
.filter_by(
task_id=task_id,
file_name=file_name,
relative_path=relative_path,
)
.first()
):
session.close()
if self.debug_enabled:
LOG.debug(
f"Artifact already exists with relative_path: {relative_path}"
)
return convert_to_artifact(existing_artifact)
new_artifact = ArtifactModel(
artifact_id=str(uuid.uuid4()),
task_id=task_id,
step_id=step_id,
agent_created=agent_created,
file_name=file_name,
relative_path=relative_path,
)
session.add(new_artifact)
session.commit()
session.refresh(new_artifact)
if self.debug_enabled:
LOG.debug(
f"Created new artifact with artifact_id: {new_artifact.artifact_id}"
)
return convert_to_artifact(new_artifact)
except SQLAlchemyError as e:
LOG.error(f"SQLAlchemy error while creating step: {e}")
raise
except NotFoundError as e:
raise
except Exception as e:
LOG.error(f"Unexpected error while creating step: {e}")
raise
async def get_task(self, task_id: str) -> Task:
"""Get a task by its id"""
if self.debug_enabled:
LOG.debug(f"Getting task with task_id: {task_id}")
try:
with self.Session() as session:
if task_obj := (
session.query(TaskModel)
.options(joinedload(TaskModel.artifacts))
.filter_by(task_id=task_id)
.first()
):
return convert_to_task(task_obj, self.debug_enabled)
else:
LOG.error(f"Task not found with task_id: {task_id}")
raise NotFoundError("Task not found")
except SQLAlchemyError as e:
LOG.error(f"SQLAlchemy error while getting task: {e}")
raise
except NotFoundError as e:
raise
except Exception as e:
LOG.error(f"Unexpected error while getting task: {e}")
raise
async def get_step(self, task_id: str, step_id: str) -> Step:
if self.debug_enabled:
LOG.debug(f"Getting step with task_id: {task_id} and step_id: {step_id}")
try:
with self.Session() as session:
if step := (
session.query(StepModel)
.options(joinedload(StepModel.artifacts))
.filter(StepModel.step_id == step_id)
.first()
):
return convert_to_step(step, self.debug_enabled)
else:
LOG.error(
f"Step not found with task_id: {task_id} and step_id: {step_id}"
)
raise NotFoundError("Step not found")
except SQLAlchemyError as e:
LOG.error(f"SQLAlchemy error while getting step: {e}")
raise
except NotFoundError as e:
raise
except Exception as e:
LOG.error(f"Unexpected error while getting step: {e}")
raise
async def update_step(
self,
task_id: str,
step_id: str,
status: str,
additional_input: Optional[Dict[str, Any]] = {},
) -> Step:
if self.debug_enabled:
LOG.debug(f"Updating step with task_id: {task_id} and step_id: {step_id}")
try:
with self.Session() as session:
if (
step := session.query(StepModel)
.filter_by(task_id=task_id, step_id=step_id)
.first()
):
step.status = status
step.additional_input = additional_input
session.commit()
return await self.get_step(task_id, step_id)
else:
LOG.error(
f"Step not found for update with task_id: {task_id} and step_id: {step_id}"
)
raise NotFoundError("Step not found")
except SQLAlchemyError as e:
LOG.error(f"SQLAlchemy error while getting step: {e}")
raise
except NotFoundError as e:
raise
except Exception as e:
LOG.error(f"Unexpected error while getting step: {e}")
raise
async def get_artifact(self, artifact_id: str) -> Artifact:
if self.debug_enabled:
LOG.debug(f"Getting artifact with and artifact_id: {artifact_id}")
try:
with self.Session() as session:
if (
artifact_model := session.query(ArtifactModel)
.filter_by(artifact_id=artifact_id)
.first()
):
return convert_to_artifact(artifact_model)
else:
LOG.error(f"Artifact not found with and artifact_id: {artifact_id}")
raise NotFoundError("Artifact not found")
except SQLAlchemyError as e:
LOG.error(f"SQLAlchemy error while getting artifact: {e}")
raise
except NotFoundError as e:
raise
except Exception as e:
LOG.error(f"Unexpected error while getting artifact: {e}")
raise
async def list_tasks(
self, page: int = 1, per_page: int = 10
) -> Tuple[List[Task], Pagination]:
if self.debug_enabled:
LOG.debug("Listing tasks")
try:
with self.Session() as session:
tasks = (
session.query(TaskModel)
.offset((page - 1) * per_page)
.limit(per_page)
.all()
)
total = session.query(TaskModel).count()
pages = math.ceil(total / per_page)
pagination = Pagination(
total_items=total,
total_pages=pages,
current_page=page,
page_size=per_page,
)
return [
convert_to_task(task, self.debug_enabled) for task in tasks
], pagination
except SQLAlchemyError as e:
LOG.error(f"SQLAlchemy error while listing tasks: {e}")
raise
except NotFoundError as e:
raise
except Exception as e:
LOG.error(f"Unexpected error while listing tasks: {e}")
raise
async def list_steps(
self, task_id: str, page: int = 1, per_page: int = 10
) -> Tuple[List[Step], Pagination]:
if self.debug_enabled:
LOG.debug(f"Listing steps for task_id: {task_id}")
try:
with self.Session() as session:
steps = (
session.query(StepModel)
.filter_by(task_id=task_id)
.offset((page - 1) * per_page)
.limit(per_page)
.all()
)
total = session.query(StepModel).filter_by(task_id=task_id).count()
pages = math.ceil(total / per_page)
pagination = Pagination(
total_items=total,
total_pages=pages,
current_page=page,
page_size=per_page,
)
return [
convert_to_step(step, self.debug_enabled) for step in steps
], pagination
except SQLAlchemyError as e:
LOG.error(f"SQLAlchemy error while listing steps: {e}")
raise
except NotFoundError as e:
raise
except Exception as e:
LOG.error(f"Unexpected error while listing steps: {e}")
raise
async def list_artifacts(
self, task_id: str, page: int = 1, per_page: int = 10
) -> Tuple[List[Artifact], Pagination]:
if self.debug_enabled:
LOG.debug(f"Listing artifacts for task_id: {task_id}")
try:
with self.Session() as session:
artifacts = (
session.query(ArtifactModel)
.filter_by(task_id=task_id)
.offset((page - 1) * per_page)
.limit(per_page)
.all()
)
total = session.query(ArtifactModel).filter_by(task_id=task_id).count()
pages = math.ceil(total / per_page)
pagination = Pagination(
total_items=total,
total_pages=pages,
current_page=page,
page_size=per_page,
)
return [
convert_to_artifact(artifact) for artifact in artifacts
], pagination
except SQLAlchemyError as e:
LOG.error(f"SQLAlchemy error while listing artifacts: {e}")
raise
except NotFoundError as e:
raise
except Exception as e:
LOG.error(f"Unexpected error while listing artifacts: {e}")
raise
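A hedged usage sketch of `AgentDB`, mirroring the calls exercised in the tests below (assuming `StepRequestBody` accepts `input` alone, since its other fields appear optional):

```
import asyncio

from autogpt.sdk.db import AgentDB
from autogpt.sdk.schema import StepRequestBody

async def main():
    db = AgentDB("sqlite:///agent.db", debug_enabled=True)
    task = await db.create_task("Write the word 'Washington' to a .txt file")
    step = await db.create_step(
        task.task_id,
        StepRequestBody(input="Write the word 'Washington' to a .txt file"),
        is_last=True,
    )
    print(task.task_id, step.step_id)

asyncio.run(main())
```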


@@ -0,0 +1,325 @@
import os
import sqlite3
from datetime import datetime
import pytest
from autogpt.sdk.db import (
AgentDB,
ArtifactModel,
StepModel,
TaskModel,
convert_to_artifact,
convert_to_step,
convert_to_task,
)
from autogpt.sdk.errors import NotFoundError as DataNotFoundError
from autogpt.sdk.schema import *
def test_table_creation():
db_name = "sqlite:///test_db.sqlite3"
agent_db = AgentDB(db_name)
conn = sqlite3.connect("test_db.sqlite3")
cursor = conn.cursor()
# Test for tasks table existence
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='tasks'")
assert cursor.fetchone() is not None
# Test for steps table existence
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='steps'")
assert cursor.fetchone() is not None
# Test for artifacts table existence
cursor.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name='artifacts'"
)
assert cursor.fetchone() is not None
os.remove(db_name.split("///")[1])
@pytest.mark.asyncio
async def test_task_schema():
now = datetime.now()
task = Task(
task_id="50da533e-3904-4401-8a07-c49adf88b5eb",
input="Write the words you receive to the file 'output.txt'.",
created_at=now,
modified_at=now,
artifacts=[
Artifact(
artifact_id="b225e278-8b4c-4f99-a696-8facf19f0e56",
agent_created=True,
file_name="main.py",
relative_path="python/code/",
created_at=now,
modified_at=now,
)
],
)
assert task.task_id == "50da533e-3904-4401-8a07-c49adf88b5eb"
assert task.input == "Write the words you receive to the file 'output.txt'."
assert len(task.artifacts) == 1
assert task.artifacts[0].artifact_id == "b225e278-8b4c-4f99-a696-8facf19f0e56"
@pytest.mark.asyncio
async def test_step_schema():
now = datetime.now()
step = Step(
task_id="50da533e-3904-4401-8a07-c49adf88b5eb",
step_id="6bb1801a-fd80-45e8-899a-4dd723cc602e",
created_at=now,
modified_at=now,
name="Write to file",
input="Write the words you receive to the file 'output.txt'.",
status=Status.created,
output="I am going to use the write_to_file command and write Washington to a file called output.txt <write_to_file('output.txt', 'Washington')>",
artifacts=[
Artifact(
artifact_id="b225e278-8b4c-4f99-a696-8facf19f0e56",
file_name="main.py",
relative_path="python/code/",
created_at=now,
modified_at=now,
agent_created=True,
)
],
is_last=False,
)
assert step.task_id == "50da533e-3904-4401-8a07-c49adf88b5eb"
assert step.step_id == "6bb1801a-fd80-45e8-899a-4dd723cc602e"
assert step.name == "Write to file"
assert step.status == Status.created
assert (
step.output
== "I am going to use the write_to_file command and write Washington to a file called output.txt <write_to_file('output.txt', 'Washington')>"
)
assert len(step.artifacts) == 1
assert step.artifacts[0].artifact_id == "b225e278-8b4c-4f99-a696-8facf19f0e56"
assert step.is_last == False
@pytest.mark.asyncio
async def test_convert_to_task():
now = datetime.now()
task_model = TaskModel(
task_id="50da533e-3904-4401-8a07-c49adf88b5eb",
created_at=now,
modified_at=now,
input="Write the words you receive to the file 'output.txt'.",
artifacts=[
ArtifactModel(
artifact_id="b225e278-8b4c-4f99-a696-8facf19f0e56",
created_at=now,
modified_at=now,
relative_path="file:///path/to/main.py",
agent_created=True,
file_name="main.py",
)
],
)
task = convert_to_task(task_model)
assert task.task_id == "50da533e-3904-4401-8a07-c49adf88b5eb"
assert task.input == "Write the words you receive to the file 'output.txt'."
assert len(task.artifacts) == 1
assert task.artifacts[0].artifact_id == "b225e278-8b4c-4f99-a696-8facf19f0e56"
@pytest.mark.asyncio
async def test_convert_to_step():
now = datetime.now()
step_model = StepModel(
task_id="50da533e-3904-4401-8a07-c49adf88b5eb",
step_id="6bb1801a-fd80-45e8-899a-4dd723cc602e",
created_at=now,
modified_at=now,
name="Write to file",
status="created",
input="Write the words you receive to the file 'output.txt'.",
artifacts=[
ArtifactModel(
artifact_id="b225e278-8b4c-4f99-a696-8facf19f0e56",
created_at=now,
modified_at=now,
relative_path="file:///path/to/main.py",
agent_created=True,
file_name="main.py",
)
],
is_last=False,
)
step = convert_to_step(step_model)
assert step.task_id == "50da533e-3904-4401-8a07-c49adf88b5eb"
assert step.step_id == "6bb1801a-fd80-45e8-899a-4dd723cc602e"
assert step.name == "Write to file"
assert step.status == Status.created
assert len(step.artifacts) == 1
assert step.artifacts[0].artifact_id == "b225e278-8b4c-4f99-a696-8facf19f0e56"
assert step.is_last == False
@pytest.mark.asyncio
async def test_convert_to_artifact():
now = datetime.now()
artifact_model = ArtifactModel(
artifact_id="b225e278-8b4c-4f99-a696-8facf19f0e56",
created_at=now,
modified_at=now,
relative_path="file:///path/to/main.py",
agent_created=True,
file_name="main.py",
)
artifact = convert_to_artifact(artifact_model)
assert artifact.artifact_id == "b225e278-8b4c-4f99-a696-8facf19f0e56"
assert artifact.relative_path == "file:///path/to/main.py"
assert artifact.agent_created == True
@pytest.mark.asyncio
async def test_create_task():
# Having issues with pytest fixture so added setup and teardown in each test as a rapid workaround
# TODO: Fix this!
db_name = "sqlite:///test_db.sqlite3"
agent_db = AgentDB(db_name)
task = await agent_db.create_task("task_input")
assert task.input == "task_input"
os.remove(db_name.split("///")[1])
@pytest.mark.asyncio
async def test_create_and_get_task():
db_name = "sqlite:///test_db.sqlite3"
agent_db = AgentDB(db_name)
task = await agent_db.create_task("test_input")
fetched_task = await agent_db.get_task(task.task_id)
assert fetched_task.input == "test_input"
os.remove(db_name.split("///")[1])
@pytest.mark.asyncio
async def test_get_task_not_found():
db_name = "sqlite:///test_db.sqlite3"
agent_db = AgentDB(db_name)
with pytest.raises(DataNotFoundError):
await agent_db.get_task(9999)
os.remove(db_name.split("///")[1])
@pytest.mark.asyncio
async def test_create_and_get_step():
db_name = "sqlite:///test_db.sqlite3"
agent_db = AgentDB(db_name)
task = await agent_db.create_task("task_input")
step_input = StepInput(type="python/code")
request = StepRequestBody(input="test_input debug", additional_input=step_input)
step = await agent_db.create_step(task.task_id, request)
step = await agent_db.get_step(task.task_id, step.step_id)
assert step.input == "test_input debug"
os.remove(db_name.split("///")[1])
@pytest.mark.asyncio
async def test_updating_step():
db_name = "sqlite:///test_db.sqlite3"
agent_db = AgentDB(db_name)
created_task = await agent_db.create_task("task_input")
step_input = StepInput(type="python/code")
request = StepRequestBody(input="test_input debug", additional_input=step_input)
created_step = await agent_db.create_step(created_task.task_id, request)
await agent_db.update_step(created_task.task_id, created_step.step_id, "completed")
step = await agent_db.get_step(created_task.task_id, created_step.step_id)
assert step.status.value == "completed"
os.remove(db_name.split("///")[1])
@pytest.mark.asyncio
async def test_get_step_not_found():
db_name = "sqlite:///test_db.sqlite3"
agent_db = AgentDB(db_name)
with pytest.raises(DataNotFoundError):
await agent_db.get_step(9999, 9999)
os.remove(db_name.split("///")[1])
@pytest.mark.asyncio
async def test_get_artifact():
db_name = "sqlite:///test_db.sqlite3"
db = AgentDB(db_name)
# Given: A task and its corresponding artifact
task = await db.create_task("test_input debug")
step_input = StepInput(type="python/code")
request = StepRequestBody(input="test_input debug", additional_input=step_input)
step = await db.create_step(task.task_id, request)
# Create an artifact
artifact = await db.create_artifact(
task_id=task.task_id,
file_name="test_get_artifact_sample_file.txt",
relative_path="file:///path/to/test_get_artifact_sample_file.txt",
agent_created=True,
step_id=step.step_id,
)
# When: The artifact is fetched by its ID
fetched_artifact = await db.get_artifact(artifact.artifact_id)
# Then: The fetched artifact matches the original
assert fetched_artifact.artifact_id == artifact.artifact_id
assert (
fetched_artifact.relative_path
== "file:///path/to/test_get_artifact_sample_file.txt"
)
os.remove(db_name.split("///")[1])
@pytest.mark.asyncio
async def test_list_tasks():
db_name = "sqlite:///test_db.sqlite3"
db = AgentDB(db_name)
# Given: Multiple tasks in the database
task1 = await db.create_task("test_input_1")
task2 = await db.create_task("test_input_2")
# When: All tasks are fetched
fetched_tasks, pagination = await db.list_tasks()
# Then: The fetched tasks list includes the created tasks
task_ids = [task.task_id for task in fetched_tasks]
assert task1.task_id in task_ids
assert task2.task_id in task_ids
os.remove(db_name.split("///")[1])
@pytest.mark.asyncio
async def test_list_steps():
db_name = "sqlite:///test_db.sqlite3"
db = AgentDB(db_name)
step_input = StepInput(type="python/code")
request = StepRequestBody(input="test_input debug", additional_input=step_input)
# Given: A task and multiple steps for that task
task = await db.create_task("test_input")
step1 = await db.create_step(task.task_id, request)
request = StepRequestBody(input="step two", additional_input=step_input)
step2 = await db.create_step(task.task_id, request)
# When: All steps for the task are fetched
fetched_steps, pagination = await db.list_steps(task.task_id)
# Then: The fetched steps list includes the created steps
step_ids = [step.step_id for step in fetched_steps]
assert step1.step_id in step_ids
assert step2.step_id in step_ids
os.remove(db_name.split("///")[1])

forge/autogpt/sdk/errors.py Normal file

@@ -0,0 +1,2 @@
class NotFoundError(Exception):
pass

forge/autogpt/sdk/forge_log.py Normal file

@@ -0,0 +1,203 @@
import json
import logging
import logging.config
import logging.handlers
import os
import queue
JSON_LOGGING = os.environ.get("JSON_LOGGING", "false").lower() == "true"
CHAT = 29
logging.addLevelName(CHAT, "CHAT")
RESET_SEQ: str = "\033[0m"
COLOR_SEQ: str = "\033[1;%dm"
BOLD_SEQ: str = "\033[1m"
UNDERLINE_SEQ: str = "\033[04m"
ORANGE: str = "\033[33m"
YELLOW: str = "\033[93m"
WHITE: str = "\33[37m"
BLUE: str = "\033[34m"
LIGHT_BLUE: str = "\033[94m"
RED: str = "\033[91m"
GREY: str = "\33[90m"
GREEN: str = "\033[92m"
EMOJIS: dict[str, str] = {
"DEBUG": "🐛",
"INFO": "📝",
"CHAT": "💬",
"WARNING": "⚠️",
"ERROR": "",
"CRITICAL": "💥",
}
KEYWORD_COLORS: dict[str, str] = {
"DEBUG": WHITE,
"INFO": LIGHT_BLUE,
"CHAT": GREEN,
"WARNING": YELLOW,
"ERROR": ORANGE,
"CRITICAL": RED,
}
class JsonFormatter(logging.Formatter):
def format(self, record):
return json.dumps(record.__dict__)
def formatter_message(message: str, use_color: bool = True) -> str:
"""
Syntax highlight certain keywords
"""
if use_color:
message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ)
else:
message = message.replace("$RESET", "").replace("$BOLD", "")
return message
def format_word(
message: str, word: str, color_seq: str, bold: bool = False, underline: bool = False
) -> str:
"""
Surround the given word with a sequence
"""
replacer = color_seq + word + RESET_SEQ
if underline:
replacer = UNDERLINE_SEQ + replacer
if bold:
replacer = BOLD_SEQ + replacer
return message.replace(word, replacer)
class ConsoleFormatter(logging.Formatter):
"""
This Formatter simply colors the levelname, e.g. 'INFO' or 'DEBUG'
"""
def __init__(
self, fmt: str, datefmt: str = None, style: str = "%", use_color: bool = True
):
super().__init__(fmt, datefmt, style)
self.use_color = use_color
def format(self, record: logging.LogRecord) -> str:
"""
Format and highlight certain keywords
"""
rec = record
levelname = rec.levelname
if self.use_color and levelname in KEYWORD_COLORS:
levelname_color = KEYWORD_COLORS[levelname] + levelname + RESET_SEQ
rec.levelname = levelname_color
rec.name = f"{GREY}{rec.name:<15}{RESET_SEQ}"
rec.msg = (
KEYWORD_COLORS[levelname] + EMOJIS[levelname] + " " + rec.msg + RESET_SEQ
)
return logging.Formatter.format(self, rec)
class ForgeLogger(logging.Logger):
"""
This adds extra logging functions such as logger.chat and also
sets the logger to use the custom formatter
"""
CONSOLE_FORMAT: str = (
"[%(asctime)s] [$BOLD%(name)-15s$RESET] [%(levelname)-8s]\t%(message)s"
)
FORMAT: str = "%(asctime)s %(name)-15s %(levelname)-8s %(message)s"
COLOR_FORMAT: str = formatter_message(CONSOLE_FORMAT, True)
JSON_FORMAT: str = '{"time": "%(asctime)s", "name": "%(name)s", "level": "%(levelname)s", "message": "%(message)s"}'
def __init__(self, name: str, logLevel: str = "DEBUG"):
logging.Logger.__init__(self, name, logLevel)
# Queue Handler
queue_handler = logging.handlers.QueueHandler(queue.Queue(-1))
json_formatter = logging.Formatter(self.JSON_FORMAT)
queue_handler.setFormatter(json_formatter)
self.addHandler(queue_handler)
if JSON_LOGGING:
console_formatter = JsonFormatter()
else:
console_formatter = ConsoleFormatter(self.COLOR_FORMAT)
console = logging.StreamHandler()
console.setFormatter(console_formatter)
self.addHandler(console)
def chat(self, role: str, openai_response: str, messages=None, *args, **kws):
"""
Parse the content, log the message and extract the usage into prometheus metrics
"""
role_emojis = {
"system": "🖥️",
"user": "👤",
"assistant": "🤖",
"function": "⚙️",
}
if self.isEnabledFor(CHAT):
if messages:
for message in messages:
self._log(
CHAT,
f"{role_emojis.get(message['role'], '🔵')}: {message['content']}",
(),
)
else:
response = json.loads(openai_response)
self._log(
CHAT,
f"{role_emojis.get(role, '🔵')}: {response['choices'][0]['message']['content']}",
(),
)
class QueueLogger(logging.Logger):
"""
Custom logger class with queue
"""
def __init__(self, name: str, level: int = logging.NOTSET):
super().__init__(name, level)
queue_handler = logging.handlers.QueueHandler(queue.Queue(-1))
self.addHandler(queue_handler)
logging_config: dict = dict(
version=1,
formatters={
"console": {
"()": ConsoleFormatter,
"format": ForgeLogger.COLOR_FORMAT,
},
},
handlers={
"h": {
"class": "logging.StreamHandler",
"formatter": "console",
"level": logging.DEBUG,
},
},
root={
"handlers": ["h"],
"level": logging.DEBUG,
},
loggers={
"autogpt": {
"handlers": ["h"],
"level": logging.DEBUG,
"propagate": False,
},
},
)
def setup_logger():
"""
Setup the logger with the specified format
"""
logging.config.dictConfig(logging_config)
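A short usage sketch of the logger defined above, including the custom CHAT level (29):

```
from autogpt.sdk.forge_log import ForgeLogger, setup_logger

setup_logger()
LOG = ForgeLogger(__name__)

LOG.info("Agent server starting")
# chat() logs each message at the CHAT level with a role emoji.
LOG.chat(
    role="assistant",
    openai_response="{}",  # ignored when explicit messages are passed
    messages=[{"role": "user", "content": "Write 'Washington' to output.txt"}],
)
```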

forge/autogpt/sdk/llm.py Normal file (empty)

forge/autogpt/sdk/middlewares.py Normal file

@@ -0,0 +1,34 @@
from fastapi import FastAPI
class AgentMiddleware:
"""
Middleware that injects the agent instance into the request scope.
"""
def __init__(self, app: FastAPI, agent: "Agent"):
"""
Args:
app: The FastAPI app - automatically injected by FastAPI.
agent: The agent instance to inject into the request scope.
Examples:
>>> from fastapi import FastAPI, Request
>>> from agent_protocol.agent import Agent
>>> from agent_protocol.middlewares import AgentMiddleware
>>> app = FastAPI()
>>> @app.get("/")
>>> async def root(request: Request):
>>> agent = request["agent"]
>>> task = agent.db.create_task("Do something.")
>>> return {"task_id": task.task_id}
>>> agent = Agent()
>>> app.add_middleware(AgentMiddleware, agent=agent)
"""
self.app = app
self.agent = agent
async def __call__(self, scope, receive, send):
scope["agent"] = self.agent
await self.app(scope, receive, send)

forge/autogpt/sdk/prompting.py Normal file

@@ -0,0 +1,114 @@
"""
Relative to this file there is a prompts directory located at ../prompts.
It contains a techniques directory and a directory for each model:
gpt-3.5-turbo, gpt-4, llama-2-70B, code-llama-7B etc.
Each directory holds Jinja2 templates for the prompts, and prompts in the
model directories can use the techniques in the techniques directory.
The PromptEngine class below loads and populates these templates.
"""
import glob
import os
from difflib import get_close_matches
from typing import List
from jinja2 import Environment, FileSystemLoader
from .forge_log import ForgeLogger
LOG = ForgeLogger(__name__)
class PromptEngine:
"""
Class to handle loading and populating Jinja2 templates for prompts.
"""
def __init__(self, model: str, debug_enabled: bool = False):
"""
Initialize the PromptEngine with the specified model.
Args:
model (str): The model to use for loading prompts.
debug_enabled (bool): Enable or disable debug logging.
"""
self.model = model
self.debug_enabled = debug_enabled
if self.debug_enabled:
LOG.debug(f"Initializing PromptEngine for model: {model}")
try:
# Get the list of all model directories
models_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), "../prompts")
)
model_names = [
os.path.basename(os.path.normpath(d))
for d in glob.glob(os.path.join(models_dir, "*/"))
if os.path.isdir(d) and "techniques" not in d
]
self.model = self.get_closest_match(self.model, model_names)
if self.debug_enabled:
LOG.debug(f"Using the closest match model for prompts: {self.model}")
self.env = Environment(loader=FileSystemLoader(models_dir))
except Exception as e:
LOG.error(f"Error initializing Environment: {e}")
raise
@staticmethod
def get_closest_match(target: str, model_dirs: List[str]) -> str:
"""
Find the closest match to the target in the list of model directories.
Args:
target (str): The target model.
model_dirs (list): The list of available model directories.
Returns:
str: The closest match to the target.
"""
try:
matches = get_close_matches(target, model_dirs, n=1, cutoff=0.6)
if not matches:
raise ValueError(f"No model directory matches '{target}'")
return matches[0]
except Exception as e:
LOG.error(f"Error finding closest match: {e}")
raise
def load_prompt(self, template: str, **kwargs) -> str:
"""
Load and populate the specified template.
Args:
template (str): The name of the template to load.
**kwargs: The arguments to populate the template with.
Returns:
str: The populated template.
"""
try:
template = os.path.join(self.model, template)
if self.debug_enabled:
LOG.debug(f"Loading template: {template}")
template = self.env.get_template(f"{template}.j2")
if self.debug_enabled:
LOG.debug(f"Rendering template: {template} with args: {kwargs}")
return template.render(**kwargs)
except Exception as e:
LOG.error(f"Error loading or rendering template: {e}")
raise
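A hedged usage sketch of `PromptEngine`. The template name is an assumption, since prompt file names are elided in this diff:

```
from autogpt.sdk.prompting import PromptEngine

# Falls back to the closest-matching model directory under ../prompts.
engine = PromptEngine("gpt-3.5-turbo")

# Hypothetical template name, resolved to <model>/expert-profile.j2.
prompt = engine.load_prompt(
    "expert-profile",
    task="Write the word 'Washington' to a .txt file",
)
```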


forge/autogpt/sdk/routes/agent_protocol.py Normal file

@@ -0,0 +1,611 @@
"""
Routes for the Agent Service.
This module defines the API routes for the Agent service. While there are multiple endpoints provided by the service,
the ones that require special attention due to their complexity are:
1. `execute_agent_task_step`:
This route is significant because this is where the agent actually performs the work. The function handles
executing the next step for a task based on its current state, and it requires careful implementation to ensure
all scenarios (like the presence or absence of steps or a step marked as `last_step`) are handled correctly.
2. `upload_agent_task_artifacts`:
This route allows for the upload of artifacts, supporting various URI types (e.g., s3, gcs, ftp, http).
The support for different URI types makes it a bit more complex, and it's important to ensure that all
supported URI types are correctly managed. NOTE: The Auto-GPT team will eventually handle the most common
uri types for you.
3. `create_agent_task`:
While this is a simpler route, it plays a crucial role in the workflow, as it's responsible for the creation
of a new task.
Developers and contributors should be especially careful when making modifications to these routes to ensure
consistency and correctness in the system's behavior.
"""
import json
from typing import Optional
from fastapi import APIRouter, Query, Request, Response, UploadFile
from fastapi.responses import FileResponse
from autogpt.sdk.errors import *
from autogpt.sdk.forge_log import ForgeLogger
from autogpt.sdk.schema import *
base_router = APIRouter()
LOG = ForgeLogger(__name__)
@base_router.get("/", tags=["root"])
async def root():
"""
Root endpoint that returns a welcome message.
"""
return Response(content="Welcome to the Auto-GPT Forge")
@base_router.get("/heartbeat", tags=["server"])
async def check_server_status():
"""
Check if the server is running.
"""
return Response(content="Server is running.", status_code=200)
@base_router.get("/", tags=["root"])
async def root():
"""
Root endpoint that returns a welcome message.
"""
return Response(content="Welcome to the Auto-GPT Forge")
@base_router.get("/heartbeat", tags=["server"])
async def check_server_status():
"""
Check if the server is running.
"""
return Response(content="Server is running.", status_code=200)
@base_router.post("/agent/tasks", tags=["agent"], response_model=Task)
async def create_agent_task(request: Request, task_request: TaskRequestBody) -> Task:
"""
Creates a new task using the provided TaskRequestBody and returns a Task.
Args:
request (Request): FastAPI request object.
task (TaskRequestBody): The task request containing input and additional input data.
Returns:
Task: A new task with task_id, input, additional_input, and empty lists for artifacts and steps.
Example:
Request (TaskRequestBody defined in schema.py):
{
"input": "Write the words you receive to the file 'output.txt'.",
"additional_input": "python/code"
}
Response (Task defined in schema.py):
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"input": "Write the word 'Washington' to a .txt file",
"additional_input": "python/code",
"artifacts": [],
}
"""
agent = request["agent"]
try:
task = await agent.create_task(task_request)
return Response(
content=task.json(),
status_code=200,
media_type="application/json",
)
except Exception:
LOG.exception(f"Error whilst trying to create a task: {task_request}")
return Response(
content=json.dumps({"error": "Internal server error"}),
status_code=500,
media_type="application/json",
)
@base_router.get("/agent/tasks", tags=["agent"], response_model=TaskListResponse)
async def list_agent_tasks(
request: Request,
page: Optional[int] = Query(1, ge=1),
page_size: Optional[int] = Query(10, ge=1, alias="pageSize"),
) -> TaskListResponse:
"""
Retrieves a paginated list of all tasks.
Args:
request (Request): FastAPI request object.
page (int, optional): The page number for pagination. Defaults to 1.
page_size (int, optional): The number of tasks per page for pagination. Defaults to 10.
Returns:
TaskListResponse: A response object containing a list of tasks and pagination details.
Example:
Request:
GET /agent/tasks?page=1&pageSize=10
Response (TaskListResponse defined in schema.py):
{
"items": [
{
"input": "Write the word 'Washington' to a .txt file",
"additional_input": null,
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"artifacts": [],
"steps": []
},
...
],
"pagination": {
"total": 100,
"pages": 10,
"current": 1,
"pageSize": 10
}
}
"""
agent = request["agent"]
try:
tasks = await agent.list_tasks(page, page_size)
return Response(
content=tasks.json(),
status_code=200,
media_type="application/json",
)
except NotFoundError:
LOG.exception("Error whilst trying to list tasks")
return Response(
content=json.dumps({"error": "Tasks not found"}),
status_code=404,
media_type="application/json",
)
except Exception:
LOG.exception("Error whilst trying to list tasks")
return Response(
content=json.dumps({"error": "Internal server error"}),
status_code=500,
media_type="application/json",
)
@base_router.get("/agent/tasks/{task_id}", tags=["agent"], response_model=Task)
async def get_agent_task(request: Request, task_id: str) -> Task:
"""
Gets the details of a task by ID.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
Returns:
Task: The task with the given ID.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb
Response (Task defined in schema.py):
{
"input": "Write the word 'Washington' to a .txt file",
"additional_input": null,
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"artifacts": [
{
"artifact_id": "7a49f31c-f9c6-4346-a22c-e32bc5af4d8e",
"file_name": "output.txt",
"agent_created": true,
"uri": "file://50da533e-3904-4401-8a07-c49adf88b5eb/output.txt"
}
],
"steps": [
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"step_id": "6bb1801a-fd80-45e8-899a-4dd723cc602e",
"input": "Write the word 'Washington' to a .txt file",
"additional_input": "challenge:write_to_file",
"name": "Write to file",
"status": "completed",
"output": "I am going to use the write_to_file command and write Washington to a file called output.txt <write_to_file('output.txt', 'Washington')>",
"additional_output": "Do you want me to continue?",
"artifacts": [
{
"artifact_id": "7a49f31c-f9c6-4346-a22c-e32bc5af4d8e",
"file_name": "output.txt",
"agent_created": true,
"uri": "file://50da533e-3904-4401-8a07-c49adf88b5eb/output.txt"
}
],
"is_last": true
}
]
}
"""
agent = request["agent"]
try:
task = await agent.get_task(task_id)
return Response(
content=task.json(),
status_code=200,
media_type="application/json",
)
except NotFoundError:
LOG.exception(f"Error whilst trying to get task: {task_id}")
return Response(
content=json.dumps({"error": "Task not found"}),
status_code=404,
media_type="application/json",
)
except Exception:
LOG.exception(f"Error whilst trying to get task: {task_id}")
return Response(
content=json.dumps({"error": "Internal server error"}),
status_code=500,
media_type="application/json",
)
@base_router.get(
"/agent/tasks/{task_id}/steps", tags=["agent"], response_model=TaskStepsListResponse
)
async def list_agent_task_steps(
request: Request,
task_id: str,
page: Optional[int] = Query(1, ge=1),
page_size: Optional[int] = Query(10, ge=1, alias="pageSize"),
) -> TaskStepsListResponse:
"""
Retrieves a paginated list of steps associated with a specific task.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
page (int, optional): The page number for pagination. Defaults to 1.
page_size (int, optional): The number of steps per page for pagination. Defaults to 10.
Returns:
TaskStepsListResponse: A response object containing a list of steps and pagination details.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/steps?page=1&pageSize=10
Response (TaskStepsListResponse defined in schema.py):
{
"items": [
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"step_id": "step1_id",
...
},
...
],
"pagination": {
"total": 100,
"pages": 10,
"current": 1,
"pageSize": 10
}
}
"""
agent = request["agent"]
try:
steps = await agent.list_steps(task_id, page, page_size)
return Response(
content=steps.json(),
status_code=200,
media_type="application/json",
)
except NotFoundError:
LOG.exception("Error whilst trying to list steps")
return Response(
content=json.dumps({"error": "Steps not found"}),
status_code=404,
media_type="application/json",
)
except Exception:
LOG.exception("Error whilst trying to list steps")
return Response(
content=json.dumps({"error": "Internal server error"}),
status_code=500,
media_type="application/json",
)
@base_router.post("/agent/tasks/{task_id}/steps", tags=["agent"], response_model=Step)
async def execute_agent_task_step(
request: Request, task_id: str, step: Optional[StepRequestBody] = None
) -> Step:
"""
Executes the next step for a specified task based on the current task status and returns the
executed step with additional feedback fields.
Depending on the current state of the task, the following scenarios are supported:
1. No steps exist for the task.
2. There is at least one step already for the task, and the task does not have a completed step marked as `last_step`.
3. There is a completed step marked as `last_step` already on the task.
In each of these scenarios, a step object will be returned with two additional fields: `output` and `additional_output`.
- `output`: Provides the primary response or feedback to the user.
- `additional_output`: Supplementary information or data. Its specific content is not strictly defined and can vary based on the step or agent's implementation.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
step (StepRequestBody): The details for executing the step.
Returns:
Step: Details of the executed step with additional feedback.
Example:
Request:
POST /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/steps
{
"input": "Step input details...",
...
}
Response:
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"step_id": "step1_id",
"output": "Primary feedback...",
"additional_output": "Supplementary details...",
...
}
"""
agent = request["agent"]
try:
# An empty step request is treated as a "yes, continue" command
if not step:
step = StepRequestBody(input="y")
step = await agent.execute_step(task_id, step)
return Response(
content=step.json(),
status_code=200,
media_type="application/json",
)
except NotFoundError:
LOG.exception(f"Error whilst trying to execute a task step: {task_id}")
return Response(
content=json.dumps({"error": f"Task not found {task_id}"}),
status_code=404,
media_type="application/json",
)
except Exception:
LOG.exception(f"Error whilst trying to execute a task step: {task_id}")
return Response(
content=json.dumps({"error": "Internal server error"}),
status_code=500,
media_type="application/json",
)
@base_router.get(
"/agent/tasks/{task_id}/steps/{step_id}", tags=["agent"], response_model=Step
)
async def get_agent_task_step(request: Request, task_id: str, step_id: str) -> Step:
"""
Retrieves the details of a specific step for a given task.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
step_id (str): The ID of the step.
Returns:
Step: Details of the specific step.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/steps/step1_id
Response:
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"step_id": "step1_id",
...
}
"""
agent = request["agent"]
try:
step = await agent.get_step(task_id, step_id)
return Response(
content=step.json(),
status_code=200,
media_type="application/json",
)
except NotFoundError:
LOG.exception(f"Error whilst trying to get step: {step_id}")
return Response(
content=json.dumps({"error": "Step not found"}),
status_code=404,
media_type="application/json",
)
except Exception:
LOG.exception(f"Error whilst trying to get step: {step_id}")
return Response(
content=json.dumps({"error": "Internal server error"}),
status_code=500,
media_type="application/json",
)
@base_router.get(
"/agent/tasks/{task_id}/artifacts",
tags=["agent"],
response_model=TaskArtifactsListResponse,
)
async def list_agent_task_artifacts(
request: Request,
task_id: str,
page: Optional[int] = Query(1, ge=1),
page_size: Optional[int] = Query(10, ge=1, alias="pageSize"),
) -> TaskArtifactsListResponse:
"""
Retrieves a paginated list of artifacts associated with a specific task.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
page (int, optional): The page number for pagination. Defaults to 1.
page_size (int, optional): The number of items per page for pagination. Defaults to 10.
Returns:
TaskArtifactsListResponse: A response object containing a list of artifacts and pagination details.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/artifacts?page=1&pageSize=10
Response (TaskArtifactsListResponse defined in schema.py):
{
"items": [
{"artifact_id": "artifact1_id", ...},
{"artifact_id": "artifact2_id", ...},
...
],
"pagination": {
"total": 100,
"pages": 10,
"current": 1,
"pageSize": 10
}
}
"""
agent = request["agent"]
try:
artifacts: TaskArtifactsListResponse = await agent.list_artifacts(
task_id, page, page_size
)
LOG.info(f"Artifacts: {artifacts.json()}")
return artifacts
except NotFoundError:
LOG.exception("Error whilst trying to list artifacts")
return Response(
content=json.dumps({"error": "Artifacts not found for task_id"}),
status_code=404,
media_type="application/json",
)
except Exception:
LOG.exception("Error whilst trying to list artifacts")
return Response(
content=json.dumps({"error": "Internal server error"}),
status_code=500,
media_type="application/json",
)
@base_router.post(
"/agent/tasks/{task_id}/artifacts", tags=["agent"], response_model=Artifact
)
async def upload_agent_task_artifacts(
request: Request, task_id: str, file: UploadFile, relative_path: Optional[str] = ""
) -> Artifact:
"""
This endpoint is used to upload an artifact associated with a specific task. The artifact is provided as a file.
Args:
request (Request): The FastAPI request object.
task_id (str): The unique identifier of the task for which the artifact is being uploaded.
file (UploadFile): The file being uploaded as an artifact.
relative_path (str): The relative path for the file. This is a query parameter.
Returns:
Artifact: An object containing metadata of the uploaded artifact, including its unique identifier.
Example:
Request:
POST /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/artifacts?relative_path=my_folder/my_other_folder
File: <uploaded_file>
Response:
{
"artifact_id": "b225e278-8b4c-4f99-a696-8facf19f0e56",
"created_at": "2023-01-01T00:00:00Z",
"modified_at": "2023-01-01T00:00:00Z",
"agent_created": false,
"relative_path": "/my_folder/my_other_folder/",
"file_name": "main.py"
}
"""
agent = request["agent"]
if file is None:
return Response(
content=json.dumps({"error": "File must be specified"}),
status_code=400,
media_type="application/json",
)
try:
artifact = await agent.create_artifact(task_id, file, relative_path)
return Response(
content=artifact.json(),
status_code=200,
media_type="application/json",
)
except Exception:
LOG.exception(f"Error whilst trying to upload artifact: {task_id}")
return Response(
content=json.dumps({"error": "Internal server error"}),
status_code=500,
media_type="application/json",
)
@base_router.get(
"/agent/tasks/{task_id}/artifacts/{artifact_id}", tags=["agent"], response_model=str
)
async def download_agent_task_artifact(
request: Request, task_id: str, artifact_id: str
) -> FileResponse:
"""
Downloads an artifact associated with a specific task.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
artifact_id (str): The ID of the artifact.
Returns:
FileResponse: The downloaded artifact file.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/artifacts/artifact1_id
Response:
<file_content_of_artifact>
"""
agent = request["agent"]
try:
return await agent.get_artifact(task_id, artifact_id)
except NotFoundError:
LOG.exception(f"Error whilst trying to download artifact: {task_id}")
return Response(
content=json.dumps(
{
"error": f"Artifact not found - task_id: {task_id}, artifact_id: {artifact_id}"
}
),
status_code=404,
media_type="application/json",
)
except Exception:
LOG.exception(f"Error whilst trying to download artifact: {task_id}")
return Response(
content=json.dumps(
{
"error": f"Internal server error - task_id: {task_id}, artifact_id: {artifact_id}"
}
),
status_code=500,
media_type="application/json",
)
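
To make the flow through these routes concrete, here is a minimal client-side walkthrough. This is a sketch, not code from this commit: it assumes the server is running locally on the default PORT=8000 from .env.example, that the requests library is available (it is not a declared dependency here), and it relies only on the endpoint shapes documented above.

import requests

BASE_URL = "http://localhost:8000"  # assumed; PORT=8000 in .env.example

# 1. create_agent_task: start a new task from an input prompt.
task = requests.post(
    f"{BASE_URL}/agent/tasks",
    json={"input": "Write the word 'Washington' to a .txt file"},
).json()
task_id = task["task_id"]

# 2. execute_agent_task_step: an empty request body is treated as a
#    "yes, continue" command (the route substitutes input='y'), so the
#    client can simply poll until a step comes back with is_last set.
while True:
    step = requests.post(f"{BASE_URL}/agent/tasks/{task_id}/steps").json()
    print(step["output"])
    if step["is_last"]:
        break

# 3. List and download whatever artifacts the task produced.
artifacts = requests.get(f"{BASE_URL}/agent/tasks/{task_id}/artifacts").json()
for artifact in artifacts.get("artifacts") or []:
    content = requests.get(
        f"{BASE_URL}/agent/tasks/{task_id}/artifacts/{artifact['artifact_id']}"
    ).content
    print(artifact["file_name"], len(content), "bytes")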

189
forge/autogpt/sdk/schema.py Normal file

@@ -0,0 +1,189 @@
# generated by fastapi-codegen:
# filename: ../../postman/schemas/openapi.yaml
# timestamp: 2023-08-25T10:36:11+00:00
from __future__ import annotations
from datetime import datetime
from enum import Enum
from typing import List, Optional
from pydantic import BaseModel, Field
class ArtifactUpload(BaseModel):
file: str = Field(..., description="File to upload.", format="binary")
relative_path: str = Field(
...,
description="Relative path of the artifact in the agent's workspace.",
example="python/code",
)
class Pagination(BaseModel):
total_items: int = Field(..., description="Total number of items.", example=42)
total_pages: int = Field(..., description="Total number of pages.", example=97)
current_page: int = Field(..., description="Current page number.", example=1)
page_size: int = Field(..., description="Number of items per page.", example=25)
class TaskInput(BaseModel):
pass
class Artifact(BaseModel):
created_at: datetime = Field(
...,
description="The creation datetime of the task.",
example="2023-01-01T00:00:00Z",
json_encoders={datetime: lambda v: v.isoformat()},
)
modified_at: datetime = Field(
...,
description="The modification datetime of the task.",
example="2023-01-01T00:00:00Z",
json_encoders={datetime: lambda v: v.isoformat()},
)
artifact_id: str = Field(
...,
description="ID of the artifact.",
example="b225e278-8b4c-4f99-a696-8facf19f0e56",
)
agent_created: bool = Field(
...,
description="Whether the artifact has been created by the agent.",
example=False,
)
relative_path: str = Field(
...,
description="Relative path of the artifact in the agents workspace.",
example="/my_folder/my_other_folder/",
)
file_name: str = Field(
...,
description="Filename of the artifact.",
example="main.py",
)
class StepInput(BaseModel):
pass
class StepOutput(BaseModel):
pass
class TaskRequestBody(BaseModel):
input: str = Field(
...,
min_length=1,
description="Input prompt for the task.",
example="Write the words you receive to the file 'output.txt'.",
)
additional_input: Optional[TaskInput] = {}
class Task(TaskRequestBody):
created_at: datetime = Field(
...,
description="The creation datetime of the task.",
example="2023-01-01T00:00:00Z",
json_encoders={datetime: lambda v: v.isoformat()},
)
modified_at: datetime = Field(
...,
description="The modification datetime of the task.",
example="2023-01-01T00:00:00Z",
json_encoders={datetime: lambda v: v.isoformat()},
)
task_id: str = Field(
...,
description="The ID of the task.",
example="50da533e-3904-4401-8a07-c49adf88b5eb",
)
artifacts: Optional[List[Artifact]] = Field(
[],
description="A list of artifacts that the task has produced.",
example=[
"7a49f31c-f9c6-4346-a22c-e32bc5af4d8e",
"ab7b4091-2560-4692-a4fe-d831ea3ca7d6",
],
)
class StepRequestBody(BaseModel):
name: Optional[str] = Field(
None, description="The name of the task step.", example="Write to file"
)
input: str = Field(
...,
min_length=1,
description="Input prompt for the step.",
example="Washington",
)
additional_input: Optional[StepInput] = {}
class Status(Enum):
created = "created"
running = "running"
completed = "completed"
class Step(StepRequestBody):
created_at: datetime = Field(
...,
description="The creation datetime of the task.",
example="2023-01-01T00:00:00Z",
json_encoders={datetime: lambda v: v.isoformat()},
)
modified_at: datetime = Field(
...,
description="The modification datetime of the task.",
example="2023-01-01T00:00:00Z",
json_encoders={datetime: lambda v: v.isoformat()},
)
task_id: str = Field(
...,
description="The ID of the task this step belongs to.",
example="50da533e-3904-4401-8a07-c49adf88b5eb",
)
step_id: str = Field(
...,
description="The ID of the task step.",
example="6bb1801a-fd80-45e8-899a-4dd723cc602e",
)
name: Optional[str] = Field(
None, description="The name of the task step.", example="Write to file"
)
status: Status = Field(
..., description="The status of the task step.", example="created"
)
output: Optional[str] = Field(
None,
description="Output of the task step.",
example="I am going to use the write_to_file command and write Washington to a file called output.txt <write_to_file('output.txt', 'Washington')",
)
additional_output: Optional[StepOutput] = {}
artifacts: Optional[List[Artifact]] = Field(
[], description="A list of artifacts that the step has produced."
)
is_last: bool = Field(
..., description="Whether this is the last step in the task.", example=True
)
class TaskListResponse(BaseModel):
tasks: Optional[List[Task]] = None
pagination: Optional[Pagination] = None
class TaskStepsListResponse(BaseModel):
steps: Optional[List[Step]] = None
pagination: Optional[Pagination] = None
class TaskArtifactsListResponse(BaseModel):
artifacts: Optional[List[Artifact]] = None
pagination: Optional[Pagination] = None
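
As a quick sanity check of these generated models, the sketch below constructs a Task and serialises it the same way the routes above do. It assumes pydantic v1 semantics (which the Field(..., example=...) style suggests); the field values are illustrative only.

from datetime import datetime, timezone

task = Task(
    input="Write the words you receive to the file 'output.txt'.",
    additional_input=None,
    created_at=datetime.now(timezone.utc),
    modified_at=datetime.now(timezone.utc),
    task_id="50da533e-3904-4401-8a07-c49adf88b5eb",
    artifacts=[],
)
print(task.json())  # datetimes serialise to ISO 8601 strings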


@@ -0,0 +1,76 @@
import abc
import os
import shutil
import typing
from pathlib import Path
class Workspace(abc.ABC):
@abc.abstractmethod
def __init__(self, base_path: str) -> None:
self.base_path = base_path
@abc.abstractmethod
def read(self, task_id: str, path: str) -> bytes:
pass
@abc.abstractmethod
def write(self, task_id: str, path: str, data: bytes) -> None:
pass
@abc.abstractmethod
def delete(
self, task_id: str, path: str, directory: bool = False, recursive: bool = False
) -> None:
pass
@abc.abstractmethod
def exists(self, task_id: str, path: str) -> bool:
pass
@abc.abstractmethod
def list(self, task_id: str, path: str) -> typing.List[str]:
pass
class LocalWorkspace(Workspace):
def __init__(self, base_path: str):
self.base_path = Path(base_path).resolve()
def _resolve_path(self, task_id: str, path: str) -> Path:
abs_path = (self.base_path / task_id / path).resolve()
if not abs_path.is_relative_to(self.base_path):
raise ValueError("Directory traversal is not allowed!")
abs_path.parent.mkdir(parents=True, exist_ok=True)
return abs_path
def read(self, task_id: str, path: str) -> bytes:
with open(self._resolve_path(task_id, path), "rb") as f:
return f.read()
def write(self, task_id: str, path: str, data: bytes) -> None:
file_path = self._resolve_path(task_id, path)
with open(file_path, "wb") as f:
f.write(data)
def delete(
self, task_id: str, path: str, directory: bool = False, recursive: bool = False
) -> None:
resolved_path = self._resolve_path(task_id, path)
if directory:
if recursive:
shutil.rmtree(resolved_path)
else:
os.rmdir(resolved_path)
else:
os.remove(resolved_path)
def exists(self, task_id: str, path: str) -> bool:
return self._resolve_path(task_id, path).exists()
def list(self, task_id: str, path: str) -> typing.List[str]:
base = self._resolve_path(task_id, path)
return [str(p.relative_to(self.base_path / task_id)) for p in base.iterdir()]
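
The Workspace ABC exists so that storage backends other than the local filesystem can be plugged in; the routes above already anticipate s3/gcs/ftp/http artifact URIs. Purely to illustrate the interface, here is a hypothetical S3-backed implementation. It is an untested sketch: boto3 is not a dependency of this project, the bucket-per-workspace layout is an assumption, and it presumes it is appended to this module so that Workspace and typing are in scope.

import boto3  # hypothetical dependency, not in pyproject.toml
from botocore.exceptions import ClientError

class S3Workspace(Workspace):
    def __init__(self, base_path: str) -> None:
        self.base_path = base_path  # reused here as the bucket name
        self._s3 = boto3.client("s3")

    def _key(self, task_id: str, path: str) -> str:
        return f"{task_id}/{path}".lstrip("/")

    def read(self, task_id: str, path: str) -> bytes:
        obj = self._s3.get_object(Bucket=self.base_path, Key=self._key(task_id, path))
        return obj["Body"].read()

    def write(self, task_id: str, path: str, data: bytes) -> None:
        self._s3.put_object(
            Bucket=self.base_path, Key=self._key(task_id, path), Body=data
        )

    def delete(
        self, task_id: str, path: str, directory: bool = False, recursive: bool = False
    ) -> None:
        # S3 has no real directories; a single object delete covers the simple case.
        self._s3.delete_object(Bucket=self.base_path, Key=self._key(task_id, path))

    def exists(self, task_id: str, path: str) -> bool:
        try:
            self._s3.head_object(Bucket=self.base_path, Key=self._key(task_id, path))
            return True
        except ClientError:
            return False

    def list(self, task_id: str, path: str) -> typing.List[str]:
        prefix = f"{task_id}/"
        response = self._s3.list_objects_v2(Bucket=self.base_path, Prefix=prefix)
        return [obj["Key"][len(prefix):] for obj in response.get("Contents", [])]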


@@ -0,0 +1,47 @@
import os
import shutil
import pytest
# Assuming the classes are defined in a file named workspace.py
from .workspace import LocalWorkspace
# Constants
TEST_BASE_PATH = "/tmp/test_workspace"
TEST_FILE_CONTENT = b"Hello World"
TEST_TASK_ID = "1234"
# Setup and Teardown for LocalWorkspace
@pytest.fixture
def setup_local_workspace():
os.makedirs(TEST_BASE_PATH, exist_ok=True)
yield
os.system(f"rm -rf {TEST_BASE_PATH}") # Cleanup after tests
def test_local_read_write_delete_exists(setup_local_workspace):
workspace = LocalWorkspace(TEST_BASE_PATH)
# Write
workspace.write(TEST_TASK_ID, "test_file.txt", TEST_FILE_CONTENT)
# Exists
assert workspace.exists(TEST_TASK_ID, "test_file.txt")
# Read
assert workspace.read(TEST_TASK_ID, "test_file.txt") == TEST_FILE_CONTENT
# Delete
workspace.delete(TEST_TASK_ID, "test_file.txt")
assert not workspace.exists(TEST_TASK_ID, "test_file.txt")
def test_local_list(setup_local_workspace):
workspace = LocalWorkspace(TEST_BASE_PATH)
workspace.write(TEST_TASK_ID, "test1.txt", TEST_FILE_CONTENT)
workspace.write(TEST_TASK_ID, "test2.txt", TEST_FILE_CONTENT)
files = workspace.list(TEST_TASK_ID, ".")
assert set(files) == {"test1.txt", "test2.txt"}
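
One behaviour the tests above do not exercise is the traversal guard in _resolve_path. A small additional test in the same style (a sketch using the same fixture and constants) could look like:

def test_local_directory_traversal_rejected(setup_local_workspace):
    workspace = LocalWorkspace(TEST_BASE_PATH)
    # A path that resolves outside the workspace root must be refused.
    with pytest.raises(ValueError):
        workspace.write(TEST_TASK_ID, "../../outside.txt", TEST_FILE_CONTENT)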

71
forge/helicone_data.py Normal file

@@ -0,0 +1,71 @@
import os

import pandas as pd
from gql import Client, gql
from gql.transport.aiohttp import AIOHTTPTransport
helicone_api_key = os.getenv('HELICONE_API_KEY')
url = "https://www.helicone.ai/api/graphql"
# Authenticate with a Helicone personal access key from the environment
transport = AIOHTTPTransport(url=url, headers={
"authorization": f"Bearer {helicone_api_key}"
})
client = Client(transport=transport, fetch_schema_from_transport=True)
MAX_LOOPS = 10
SIZE = 100
data = []
for i in range(MAX_LOOPS):
query = gql(
"""
query ExampleQuery($limit: Int, $offset: Int){
heliconeRequest(
limit: $limit
offset: $offset
) {
prompt
properties{
name
value
}
requestBody
response
createdAt
}
}
"""
)
result = client.execute(query,
variable_values={
"limit": SIZE,
"offset": i * SIZE
}
)
for item in result["heliconeRequest"]:
properties = {prop['name']: prop['value'] for prop in item['properties']}
data.append({
'createdAt': item['createdAt'],
'agent': properties.get('agent'),
'job_id': properties.get('job_id'),
'challenge': properties.get('challenge'),
'benchmark_start_time': properties.get('benchmark_start_time'),
'prompt': item['prompt'],
'model': item['requestBody'].get('model'),
'request': item['requestBody'].get('messages'),
})
if (len(result["heliconeRequest"]) == 0):
print("No more results")
break
df = pd.DataFrame(data)
print(df.head())
# Drop into an interactive shell for ad-hoc exploration of the collected data
import IPython
IPython.embed()
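
Once the loop has drained the paginated results, the dataframe can be summarised before (or instead of) the interactive session. For example, a sketch that counts requests per challenge and model, assuming those Helicone properties are populated:

# Rows whose 'challenge' property is missing are dropped by groupby.
summary = (
    df.groupby(["challenge", "model"])
    .size()
    .reset_index(name="requests")
    .sort_values("requests", ascending=False)
)
print(summary.head(20))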

13
forge/mypy.ini Normal file

@@ -0,0 +1,13 @@
[mypy]
namespace_packages = True
follow_imports = skip
check_untyped_defs = True
disallow_untyped_defs = True
exclude = ^(agbenchmark/challenges/|agent/|venv|venv-dev)
ignore_missing_imports = True
[mypy-agbenchmark.utils.data_types.*]
ignore_errors = True
[mypy-numpy.*]
ignore_errors = True

3676
forge/poetry.lock generated Normal file

File diff suppressed because it is too large

8
forge/prometheus.yml Normal file

@@ -0,0 +1,8 @@
global:
scrape_interval: 15s # By default, scrape targets every 15 seconds.
evaluation_interval: 15s # Evaluate rules every 15 seconds.
scrape_configs:
- job_name: 'autogpt'
static_configs:
- targets: ['agent:8000']
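
This config expects the agent container to expose Prometheus metrics at agent:8000/metrics. Nothing in this commit shows where that endpoint is created; one minimal way to provide it, assuming the prometheus_client package were added as a dependency, is to mount its ASGI app on the FastAPI server:

from fastapi import FastAPI
from prometheus_client import make_asgi_app

app = FastAPI()
app.mount("/metrics", make_asgi_app())  # scraped by the 'autogpt' job above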

62
forge/pyproject.toml Normal file

@@ -0,0 +1,62 @@
[tool.poetry]
name = "Auto-GPT-Forge"
version = "0.1.0"
description = ""
authors = ["Craig Swift <craigswift13@gmail.com>"]
license = "MIT"
readme = "README.md"
[tool.poetry.dependencies]
python = "^3.10"
python-dotenv = "^1.0.0"
openai = "^0.27.8"
tenacity = "^8.2.2"
sqlalchemy = "^2.0.19"
aiohttp = "^3.8.5"
colorlog = "^6.7.0"
[tool.poetry.group.dev.dependencies]
isort = "^5.12.0"
black = "^23.3.0"
pre-commit = "^3.3.3"
mypy = "^1.4.1"
flake8 = "^6.0.0"
agbenchmark = "^0.0.9"
types-requests = "^2.31.0.2"
pytest = "^7.4.0"
pytest-asyncio = "^0.21.1"
watchdog = "^3.0.0"
mock = "^5.1.0"
autoflake = "^2.2.0"
[tool.poetry.group.ui.dependencies]
gradio = "^3.40.1"
plotly = "^5.16.0"
dash = "^2.11.1"
pandas = "^2.0.3"
dash-bootstrap-components = "^1.4.2"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
[tool.black]
line-length = 88
target-version = ['py310']
include = '\.pyi?$'
packages = ["autogpt"]
extend-exclude = '(/dist|/.venv|/venv|/build|/agent|agbenchmark/challenges)/'
[tool.isort]
profile = "black"
multi_line_output = 3
include_trailing_comma = true
force_grid_wrap = 0
use_parentheses = true
ensure_newline_before_comments = true
line_length = 88
sections = ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "LOCALFOLDER"]
skip_glob = [".tox", "__pycache__", "*.pyc", "venv*/*", "reports", "venv", "env", "node_modules", ".env", ".venv", "dist", "agent/*", "agbenchmark/challenges/*"]

8
forge/run Executable file

@@ -0,0 +1,8 @@
#!/bin/bash
# poetry install
# poetry shell
# Start the agent with hot-reload: watchmedo (from the watchdog dev dependency)
# restarts the process whenever a *.py file changes.
export PYTHONPATH=$PYTHONPATH:$PWD; watchmedo auto-restart -p "*.py" -R python3 -- autogpt/__main__.py "$@"