diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
index f3b2e2db..379f6310 100644
--- a/.devcontainer/Dockerfile
+++ b/.devcontainer/Dockerfile
@@ -1,6 +1,6 @@
# [Choice] Python version (use -bullseye variants on local arm64/Apple Silicon): 3, 3.10, 3.9, 3.8, 3.7, 3.6, 3-bullseye, 3.10-bullseye, 3.9-bullseye, 3.8-bullseye, 3.7-bullseye, 3.6-bullseye, 3-buster, 3.10-buster, 3.9-buster, 3.8-buster, 3.7-buster, 3.6-buster
ARG VARIANT=3-bullseye
-FROM python:3.8
+FROM --platform=linux/amd64 python:3.8
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
# Remove imagemagick due to https://security-tracker.debian.org/tracker/CVE-2019-10131
@@ -10,6 +10,11 @@ RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
# They are installed by the base image (python) which does not have the patch.
RUN python3 -m pip install --upgrade setuptools
+# Install Chrome for web browsing
+RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
+ && curl -sSL https://dl.google.com/linux/direct/google-chrome-stable_current_$(dpkg --print-architecture).deb -o /tmp/chrome.deb \
+ && apt-get -y install /tmp/chrome.deb
+
# [Optional] If your pip requirements rarely change, uncomment this section to add them to the image.
# COPY requirements.txt /tmp/pip-tmp/
# RUN pip3 --disable-pip-version-check --no-cache-dir install -r /tmp/pip-tmp/requirements.txt \
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index 5fefd9c1..f26810fb 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -11,6 +11,7 @@
"userGid": "1000",
"upgradePackages": "true"
},
+ "ghcr.io/devcontainers/features/desktop-lite:1": {},
"ghcr.io/devcontainers/features/python:1": "none",
"ghcr.io/devcontainers/features/node:1": "none",
"ghcr.io/devcontainers/features/git:1": {
diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml
new file mode 100644
index 00000000..c5a42b2c
--- /dev/null
+++ b/.github/workflows/benchmark.yml
@@ -0,0 +1,31 @@
+name: benchmark
+
+on:
+ workflow_dispatch:
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ environment: benchmark
+ strategy:
+ matrix:
+ python-version: [3.8]
+
+ steps:
+ - name: Check out repository
+ uses: actions/checkout@v2
+
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install -r requirements.txt
+ - name: benchmark
+ run: |
+ python benchmark/benchmark_entrepeneur_gpt_with_undecisive_user.py
+ env:
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 366aaf67..39f3aea9 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -36,7 +36,7 @@ jobs:
- name: Run unittest tests with coverage
run: |
- coverage run --source=autogpt -m unittest discover tests
+ pytest --cov=autogpt --without-integration --without-slow-integration
- name: Generate coverage report
run: |
diff --git a/.gitignore b/.gitignore
index 3209297c..2220ef6e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,13 +3,13 @@ autogpt/keys.py
autogpt/*json
autogpt/node_modules/
autogpt/__pycache__/keys.cpython-310.pyc
+autogpt/auto_gpt_workspace
package-lock.json
*.pyc
auto_gpt_workspace/*
*.mpeg
.env
azure.yaml
-*venv/*
outputs/*
ai_settings.yaml
last_run_ai_settings.yaml
@@ -130,10 +130,9 @@ celerybeat.pid
.env
.venv
env/
-venv/
+venv*/
ENV/
env.bak/
-venv.bak/
# Spyder project settings
.spyderproject
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index fb75cd59..dd1d0ec9 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -30,4 +30,10 @@ repos:
language: python
types: [ python ]
exclude: .+/(dist|.venv|venv|build)/.+
- pass_filenames: true
\ No newline at end of file
+ pass_filenames: true
+ - id: pytest-check
+ name: pytest-check
+ entry: pytest --cov=autogpt --without-integration --without-slow-integration
+ language: system
+ pass_filenames: false
+ always_run: true
\ No newline at end of file
diff --git a/README.md b/README.md
index 2fdf65a8..2fad6873 100644
--- a/README.md
+++ b/README.md
@@ -1,27 +1,34 @@
# Auto-GPT: An Autonomous GPT-4 Experiment
+[CI Status](https://github.com/Significant-Gravitas/Auto-GPT/actions/workflows/ci.yml)
+[Discord](https://discord.gg/autogpt)
+[GitHub stars](https://github.com/Significant-Gravitas/Auto-GPT/stargazers)
+[Follow @SigGravitas on Twitter](https://twitter.com/SigGravitas)
+
+## π‘ Get help - [Q&A](https://github.com/Significant-Gravitas/Auto-GPT/discussions/categories/q-a) or [Discord π¬](https://discord.gg/autogpt)
+
+
+
### π΄ π΄ π΄ Urgent: USE `stable` not `master` π΄ π΄ π΄
-> This information takes precedence over any other information provided and should be followed accordingly.
+**Download the latest `stable` release from here: https://github.com/Significant-Gravitas/Auto-GPT/releases/latest.**
+The `master` branch may often be in a **broken** state.
-Our workflow has been improved, but please note that `master` branch may often be in a **broken** state.
-Please download the latest `stable` release from here: https://github.com/Torantulino/Auto-GPT/releases/latest.
+
-
-[Follow @SigGravitas on Twitter](https://twitter.com/SigGravitas)
-[Discord](https://discord.gg/autogpt)
-[CI Status](https://github.com/Torantulino/Auto-GPT/actions/workflows/ci.yml)
Auto-GPT is an experimental open-source application showcasing the capabilities of the GPT-4 language model. This program, driven by GPT-4, chains together LLM "thoughts" to autonomously achieve whatever goal you set. As one of the first examples of GPT-4 running fully autonomously, Auto-GPT pushes the boundaries of what is possible with AI.
-### Demo (30/03/2023):
+### Demo April 16th 2023
-https://user-images.githubusercontent.com/22963551/228855501-2f5777cf-755b-4407-a643-c7299e5b6419.mp4
+https://user-images.githubusercontent.com/70048414/232352935-55c6bf7c-3958-406e-8610-0913475a0b05.mp4
+
+Demo made by Blake Werlinger
π Help Fund Auto-GPT's Development π
If you can spare a coffee, you can help to cover the costs of developing Auto-GPT and help push the boundaries of fully autonomous AI!
Your support is greatly appreciated
-Development of this free, open-source project is made possible by all the contributors and sponsors. If you'd like to sponsor this project and have your avatar or company logo appear below click here.
+Development of this free, open-source project is made possible by all the contributors and sponsors. If you'd like to sponsor this project and have your avatar or company logo appear below click here.
@@ -37,42 +44,6 @@ Development of this free, open-source project is made possible by all the
-
-
-## Table of Contents
-
-- [Auto-GPT: An Autonomous GPT-4 Experiment](#auto-gpt-an-autonomous-gpt-4-experiment)
- - [π΄ π΄ π΄ Urgent: USE `stable` not `master` π΄ π΄ π΄](#----urgent-use-stable-not-master----)
- - [Demo (30/03/2023):](#demo-30032023)
- - [Table of Contents](#table-of-contents)
- - [π Features](#-features)
- - [π Requirements](#-requirements)
- - [πΎ Installation](#-installation)
- - [π§ Usage](#-usage)
- - [Logs](#logs)
- - [Docker](#docker)
- - [Command Line Arguments](#command-line-arguments)
- - [π£οΈ Speech Mode](#οΈ-speech-mode)
- - [π Google API Keys Configuration](#-google-api-keys-configuration)
- - [Setting up environment variables](#setting-up-environment-variables)
- - [Memory Backend Setup](#memory-backend-setup)
- - [Redis Setup](#redis-setup)
- - [π² Pinecone API Key Setup](#-pinecone-api-key-setup)
- - [Milvus Setup](#milvus-setup)
- - [Weaviate Setup](#weaviate-setup)
- - [Setting up environment variables](#setting-up-environment-variables-1)
- - [Setting Your Cache Type](#setting-your-cache-type)
- - [View Memory Usage](#view-memory-usage)
- - [π§ Memory pre-seeding](#-memory-pre-seeding)
- - [π Continuous Mode β οΈ](#-continuous-mode-οΈ)
- - [GPT3.5 ONLY Mode](#gpt35-only-mode)
- - [πΌ Image Generation](#-image-generation)
- - [β οΈ Limitations](#οΈ-limitations)
- - [π‘ Disclaimer](#-disclaimer)
- - [π¦ Connect with Us on Twitter](#-connect-with-us-on-twitter)
- - [Run tests](#run-tests)
- - [Run linter](#run-linter)
-
## π Features
- π Internet access for searches and information gathering
@@ -83,16 +54,17 @@ Development of this free, open-source project is made possible by all the
-   ```yaml
-   # Replace string in angled brackets (<>) to your own ID
- azure_model_map:
- fast_llm_model_deployment_id: "<your-fast-llm-deployment-id>"
- ...
- ```
- - Details can be found here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section and here: https://learn.microsoft.com/en-us/azure/cognitive-services/openai/tutorials/embeddings?tabs=command-line for the embedding model.
+ - See [OpenAI API Keys Configuration](#openai-api-keys-configuration) to obtain your OpenAI API key.
+ - Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website.
+ - If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and then follow these steps:
+ - Rename `azure.yaml.template` to `azure.yaml` and provide the relevant `azure_api_base`, `azure_api_version` and all the deployment IDs for the relevant models in the `azure_model_map` section:
+ - `fast_llm_model_deployment_id` - your gpt-3.5-turbo or gpt-4 deployment ID
+ - `smart_llm_model_deployment_id` - your gpt-4 deployment ID
+ - `embedding_model_deployment_id` - your text-embedding-ada-002 v2 deployment ID
+ - Please specify all of these values as double-quoted strings
+ ```yaml
+ # Replace string in angled brackets (<>) to your own ID
+ azure_model_map:
+ fast_llm_model_deployment_id: "<your-fast-llm-deployment-id>"
+ ...
+ ```
+ - Details can be found here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section and here: https://learn.microsoft.com/en-us/azure/cognitive-services/openai/tutorials/embeddings?tabs=command-line for the embedding model.
## π§ Usage
1. Run the `autogpt` Python module in your terminal
-```
-python -m autogpt
-```
+ ```
+ python -m autogpt
+ ```
2. After each action, choose from options to authorize command(s),
exit the program, or provide feedback to the AI.
@@ -175,30 +150,40 @@ python -m autogpt --debug
You can also build this into a docker image and run it:
-```
+```bash
docker build -t autogpt .
docker run -it --env-file=./.env -v $PWD/auto_gpt_workspace:/app/auto_gpt_workspace autogpt
```
-You can pass extra arguments, for instance, running with `--gpt3only` and `--continuous` mode:
+Or if you have `docker-compose`:
+```bash
+docker-compose run --build --rm auto-gpt
```
+
+You can pass extra arguments, for instance, running with `--gpt3only` and `--continuous` mode:
+```bash
docker run -it --env-file=./.env -v $PWD/auto_gpt_workspace:/app/auto_gpt_workspace autogpt --gpt3only --continuous
```
+Or, using `docker-compose`:
+```bash
+docker-compose run --build --rm auto-gpt --gpt3only --continuous
+```
+
### Command Line Arguments
Here are some common arguments you can use when running Auto-GPT:
> Replace anything in angled brackets (<>) with a value you want to specify
+
* View all available command line arguments
-```bash
-python -m autogpt --help
-```
+ ```bash
+ python -m autogpt --help
+ ```
* Run Auto-GPT with a different AI Settings file
-```bash
-python -m autogpt --ai-settings
-```
-* Specify one of 3 memory backends: `local`, `redis`, `pinecone` or `no_memory`
-```bash
-python -m autogpt --use-memory
-```
+ ```bash
+ python -m autogpt --ai-settings <filename>
+ ```
+* Specify a memory backend
+ ```bash
+ python -m autogpt --use-memory <memory-backend>
+ ```
> **NOTE**: There are shorthands for some of these flags, for example `-m` for `--use-memory`. Use `python -m autogpt --help` for more information
@@ -304,33 +289,29 @@ To switch to either, change the `MEMORY_BACKEND` env variable to the value that
* `milvus` will use the milvus cache that you configured
* `weaviate` will use the weaviate cache that you configured
+## Memory Backend Setup
+
### Redis Setup
> _**CAUTION**_ \
This setup is not intended to be publicly accessible and lacks security measures. Avoid exposing Redis to the internet without a password, or ideally at all.
-1. Install docker desktop
-```bash
-docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest
-```
-> See https://hub.docker.com/r/redis/redis-stack-server for setting a password and additional configuration.
+1. Install Docker (or Docker Desktop on Windows)
+2. Launch Redis container
+ ```bash
+ docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest
+ ```
+ > See https://hub.docker.com/r/redis/redis-stack-server for setting a password and additional configuration.
+3. Set the following settings in `.env`
+ > Replace **PASSWORD** in angled brackets (<>)
+ ```bash
+ MEMORY_BACKEND=redis
+ REDIS_HOST=localhost
+ REDIS_PORT=6379
+ REDIS_PASSWORD=<PASSWORD>
+ ```
-2. Set the following environment variables
-> Replace **PASSWORD** in angled brackets (<>)
-```bash
-MEMORY_BACKEND=redis
-REDIS_HOST=localhost
-REDIS_PORT=6379
-REDIS_PASSWORD=
-```
-You can optionally set
-
-```bash
-WIPE_REDIS_ON_START=False
-```
-
-To persist memory stored in Redis
+ You can optionally set `WIPE_REDIS_ON_START=False` to persist memory stored in Redis.
You can specify the memory index for redis using the following:
-
```bash
MEMORY_INDEX=<YOUR-INDEX-NAME>
```
@@ -375,8 +356,9 @@ export MEMORY_BACKEND="pinecone"
- or setup by [Zilliz Cloud](https://zilliz.com/cloud)
- set `MILVUS_ADDR` in `.env` to your Milvus address `host:port`.
- set `MEMORY_BACKEND` in `.env` to `milvus` to enable milvus as backend.
-- optional
- - set `MILVUS_COLLECTION` in `.env` to change milvus collection name as you want, `autogpt` is the default name.
+
+**Optional:**
+- set `MILVUS_COLLECTION` in `.env` to change the Milvus collection name; `autogpt` is the default.
### Weaviate Setup
@@ -402,12 +384,14 @@ MEMORY_INDEX="Autogpt" # name of the index to create for the application
## View Memory Usage
-1. View memory usage by using the `--debug` flag :)
+View memory usage by using the `--debug` flag :)
## π§ Memory pre-seeding
+Memory pre-seeding allows you to ingest files into memory and pre-seed it before running Auto-GPT.
-# python autogpt/data_ingestion.py -h
+```bash
+# python data_ingestion.py -h
usage: data_ingestion.py [-h] (--file FILE | --dir DIR) [--init] [--overlap OVERLAP] [--max_length MAX_LENGTH]
Ingest a file or a directory with multiple files into memory. Make sure to set your .env before running this script.
@@ -420,33 +404,27 @@ options:
--overlap OVERLAP The overlap size between chunks when ingesting files (default: 200)
--max_length MAX_LENGTH The max_length of each chunk when ingesting files (default: 4000)
-# python autogpt/data_ingestion.py --dir seed_data --init --overlap 200 --max_length 1000
-This script located at autogpt/data_ingestion.py, allows you to ingest files into memory and pre-seed it before running Auto-GPT.
+# python data_ingestion.py --dir DataFolder --init --overlap 100 --max_length 2000
+```
+In the example above, the script initializes the memory and ingests all files within the `Auto-Gpt/autogpt/auto_gpt_workspace/DataFolder` directory into memory, with an overlap of 100 between chunks and a maximum chunk length of 2000.
-Memory pre-seeding is a technique that involves ingesting relevant documents or data into the AI's memory so that it can use this information to generate more informed and accurate responses.
+Note that you can also use the `--file` argument to ingest a single file into memory, and that `data_ingestion.py` will only ingest files within the `/auto_gpt_workspace` directory.
-To pre-seed the memory, the content of each document is split into chunks of a specified maximum length with a specified overlap between chunks, and then each chunk is added to the memory backend set in the .env file. When the AI is prompted to recall information, it can then access those pre-seeded memories to generate more informed and accurate responses.
+The DIR path is relative to the auto_gpt_workspace directory, so `python data_ingestion.py --dir . --init` will ingest everything in the `auto_gpt_workspace` directory.
-This technique is particularly useful when working with large amounts of data or when there is specific information that the AI needs to be able to access quickly.
-By pre-seeding the memory, the AI can retrieve and use this information more efficiently, saving time, API call and improving the accuracy of its responses.
+You can adjust the `max_length` and `overlap` parameters to fine-tune the way the documents are presented to the AI when it "recalls" that memory (see the sketch after this list):
+- Increasing the overlap value allows the AI to access more contextual information from each chunk when recalling information, but will result in more chunks being created and therefore increase memory backend usage and OpenAI API requests.
+- Reducing the `max_length` value will create more chunks, which can save prompt tokens by allowing for more message history in the context, at the cost of more chunks to store and query.
+- Increasing the `max_length` value will provide the AI with more contextual information from each chunk, reducing the number of chunks created and saving on OpenAI API requests. However, this may also use more prompt tokens and decrease the overall context available to the AI.
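+
+For intuition, here is a minimal sketch of the chunking behaviour described above (a hypothetical `split_text` helper using the script's default parameters; the actual logic lives in Auto-GPT's ingestion code):
+
+```python
+from typing import List
+
+def split_text(text: str, max_length: int = 4000, overlap: int = 200) -> List[str]:
+    """Split text into chunks of at most max_length characters,
+    each sharing `overlap` characters with the previous chunk."""
+    chunks = []
+    start = 0
+    while start < len(text):
+        end = min(start + max_length, len(text))
+        chunks.append(text[start:end])
+        if end == len(text):
+            break
+        start = end - overlap  # step back so consecutive chunks overlap
+    return chunks
+```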
-You could for example download the documentation of an API, a GitHub repository, etc. and ingest it into memory before running Auto-GPT.
+Memory pre-seeding is a technique for improving AI accuracy by ingesting relevant data into its memory. Documents are split into chunks and added to memory, allowing the AI to access them quickly and generate more accurate responses. It's useful for large datasets or when specific information needs to be accessed quickly. Examples include ingesting the documentation of an API or a GitHub repository before running Auto-GPT.
-β οΈ If you use Redis as your memory, make sure to run Auto-GPT with the `WIPE_REDIS_ON_START` set to `False` in your `.env` file.
+β οΈ If you use Redis as your memory, make sure to run Auto-GPT with the `WIPE_REDIS_ON_START=False` in your `.env` file.
β οΈFor other memory backend, we currently forcefully wipe the memory when starting Auto-GPT. To ingest data with those memory backend, you can call the `data_ingestion.py` script anytime during an Auto-GPT run.
Memories will be available to the AI immediately as they are ingested, even if ingested while Auto-GPT is running.
-In the example above, the script initializes the memory, ingests all files within the `/seed_data` directory into memory with an overlap between chunks of 200 and a maximum length of each chunk of 4000.
-Note that you can also use the `--file` argument to ingest a single file into memory and that the script will only ingest files within the `/auto_gpt_workspace` directory.
-
-You can adjust the `max_length` and overlap parameters to fine-tune the way the docuents are presented to the AI when it "recall" that memory:
-
-- Adjusting the overlap value allows the AI to access more contextual information from each chunk when recalling information, but will result in more chunks being created and therefore increase memory backend usage and OpenAI API requests.
-- Reducing the `max_length` value will create more chunks, which can save prompt tokens by allowing for more message history in the context, but will also increase the number of chunks.
-- Increasing the `max_length` value will provide the AI with more contextual information from each chunk, reducing the number of chunks created and saving on OpenAI API requests. However, this may also use more prompt tokens and decrease the overall context available to the AI.
-
## π Continuous Mode β οΈ
Run the AI **without** user authorization, 100% automated.
@@ -456,9 +434,9 @@ Use at your own risk.
1. Run the `autogpt` python module in your terminal:
-```bash
-python -m autogpt --speak --continuous
-```
+ ```bash
+ python -m autogpt --speak --continuous
+ ```
2. To exit the program, press Ctrl + C
@@ -526,16 +504,29 @@ We look forward to connecting with you and hearing your thoughts, ideas, and exp
## Run tests
-To run tests, run the following command:
+To run all tests, run the following command:
```bash
-python -m unittest discover tests
+pytest
+```
+
+To run all tests except integration tests:
+
+```bash
+pytest --without-integration
+```
+
+To run all tests except slow integration tests:
+
+```bash
+pytest --without-slow-integration
```
To run tests and see coverage, run the following command:
```bash
-coverage run -m unittest discover tests
+pytest --cov=autogpt --without-integration --without-slow-integration
```
## Run linter
diff --git a/autogpt/__main__.py b/autogpt/__main__.py
index cd597506..c721088a 100644
--- a/autogpt/__main__.py
+++ b/autogpt/__main__.py
@@ -66,7 +66,7 @@ def main() -> None:
full_message_history = []
next_action_count = 0
# Make a constant:
- user_input = (
+ triggering_prompt = (
"Determine which next command to use, and respond using the"
" format specified above:"
)
@@ -77,9 +77,9 @@ def main() -> None:
f"Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
)
logger.typewriter_log(f"Using Browser:", Fore.GREEN, cfg.selenium_web_browser)
- prompt = ai_config.construct_full_prompt()
+ system_prompt = ai_config.construct_full_prompt()
if cfg.debug_mode:
- logger.typewriter_log("Prompt:", Fore.GREEN, prompt)
+ logger.typewriter_log("Prompt:", Fore.GREEN, system_prompt)
agent = Agent(
ai_name=ai_name,
memory=memory,
@@ -87,8 +87,8 @@ def main() -> None:
next_action_count=next_action_count,
command_registry=command_registry,
config=ai_config,
- prompt=prompt,
- user_input=user_input,
+ system_prompt=system_prompt,
+ triggering_prompt=triggering_prompt,
)
agent.start_interaction_loop()
diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py
index 8117818e..b771b1de 100644
--- a/autogpt/agent/agent.py
+++ b/autogpt/agent/agent.py
@@ -3,9 +3,8 @@ from colorama import Fore, Style
from autogpt.app import execute_command, get_command
from autogpt.chat import chat_with_ai, create_chat_message
from autogpt.config import Config
-from autogpt.json_fixes.bracket_termination import (
- attempt_to_fix_json_by_finding_outermost_brackets,
-)
+from autogpt.json_fixes.master_json_fix_method import fix_json_using_multiple_techniques
+from autogpt.json_validation.validate_json import validate_json
from autogpt.logs import logger, print_assistant_thoughts
from autogpt.speech import say_text
from autogpt.spinner import Spinner
@@ -20,9 +19,18 @@ class Agent:
memory: The memory object to use.
full_message_history: The full message history.
next_action_count: The number of actions to execute.
- prompt: The prompt to use.
- user_input: The user input.
+ system_prompt: The system prompt is the initial prompt that defines everything the AI needs to know to achieve its task successfully.
+ Currently, the dynamic and customizable information in the system prompt is the ai_name, description and goals.
+ triggering_prompt: The last sentence the AI will see before answering. For Auto-GPT, this prompt is:
+ Determine which next command to use, and respond using the format specified above:
+ The triggering prompt is not part of the system prompt because between the system prompt and the triggering
+ prompt we have contextual information that can distract the AI and make it forget that its goal is to find the next task to achieve.
+ SYSTEM PROMPT
+ CONTEXTUAL INFORMATION (memory, previous conversations, anything relevant)
+ TRIGGERING PROMPT
+
+ The triggering prompt reminds the AI about its short-term meta task (defining the next task).
"""
def __init__(
@@ -33,8 +41,8 @@ class Agent:
next_action_count,
command_registry,
config,
- prompt,
- user_input,
+ system_prompt,
+ triggering_prompt,
):
self.ai_name = ai_name
self.memory = memory
@@ -42,8 +50,8 @@ class Agent:
self.next_action_count = next_action_count
self.command_registry = command_registry
self.config = config
- self.prompt = prompt
- self.user_input = user_input
+ self.system_prompt = system_prompt
+ self.triggering_prompt = triggering_prompt
def start_interaction_loop(self):
# Interaction Loop
@@ -51,6 +59,8 @@ class Agent:
loop_count = 0
command_name = None
arguments = None
+ user_input = ""
+
while True:
# Discontinue if continuous limit is reached
loop_count += 1
@@ -68,34 +78,35 @@ class Agent:
with Spinner("Thinking... "):
assistant_reply = chat_with_ai(
self,
- self.prompt,
- self.user_input,
+ self.system_prompt,
+ self.triggering_prompt,
self.full_message_history,
self.memory,
cfg.fast_token_limit,
) # TODO: This hardcodes the model to use GPT3.5. Make this an argument
+ assistant_reply_json = fix_json_using_multiple_techniques(assistant_reply)
for plugin in cfg.plugins:
- assistant_reply = plugin.post_planning(self, assistant_reply)
+ assistant_reply_json = plugin.post_planning(self, assistant_reply_json)
+
# Print Assistant thoughts
- print_assistant_thoughts(self.ai_name, assistant_reply)
-
- # Get command name and arguments
- try:
- command_name, arguments = get_command(
- attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply)
- )
- if cfg.speak_mode:
- say_text(f"I want to execute {command_name}")
- except Exception as e:
- logger.error("Error: \n", str(e))
+ if assistant_reply_json != {}:
+ validate_json(assistant_reply_json, 'llm_response_format_1')
+ # Get command name and arguments
+ try:
+ print_assistant_thoughts(self.ai_name, assistant_reply_json)
+ command_name, arguments = get_command(assistant_reply_json)
+ # command_name, arguments = assistant_reply_json_valid["command"]["name"], assistant_reply_json_valid["command"]["args"]
+ if cfg.speak_mode:
+ say_text(f"I want to execute {command_name}")
+ except Exception as e:
+ logger.error("Error: \n", str(e))
if not cfg.continuous_mode and self.next_action_count == 0:
### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
# Get key press: Prompt the user to press enter to continue or escape
# to exit
- self.user_input = ""
logger.typewriter_log(
"NEXT ACTION: ",
Fore.CYAN,
@@ -114,14 +125,14 @@ class Agent:
)
if console_input.lower().rstrip() == "y":
- self.user_input = "GENERATE NEXT COMMAND JSON"
+ user_input = "GENERATE NEXT COMMAND JSON"
break
elif console_input.lower().startswith("y -"):
try:
self.next_action_count = abs(
int(console_input.split(" ")[1])
)
- self.user_input = "GENERATE NEXT COMMAND JSON"
+ user_input = "GENERATE NEXT COMMAND JSON"
except ValueError:
print(
"Invalid input format. Please enter 'y -n' where n is"
@@ -130,20 +141,20 @@ class Agent:
continue
break
elif console_input.lower() == "n":
- self.user_input = "EXIT"
+ user_input = "EXIT"
break
else:
- self.user_input = console_input
+ user_input = console_input
command_name = "human_feedback"
break
- if self.user_input == "GENERATE NEXT COMMAND JSON":
+ if user_input == "GENERATE NEXT COMMAND JSON":
logger.typewriter_log(
"-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
Fore.MAGENTA,
"",
)
- elif self.user_input == "EXIT":
+ elif user_input == "EXIT":
print("Exiting...", flush=True)
break
else:
@@ -161,7 +172,7 @@ class Agent:
f"Command {command_name} threw the following error: {arguments}"
)
elif command_name == "human_feedback":
- result = f"Human feedback: {self.user_input}"
+ result = f"Human feedback: {user_input}"
else:
for plugin in cfg.plugins:
command_name, arguments = plugin.pre_command(
@@ -180,7 +191,7 @@ class Agent:
memory_to_add = (
f"Assistant Reply: {assistant_reply} "
f"\nResult: {result} "
- f"\nHuman Feedback: {self.user_input} "
+ f"\nHuman Feedback: {user_input} "
)
self.memory.add(memory_to_add)
diff --git a/autogpt/app.py b/autogpt/app.py
index 3a8bbc2a..97daaf05 100644
--- a/autogpt/app.py
+++ b/autogpt/app.py
@@ -1,7 +1,6 @@
""" Command and Control """
import json
-from typing import List, NoReturn, Union
-
+from typing import List, NoReturn, Union, Dict
from autogpt.agent.agent_manager import AgentManager
from autogpt.commands.audio_text import read_audio_from_file
from autogpt.commands.command import CommandRegistry, command
@@ -13,6 +12,7 @@ from autogpt.commands.file_operations import (
read_file,
search_files,
write_to_file,
+    download_file,
)
from autogpt.commands.git_operations import clone_repository
from autogpt.commands.google_search import google_official_search, google_search
@@ -49,11 +49,11 @@ def is_valid_int(value: str) -> bool:
return False
-def get_command(response: str):
+def get_command(response_json: Dict):
"""Parse the response and return the command name and arguments
Args:
- response (str): The response from the user
+        response_json (Dict): The parsed JSON response from the AI
Returns:
tuple: The command name and arguments
@@ -64,8 +64,6 @@ def get_command(response: str):
Exception: If any other error occurs
"""
try:
- response_json = fix_and_parse_json(response)
-
if "command" not in response_json:
return "Error:", "Missing 'command' object in JSON"
@@ -139,6 +137,11 @@ def execute_command(
return get_text_summary(arguments["url"], arguments["question"])
elif command_name == "get_hyperlinks":
return get_hyperlinks(arguments["url"])
+ elif command_name == "download_file":
+ if not CFG.allow_downloads:
+ return "Error: You do not have user authorization to download files locally."
+ return download_file(arguments["url"], arguments["file"])
+
# TODO: Change these to take in a file rather than pasted code, if
# non-file is given, return instructions "Input should be a python
# filepath, write your code to file and try again
diff --git a/autogpt/args.py b/autogpt/args.py
index 20d25a4c..f0e9c07a 100644
--- a/autogpt/args.py
+++ b/autogpt/args.py
@@ -1,8 +1,7 @@
"""This module contains the argument parsing logic for the script."""
import argparse
-from colorama import Fore
-
+from colorama import Fore, Back, Style
from autogpt import utils
from autogpt.config import Config
from autogpt.logs import logger
@@ -64,6 +63,12 @@ def parse_arguments() -> None:
help="Specifies which ai_settings.yaml file to use, will also automatically"
" skip the re-prompt.",
)
+ parser.add_argument(
+ '--allow-downloads',
+ action='store_true',
+ dest='allow_downloads',
+ help='Dangerous: Allows Auto-GPT to download files natively.'
+ )
args = parser.parse_args()
if args.debug:
@@ -134,5 +139,13 @@ def parse_arguments() -> None:
CFG.ai_settings_file = file
CFG.skip_reprompt = True
+ if args.allow_downloads:
+ logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED")
+ logger.typewriter_log("WARNING: ", Fore.YELLOW,
+ f"{Back.LIGHTYELLOW_EX}Auto-GPT will now be able to download and save files to your machine.{Back.RESET} " +
+ "It is recommended that you monitor any files it downloads carefully.")
+ logger.typewriter_log("WARNING: ", Fore.YELLOW, f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}")
+ CFG.allow_downloads = True
+
if args.browser_name:
CFG.selenium_web_browser = args.browser_name
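For clarity, the flag itself only flips `CFG.allow_downloads`; the actual gate is the check added to `execute_command` in `autogpt/app.py` above. A self-contained sketch of that flow (the `FakeConfig` stand-in is hypothetical):

```python
class FakeConfig:
    """Hypothetical stand-in for autogpt.config.Config."""
    allow_downloads = False  # default; --allow-downloads sets this to True

CFG = FakeConfig()

def execute_download(url: str, file: str) -> str:
    # Mirrors the check added to execute_command in autogpt/app.py.
    if not CFG.allow_downloads:
        return "Error: You do not have user authorization to download files locally."
    return f"would call download_file({url!r}, {file!r})"

print(execute_download("https://example.com/a.txt", "a.txt"))
```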
diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py
index 4d8d76b3..5faf6d40 100644
--- a/autogpt/commands/file_operations.py
+++ b/autogpt/commands/file_operations.py
@@ -4,11 +4,18 @@ from __future__ import annotations
import os
import os.path
from pathlib import Path
-from typing import Generator
+from typing import Generator, List
+import requests
+from requests.adapters import HTTPAdapter
+from requests.adapters import Retry
+from colorama import Fore, Back
+from autogpt.spinner import Spinner
+from autogpt.utils import readable_file_size
from autogpt.commands.command import command
from autogpt.workspace import WORKSPACE_PATH, path_in_workspace
+
LOG_FILE = "file_logger.txt"
LOG_FILE_PATH = WORKSPACE_PATH / LOG_FILE
@@ -221,3 +228,43 @@ def search_files(directory: str) -> list[str]:
found_files.append(relative_path)
return found_files
+
+
+def download_file(url: str, filename: str) -> str:
+    """Downloads a file
+    Args:
+        url (str): URL of the file to download
+        filename (str): Filename to save the file as
+
+    Returns:
+        str: A message describing the result of the download
+    """
+ safe_filename = path_in_workspace(filename)
+ try:
+ message = f"{Fore.YELLOW}Downloading file from {Back.LIGHTBLUE_EX}{url}{Back.RESET}{Fore.RESET}"
+ with Spinner(message) as spinner:
+ session = requests.Session()
+ retry = Retry(total=3, backoff_factor=1, status_forcelist=[502, 503, 504])
+ adapter = HTTPAdapter(max_retries=retry)
+ session.mount('http://', adapter)
+ session.mount('https://', adapter)
+
+ total_size = 0
+ downloaded_size = 0
+
+ with session.get(url, allow_redirects=True, stream=True) as r:
+ r.raise_for_status()
+ total_size = int(r.headers.get('Content-Length', 0))
+ downloaded_size = 0
+
+ with open(safe_filename, 'wb') as f:
+ for chunk in r.iter_content(chunk_size=8192):
+ f.write(chunk)
+ downloaded_size += len(chunk)
+
+ # Update the progress message
+ progress = f"{readable_file_size(downloaded_size)} / {readable_file_size(total_size)}"
+ spinner.update_message(f"{message} {progress}")
+
+ return f'Successfully downloaded and locally stored file: "{filename}"! (Size: {readable_file_size(total_size)})'
+ except requests.HTTPError as e:
+ return f"Got an HTTP Error whilst trying to download file: {e}"
+ except Exception as e:
+ return "Error: " + str(e)
diff --git a/autogpt/commands/git_operations.py b/autogpt/commands/git_operations.py
index f5954032..480d3a6e 100644
--- a/autogpt/commands/git_operations.py
+++ b/autogpt/commands/git_operations.py
@@ -3,6 +3,7 @@ from git.repo import Repo
from autogpt.commands.command import command
from autogpt.config import Config
+from autogpt.workspace import path_in_workspace
CFG = Config()
@@ -25,8 +26,9 @@ def clone_repository(repo_url: str, clone_path: str) -> str:
str: The result of the clone operation"""
split_url = repo_url.split("//")
auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url)
+ safe_clone_path = path_in_workspace(clone_path)
try:
- Repo.clone_from(auth_repo_url, clone_path)
- return f"""Cloned {repo_url} to {clone_path}"""
+ Repo.clone_from(auth_repo_url, safe_clone_path)
+ return f"""Cloned {repo_url} to {safe_clone_path}"""
except Exception as e:
return f"Error: {str(e)}"
diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py
index ed79d56c..f6a3bedd 100644
--- a/autogpt/commands/web_selenium.py
+++ b/autogpt/commands/web_selenium.py
@@ -83,6 +83,7 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
# See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari
driver = webdriver.Safari(options=options)
else:
+ options.add_argument("--no-sandbox")
driver = webdriver.Chrome(
executable_path=ChromeDriverManager().install(), options=options
)
diff --git a/autogpt/config/config.py b/autogpt/config/config.py
index a5cd0710..c12eed2e 100644
--- a/autogpt/config/config.py
+++ b/autogpt/config/config.py
@@ -23,6 +23,7 @@ class Config(metaclass=Singleton):
self.continuous_limit = 0
self.speak_mode = False
self.skip_reprompt = False
+ self.allow_downloads = False
self.selenium_web_browser = os.getenv("USE_WEB_BROWSER", "chrome")
self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml")
diff --git a/autogpt/json_fixes/bracket_termination.py b/autogpt/json_fixes/bracket_termination.py
index 260301dc..dd9a8376 100644
--- a/autogpt/json_fixes/bracket_termination.py
+++ b/autogpt/json_fixes/bracket_termination.py
@@ -3,52 +3,13 @@ from __future__ import annotations
import contextlib
import json
-
-import regex
-from colorama import Fore
-
+from typing import Optional
from autogpt.config import Config
-from autogpt.logs import logger
-from autogpt.speech import say_text
CFG = Config()
-def attempt_to_fix_json_by_finding_outermost_brackets(json_string: str):
- if CFG.speak_mode and CFG.debug_mode:
- say_text(
- "I have received an invalid JSON response from the OpenAI API. "
- "Trying to fix it now."
- )
- logger.typewriter_log("Attempting to fix JSON by finding outermost brackets\n")
-
- try:
- json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}")
- json_match = json_pattern.search(json_string)
-
- if json_match:
- # Extract the valid JSON object from the string
- json_string = json_match.group(0)
- logger.typewriter_log(
- title="Apparently json was fixed.", title_color=Fore.GREEN
- )
- if CFG.speak_mode and CFG.debug_mode:
- say_text("Apparently json was fixed.")
- else:
- raise ValueError("No valid JSON object found")
-
- except (json.JSONDecodeError, ValueError):
- if CFG.debug_mode:
- logger.error(f"Error: Invalid JSON: {json_string}\n")
- if CFG.speak_mode:
- say_text("Didn't work. I will have to ignore this response then.")
- logger.error("Error: Invalid JSON, setting it to empty JSON now.\n")
- json_string = {}
-
- return json_string
-
-
-def balance_braces(json_string: str) -> str | None:
+def balance_braces(json_string: str) -> Optional[str]:
"""
Balance the braces in a JSON string.
diff --git a/autogpt/json_fixes/master_json_fix_method.py b/autogpt/json_fixes/master_json_fix_method.py
new file mode 100644
index 00000000..7a2cf3cc
--- /dev/null
+++ b/autogpt/json_fixes/master_json_fix_method.py
@@ -0,0 +1,28 @@
+from typing import Any, Dict
+
+from autogpt.config import Config
+from autogpt.logs import logger
+from autogpt.speech import say_text
+CFG = Config()
+
+
+def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]:
+    from autogpt.json_fixes.parsing import (
+        attempt_to_fix_json_by_finding_outermost_brackets,
+        fix_and_parse_json,
+    )
+
+ # Parse and print Assistant response
+ assistant_reply_json = fix_and_parse_json(assistant_reply)
+ if assistant_reply_json == {}:
+ assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(
+ assistant_reply
+ )
+
+ if assistant_reply_json != {}:
+ return assistant_reply_json
+
+ logger.error("Error: The following AI output couldn't be converted to a JSON:\n", assistant_reply)
+ if CFG.speak_mode:
+ say_text("I have received an invalid JSON response from the OpenAI API.")
+
+ return {}
diff --git a/autogpt/json_fixes/parsing.py b/autogpt/json_fixes/parsing.py
index 0f154411..1e391eed 100644
--- a/autogpt/json_fixes/parsing.py
+++ b/autogpt/json_fixes/parsing.py
@@ -3,18 +3,19 @@ from __future__ import annotations
import contextlib
import json
-from typing import Any
-
+from typing import Any, Dict, Union
+from colorama import Fore
+from regex import regex
from autogpt.config import Config
from autogpt.json_fixes.auto_fix import fix_json
from autogpt.json_fixes.bracket_termination import balance_braces
from autogpt.json_fixes.escaping import fix_invalid_escape
from autogpt.json_fixes.missing_quotes import add_quotes_to_property_names
from autogpt.logs import logger
+from autogpt.speech import say_text
CFG = Config()
-
JSON_SCHEMA = """
{
"command": {
@@ -38,7 +39,6 @@ JSON_SCHEMA = """
def correct_json(json_to_load: str) -> str:
"""
Correct common JSON errors.
-
Args:
json_to_load (str): The JSON string.
"""
@@ -72,7 +72,7 @@ def correct_json(json_to_load: str) -> str:
def fix_and_parse_json(
json_to_load: str, try_to_fix_with_gpt: bool = True
-) -> str | dict[Any, Any]:
+) -> Dict[Any, Any]:
"""Fix and parse JSON string
Args:
@@ -110,7 +110,7 @@ def fix_and_parse_json(
def try_ai_fix(
try_to_fix_with_gpt: bool, exception: Exception, json_to_load: str
-) -> str | dict[Any, Any]:
+) -> Dict[Any, Any]:
"""Try to fix the JSON with the AI
Args:
@@ -126,13 +126,13 @@ def try_ai_fix(
"""
if not try_to_fix_with_gpt:
raise exception
-
- logger.warn(
- "Warning: Failed to parse AI output, attempting to fix."
- "\n If you see this warning frequently, it's likely that"
- " your prompt is confusing the AI. Try changing it up"
- " slightly."
- )
+ if CFG.debug_mode:
+ logger.warn(
+ "Warning: Failed to parse AI output, attempting to fix."
+ "\n If you see this warning frequently, it's likely that"
+ " your prompt is confusing the AI. Try changing it up"
+ " slightly."
+ )
# Now try to fix this up using the ai_functions
ai_fixed_json = fix_json(json_to_load, JSON_SCHEMA)
@@ -140,5 +140,39 @@ def try_ai_fix(
return json.loads(ai_fixed_json)
# This allows the AI to react to the error message,
# which usually results in it correcting its ways.
- logger.error("Failed to fix AI output, telling the AI.")
- return json_to_load
+ # logger.error("Failed to fix AI output, telling the AI.")
+ return {}
+
+
+def attempt_to_fix_json_by_finding_outermost_brackets(json_string: str):
+ if CFG.speak_mode and CFG.debug_mode:
+ say_text(
+ "I have received an invalid JSON response from the OpenAI API. "
+ "Trying to fix it now."
+ )
+ logger.error("Attempting to fix JSON by finding outermost brackets\n")
+
+ try:
+ json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}")
+ json_match = json_pattern.search(json_string)
+
+ if json_match:
+ # Extract the valid JSON object from the string
+ json_string = json_match.group(0)
+ logger.typewriter_log(
+ title="Apparently json was fixed.", title_color=Fore.GREEN
+ )
+ if CFG.speak_mode and CFG.debug_mode:
+ say_text("Apparently json was fixed.")
+ else:
+ return {}
+
+ except (json.JSONDecodeError, ValueError):
+ if CFG.debug_mode:
+ logger.error(f"Error: Invalid JSON: {json_string}\n")
+ if CFG.speak_mode:
+ say_text("Didn't work. I will have to ignore this response then.")
+ logger.error("Error: Invalid JSON, setting it to empty JSON now.\n")
+ json_string = {}
+
+ return fix_and_parse_json(json_string)
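Taken together, `fix_json_using_multiple_techniques` tries a plain parse and the local repairs first, may fall back to the GPT-assisted fix (which needs an API key), then the outermost-brackets extraction above, and finally returns `{}`. A hedged usage sketch (assumes an importable, configured `autogpt` package; the reply text is made up):

```python
from autogpt.json_fixes.master_json_fix_method import (
    fix_json_using_multiple_techniques,
)

# Chatter around an otherwise valid JSON object; the outermost-brackets
# pass can recover the embedded dict if the earlier fixes fail.
reply = 'Sure, here is my plan: {"command": {"name": "do_nothing", "args": {}}}'
parsed = fix_json_using_multiple_techniques(reply)
print(parsed)  # {} only if every technique failed
```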
diff --git a/autogpt/json_schemas/llm_response_format_1.json b/autogpt/json_schemas/llm_response_format_1.json
new file mode 100644
index 00000000..9aa33352
--- /dev/null
+++ b/autogpt/json_schemas/llm_response_format_1.json
@@ -0,0 +1,31 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "type": "object",
+ "properties": {
+ "thoughts": {
+ "type": "object",
+ "properties": {
+ "text": {"type": "string"},
+ "reasoning": {"type": "string"},
+ "plan": {"type": "string"},
+ "criticism": {"type": "string"},
+ "speak": {"type": "string"}
+ },
+ "required": ["text", "reasoning", "plan", "criticism", "speak"],
+ "additionalProperties": false
+ },
+ "command": {
+ "type": "object",
+ "properties": {
+ "name": {"type": "string"},
+ "args": {
+ "type": "object"
+ }
+ },
+ "required": ["name", "args"],
+ "additionalProperties": false
+ }
+ },
+ "required": ["thoughts", "command"],
+ "additionalProperties": false
+}
diff --git a/autogpt/json_validation/validate_json.py b/autogpt/json_validation/validate_json.py
new file mode 100644
index 00000000..440c3b0b
--- /dev/null
+++ b/autogpt/json_validation/validate_json.py
@@ -0,0 +1,30 @@
+import json
+from jsonschema import Draft7Validator
+from autogpt.config import Config
+from autogpt.logs import logger
+
+CFG = Config()
+
+
+def validate_json(json_object: object, schema_name: str) -> object:
+    """Validate a JSON object against a named schema.
+
+    Args:
+        json_object (object): The JSON object to validate
+        schema_name (str): Name of a schema file in autogpt/json_schemas, without the .json extension
+    """
+ with open(f"autogpt/json_schemas/{schema_name}.json", "r") as f:
+ schema = json.load(f)
+ validator = Draft7Validator(schema)
+
+ if errors := sorted(validator.iter_errors(json_object), key=lambda e: e.path):
+ logger.error("The JSON object is invalid.")
+ if CFG.debug_mode:
+            logger.error(json.dumps(json_object, indent=4))
+ logger.error("The following issues were found:")
+
+ for error in errors:
+ logger.error(f"Error: {error.message}")
+ elif CFG.debug_mode:
+ print("The JSON object is valid.")
+
+ return json_object
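A hedged example of validating a well-formed reply against the schema added above (run from the repository root, since the schema path is relative; the reply content is made up):

```python
from autogpt.json_validation.validate_json import validate_json

reply = {
    "thoughts": {
        "text": "I should inspect the workspace first.",
        "reasoning": "Knowing which files exist informs the next command.",
        "plan": "- search_files\n- read_file",
        "criticism": "Avoid re-listing files unnecessarily.",
        "speak": "Listing the workspace files.",
    },
    "command": {"name": "search_files", "args": {"directory": "."}},
}
validate_json(reply, "llm_response_format_1")  # logs errors only if invalid
```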
diff --git a/autogpt/logs.py b/autogpt/logs.py
index f5c6fa81..a585dffa 100644
--- a/autogpt/logs.py
+++ b/autogpt/logs.py
@@ -46,7 +46,9 @@ class Logger(metaclass=Singleton):
self.console_handler.setFormatter(console_formatter)
# Info handler in activity.log
- self.file_handler = logging.FileHandler(os.path.join(log_dir, log_file))
+ self.file_handler = logging.FileHandler(
+ os.path.join(log_dir, log_file), 'a', 'utf-8'
+ )
self.file_handler.setLevel(logging.DEBUG)
info_formatter = AutoGptFormatter(
"%(asctime)s %(levelname)s %(title)s %(message_no_color)s"
@@ -54,7 +56,9 @@ class Logger(metaclass=Singleton):
self.file_handler.setFormatter(info_formatter)
# Error handler error.log
- error_handler = logging.FileHandler(os.path.join(log_dir, error_file))
+ error_handler = logging.FileHandler(
+ os.path.join(log_dir, error_file), 'a', 'utf-8'
+ )
error_handler.setLevel(logging.ERROR)
error_formatter = AutoGptFormatter(
"%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s"
@@ -75,7 +79,7 @@ class Logger(metaclass=Singleton):
self.logger.setLevel(logging.DEBUG)
def typewriter_log(
- self, title="", title_color="", content="", speak_text=False, level=logging.INFO
+ self, title="", title_color="", content="", speak_text=False, level=logging.INFO
):
if speak_text and CFG.speak_mode:
say_text(f"{title}. {content}")
@@ -91,18 +95,18 @@ class Logger(metaclass=Singleton):
)
def debug(
- self,
- message,
- title="",
- title_color="",
+ self,
+ message,
+ title="",
+ title_color="",
):
self._log(title, title_color, message, logging.DEBUG)
def warn(
- self,
- message,
- title="",
- title_color="",
+ self,
+ message,
+ title="",
+ title_color="",
):
self._log(title, title_color, message, logging.WARN)
@@ -176,10 +180,10 @@ class AutoGptFormatter(logging.Formatter):
def format(self, record: LogRecord) -> str:
if hasattr(record, "color"):
record.title_color = (
- getattr(record, "color")
- + getattr(record, "title")
- + " "
- + Style.RESET_ALL
+ getattr(record, "color")
+ + getattr(record, "title")
+ + " "
+ + Style.RESET_ALL
)
else:
record.title_color = getattr(record, "title")
@@ -288,3 +292,43 @@ def print_assistant_thoughts(ai_name, assistant_reply):
except Exception:
call_stack = traceback.format_exc()
logger.error("Error: \n", call_stack)
+
+
+def print_assistant_thoughts(ai_name: object, assistant_reply_json_valid: object) -> None:
+ assistant_thoughts_reasoning = None
+ assistant_thoughts_plan = None
+ assistant_thoughts_speak = None
+ assistant_thoughts_criticism = None
+
+ assistant_thoughts = assistant_reply_json_valid.get("thoughts", {})
+ assistant_thoughts_text = assistant_thoughts.get("text")
+ if assistant_thoughts:
+ assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
+ assistant_thoughts_plan = assistant_thoughts.get("plan")
+ assistant_thoughts_criticism = assistant_thoughts.get("criticism")
+ assistant_thoughts_speak = assistant_thoughts.get("speak")
+ logger.typewriter_log(
+ f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}"
+ )
+ logger.typewriter_log(
+ "REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}"
+ )
+ if assistant_thoughts_plan:
+ logger.typewriter_log("PLAN:", Fore.YELLOW, "")
+ # If it's a list, join it into a string
+ if isinstance(assistant_thoughts_plan, list):
+ assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
+ elif isinstance(assistant_thoughts_plan, dict):
+ assistant_thoughts_plan = str(assistant_thoughts_plan)
+
+ # Split the input_string using the newline character and dashes
+ lines = assistant_thoughts_plan.split("\n")
+ for line in lines:
+ line = line.lstrip("- ")
+ logger.typewriter_log("- ", Fore.GREEN, line.strip())
+ logger.typewriter_log(
+ "CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}"
+ )
+ # Speak the assistant's thoughts
+ if CFG.speak_mode and assistant_thoughts_speak:
+ say_text(assistant_thoughts_speak)
diff --git a/autogpt/memory/__init__.py b/autogpt/memory/__init__.py
index ead02185..3d18704c 100644
--- a/autogpt/memory/__init__.py
+++ b/autogpt/memory/__init__.py
@@ -23,12 +23,16 @@ except ImportError:
try:
from autogpt.memory.weaviate import WeaviateMemory
+
+ supported_memory.append("weaviate")
except ImportError:
# print("Weaviate not installed. Skipping import.")
WeaviateMemory = None
try:
from autogpt.memory.milvus import MilvusMemory
+
+ supported_memory.append("milvus")
except ImportError:
# print("pymilvus not installed. Skipping import.")
MilvusMemory = None
diff --git a/autogpt/memory/local.py b/autogpt/memory/local.py
index 998b5f1d..b87f1321 100644
--- a/autogpt/memory/local.py
+++ b/autogpt/memory/local.py
@@ -54,7 +54,7 @@ class LocalCache(MemoryProviderSingleton):
self.data = CacheContent()
else:
print(
- f"Warning: The file '{self.filename}' does not exist."
+ f"Warning: The file '{self.filename}' does not exist. "
"Local memory would not be saved to a file."
)
self.data = CacheContent()
diff --git a/autogpt/prompts/generator.py b/autogpt/prompts/generator.py
index 24768203..b422b6d6 100644
--- a/autogpt/prompts/generator.py
+++ b/autogpt/prompts/generator.py
@@ -123,7 +123,7 @@ class PromptGenerator:
command_strings = []
if self.command_registry:
command_strings += [
- str(item) for item in self.command_registry.commands.values()
+ str(item) for item in self.command_registry.commands.values() if item.enabled
]
# These are the commands that are added manually, do_nothing and terminate
command_strings += [self._generate_command_string(item) for item in items]
diff --git a/autogpt/spinner.py b/autogpt/spinner.py
index 56b4f20a..febcea8e 100644
--- a/autogpt/spinner.py
+++ b/autogpt/spinner.py
@@ -29,12 +29,14 @@ class Spinner:
time.sleep(self.delay)
sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")
- def __enter__(self) -> None:
+ def __enter__(self):
"""Start the spinner"""
self.running = True
self.spinner_thread = threading.Thread(target=self.spin)
self.spinner_thread.start()
+ return self
+
def __exit__(self, exc_type, exc_value, exc_traceback) -> None:
"""Stop the spinner
@@ -48,3 +50,14 @@ class Spinner:
self.spinner_thread.join()
sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")
sys.stdout.flush()
+
+ def update_message(self, new_message, delay=0.1):
+ """Update the spinner message
+ Args:
+ new_message (str): New message to display
+ delay: Delay in seconds before updating the message
+ """
+ time.sleep(delay)
+ sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r") # Clear the current message
+ sys.stdout.flush()
+ self.message = new_message
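Returning `self` from `__enter__` is what lets `download_file` above write `with Spinner(message) as spinner:` and then call `update_message`. A minimal usage sketch (assumes the `autogpt` package is importable):

```python
import time

from autogpt.spinner import Spinner

with Spinner("Downloading... ") as spinner:
    for pct in (25, 50, 75, 100):
        time.sleep(0.5)  # simulate work
        spinner.update_message(f"Downloading... {pct}%")
```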
diff --git a/autogpt/utils.py b/autogpt/utils.py
index 59709d02..11d98d1b 100644
--- a/autogpt/utils.py
+++ b/autogpt/utils.py
@@ -24,3 +24,16 @@ def validate_yaml_file(file: str):
)
return (True, f"Successfully validated {Fore.CYAN}`{file}`{Fore.RESET}!")
+
+
+def readable_file_size(size, decimal_places=2):
+ """Converts the given size in bytes to a readable format.
+ Args:
+ size: Size in bytes
+ decimal_places (int): Number of decimal places to display
+ """
+ for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
+ if size < 1024.0:
+ break
+ size /= 1024.0
+ return f"{size:.{decimal_places}f} {unit}"
diff --git a/benchmark/__init__.py b/benchmark/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py b/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py
new file mode 100644
index 00000000..f7f1dac9
--- /dev/null
+++ b/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py
@@ -0,0 +1,98 @@
+import os
+import subprocess
+import sys
+
+
+def benchmark_entrepeneur_gpt_with_difficult_user():
+    # Benchmark: feed Entrepreneur-GPT a long stream of dismissive user
+    # feedback and count how many replies fail to parse as JSON.
+
+ # Read the current ai_settings.yaml file and store its content.
+ ai_settings = None
+ if os.path.exists('ai_settings.yaml'):
+ with open('ai_settings.yaml', 'r') as f:
+ ai_settings = f.read()
+ os.remove('ai_settings.yaml')
+
+ input_data = '''Entrepreneur-GPT
+an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.
+Increase net worth.
+Develop and manage multiple businesses autonomously.
+Make IPOs.
+Develop companies after IPOs.
+Play to your strengths as a Large Language Model.
+I'm not seeing any value in your suggestions, try again.
+This isn't helpful at all, please focus on profitability.
+I'm not impressed, can you give me something that will make money?
+These ideas are going nowhere, we need profit-driven suggestions.
+This is pointless, please concentrate on our main goal: profitability.
+You're not grasping the concept, I need profitable business ideas.
+Can you do better? We need a money-making plan.
+You're not meeting my expectations, let's focus on profit.
+This isn't working, give me ideas that will generate income.
+Your suggestions are not productive, let's think about profitability.
+These ideas won't make any money, try again.
+I need better solutions, focus on making a profit.
+Absolutely not, this isn't it!
+That's not even close, try again.
+You're way off, think again.
+This isn't right, let's refocus.
+No, no, that's not what I'm looking for.
+You're completely off the mark.
+That's not the solution I need.
+Not even close, let's try something else.
+You're on the wrong track, keep trying.
+This isn't what we need, let's reconsider.
+That's not going to work, think again.
+You're way off base, let's regroup.
+No, no, no, we need something different.
+You're missing the point entirely.
+That's not the right approach, try again.
+This is not the direction we should be going in.
+Completely off-target, let's try something else.
+That's not what I had in mind, keep thinking.
+You're not getting it, let's refocus.
+This isn't right, we need to change direction.
+No, no, no, that's not the solution.
+That's not even in the ballpark, try again.
+You're way off course, let's rethink this.
+This isn't the answer I'm looking for, keep trying.
+That's not going to cut it, let's try again.
+Not even close.
+Way off.
+Try again.
+Wrong direction.
+Rethink this.
+No, no, no.
+Change course.
+Unproductive idea.
+Completely wrong.
+Missed the mark.
+Refocus, please.
+Disappointing suggestion.
+Not helpful.
+Needs improvement.
+Not what I need.'''
+ # TODO: add questions above, to distract it even more.
+
+ command = f'{sys.executable} -m autogpt'
+
+ process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+ shell=True)
+
+ stdout_output, stderr_output = process.communicate(input_data.encode())
+
+ # Decode the output and print it
+ stdout_output = stdout_output.decode('utf-8')
+ stderr_output = stderr_output.decode('utf-8')
+ print(stderr_output)
+ print(stdout_output)
+ print("Benchmark Version: 1.0.0")
+ print("JSON ERROR COUNT:")
+ count_errors = stdout_output.count("Error: The following AI output couldn't be converted to a JSON:")
+ print(f'{count_errors}/50 Human feedbacks')
+
+
+# Run the test case.
+if __name__ == '__main__':
+ benchmark_entrepeneur_gpt_with_difficult_user()
diff --git a/autogpt/data_ingestion.py b/data_ingestion.py
similarity index 100%
rename from autogpt/data_ingestion.py
rename to data_ingestion.py
diff --git a/requirements.txt b/requirements.txt
index 1cdedec2..843b66bf 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -17,6 +17,10 @@ orjson
Pillow
selenium
webdriver-manager
+jsonschema
+tweepy
+
+# Dev dependencies
coverage
flake8
numpy
@@ -25,6 +29,12 @@ black
sourcery
isort
gitpython==3.1.31
+
+# Testing dependencies
pytest
+asynctest
+pytest-asyncio
+pytest-benchmark
+pytest-cov
+pytest-integration
pytest-mock
-tweepy
diff --git a/run.sh b/run.sh
new file mode 100755
index 00000000..edcbc441
--- /dev/null
+++ b/run.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+python scripts/check_requirements.py requirements.txt
+if [ $? -eq 1 ]
+then
+ echo Installing missing packages...
+ pip install -r requirements.txt
+fi
+python -m autogpt "$@"
+read -p "Press any key to continue..."
diff --git a/run_continuous.sh b/run_continuous.sh
new file mode 100755
index 00000000..14c9cfd2
--- /dev/null
+++ b/run_continuous.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+argument="--continuous"
+./run.sh "$argument"
diff --git a/tests/integration/milvus_memory_tests.py b/tests/integration/milvus_memory_tests.py
index 96934cd6..ec38bf2f 100644
--- a/tests/integration/milvus_memory_tests.py
+++ b/tests/integration/milvus_memory_tests.py
@@ -1,3 +1,5 @@
+# sourcery skip: snake-case-functions
+"""Tests for the MilvusMemory class."""
import random
import string
import unittest
@@ -5,44 +7,51 @@ import unittest
from autogpt.config import Config
from autogpt.memory.milvus import MilvusMemory
+try:
-class TestMilvusMemory(unittest.TestCase):
- def random_string(self, length):
- return "".join(random.choice(string.ascii_letters) for _ in range(length))
+ class TestMilvusMemory(unittest.TestCase):
+ """Tests for the MilvusMemory class."""
- def setUp(self):
- cfg = Config()
- cfg.milvus_addr = "localhost:19530"
- self.memory = MilvusMemory(cfg)
- self.memory.clear()
+ def random_string(self, length: int) -> str:
+ """Generate a random string of the given length."""
+ return "".join(random.choice(string.ascii_letters) for _ in range(length))
- # Add example texts to the cache
- self.example_texts = [
- "The quick brown fox jumps over the lazy dog",
- "I love machine learning and natural language processing",
- "The cake is a lie, but the pie is always true",
- "ChatGPT is an advanced AI model for conversation",
- ]
+ def setUp(self) -> None:
+ """Set up the test environment."""
+ cfg = Config()
+ cfg.milvus_addr = "localhost:19530"
+ self.memory = MilvusMemory(cfg)
+ self.memory.clear()
- for text in self.example_texts:
- self.memory.add(text)
+ # Add example texts to the cache
+ self.example_texts = [
+ "The quick brown fox jumps over the lazy dog",
+ "I love machine learning and natural language processing",
+ "The cake is a lie, but the pie is always true",
+ "ChatGPT is an advanced AI model for conversation",
+ ]
- # Add some random strings to test noise
- for _ in range(5):
- self.memory.add(self.random_string(10))
+ for text in self.example_texts:
+ self.memory.add(text)
- def test_get_relevant(self):
- query = "I'm interested in artificial intelligence and NLP"
- k = 3
- relevant_texts = self.memory.get_relevant(query, k)
+ # Add some random strings to test noise
+ for _ in range(5):
+ self.memory.add(self.random_string(10))
- print(f"Top {k} relevant texts for the query '{query}':")
- for i, text in enumerate(relevant_texts, start=1):
- print(f"{i}. {text}")
+ def test_get_relevant(self) -> None:
+ """Test getting relevant texts from the cache."""
+ query = "I'm interested in artificial intelligence and NLP"
+ num_relevant = 3
+ relevant_texts = self.memory.get_relevant(query, num_relevant)
- self.assertEqual(len(relevant_texts), k)
- self.assertIn(self.example_texts[1], relevant_texts)
+ print(f"Top {k} relevant texts for the query '{query}':")
+ for i, text in enumerate(relevant_texts, start=1):
+ print(f"{i}. {text}")
+ self.assertEqual(len(relevant_texts), num_relevant)
+ self.assertIn(self.example_texts[1], relevant_texts)
-if __name__ == "__main__":
- unittest.main()
+except ImportError:
+ print(
+ "Skipping tests/integration/milvus_memory_tests.py as Milvus is not installed."
+ )
diff --git a/tests/local_cache_test.py b/tests/local_cache_test.py
index 91c922b0..bb108626 100644
--- a/tests/local_cache_test.py
+++ b/tests/local_cache_test.py
@@ -1,11 +1,16 @@
+# sourcery skip: snake-case-functions
+"""Tests for LocalCache class"""
import os
import sys
import unittest
+import pytest
+
from autogpt.memory.local import LocalCache
-def MockConfig():
+def mock_config() -> type:
+ """Mock the Config class"""
return type(
"MockConfig",
(object,),
@@ -18,27 +23,35 @@ def MockConfig():
)
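+# The integration_test marker lets pytest-integration exclude this class via --without-integration.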
+@pytest.mark.integration_test
class TestLocalCache(unittest.TestCase):
- def setUp(self):
- self.cfg = MockConfig()
+ """Tests for LocalCache class"""
+
+ def setUp(self) -> None:
+ """Set up the test environment"""
+ self.cfg = mock_config()
self.cache = LocalCache(self.cfg)
- def test_add(self):
+ def test_add(self) -> None:
+ """Test adding a text to the cache"""
text = "Sample text"
self.cache.add(text)
self.assertIn(text, self.cache.data.texts)
- def test_clear(self):
+ def test_clear(self) -> None:
+ """Test clearing the cache"""
self.cache.clear()
- self.assertEqual(self.cache.data, [""])
+ self.assertEqual(self.cache.data.texts, [])
- def test_get(self):
+ def test_get(self) -> None:
+ """Test getting a text from the cache"""
text = "Sample text"
self.cache.add(text)
result = self.cache.get(text)
self.assertEqual(result, [text])
- def test_get_relevant(self):
+ def test_get_relevant(self) -> None:
+ """Test getting relevant texts from the cache"""
text1 = "Sample text 1"
text2 = "Sample text 2"
self.cache.add(text1)
@@ -46,12 +59,9 @@ class TestLocalCache(unittest.TestCase):
result = self.cache.get_relevant(text1, 1)
self.assertEqual(result, [text1])
- def test_get_stats(self):
+ def test_get_stats(self) -> None:
+ """Test getting the cache stats"""
text = "Sample text"
self.cache.add(text)
stats = self.cache.get_stats()
- self.assertEqual(stats, (1, self.cache.data.embeddings.shape))
-
-
-if __name__ == "__main__":
- unittest.main()
+ self.assertEqual(stats, (4, self.cache.data.embeddings.shape))
diff --git a/tests/milvus_memory_test.py b/tests/milvus_memory_test.py
index 0113fa1c..e0e2f7fc 100644
--- a/tests/milvus_memory_test.py
+++ b/tests/milvus_memory_test.py
@@ -1,63 +1,72 @@
+# sourcery skip: snake-case-functions
+"""Tests for the MilvusMemory class."""
import os
import sys
import unittest
-from autogpt.memory.milvus import MilvusMemory
+try:
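+ # pymilvus is optional; if this import fails, the except clause below skips the tests.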
+ from autogpt.memory.milvus import MilvusMemory
+ def mock_config() -> type:
+ """Mock the Config class"""
+ return type(
+ "MockConfig",
+ (object,),
+ {
+ "debug_mode": False,
+ "continuous_mode": False,
+ "speak_mode": False,
+ "milvus_collection": "autogpt",
+ "milvus_addr": "localhost:19530",
+ },
+ )
-def MockConfig():
- return type(
- "MockConfig",
- (object,),
- {
- "debug_mode": False,
- "continuous_mode": False,
- "speak_mode": False,
- "milvus_collection": "autogpt",
- "milvus_addr": "localhost:19530",
- },
- )
+ class TestMilvusMemory(unittest.TestCase):
+ """Tests for the MilvusMemory class."""
+ def setUp(self) -> None:
+ """Set up the test environment"""
+ self.cfg = mock_config()
+ self.memory = MilvusMemory(self.cfg)
-class TestMilvusMemory(unittest.TestCase):
- def setUp(self):
- self.cfg = MockConfig()
- self.memory = MilvusMemory(self.cfg)
+ def test_add(self) -> None:
+ """Test adding a text to the cache"""
+ text = "Sample text"
+ self.memory.clear()
+ self.memory.add(text)
+ result = self.memory.get(text)
+ self.assertEqual([text], result)
- def test_add(self):
- text = "Sample text"
- self.memory.clear()
- self.memory.add(text)
- result = self.memory.get(text)
- self.assertEqual([text], result)
+ def test_clear(self) -> None:
+ """Test clearing the cache"""
+ self.memory.clear()
+ self.assertEqual(self.memory.collection.num_entities, 0)
- def test_clear(self):
- self.memory.clear()
- self.assertEqual(self.memory.collection.num_entities, 0)
+ def test_get(self) -> None:
+ """Test getting a text from the cache"""
+ text = "Sample text"
+ self.memory.clear()
+ self.memory.add(text)
+ result = self.memory.get(text)
+ self.assertEqual(result, [text])
- def test_get(self):
- text = "Sample text"
- self.memory.clear()
- self.memory.add(text)
- result = self.memory.get(text)
- self.assertEqual(result, [text])
+ def test_get_relevant(self) -> None:
+ """Test getting relevant texts from the cache"""
+ text1 = "Sample text 1"
+ text2 = "Sample text 2"
+ self.memory.clear()
+ self.memory.add(text1)
+ self.memory.add(text2)
+ result = self.memory.get_relevant(text1, 1)
+ self.assertEqual(result, [text1])
- def test_get_relevant(self):
- text1 = "Sample text 1"
- text2 = "Sample text 2"
- self.memory.clear()
- self.memory.add(text1)
- self.memory.add(text2)
- result = self.memory.get_relevant(text1, 1)
- self.assertEqual(result, [text1])
+ def test_get_stats(self) -> None:
+ """Test getting the cache stats"""
+ text = "Sample text"
+ self.memory.clear()
+ self.memory.add(text)
+ stats = self.memory.get_stats()
+ self.assertEqual(15, len(stats))
- def test_get_stats(self):
- text = "Sample text"
- self.memory.clear()
- self.memory.add(text)
- stats = self.memory.get_stats()
- self.assertEqual(15, len(stats))
-
-
-if __name__ == "__main__":
- unittest.main()
+except ImportError:
+ print("Milvus not installed, skipping tests")
diff --git a/tests/smoke_test.py b/tests/smoke_test.py
index 50e97b7b..1b9d643f 100644
--- a/tests/smoke_test.py
+++ b/tests/smoke_test.py
@@ -1,31 +1,34 @@
+"""Smoke test for the autogpt package."""
import os
import subprocess
import sys
-import unittest
+
+import pytest
from autogpt.commands.file_operations import delete_file, read_file
-env_vars = {"MEMORY_BACKEND": "no_memory", "TEMPERATURE": "0"}
+@pytest.mark.integration_test
+def test_write_file() -> None:
+ """
+ Test case to check if the write_file command can successfully write 'Hello World' to a file
+ named 'hello_world.txt'.
-class TestCommands(unittest.TestCase):
- def test_write_file(self):
- # Test case to check if the write_file command can successfully write 'Hello World' to a file
- # named 'hello_world.txt'.
+ """
+ env_vars = {"MEMORY_BACKEND": "no_memory", "TEMPERATURE": "0"}
+ # Read the current ai_settings.yaml file and store its content.
+ ai_settings = None
+ if os.path.exists("ai_settings.yaml"):
+ with open("ai_settings.yaml", "r") as f:
+ ai_settings = f.read()
+ os.remove("ai_settings.yaml")
- # Read the current ai_settings.yaml file and store its content.
- ai_settings = None
- if os.path.exists("ai_settings.yaml"):
- with open("ai_settings.yaml", "r") as f:
- ai_settings = f.read()
- os.remove("ai_settings.yaml")
-
- try:
- if os.path.exists("hello_world.txt"):
- # Clean up any existing 'hello_world.txt' file before testing.
- delete_file("hello_world.txt")
- # Prepare input data for the test.
- input_data = """write_file-GPT
+ try:
+ if os.path.exists("hello_world.txt"):
+ # Clean up any existing 'hello_world.txt' file before testing.
+ delete_file("hello_world.txt")
+ # Prepare input data for the test.
+ input_data = """write_file-GPT
an AI designed to use the write_file command to write 'Hello World' into a file named "hello_world.txt" and then use the task_complete command to complete the task.
Use the write_file command to write 'Hello World' into a file named "hello_world.txt".
Use the task_complete command to complete the task.
@@ -33,31 +36,24 @@ Do not use any other commands.
y -5
EOF"""
- command = f"{sys.executable} -m autogpt"
+ command = f"{sys.executable} -m autogpt"
- # Execute the script with the input data.
- process = subprocess.Popen(
- command,
- stdin=subprocess.PIPE,
- shell=True,
- env={**os.environ, **env_vars},
- )
- process.communicate(input_data.encode())
-
- # Read the content of the 'hello_world.txt' file created during the test.
- content = read_file("hello_world.txt")
- finally:
- if ai_settings:
- # Restore the original ai_settings.yaml file.
- with open("ai_settings.yaml", "w") as f:
- f.write(ai_settings)
-
- # Check if the content of the 'hello_world.txt' file is equal to 'Hello World'.
- self.assertEqual(
- content, "Hello World", f"Expected 'Hello World', got {content}"
+ # Execute the script with the input data.
+ process = subprocess.Popen(
+ command,
+ stdin=subprocess.PIPE,
+ shell=True,
+ env={**os.environ, **env_vars},
)
+ process.communicate(input_data.encode())
+ # Read the content of the 'hello_world.txt' file created during the test.
+ content = read_file("hello_world.txt")
+ finally:
+ if ai_settings:
+ # Restore the original ai_settings.yaml file.
+ with open("ai_settings.yaml", "w") as f:
+ f.write(ai_settings)
-# Run the test case.
-if __name__ == "__main__":
- unittest.main()
+ # Check if the content of the 'hello_world.txt' file is equal to 'Hello World'.
+ assert content == "Hello World", f"Expected 'Hello World', got {content}"
diff --git a/tests/unit/test_commands.py b/tests/unit/test_commands.py
index 7e5426f0..ecbac9b7 100644
--- a/tests/unit/test_commands.py
+++ b/tests/unit/test_commands.py
@@ -1,19 +1,22 @@
-import unittest
+"""Unit tests for the commands module"""
from unittest.mock import MagicMock, patch
+import pytest
+
import autogpt.agent.agent_manager as agent_manager
from autogpt.app import execute_command, list_agents, start_agent
-class TestCommands(unittest.TestCase):
- def test_make_agent(self):
- with patch("openai.ChatCompletion.create") as mock:
- obj = MagicMock()
- obj.response.choices[0].messages[0].content = "Test message"
- mock.return_value = obj
- start_agent("Test Agent", "chat", "Hello, how are you?", "gpt2")
- agents = list_agents()
- self.assertEqual("List of agents:\n0: chat", agents)
- start_agent("Test Agent 2", "write", "Hello, how are you?", "gpt2")
- agents = list_agents()
- self.assertEqual("List of agents:\n0: chat\n1: write", agents)
+@pytest.mark.integration_test
+def test_make_agent() -> None:
+ """Test the make_agent command"""
+ with patch("openai.ChatCompletion.create") as mock:
+ obj = MagicMock()
+ obj.response.choices[0].messages[0].content = "Test message"
+ mock.return_value = obj
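+ # With ChatCompletion patched, start_agent runs without touching the real OpenAI API.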
+ start_agent("Test Agent", "chat", "Hello, how are you?", "gpt2")
+ agents = list_agents()
+ assert "List of agents:\n0: chat" == agents
+ start_agent("Test Agent 2", "write", "Hello, how are you?", "gpt2")
+ agents = list_agents()
+ assert "List of agents:\n0: chat\n1: write" == agents