Merge branch 'master' into add_website_memory

This commit is contained in:
Maiko Bossuyt
2023-04-14 00:45:41 +02:00
committed by GitHub
37 changed files with 789 additions and 270 deletions

View File

@@ -1,19 +1,117 @@
PINECONE_API_KEY=your-pinecone-api-key ################################################################################
PINECONE_ENV=your-pinecone-region ### AUTO-GPT - GENERAL SETTINGS
################################################################################
# EXECUTE_LOCAL_COMMANDS - Allow local command execution (Example: False)
EXECUTE_LOCAL_COMMANDS=False
# BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunk stored in memory
BROWSE_CHUNK_MAX_LENGTH=4000
# BROWSE_SUMMARY_MAX_TOKEN - Define the maximum length of the summary generated by GPT agent when browsing website
BROWSE_SUMMARY_MAX_TOKEN=300
# USER_AGENT - Define the user-agent used by the requests library to browse website (string)
# USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
################################################################################
### LLM PROVIDER
################################################################################
### OPENAI
# OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key)
# TEMPERATURE - Sets temperature in OpenAI (Default: 1)
# USE_AZURE - Use Azure OpenAI or not (Default: False)
OPENAI_API_KEY=your-openai-api-key OPENAI_API_KEY=your-openai-api-key
TEMPERATURE=1 TEMPERATURE=1
ELEVENLABS_API_KEY=your-elevenlabs-api-key USE_AZURE=False
ELEVENLABS_VOICE_1_ID=your-voice-id
ELEVENLABS_VOICE_2_ID=your-voice-id ### AZURE
# OPENAI_AZURE_API_BASE - OpenAI API base URL for Azure (Example: https://my-azure-openai-url.com)
# OPENAI_AZURE_API_VERSION - OpenAI API version for Azure (Example: v1)
# OPENAI_AZURE_DEPLOYMENT_ID - OpenAI deployment ID for Azure (Example: my-deployment-id)
# OPENAI_AZURE_CHAT_DEPLOYMENT_ID - OpenAI deployment ID for Azure Chat (Example: my-deployment-id-for-azure-chat)
# OPENAI_AZURE_EMBEDDINGS_DEPLOYMENT_ID - OpenAI deployment ID for Embedding (Example: my-deployment-id-for-azure-embeddigs)
OPENAI_AZURE_API_BASE=your-base-url-for-azure
OPENAI_AZURE_API_VERSION=api-version-for-azure
OPENAI_AZURE_DEPLOYMENT_ID=deployment-id-for-azure
OPENAI_AZURE_CHAT_DEPLOYMENT_ID=deployment-id-for-azure-chat
OPENAI_AZURE_EMBEDDINGS_DEPLOYMENT_ID=deployment-id-for-azure-embeddigs
################################################################################
### LLM MODELS
################################################################################
# SMART_LLM_MODEL - Smart language model (Default: gpt-4)
# FAST_LLM_MODEL - Fast language model (Default: gpt-3.5-turbo)
SMART_LLM_MODEL=gpt-4 SMART_LLM_MODEL=gpt-4
FAST_LLM_MODEL=gpt-3.5-turbo FAST_LLM_MODEL=gpt-3.5-turbo
GOOGLE_API_KEY=
CUSTOM_SEARCH_ENGINE_ID= ### LLM MODEL SETTINGS
USE_AZURE=False # FAST_TOKEN_LIMIT - Fast token limit for OpenAI (Default: 4000)
EXECUTE_LOCAL_COMMANDS=False # SMART_TOKEN_LIMIT - Smart token limit for OpenAI (Default: 8000)
IMAGE_PROVIDER=dalle # When using --gpt3onlythis needs to be set to 4000.
HUGGINGFACE_API_TOKEN= FAST_TOKEN_LIMIT=4000
USE_MAC_OS_TTS=False SMART_TOKEN_LIMIT=8000
BROWSE_CHUNK_MAX_LENGTH=4000
BROWSE_SUMMARY_MAX_TOKEN=300 ################################################################################
### MEMORY
################################################################################
# MEMORY_BACKEND - Memory backend type (Default: local)
MEMORY_BACKEND=local MEMORY_BACKEND=local
### PINECONE
# PINECONE_API_KEY - Pinecone API Key (Example: my-pinecone-api-key)
# PINECONE_ENV - Pinecone environment (region) (Example: us-west-2)
PINECONE_API_KEY=your-pinecone-api-key
PINECONE_ENV=your-pinecone-region
### REDIS
# REDIS_HOST - Redis host (Default: localhost)
# REDIS_PORT - Redis port (Default: 6379)
# REDIS_PASSWORD - Redis password (Default: "")
# WIPE_REDIS_ON_START - Wipes data / index on start (Default: False)
# MEMORY_INDEX - Name of index created in Redis database (Default: auto-gpt)
REDIS_HOST=localhost
REDIS_PORT=6379
REDIS_PASSWORD=
WIPE_REDIS_ON_START=False
MEMORY_INDEX=auto-gpt
################################################################################
### IMAGE GENERATION PROVIDER
################################################################################
### OPEN AI
# IMAGE_PROVIDER - Image provider (Example: dalle)
IMAGE_PROVIDER=dalle
### HUGGINGFACE
# STABLE DIFFUSION
# (Default URL: https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4)
# Set in image_gen.py)
# HUGGINGFACE_API_TOKEN - HuggingFace API token (Example: my-huggingface-api-token)
HUGGINGFACE_API_TOKEN=your-huggingface-api-token
################################################################################
### SEARCH PROVIDER
################################################################################
### GOOGLE
# GOOGLE_API_KEY - Google API key (Example: my-google-api-key)
# CUSTOM_SEARCH_ENGINE_ID - Custom search engine ID (Example: my-custom-search-engine-id)
GOOGLE_API_KEY=your-google-api-key
CUSTOM_SEARCH_ENGINE_ID=your-custom-search-engine-id
################################################################################
### TTS PROVIDER
################################################################################
### MAC OS
# USE_MAC_OS_TTS - Use Mac OS TTS or not (Default: False)
USE_MAC_OS_TTS=False
### ELEVENLABS
# ELEVENLABS_API_KEY - Eleven Labs API key (Example: my-elevenlabs-api-key)
# ELEVENLABS_VOICE_1_ID - Eleven Labs voice 1 ID (Example: my-voice-id-1)
# ELEVENLABS_VOICE_2_ID - Eleven Labs voice 2 ID (Example: my-voice-id-2)
ELEVENLABS_API_KEY=your-elevenlabs-api-key
ELEVENLABS_VOICE_1_ID=your-voice-id-1
ELEVENLABS_VOICE_2_ID=your-voice-id-2

View File

@@ -26,7 +26,7 @@ By following these guidelines, your PRs are more likely to be merged quickly aft
- [ ] I have thoroughly tested my changes with multiple different prompts. - [ ] I have thoroughly tested my changes with multiple different prompts.
- [ ] I have considered potential risks and mitigations for my changes. - [ ] I have considered potential risks and mitigations for my changes.
- [ ] I have documented my changes clearly and comprehensively. - [ ] I have documented my changes clearly and comprehensively.
- [ ] I have not snuck in any "extra" small tweaks changes <!-- Submit these as separate Pull Reqests, they are the easiest to merge! --> - [ ] I have not snuck in any "extra" small tweaks changes <!-- Submit these as separate Pull Requests, they are the easiest to merge! -->
<!-- If you haven't added tests, please explain why. If you have, check the appropriate box. If you've ensured your PR is atomic and well-documented, check the corresponding boxes. --> <!-- If you haven't added tests, please explain why. If you have, check the appropriate box. If you've ensured your PR is atomic and well-documented, check the corresponding boxes. -->

View File

@@ -32,7 +32,7 @@ jobs:
- name: Lint with flake8 - name: Lint with flake8
continue-on-error: false continue-on-error: false
run: flake8 scripts/ tests/ --select E303,W293,W291,W292,E305 run: flake8 scripts/ tests/ --select E303,W293,W291,W292,E305,E231,E302
- name: Run unittest tests with coverage - name: Run unittest tests with coverage
run: | run: |

View File

@@ -1,9 +1,9 @@
# Auto-GPT: An Autonomous GPT-4 Experiment # Auto-GPT: An Autonomous GPT-4 Experiment
![GitHub Repo stars](https://img.shields.io/github/stars/Torantulino/auto-gpt?style=social) ![GitHub Repo stars](https://img.shields.io/github/stars/Torantulino/auto-gpt?style=social)
![Twitter Follow](https://img.shields.io/twitter/follow/siggravitas?style=social) [![Twitter Follow](https://img.shields.io/twitter/follow/siggravitas?style=social)](https://twitter.com/SigGravitas)
[![](https://dcbadge.vercel.app/api/server/PQ7VX6TY4t?style=flat)](https://discord.gg/PQ7VX6TY4t) [![Discord Follow](https://dcbadge.vercel.app/api/server/autogpt?style=flat)](https://discord.gg/autogpt)
[![Unit Tests](https://github.com/Torantulino/Auto-GPT/actions/workflows/unit_tests.yml/badge.svg)](https://github.com/Torantulino/Auto-GPT/actions/workflows/unit_tests.yml) [![Unit Tests](https://github.com/Torantulino/Auto-GPT/actions/workflows/ci.yml/badge.svg)](https://github.com/Torantulino/Auto-GPT/actions/workflows/ci.yml)
Auto-GPT is an experimental open-source application showcasing the capabilities of the GPT-4 language model. This program, driven by GPT-4, chains together LLM "thoughts", to autonomously achieve whatever goal you set. As one of the first examples of GPT-4 running fully autonomously, Auto-GPT pushes the boundaries of what is possible with AI. Auto-GPT is an experimental open-source application showcasing the capabilities of the GPT-4 language model. This program, driven by GPT-4, chains together LLM "thoughts", to autonomously achieve whatever goal you set. As one of the first examples of GPT-4 running fully autonomously, Auto-GPT pushes the boundaries of what is possible with AI.
@@ -32,21 +32,28 @@ Your support is greatly appreciated
- [Auto-GPT: An Autonomous GPT-4 Experiment](#auto-gpt-an-autonomous-gpt-4-experiment) - [Auto-GPT: An Autonomous GPT-4 Experiment](#auto-gpt-an-autonomous-gpt-4-experiment)
- [Demo (30/03/2023):](#demo-30032023) - [Demo (30/03/2023):](#demo-30032023)
- [💖 Help Fund Auto-GPT's Development](#-help-fund-auto-gpts-development)
- [Table of Contents](#table-of-contents) - [Table of Contents](#table-of-contents)
- [🚀 Features](#-features) - [🚀 Features](#-features)
- [📋 Requirements](#-requirements) - [📋 Requirements](#-requirements)
- [💾 Installation](#-installation) - [💾 Installation](#-installation)
- [🔧 Usage](#-usage) - [🔧 Usage](#-usage)
- [Logs](#logs)
- [🗣️ Speech Mode](#-speech-mode) - [🗣️ Speech Mode](#-speech-mode)
- [🔍 Google API Keys Configuration](#-google-api-keys-configuration) - [🔍 Google API Keys Configuration](#-google-api-keys-configuration)
- [Setting up environment variables](#setting-up-environment-variables) - [Setting up environment variables](#setting-up-environment-variables)
- [Redis Setup](#redis-setup)
- [🌲 Pinecone API Key Setup](#-pinecone-api-key-setup)
- [Setting up environment variables](#setting-up-environment-variables-1)
- [Setting Your Cache Type](#setting-your-cache-type)
- [View Memory Usage](#view-memory-usage)
- [💀 Continuous Mode ⚠️](#-continuous-mode-) - [💀 Continuous Mode ⚠️](#-continuous-mode-)
- [GPT3.5 ONLY Mode](#gpt35-only-mode) - [GPT3.5 ONLY Mode](#gpt35-only-mode)
- [🖼 Image Generation](#image-generation) - [🖼 Image Generation](#-image-generation)
- [⚠️ Limitations](#-limitations) - [⚠️ Limitations](#-limitations)
- [🛡 Disclaimer](#-disclaimer) - [🛡 Disclaimer](#-disclaimer)
- [🐦 Connect with Us on Twitter](#-connect-with-us-on-twitter) - [🐦 Connect with Us on Twitter](#-connect-with-us-on-twitter)
- [Run tests](#run-tests)
- [Run linter](#run-linter)
## 🚀 Features ## 🚀 Features
@@ -64,38 +71,38 @@ Your support is greatly appreciated
Optional: Optional:
- ElevenLabs Key (If you want the AI to speak) - [ElevenLabs Key](https://elevenlabs.io/) (If you want the AI to speak)
## 💾 Installation ## 💾 Installation
To install Auto-GPT, follow these steps: To install Auto-GPT, follow these steps:
0. Make sure you have all the **requirements** above, if not, install/get them. 1. Make sure you have all the **requirements** above, if not, install/get them.
_The following commands should be executed in a CMD, Bash or Powershell window. To do this, go to a folder on your computer, click in the folder path at the top and type CMD, then press enter._ _The following commands should be executed in a CMD, Bash or Powershell window. To do this, go to a folder on your computer, click in the folder path at the top and type CMD, then press enter._
1. Clone the repository: 2. Clone the repository:
For this step you need Git installed, but you can just download the zip file instead by clicking the button at the top of this page ☝️ For this step you need Git installed, but you can just download the zip file instead by clicking the button at the top of this page ☝️
``` ```
git clone https://github.com/Torantulino/Auto-GPT.git git clone https://github.com/Torantulino/Auto-GPT.git
``` ```
2. Navigate to the project directory: 3. Navigate to the project directory:
_(Type this into your CMD window, you're aiming to navigate the CMD window to the repository you just downloaded)_ _(Type this into your CMD window, you're aiming to navigate the CMD window to the repository you just downloaded)_
``` ```
cd 'Auto-GPT' cd 'Auto-GPT'
``` ```
3. Install the required dependencies: 4. Install the required dependencies:
_(Again, type this into your CMD window)_ _(Again, type this into your CMD window)_
``` ```
pip install -r requirements.txt pip install -r requirements.txt
``` ```
4. Rename `.env.template` to `.env` and fill in your `OPENAI_API_KEY`. If you plan to use Speech Mode, fill in your `ELEVEN_LABS_API_KEY` as well. 5. Rename `.env.template` to `.env` and fill in your `OPENAI_API_KEY`. If you plan to use Speech Mode, fill in your `ELEVEN_LABS_API_KEY` as well.
- Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys. - Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys.
- Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website. - Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website.
- If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and then: - If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and then:
@@ -120,7 +127,7 @@ python scripts/main.py
### Logs ### Logs
You will find activity and error logs in the folder `./logs` You will find activity and error logs in the folder `./output/logs`
To output debug logs: To output debug logs:
@@ -238,7 +245,6 @@ export PINECONE_ENV="Your pinecone region" # something like: us-east4-gcp
``` ```
## Setting Your Cache Type ## Setting Your Cache Type
By default Auto-GPT is going to use LocalCache instead of redis or Pinecone. By default Auto-GPT is going to use LocalCache instead of redis or Pinecone.
@@ -342,11 +348,13 @@ coverage run -m unittest discover tests
## Run linter ## Run linter
This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. To run the linter, run the following command: This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. We currently use the following rules: `E303,W293,W291,W292,E305,E231,E302`. See the [flake8 rules](https://www.flake8rules.com/) for more information.
To run the linter, run the following command:
``` ```
flake8 scripts/ tests/ flake8 scripts/ tests/
# Or, if you want to run flake8 with the same configuration as the CI: # Or, if you want to run flake8 with the same configuration as the CI:
flake8 scripts/ tests/ --select E303,W293,W291,W292,E305 flake8 scripts/ tests/ --select E303,W293,W291,W292,E305,E231,E302
``` ```

View File

@@ -1,3 +1,4 @@
azure_api_type: azure_ad
azure_api_base: your-base-url-for-azure azure_api_base: your-base-url-for-azure
azure_api_version: api-version-for-azure azure_api_version: api-version-for-azure
azure_model_map: azure_model_map:

16
docker-compose.yml Normal file
View File

@@ -0,0 +1,16 @@
# To boot the app run the following:
# docker-compose run auto-gpt
version: "3.9"
services:
auto-gpt:
depends_on:
- redis
build: ./
volumes:
- "./scripts:/app"
- ".env:/app/.env"
profiles: ["exclude-from-up"]
redis:
image: "redis/redis-stack-server:latest"

View File

@@ -17,3 +17,4 @@ orjson
Pillow Pillow
coverage coverage
flake8 flake8
numpy

View File

@@ -6,6 +6,7 @@ agents = {} # key, (task, full_message_history, model)
# Create new GPT agent # Create new GPT agent
# TODO: Centralise use of create_chat_completion() to globally enforce token limit # TODO: Centralise use of create_chat_completion() to globally enforce token limit
def create_agent(task, prompt, model): def create_agent(task, prompt, model):
"""Create a new agent and return its key""" """Create a new agent and return its key"""
global next_key global next_key

View File

@@ -1,6 +1,7 @@
import yaml import yaml
import data
import os import os
from prompt import get_prompt
class AIConfig: class AIConfig:
""" """
@@ -46,7 +47,7 @@ class AIConfig:
""" """
try: try:
with open(config_file) as file: with open(config_file, encoding='utf-8') as file:
config_params = yaml.load(file, Loader=yaml.FullLoader) config_params = yaml.load(file, Loader=yaml.FullLoader)
except FileNotFoundError: except FileNotFoundError:
config_params = {} config_params = {}
@@ -90,5 +91,5 @@ class AIConfig:
for i, goal in enumerate(self.ai_goals): for i, goal in enumerate(self.ai_goals):
full_prompt += f"{i+1}. {goal}\n" full_prompt += f"{i+1}. {goal}\n"
full_prompt += f"\n\n{data.load_prompt()}" full_prompt += f"\n\n{get_prompt()}"
return full_prompt return full_prompt

View File

@@ -1,8 +1,7 @@
from typing import List, Optional from typing import List
import json import json
from config import Config from config import Config
from call_ai_function import call_ai_function from call_ai_function import call_ai_function
from json_parser import fix_and_parse_json
cfg = Config() cfg = Config()

View File

@@ -3,6 +3,8 @@ from config import Config
cfg = Config() cfg = Config()
from llm_utils import create_chat_completion from llm_utils import create_chat_completion
# This is a magic function that can do anything with no-code. See # This is a magic function that can do anything with no-code. See
# https://github.com/Torantulino/AI-Functions for more info. # https://github.com/Torantulino/AI-Functions for more info.
def call_ai_function(function, args, description, model=None): def call_ai_function(function, args, description, model=None):

View File

@@ -9,6 +9,7 @@ import logging
cfg = Config() cfg = Config()
def create_chat_message(role, content): def create_chat_message(role, content):
""" """
Create a chat message with the given role and content. Create a chat message with the given role and content.
@@ -69,7 +70,7 @@ def chat_with_ai(
logger.debug(f"Token limit: {token_limit}") logger.debug(f"Token limit: {token_limit}")
send_token_limit = token_limit - 1000 send_token_limit = token_limit - 1000
relevant_memory = permanent_memory.get_relevant(str(full_message_history[-9:]), 10) relevant_memory = '' if len(full_message_history) ==0 else permanent_memory.get_relevant(str(full_message_history[-9:]), 10)
logger.debug(f'Memory Stats: {permanent_memory.get_stats()}') logger.debug(f'Memory Stats: {permanent_memory.get_stats()}')

View File

@@ -24,6 +24,7 @@ def is_valid_int(value):
except ValueError: except ValueError:
return False return False
def get_command(response): def get_command(response):
"""Parse the response and return the command name and arguments""" """Parse the response and return the command name and arguments"""
try: try:
@@ -135,6 +136,7 @@ def google_search(query, num_results=8):
return json.dumps(search_results, ensure_ascii=False, indent=4) return json.dumps(search_results, ensure_ascii=False, indent=4)
def google_official_search(query, num_results=8): def google_official_search(query, num_results=8):
"""Return the results of a google search using the official Google API""" """Return the results of a google search using the official Google API"""
from googleapiclient.discovery import build from googleapiclient.discovery import build
@@ -171,6 +173,7 @@ def google_official_search(query, num_results=8):
# Return the list of search result URLs # Return the list of search result URLs
return search_results_links return search_results_links
def browse_website(url, question): def browse_website(url, question):
"""Browse a website and return the summary and links""" """Browse a website and return the summary and links"""
summary = get_text_summary(url, question) summary = get_text_summary(url, question)

View File

@@ -36,6 +36,7 @@ class Config(metaclass=Singleton):
"""Initialize the Config class""" """Initialize the Config class"""
self.debug_mode = False self.debug_mode = False
self.continuous_mode = False self.continuous_mode = False
self.continuous_limit = 0
self.speak_mode = False self.speak_mode = False
self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo") self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
@@ -46,14 +47,13 @@ class Config(metaclass=Singleton):
self.browse_summary_max_token = int(os.getenv("BROWSE_SUMMARY_MAX_TOKEN", 300)) self.browse_summary_max_token = int(os.getenv("BROWSE_SUMMARY_MAX_TOKEN", 300))
self.openai_api_key = os.getenv("OPENAI_API_KEY") self.openai_api_key = os.getenv("OPENAI_API_KEY")
self.temperature = int(os.getenv("TEMPERATURE", "1")) self.temperature = float(os.getenv("TEMPERATURE", "1"))
self.use_azure = False
self.use_azure = os.getenv("USE_AZURE") == 'True' self.use_azure = os.getenv("USE_AZURE") == 'True'
self.execute_local_commands = os.getenv('EXECUTE_LOCAL_COMMANDS', 'False') == 'True' self.execute_local_commands = os.getenv('EXECUTE_LOCAL_COMMANDS', 'False') == 'True'
if self.use_azure: if self.use_azure:
self.load_azure_config() self.load_azure_config()
openai.api_type = "azure" openai.api_type = self.openai_api_type
openai.api_base = self.openai_api_base openai.api_base = self.openai_api_base
openai.api_version = self.openai_api_version openai.api_version = self.openai_api_version
@@ -76,7 +76,6 @@ class Config(metaclass=Singleton):
# User agent headers to use when browsing web # User agent headers to use when browsing web
# Some websites might just completely deny request with an error code if no user agent was found. # Some websites might just completely deny request with an error code if no user agent was found.
self.user_agent = os.getenv("USER_AGENT", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36") self.user_agent = os.getenv("USER_AGENT", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36")
self.redis_host = os.getenv("REDIS_HOST", "localhost") self.redis_host = os.getenv("REDIS_HOST", "localhost")
self.redis_port = os.getenv("REDIS_PORT", "6379") self.redis_port = os.getenv("REDIS_PORT", "6379")
self.redis_password = os.getenv("REDIS_PASSWORD", "") self.redis_password = os.getenv("REDIS_PASSWORD", "")
@@ -124,14 +123,19 @@ class Config(metaclass=Singleton):
config_params = yaml.load(file, Loader=yaml.FullLoader) config_params = yaml.load(file, Loader=yaml.FullLoader)
except FileNotFoundError: except FileNotFoundError:
config_params = {} config_params = {}
self.openai_api_base = config_params.get("azure_api_base", "") self.openai_api_type = os.getenv("OPENAI_API_TYPE", config_params.get("azure_api_type", "azure"))
self.openai_api_version = config_params.get("azure_api_version", "") self.openai_api_base = os.getenv("OPENAI_AZURE_API_BASE", config_params.get("azure_api_base", ""))
self.openai_api_version = os.getenv("OPENAI_AZURE_API_VERSION", config_params.get("azure_api_version", ""))
self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", []) self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", [])
def set_continuous_mode(self, value: bool): def set_continuous_mode(self, value: bool):
"""Set the continuous mode value.""" """Set the continuous mode value."""
self.continuous_mode = value self.continuous_mode = value
def set_continuous_limit(self, value: int):
"""Set the continuous limit value."""
self.continuous_limit = value
def set_speak_mode(self, value: bool): def set_speak_mode(self, value: bool):
"""Set the speak mode value.""" """Set the speak mode value."""
self.speak_mode = value self.speak_mode = value

View File

@@ -1,18 +0,0 @@
import os
from pathlib import Path
def load_prompt():
"""Load the prompt from data/prompt.txt"""
try:
# get directory of this file:
file_dir = Path(__file__).parent
prompt_file_path = file_dir / "data" / "prompt.txt"
# Load the prompt from data/prompt.txt
with open(prompt_file_path, "r") as prompt_file:
prompt = prompt_file.read()
return prompt
except FileNotFoundError:
print("Error: Prompt file not found", flush=True)
return ""

View File

@@ -1,64 +0,0 @@
CONSTRAINTS:
1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.
2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.
3. No user assistance
4. Exclusively use the commands listed in double quotes e.g. "command name"
COMMANDS:
1. Google Search: "google", args: "input": "<search>"
5. Browse Website: "browse_website", args: "url": "<url>", "question": "<what_you_want_to_find_on_website>"
6. Start GPT Agent: "start_agent", args: "name": "<name>", "task": "<short_task_desc>", "prompt": "<prompt>"
7. Message GPT Agent: "message_agent", args: "key": "<key>", "message": "<message>"
8. List GPT Agents: "list_agents", args: ""
9. Delete GPT Agent: "delete_agent", args: "key": "<key>"
10. Write to file: "write_to_file", args: "file": "<file>", "text": "<text>"
11. Read file: "read_file", args: "file": "<file>"
12. Append to file: "append_to_file", args: "file": "<file>", "text": "<text>"
13. Delete file: "delete_file", args: "file": "<file>"
14. Search Files: "search_files", args: "directory": "<directory>"
15. Evaluate Code: "evaluate_code", args: "code": "<full_code_string>"
16. Get Improved Code: "improve_code", args: "suggestions": "<list_of_suggestions>", "code": "<full_code_string>"
17. Write Tests: "write_tests", args: "code": "<full_code_string>", "focus": "<list_of_focus_areas>"
18. Execute Python File: "execute_python_file", args: "file": "<file>"
19. Execute Shell Command, non-interactive commands only: "execute_shell", args: "command_line": "<command_line>".
20. Task Complete (Shutdown): "task_complete", args: "reason": "<reason>"
21. Generate Image: "generate_image", args: "prompt": "<prompt>"
22. Do Nothing: "do_nothing", args: ""
RESOURCES:
1. Internet access for searches and information gathering.
2. Long Term memory management.
3. GPT-3.5 powered Agents for delegation of simple tasks.
4. File output.
PERFORMANCE EVALUATION:
1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.
2. Constructively self-criticize your big-picture behavior constantly.
3. Reflect on past decisions and strategies to refine your approach.
4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.
You should only respond in JSON format as described below
RESPONSE FORMAT:
{
"thoughts":
{
"text": "thought",
"reasoning": "reasoning",
"plan": "- short bulleted\n- list that conveys\n- long-term plan",
"criticism": "constructive self-criticism",
"speak": "thoughts summary to say to user"
},
"command": {
"name": "command name",
"args":{
"arg name": "value"
}
}
}
Ensure the response can be parsed by Python json.loads

View File

@@ -67,6 +67,7 @@ def execute_python_file(file):
except Exception as e: except Exception as e:
return f"Error: {str(e)}" return f"Error: {str(e)}"
def execute_shell(command_line): def execute_shell(command_line):
current_dir = os.getcwd() current_dir = os.getcwd()

View File

@@ -38,7 +38,7 @@ def write_to_file(filename, text):
directory = os.path.dirname(filepath) directory = os.path.dirname(filepath)
if not os.path.exists(directory): if not os.path.exists(directory):
os.makedirs(directory) os.makedirs(directory)
with open(filepath, "w") as f: with open(filepath, "w", encoding='utf-8') as f:
f.write(text) f.write(text)
return "File written to successfully." return "File written to successfully."
except Exception as e: except Exception as e:
@@ -65,6 +65,7 @@ def delete_file(filename):
except Exception as e: except Exception as e:
return "Error: " + str(e) return "Error: " + str(e)
def search_files(directory): def search_files(directory):
found_files = [] found_files = []

View File

@@ -11,6 +11,7 @@ cfg = Config()
working_directory = "auto_gpt_workspace" working_directory = "auto_gpt_workspace"
def generate_image(prompt): def generate_image(prompt):
filename = str(uuid.uuid4()) + ".jpg" filename = str(uuid.uuid4()) + ".jpg"

View File

@@ -1,12 +1,21 @@
import time
import openai import openai
from colorama import Fore
from config import Config from config import Config
cfg = Config() cfg = Config()
openai.api_key = cfg.openai_api_key openai.api_key = cfg.openai_api_key
# Overly simple abstraction until we create something better # Overly simple abstraction until we create something better
# simple retry mechanism when getting a rate error or a bad gateway
def create_chat_completion(messages, model=None, temperature=cfg.temperature, max_tokens=None)->str: def create_chat_completion(messages, model=None, temperature=cfg.temperature, max_tokens=None)->str:
"""Create a chat completion using the OpenAI API""" """Create a chat completion using the OpenAI API"""
response = None
num_retries = 5
for attempt in range(num_retries):
try:
if cfg.use_azure: if cfg.use_azure:
response = openai.ChatCompletion.create( response = openai.ChatCompletion.create(
deployment_id=cfg.get_azure_deployment_id_for_model(model), deployment_id=cfg.get_azure_deployment_id_for_model(model),
@@ -22,5 +31,22 @@ def create_chat_completion(messages, model=None, temperature=cfg.temperature, ma
temperature=temperature, temperature=temperature,
max_tokens=max_tokens max_tokens=max_tokens
) )
break
except openai.error.RateLimitError:
if cfg.debug_mode:
print(Fore.RED + "Error: ", "API Rate Limit Reached. Waiting 20 seconds..." + Fore.RESET)
time.sleep(20)
except openai.error.APIError as e:
if e.http_status == 502:
if cfg.debug_mode:
print(Fore.RED + "Error: ", "API Bad gateway. Waiting 20 seconds..." + Fore.RESET)
time.sleep(20)
else:
raise
if attempt == num_retries - 1:
raise
if response is None:
raise RuntimeError("Failed to get response after 5 retries")
return response.choices[0].message["content"] return response.choices[0].message["content"]

View File

@@ -124,6 +124,12 @@ class Logger(metaclass=Singleton):
self.logger.setLevel(level) self.logger.setLevel(level)
self.typing_logger.setLevel(level) self.typing_logger.setLevel(level)
def double_check(self, additionalText=None):
if not additionalText:
additionalText = "Please ensure you've setup and configured everything correctly. Read https://github.com/Torantulino/Auto-GPT#readme to double check. You can also create a github issue or join the discord and ask there!"
self.typewriter_log("DOUBLE CHECK CONFIGURATION", Fore.YELLOW, additionalText)
''' '''
Output stream to console using simulated typing Output stream to console using simulated typing
@@ -151,6 +157,7 @@ class TypingConsoleHandler(logging.StreamHandler):
except Exception: except Exception:
self.handleError(record) self.handleError(record)
class ConsoleHandler(logging.StreamHandler): class ConsoleHandler(logging.StreamHandler):
def emit(self, record): def emit(self, record):
msg = self.format(record) msg = self.format(record)
@@ -160,11 +167,11 @@ class ConsoleHandler(logging.StreamHandler):
self.handleError(record) self.handleError(record)
'''
Allows to handle custom placeholders 'title_color' and 'message_no_color'.
To use this formatter, make sure to pass 'color', 'title' as log extras.
'''
class AutoGptFormatter(logging.Formatter): class AutoGptFormatter(logging.Formatter):
"""
Allows to handle custom placeholders 'title_color' and 'message_no_color'.
To use this formatter, make sure to pass 'color', 'title' as log extras.
"""
def format(self, record: LogRecord) -> str: def format(self, record: LogRecord) -> str:
if (hasattr(record, 'color')): if (hasattr(record, 'color')):
record.title_color = getattr(record, 'color') + getattr(record, 'title') + " " + Style.RESET_ALL record.title_color = getattr(record, 'color') + getattr(record, 'title') + " " + Style.RESET_ALL

View File

@@ -3,7 +3,6 @@ import random
import commands as cmd import commands as cmd
import utils import utils
from memory import get_memory, get_supported_memory_backends from memory import get_memory, get_supported_memory_backends
import data
import chat import chat
from colorama import Fore, Style from colorama import Fore, Style
from spinner import Spinner from spinner import Spinner
@@ -17,19 +16,22 @@ import yaml
import argparse import argparse
from logger import logger from logger import logger
import logging import logging
from prompt import get_prompt
cfg = Config() cfg = Config()
def check_openai_api_key(): def check_openai_api_key():
"""Check if the OpenAI API key is set in config.py or as an environment variable.""" """Check if the OpenAI API key is set in config.py or as an environment variable."""
if not cfg.openai_api_key: if not cfg.openai_api_key:
print( print(
Fore.RED + Fore.RED +
"Please set your OpenAI API key in config.py or as an environment variable." "Please set your OpenAI API key in .env or as an environment variable."
) )
print("You can get your key from https://beta.openai.com/account/api-keys") print("You can get your key from https://beta.openai.com/account/api-keys")
exit(1) exit(1)
def attempt_to_fix_json_by_finding_outermost_brackets(json_string): def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
if cfg.speak_mode and cfg.debug_mode: if cfg.speak_mode and cfg.debug_mode:
speak.say_text("I have received an invalid JSON response from the OpenAI API. Trying to fix it now.") speak.say_text("I have received an invalid JSON response from the OpenAI API. Trying to fix it now.")
@@ -58,6 +60,7 @@ def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
return json_string return json_string
def print_assistant_thoughts(assistant_reply): def print_assistant_thoughts(assistant_reply):
"""Prints the assistant's thoughts to the console""" """Prints the assistant's thoughts to the console"""
global ai_name global ai_name
@@ -168,8 +171,8 @@ def load_variables(config_file="config.yaml"):
with open(config_file, "w") as file: with open(config_file, "w") as file:
documents = yaml.dump(config, file) documents = yaml.dump(config, file)
prompt = data.load_prompt() prompt = get_prompt()
prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as a LLM and pursue simple strategies with no legal complications.""" prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""
# Construct full prompt # Construct full prompt
full_prompt = f"You are {ai_name}, {ai_role}\n{prompt_start}\n\nGOALS:\n\n" full_prompt = f"You are {ai_name}, {ai_role}\n{prompt_start}\n\nGOALS:\n\n"
@@ -262,6 +265,7 @@ def prompt_user():
config = AIConfig(ai_name, ai_role, ai_goals) config = AIConfig(ai_name, ai_role, ai_goals)
return config return config
def parse_arguments(): def parse_arguments():
"""Parses the arguments passed to the script""" """Parses the arguments passed to the script"""
global cfg global cfg
@@ -271,6 +275,7 @@ def parse_arguments():
parser = argparse.ArgumentParser(description='Process arguments.') parser = argparse.ArgumentParser(description='Process arguments.')
parser.add_argument('--continuous', action='store_true', help='Enable Continuous Mode') parser.add_argument('--continuous', action='store_true', help='Enable Continuous Mode')
parser.add_argument('--continuous-limit', '-l', type=int, dest="continuous_limit", help='Defines the number of times to run in continuous mode')
parser.add_argument('--speak', action='store_true', help='Enable Speak Mode') parser.add_argument('--speak', action='store_true', help='Enable Speak Mode')
parser.add_argument('--debug', action='store_true', help='Enable Debug Mode') parser.add_argument('--debug', action='store_true', help='Enable Debug Mode')
parser.add_argument('--gpt3only', action='store_true', help='Enable GPT3.5 Only Mode') parser.add_argument('--gpt3only', action='store_true', help='Enable GPT3.5 Only Mode')
@@ -290,6 +295,17 @@ def parse_arguments():
"Continuous mode is not recommended. It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorise. Use at your own risk.") "Continuous mode is not recommended. It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorise. Use at your own risk.")
cfg.set_continuous_mode(True) cfg.set_continuous_mode(True)
if args.continuous_limit:
logger.typewriter_log(
"Continuous Limit: ",
Fore.GREEN,
f"{args.continuous_limit}")
cfg.set_continuous_limit(args.continuous_limit)
# Check if continuous limit is used without continuous mode
if args.continuous_limit and not args.continuous:
parser.error("--continuous-limit can only be used with --continuous")
if args.speak: if args.speak:
logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED") logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED")
cfg.set_speak_mode(True) cfg.set_speak_mode(True)
@@ -310,33 +326,40 @@ def parse_arguments():
supported_memory = get_supported_memory_backends() supported_memory = get_supported_memory_backends()
chosen = args.memory_type chosen = args.memory_type
if not chosen in supported_memory: if not chosen in supported_memory:
print_to_console("ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED: ", Fore.RED, f'{supported_memory}') logger.typewriter_log("ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED: ", Fore.RED, f'{supported_memory}')
print_to_console(f"Defaulting to: ", Fore.YELLOW, cfg.memory_backend) logger.typewriter_log(f"Defaulting to: ", Fore.YELLOW, cfg.memory_backend)
else: else:
cfg.memory_backend = chosen cfg.memory_backend = chosen
# TODO: fill in llm values here def main():
check_openai_api_key() global ai_name, memory
parse_arguments() # TODO: fill in llm values here
logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO) check_openai_api_key()
ai_name = "" parse_arguments()
prompt = construct_prompt() logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO)
# print(prompt) ai_name = ""
# Initialize variables prompt = construct_prompt()
full_message_history = [] # print(prompt)
result = None # Initialize variables
next_action_count = 0 full_message_history = []
# Make a constant: result = None
user_input = "Determine which next command to use, and respond using the format specified above:" next_action_count = 0
# Make a constant:
user_input = "Determine which next command to use, and respond using the format specified above:"
# Initialize memory and make sure it is empty.
# this is particularly important for indexing and referencing pinecone memory
memory = get_memory(cfg, init=True)
print('Using memory of type: ' + memory.__class__.__name__)
# Interaction Loop
loop_count = 0
while True:
# Discontinue if continuous limit is reached
loop_count += 1
if cfg.continuous_mode and cfg.continuous_limit > 0 and loop_count > cfg.continuous_limit:
logger.typewriter_log("Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}")
break
# Initialize memory and make sure it is empty.
# this is particularly important for indexing and referencing pinecone memory
memory = get_memory(cfg, init=True)
print('Using memory of type: ' + memory.__class__.__name__)
# Interaction Loop
while True:
# Send message to AI, get response # Send message to AI, get response
with Spinner("Thinking... "): with Spinner("Thinking... "):
assistant_reply = chat.chat_with_ai( assistant_reply = chat.chat_with_ai(
@@ -351,7 +374,8 @@ while True:
# Get command name and arguments # Get command name and arguments
try: try:
command_name, arguments = cmd.get_command(attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply)) command_name, arguments = cmd.get_command(
attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply))
if cfg.speak_mode: if cfg.speak_mode:
speak.say_text(f"I want to execute {command_name}") speak.say_text(f"I want to execute {command_name}")
except Exception as e: except Exception as e:
@@ -406,7 +430,7 @@ while True:
f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}") f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
# Execute command # Execute command
if command_name is not None and command_name.lower().startswith( "error" ): if command_name is not None and command_name.lower().startswith("error"):
result = f"Command {command_name} threw the following error: " + arguments result = f"Command {command_name} threw the following error: " + arguments
elif command_name == "human_feedback": elif command_name == "human_feedback":
result = f"Human feedback: {user_input}" result = f"Human feedback: {user_input}"
@@ -431,3 +455,7 @@ while True:
chat.create_chat_message( chat.create_chat_message(
"system", "Unable to execute command")) "system", "Unable to execute command"))
logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command") logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command")
if __name__ == "__main__":
main()

View File

@@ -1,4 +1,5 @@
from memory.local import LocalCache from memory.local import LocalCache
from memory.no_memory import NoMemory
# List of supported memory backends # List of supported memory backends
# Add a backend to this list if the import attempt is successful # Add a backend to this list if the import attempt is successful
@@ -18,6 +19,7 @@ except ImportError:
print("Pinecone not installed. Skipping import.") print("Pinecone not installed. Skipping import.")
PineconeMemory = None PineconeMemory = None
def get_memory(cfg, init=False): def get_memory(cfg, init=False):
memory = None memory = None
if cfg.memory_backend == "pinecone": if cfg.memory_backend == "pinecone":
@@ -34,6 +36,8 @@ def get_memory(cfg, init=False):
" use Redis as a memory backend.") " use Redis as a memory backend.")
else: else:
memory = RedisMemory(cfg) memory = RedisMemory(cfg)
elif cfg.memory_backend == "no_memory":
memory = NoMemory(cfg)
if memory is None: if memory is None:
memory = LocalCache(cfg) memory = LocalCache(cfg)
@@ -41,6 +45,7 @@ def get_memory(cfg, init=False):
memory.clear() memory.clear()
return memory return memory
def get_supported_memory_backends(): def get_supported_memory_backends():
return supported_memory return supported_memory
@@ -50,4 +55,5 @@ __all__ = [
"LocalCache", "LocalCache",
"RedisMemory", "RedisMemory",
"PineconeMemory", "PineconeMemory",
"NoMemory"
] ]

View File

@@ -2,10 +2,10 @@
import abc import abc
from config import AbstractSingleton, Config from config import AbstractSingleton, Config
import openai import openai
cfg = Config()
cfg = Config() cfg = Config()
def get_ada_embedding(text): def get_ada_embedding(text):
text = text.replace("\n", " ") text = text.replace("\n", " ")
if cfg.use_azure: if cfg.use_azure:

View File

@@ -0,0 +1,66 @@
from typing import Optional, List, Any
from memory.base import MemoryProviderSingleton
class NoMemory(MemoryProviderSingleton):
    """Memory backend that deliberately stores nothing.

    Useful when the user wants to run the agent without any long-term
    memory: every operation is a no-op that returns an empty/None result.
    """

    def __init__(self, cfg):
        """
        Create the provider.

        Args:
            cfg: The config object (accepted for interface parity, unused).

        Returns: None
        """
        pass

    def add(self, data: str) -> str:
        """
        Discard the given data point; nothing is ever stored.

        Args:
            data: The data to add.

        Returns: An empty string.
        """
        return ""

    def get(self, data: str) -> Optional[List[Any]]:
        """
        Look up data relevant to the query. Since nothing is stored,
        this always returns None.

        Args:
            data: The data to compare to.

        Returns: None
        """
        return None

    def clear(self) -> str:
        """
        Clear the (empty) memory; a no-op.

        Returns: An empty string.
        """
        return ""

    def get_relevant(self, data: str, num_relevant: int = 5) -> Optional[List[Any]]:
        """
        Fetch up to num_relevant entries related to the query.
        Always None for this backend, regardless of num_relevant.

        Args:
            data: The data to compare to.
            num_relevant: The number of relevant data to return.

        Returns: None
        """
        return None

    def get_stats(self):
        """
        Returns: An empty dictionary, as this backend tracks no stats.
        """
        return {}

View File

@@ -2,6 +2,8 @@
import pinecone import pinecone
from memory.base import MemoryProviderSingleton, get_ada_embedding from memory.base import MemoryProviderSingleton, get_ada_embedding
from logger import logger
from colorama import Fore, Style
class PineconeMemory(MemoryProviderSingleton): class PineconeMemory(MemoryProviderSingleton):
@@ -17,6 +19,15 @@ class PineconeMemory(MemoryProviderSingleton):
# for now this works. # for now this works.
# we'll need a more complicated and robust system if we want to start with memory. # we'll need a more complicated and robust system if we want to start with memory.
self.vec_num = 0 self.vec_num = 0
try:
pinecone.whoami()
except Exception as e:
logger.typewriter_log("FAILED TO CONNECT TO PINECONE", Fore.RED, Style.BRIGHT + str(e) + Style.RESET_ALL)
logger.double_check("Please ensure you have setup and configured Pinecone properly for use. " +
f"You can check out {Fore.CYAN + Style.BRIGHT}https://github.com/Torantulino/Auto-GPT#-pinecone-api-key-setup{Style.RESET_ALL} to ensure you've set up everything correctly.")
exit(1)
if table_name not in pinecone.list_indexes(): if table_name not in pinecone.list_indexes():
pinecone.create_index(table_name, dimension=dimension, metric=metric, pod_type=pod_type) pinecone.create_index(table_name, dimension=dimension, metric=metric, pod_type=pod_type)
self.index = pinecone.Index(table_name) self.index = pinecone.Index(table_name)

View File

@@ -7,6 +7,8 @@ from redis.commands.search.indexDefinition import IndexDefinition, IndexType
import numpy as np import numpy as np
from memory.base import MemoryProviderSingleton, get_ada_embedding from memory.base import MemoryProviderSingleton, get_ada_embedding
from logger import logger
from colorama import Fore, Style
SCHEMA = [ SCHEMA = [
@@ -44,6 +46,16 @@ class RedisMemory(MemoryProviderSingleton):
db=0 # Cannot be changed db=0 # Cannot be changed
) )
self.cfg = cfg self.cfg = cfg
# Check redis connection
try:
self.redis.ping()
except redis.ConnectionError as e:
logger.typewriter_log("FAILED TO CONNECT TO REDIS", Fore.RED, Style.BRIGHT + str(e) + Style.RESET_ALL)
logger.double_check("Please ensure you have setup and configured Redis properly for use. " +
f"You can check out {Fore.CYAN + Style.BRIGHT}https://github.com/Torantulino/Auto-GPT#redis-setup{Style.RESET_ALL} to ensure you've set up everything correctly.")
exit(1)
if cfg.wipe_redis_on_start: if cfg.wipe_redis_on_start:
self.redis.flushall() self.redis.flushall()
try: try:

63
scripts/prompt.py Normal file
View File

@@ -0,0 +1,63 @@
from promptgenerator import PromptGenerator
def get_prompt():
    """
    Build the full Auto-GPT system prompt.

    Assembles the agent's constraints, available commands, resources and
    performance-evaluation guidelines into one string via PromptGenerator.

    Returns:
        str: The generated prompt string.
    """
    generator = PromptGenerator()

    # Constraints the agent must operate under.
    constraints = [
        "~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.",
        "If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.",
        "No user assistance",
        'Exclusively use the commands listed in double quotes e.g. "command name"',
    ]
    for constraint in constraints:
        generator.add_constraint(constraint)

    # (label, command name, argument template) triples for every command.
    commands = [
        ("Google Search", "google", {"input": "<search>"}),
        ("Browse Website", "browse_website", {"url": "<url>", "question": "<what_you_want_to_find_on_website>"}),
        ("Start GPT Agent", "start_agent", {"name": "<name>", "task": "<short_task_desc>", "prompt": "<prompt>"}),
        ("Message GPT Agent", "message_agent", {"key": "<key>", "message": "<message>"}),
        ("List GPT Agents", "list_agents", {}),
        ("Delete GPT Agent", "delete_agent", {"key": "<key>"}),
        ("Write to file", "write_to_file", {"file": "<file>", "text": "<text>"}),
        ("Read file", "read_file", {"file": "<file>"}),
        ("Append to file", "append_to_file", {"file": "<file>", "text": "<text>"}),
        ("Delete file", "delete_file", {"file": "<file>"}),
        ("Search Files", "search_files", {"directory": "<directory>"}),
        ("Evaluate Code", "evaluate_code", {"code": "<full_code_string>"}),
        ("Get Improved Code", "improve_code", {"suggestions": "<list_of_suggestions>", "code": "<full_code_string>"}),
        ("Write Tests", "write_tests", {"code": "<full_code_string>", "focus": "<list_of_focus_areas>"}),
        ("Execute Python File", "execute_python_file", {"file": "<file>"}),
        ("Execute Shell Command, non-interactive commands only", "execute_shell", {"command_line": "<command_line>"}),
        ("Task Complete (Shutdown)", "task_complete", {"reason": "<reason>"}),
        ("Generate Image", "generate_image", {"prompt": "<prompt>"}),
        ("Do Nothing", "do_nothing", {}),
    ]
    for label, name, args in commands:
        generator.add_command(label, name, args)

    # External resources the agent may rely on.
    resources = [
        "Internet access for searches and information gathering.",
        "Long Term memory management.",
        "GPT-3.5 powered Agents for delegation of simple tasks.",
        "File output.",
    ]
    for resource in resources:
        generator.add_resource(resource)

    # Self-assessment guidelines included in the prompt.
    evaluations = [
        "Continuously review and analyze your actions to ensure you are performing to the best of your abilities.",
        "Constructively self-criticize your big-picture behavior constantly.",
        "Reflect on past decisions and strategies to refine your approach.",
        "Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.",
    ]
    for evaluation in evaluations:
        generator.add_performance_evaluation(evaluation)

    return generator.generate_prompt_string()

129
scripts/promptgenerator.py Normal file
View File

@@ -0,0 +1,129 @@
import json
class PromptGenerator:
    """
    A class for generating custom prompt strings based on constraints,
    commands, resources, and performance evaluations.
    """

    def __init__(self):
        """
        Initialize the PromptGenerator object with empty lists of
        constraints, commands, resources, and performance evaluations.
        """
        self.constraints = []
        self.commands = []
        self.resources = []
        self.performance_evaluation = []
        # Skeleton of the JSON reply the LLM must produce; it is rendered
        # verbatim (indented) into the prompt by generate_prompt_string().
        self.response_format = {
            "thoughts": {
                "text": "thought",
                "reasoning": "reasoning",
                "plan": "- short bulleted\n- list that conveys\n- long-term plan",
                "criticism": "constructive self-criticism",
                "speak": "thoughts summary to say to user"
            },
            "command": {
                "name": "command name",
                "args": {
                    "arg name": "value"
                }
            }
        }

    def add_constraint(self, constraint):
        """
        Add a constraint to the constraints list.

        Args:
            constraint (str): The constraint to be added.
        """
        self.constraints.append(constraint)

    def add_command(self, command_label, command_name, args=None):
        """
        Add a command to the commands list with a label, name, and optional arguments.

        Args:
            command_label (str): The label of the command.
            command_name (str): The name of the command.
            args (dict, optional): A dictionary containing argument names and their
                values. Defaults to None (treated as no arguments).
        """
        if args is None:
            args = {}
        command = {
            "label": command_label,
            "name": command_name,
            # Copy so later mutation of the caller's dict cannot alter the
            # stored command (dict() replaces the old manual comprehension).
            "args": dict(args),
        }
        self.commands.append(command)

    def _generate_command_string(self, command):
        """
        Generate a formatted string representation of a command.

        Args:
            command (dict): A dictionary containing command information.

        Returns:
            str: The formatted command string.
        """
        args_string = ', '.join(
            f'"{key}": "{value}"' for key, value in command['args'].items())
        return f'{command["label"]}: "{command["name"]}", args: {args_string}'

    def add_resource(self, resource):
        """
        Add a resource to the resources list.

        Args:
            resource (str): The resource to be added.
        """
        self.resources.append(resource)

    def add_performance_evaluation(self, evaluation):
        """
        Add a performance evaluation item to the performance_evaluation list.

        Args:
            evaluation (str): The evaluation item to be added.
        """
        self.performance_evaluation.append(evaluation)

    def _generate_numbered_list(self, items, item_type='list'):
        """
        Generate a numbered list from given items based on the item_type.

        Args:
            items (list): A list of items to be numbered.
            item_type (str, optional): The type of items in the list;
                'command' items are rendered via _generate_command_string.
                Defaults to 'list'.

        Returns:
            str: The formatted numbered list.
        """
        if item_type == 'command':
            return "\n".join(f"{i+1}. {self._generate_command_string(item)}" for i, item in enumerate(items))
        else:
            return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items))

    def generate_prompt_string(self):
        """
        Generate a prompt string based on the constraints, commands, resources,
        and performance evaluations.

        Returns:
            str: The generated prompt string.
        """
        formatted_response_format = json.dumps(self.response_format, indent=4)
        prompt_string = (
            f"Constraints:\n{self._generate_numbered_list(self.constraints)}\n\n"
            f"Commands:\n{self._generate_numbered_list(self.commands, item_type='command')}\n\n"
            f"Resources:\n{self._generate_numbered_list(self.resources)}\n\n"
            f"Performance Evaluation:\n{self._generate_numbered_list(self.performance_evaluation)}\n\n"
            f"You should only respond in JSON format as described below \nResponse Format: \n{formatted_response_format} \nEnsure the response can be parsed by Python json.loads"
        )
        return prompt_string

View File

@@ -31,6 +31,7 @@ tts_headers = {
mutex_lock = Lock() # Ensure only one sound is played at a time mutex_lock = Lock() # Ensure only one sound is played at a time
queue_semaphore = Semaphore(1) # The amount of sounds to queue before blocking the main thread queue_semaphore = Semaphore(1) # The amount of sounds to queue before blocking the main thread
def eleven_labs_speech(text, voice_index=0): def eleven_labs_speech(text, voice_index=0):
"""Speak text using elevenlabs.io's API""" """Speak text using elevenlabs.io's API"""
tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format( tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format(
@@ -51,6 +52,7 @@ def eleven_labs_speech(text, voice_index=0):
print("Response content:", response.content) print("Response content:", response.content)
return False return False
def gtts_speech(text): def gtts_speech(text):
tts = gtts.gTTS(text) tts = gtts.gTTS(text)
with mutex_lock: with mutex_lock:
@@ -58,6 +60,7 @@ def gtts_speech(text):
playsound("speech.mp3", True) playsound("speech.mp3", True)
os.remove("speech.mp3") os.remove("speech.mp3")
def macos_tts_speech(text, voice_index=0): def macos_tts_speech(text, voice_index=0):
if voice_index == 0: if voice_index == 0:
os.system(f'say "{text}"') os.system(f'say "{text}"')
@@ -67,6 +70,7 @@ def macos_tts_speech(text, voice_index=0):
else: else:
os.system(f'say -v Samantha "{text}"') os.system(f'say -v Samantha "{text}"')
def say_text(text, voice_index=0): def say_text(text, voice_index=0):
def speak(): def speak():

View File

@@ -1,6 +1,7 @@
import tiktoken import tiktoken
from typing import List, Dict from typing import List, Dict
def count_message_tokens(messages : List[Dict[str, str]], model : str = "gpt-3.5-turbo-0301") -> int: def count_message_tokens(messages : List[Dict[str, str]], model : str = "gpt-3.5-turbo-0301") -> int:
""" """
Returns the number of tokens used by a list of messages. Returns the number of tokens used by a list of messages.
@@ -41,6 +42,7 @@ def count_message_tokens(messages : List[Dict[str, str]], model : str = "gpt-3.5
num_tokens += 3 # every reply is primed with <|start|>assistant<|message|> num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
return num_tokens return num_tokens
def count_string_tokens(string: str, model_name: str) -> int: def count_string_tokens(string: str, model_name: str) -> int:
""" """
Returns the number of tokens in a text string. Returns the number of tokens in a text string.

View File

@@ -8,6 +8,7 @@ sys.path.append(str(Path(__file__).resolve().parent.parent.parent / 'scripts'))
from config import Config from config import Config
from memory.local import LocalCache from memory.local import LocalCache
class TestLocalCache(unittest.TestCase): class TestLocalCache(unittest.TestCase):
def random_string(self, length): def random_string(self, length):

View File

@@ -4,6 +4,7 @@ import sys
sys.path.append(os.path.abspath('../scripts')) sys.path.append(os.path.abspath('../scripts'))
from memory.local import LocalCache from memory.local import LocalCache
def MockConfig(): def MockConfig():
return type('MockConfig', (object,), { return type('MockConfig', (object,), {
'debug_mode': False, 'debug_mode': False,
@@ -12,6 +13,7 @@ def MockConfig():
'memory_index': 'auto-gpt', 'memory_index': 'auto-gpt',
}) })
class TestLocalCache(unittest.TestCase): class TestLocalCache(unittest.TestCase):
def setUp(self): def setUp(self):

View File

@@ -0,0 +1,101 @@
# Import the required libraries for unit testing
import unittest
import sys
import os
# Add the path to the "scripts" directory to import the PromptGenerator module
sys.path.append(os.path.abspath("../scripts"))
from promptgenerator import PromptGenerator
# Create a test class for the PromptGenerator, subclassed from unittest.TestCase
class promptgenerator_tests(unittest.TestCase):
    """Unit tests for the PromptGenerator helper class."""

    def setUp(self):
        """Create a fresh PromptGenerator before every test."""
        self.generator = PromptGenerator()

    def test_add_constraint(self):
        """add_constraint() records the constraint in the constraints list."""
        self.generator.add_constraint("Constraint1")
        self.assertIn("Constraint1", self.generator.constraints)

    def test_add_command(self):
        """add_command() stores label, name and args as a command dict."""
        args = {"arg1": "value1", "arg2": "value2"}
        self.generator.add_command("Command Label", "command_name", args)
        expected = {
            "label": "Command Label",
            "name": "command_name",
            "args": args,
        }
        self.assertIn(expected, self.generator.commands)

    def test_add_resource(self):
        """add_resource() records the resource in the resources list."""
        self.generator.add_resource("Resource1")
        self.assertIn("Resource1", self.generator.resources)

    def test_add_performance_evaluation(self):
        """add_performance_evaluation() records the evaluation item."""
        self.generator.add_performance_evaluation("Evaluation1")
        self.assertIn("Evaluation1", self.generator.performance_evaluation)

    def test_generate_prompt_string(self):
        """generate_prompt_string() includes every added item and section."""
        constraints = ["Constraint1", "Constraint2"]
        commands = [
            {"label": "Command1", "name": "command_name1", "args": {"arg1": "value1"}},
            {"label": "Command2", "name": "command_name2", "args": {}},
        ]
        resources = ["Resource1", "Resource2"]
        evaluations = ["Evaluation1", "Evaluation2"]

        for item in constraints:
            self.generator.add_constraint(item)
        for cmd in commands:
            self.generator.add_command(cmd["label"], cmd["name"], cmd["args"])
        for item in resources:
            self.generator.add_resource(item)
        for item in evaluations:
            self.generator.add_performance_evaluation(item)

        prompt_string = self.generator.generate_prompt_string()
        self.assertIsNotNone(prompt_string)

        # Every plain item must appear verbatim in the rendered prompt.
        for item in constraints + resources + evaluations:
            self.assertIn(item, prompt_string)
        # Commands appear by name, with each arg rendered as "key": "value".
        for cmd in commands:
            self.assertIn(cmd["name"], prompt_string)
            for key, value in cmd["args"].items():
                self.assertIn(f'"{key}": "{value}"', prompt_string)
        # All four section headers must be present.
        for section in ("constraints", "commands", "resources", "performance evaluation"):
            self.assertIn(section, prompt_string.lower())


# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()

View File

@@ -1,6 +1,7 @@
import unittest import unittest
from scripts.config import Config from scripts.config import Config
class TestConfig(unittest.TestCase): class TestConfig(unittest.TestCase):
def test_singleton(self): def test_singleton(self):

View File

@@ -3,6 +3,7 @@ import tests.context
from scripts.json_parser import fix_and_parse_json from scripts.json_parser import fix_and_parse_json
class TestParseJson(unittest.TestCase): class TestParseJson(unittest.TestCase):
def test_valid_json(self): def test_valid_json(self):
# Test that a valid JSON string is parsed correctly # Test that a valid JSON string is parsed correctly
@@ -13,12 +14,14 @@ class TestParseJson(unittest.TestCase):
def test_invalid_json_minor(self): def test_invalid_json_minor(self):
# Test that an invalid JSON string can be fixed with gpt # Test that an invalid JSON string can be fixed with gpt
json_str = '{"name": "John", "age": 30, "city": "New York",}' json_str = '{"name": "John", "age": 30, "city": "New York",}'
self.assertRaises(Exception, fix_and_parse_json, json_str, try_to_fix_with_gpt=False) with self.assertRaises(Exception):
fix_and_parse_json(json_str, try_to_fix_with_gpt=False)
def test_invalid_json_major_with_gpt(self): def test_invalid_json_major_with_gpt(self):
# Test that an invalid JSON string raises an error when try_to_fix_with_gpt is False # Test that an invalid JSON string raises an error when try_to_fix_with_gpt is False
json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END' json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END'
self.assertRaises(Exception, fix_and_parse_json, json_str, try_to_fix_with_gpt=False) with self.assertRaises(Exception):
fix_and_parse_json(json_str, try_to_fix_with_gpt=False)
def test_invalid_json_major_without_gpt(self): def test_invalid_json_major_without_gpt(self):
# Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False
@@ -50,7 +53,7 @@ class TestParseJson(unittest.TestCase):
good_obj = { good_obj = {
"command": { "command": {
"name": "browse_website", "name": "browse_website",
"args":{ "args": {
"url": "https://github.com/Torantulino/Auto-GPT" "url": "https://github.com/Torantulino/Auto-GPT"
} }
}, },
@@ -89,7 +92,7 @@ class TestParseJson(unittest.TestCase):
good_obj = { good_obj = {
"command": { "command": {
"name": "browse_website", "name": "browse_website",
"args":{ "args": {
"url": "https://github.com/Torantulino/Auto-GPT" "url": "https://github.com/Torantulino/Auto-GPT"
} }
}, },

View File

@@ -5,6 +5,7 @@ import sys
sys.path.append(os.path.abspath('../scripts')) sys.path.append(os.path.abspath('../scripts'))
from json_parser import fix_and_parse_json from json_parser import fix_and_parse_json
class TestParseJson(unittest.TestCase): class TestParseJson(unittest.TestCase):
def test_valid_json(self): def test_valid_json(self):
# Test that a valid JSON string is parsed correctly # Test that a valid JSON string is parsed correctly
@@ -52,7 +53,7 @@ class TestParseJson(unittest.TestCase):
good_obj = { good_obj = {
"command": { "command": {
"name": "browse_website", "name": "browse_website",
"args":{ "args": {
"url": "https://github.com/Torantulino/Auto-GPT" "url": "https://github.com/Torantulino/Auto-GPT"
} }
}, },
@@ -91,7 +92,7 @@ class TestParseJson(unittest.TestCase):
good_obj = { good_obj = {
"command": { "command": {
"name": "browse_website", "name": "browse_website",
"args":{ "args": {
"url": "https://github.com/Torantulino/Auto-GPT" "url": "https://github.com/Torantulino/Auto-GPT"
} }
}, },