Merge branch 'master' into prompt-generator

Author: Alrik Olson
Date: 2023-04-13 10:54:39 -07:00
25 changed files with 173 additions and 132 deletions

View File

@@ -32,7 +32,7 @@ jobs:
- name: Lint with flake8
continue-on-error: false
run: flake8 scripts/ tests/ --select E303,W293,W291,W292,E305
run: flake8 scripts/ tests/ --select E303,W293,W291,W292,E305,E231,E302
- name: Run unittest tests with coverage
run: |

View File

@@ -77,32 +77,32 @@ Optional:
To install Auto-GPT, follow these steps:
0. Make sure you have all the **requirements** above, if not, install/get them.
1. Make sure you have all the **requirements** above, if not, install/get them.
_The following commands should be executed in a CMD, Bash or Powershell window. To do this, go to a folder on your computer, click in the folder path at the top and type CMD, then press enter._
1. Clone the repository:
2. Clone the repository:
For this step you need Git installed, but you can just download the zip file instead by clicking the button at the top of this page ☝️
```
git clone https://github.com/Torantulino/Auto-GPT.git
```
2. Navigate to the project directory:
3. Navigate to the project directory:
_(Type this into your CMD window, you're aiming to navigate the CMD window to the repository you just downloaded)_
```
cd 'Auto-GPT'
```
3. Install the required dependencies:
4. Install the required dependencies:
_(Again, type this into your CMD window)_
```
pip install -r requirements.txt
```
4. Rename `.env.template` to `.env` and fill in your `OPENAI_API_KEY`. If you plan to use Speech Mode, fill in your `ELEVEN_LABS_API_KEY` as well.
5. Rename `.env.template` to `.env` and fill in your `OPENAI_API_KEY`. If you plan to use Speech Mode, fill in your `ELEVEN_LABS_API_KEY` as well.
- Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys.
- Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website.
- If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and then:
@@ -348,11 +348,13 @@ coverage run -m unittest discover tests
## Run linter
This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. To run the linter, run the following command:
This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. We currently use the following rules: `E303,W293,W291,W292,E305,E231,E302`. See the [flake8 rules](https://www.flake8rules.com/) for more information.
To run the linter, run the following command:
```
flake8 scripts/ tests/
# Or, if you want to run flake8 with the same configuration as the CI:
flake8 scripts/ tests/ --select E303,W293,W291,W292,E305
flake8 scripts/ tests/ --select E303,W293,W291,W292,E305,E231,E302
```
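As a quick reference, here is an illustrative snippet (not from this repo) showing what the two checks newly added to the `--select` list catch: E231 flags missing whitespace after a `,` or `:`, and E302 wants two blank lines before a top-level definition.
```
x = {'a':1}        # E231: missing whitespace after ':'
def f(): pass      # E302: expected 2 blank lines before a top-level def
```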

View File

@@ -1,3 +1,4 @@
azure_api_type: azure_ad
azure_api_base: your-base-url-for-azure
azure_api_version: api-version-for-azure
azure_model_map:

View File

@@ -6,6 +6,7 @@ agents = {} # key, (task, full_message_history, model)
# Create new GPT agent
# TODO: Centralise use of create_chat_completion() to globally enforce token limit
def create_agent(task, prompt, model):
"""Create a new agent and return its key"""
global next_key

View File

@@ -2,6 +2,7 @@ import yaml
import os
from prompt import get_prompt
class AIConfig:
"""
A class object that contains the configuration information for the AI

View File

@@ -3,6 +3,8 @@ from config import Config
cfg = Config()
from llm_utils import create_chat_completion
# This is a magic function that can do anything with no-code. See
# https://github.com/Torantulino/AI-Functions for more info.
def call_ai_function(function, args, description, model=None):

View File

@@ -9,6 +9,7 @@ import logging
cfg = Config()
def create_chat_message(role, content):
"""
Create a chat message with the given role and content.

View File

@@ -24,6 +24,7 @@ def is_valid_int(value):
except ValueError:
return False
def get_command(response):
"""Parse the response and return the command name and arguments"""
try:
@@ -135,6 +136,7 @@ def google_search(query, num_results=8):
return json.dumps(search_results, ensure_ascii=False, indent=4)
def google_official_search(query, num_results=8):
"""Return the results of a google search using the official Google API"""
from googleapiclient.discovery import build
@@ -171,6 +173,7 @@ def google_official_search(query, num_results=8):
# Return the list of search result URLs
return search_results_links
def browse_website(url, question):
"""Browse a website and return the summary and links"""
summary = get_text_summary(url, question)
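For context, a sketch of the response shape `get_command()` is expected to parse; the structure mirrors the `good_obj` fixtures in the JSON-parser tests later in this commit, and the call itself is hypothetical.
```
import json

response = json.dumps({
    "command": {
        "name": "browse_website",
        "args": {"url": "https://github.com/Torantulino/Auto-GPT"}
    }
})
# Per its docstring, get_command returns the command name and arguments:
command_name, arguments = get_command(response)
# -> ("browse_website", {"url": "https://github.com/Torantulino/Auto-GPT"})
```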

View File

@@ -44,14 +44,13 @@ class Config(metaclass=Singleton):
self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
self.openai_api_key = os.getenv("OPENAI_API_KEY")
self.temperature = int(os.getenv("TEMPERATURE", "1"))
self.use_azure = False
self.temperature = float(os.getenv("TEMPERATURE", "1"))
self.use_azure = os.getenv("USE_AZURE") == 'True'
self.execute_local_commands = os.getenv('EXECUTE_LOCAL_COMMANDS', 'False') == 'True'
if self.use_azure:
self.load_azure_config()
openai.api_type = "azure"
openai.api_type = self.openai_api_type
openai.api_base = self.openai_api_base
openai.api_version = self.openai_api_version
@@ -73,7 +72,7 @@ class Config(metaclass=Singleton):
# User agent headers to use when browsing web
# Some websites might just completely deny request with an error code if no user agent was found.
self.user_agent_header = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"}
self.user_agent_header = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"}
self.redis_host = os.getenv("REDIS_HOST", "localhost")
self.redis_port = os.getenv("REDIS_PORT", "6379")
self.redis_password = os.getenv("REDIS_PASSWORD", "")
@@ -121,8 +120,9 @@ class Config(metaclass=Singleton):
config_params = yaml.load(file, Loader=yaml.FullLoader)
except FileNotFoundError:
config_params = {}
self.openai_api_base = config_params.get("azure_api_base", "")
self.openai_api_version = config_params.get("azure_api_version", "")
self.openai_api_type = os.getenv("OPENAI_API_TYPE", config_params.get("azure_api_type", "azure"))
self.openai_api_base = os.getenv("OPENAI_AZURE_API_BASE", config_params.get("azure_api_base", ""))
self.openai_api_version = os.getenv("OPENAI_AZURE_API_VERSION", config_params.get("azure_api_version", ""))
self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", [])
def set_continuous_mode(self, value: bool):
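The reworked Azure loading gives environment variables precedence over the YAML file, with hard-coded defaults last. A minimal standalone sketch of that precedence chain, using the same keys as the template above (the `azure.yaml` filename here is an assumption):
```
import os
import yaml

# Load the Azure config file if present, otherwise fall back to an empty dict.
try:
    with open("azure.yaml") as file:
        config_params = yaml.load(file, Loader=yaml.FullLoader)
except FileNotFoundError:
    config_params = {}

# Environment variables win over YAML values, which win over defaults.
api_type = os.getenv("OPENAI_API_TYPE", config_params.get("azure_api_type", "azure"))
api_base = os.getenv("OPENAI_AZURE_API_BASE", config_params.get("azure_api_base", ""))
```
Note also that `TEMPERATURE` is now parsed with `float()` instead of `int()`, so fractional values such as `0.5` no longer raise a `ValueError`.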

View File

@@ -67,6 +67,7 @@ def execute_python_file(file):
except Exception as e:
return f"Error: {str(e)}"
def execute_shell(command_line):
current_dir = os.getcwd()

View File

@@ -38,7 +38,7 @@ def write_to_file(filename, text):
directory = os.path.dirname(filepath)
if not os.path.exists(directory):
os.makedirs(directory)
with open(filepath, "w") as f:
with open(filepath, "w", encoding='utf-8') as f:
f.write(text)
return "File written to successfully."
except Exception as e:
@@ -65,6 +65,7 @@ def delete_file(filename):
except Exception as e:
return "Error: " + str(e)
def search_files(directory):
found_files = []
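The new `encoding='utf-8'` argument in `write_to_file` fixes a real portability issue: without it, `open()` uses the platform default encoding (for example cp1252 on Windows), so non-ASCII agent output can raise `UnicodeEncodeError`. A minimal sketch (hypothetical filename):
```
# Without encoding="utf-8" this can fail under Windows' default codec.
with open("notes.txt", "w", encoding="utf-8") as f:
    f.write("café ☕")
```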

View File

@@ -11,6 +11,7 @@ cfg = Config()
working_directory = "auto_gpt_workspace"
def generate_image(prompt):
filename = str(uuid.uuid4()) + ".jpg"

View File

@@ -4,6 +4,7 @@ cfg = Config()
openai.api_key = cfg.openai_api_key
# Overly simple abstraction until we create something better
def create_chat_completion(messages, model=None, temperature=cfg.temperature, max_tokens=None)->str:
"""Create a chat completion using the OpenAI API"""

View File

@@ -157,6 +157,7 @@ class TypingConsoleHandler(logging.StreamHandler):
except Exception:
self.handleError(record)
class ConsoleHandler(logging.StreamHandler):
def emit(self, record):
msg = self.format(record)
@@ -166,11 +167,11 @@ class ConsoleHandler(logging.StreamHandler):
self.handleError(record)
'''
Allows to handle custom placeholders 'title_color' and 'message_no_color'.
To use this formatter, make sure to pass 'color', 'title' as log extras.
'''
class AutoGptFormatter(logging.Formatter):
"""
Allows to handle custom placeholders 'title_color' and 'message_no_color'.
To use this formatter, make sure to pass 'color', 'title' as log extras.
"""
def format(self, record: LogRecord) -> str:
if (hasattr(record, 'color')):
record.title_color = getattr(record, 'color') + getattr(record, 'title') + " " + Style.RESET_ALL
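Per the relocated docstring, callers must pass `color` and `title` as log extras for `AutoGptFormatter` to build `title_color`. A hypothetical call (the logger name is an assumption):
```
import logging
from colorama import Fore

log = logging.getLogger("auto-gpt")
log.info("Thinking...", extra={"color": Fore.YELLOW, "title": "SYSTEM:"})
```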

View File

@@ -20,16 +20,18 @@ from prompt import get_prompt
cfg = Config()
def check_openai_api_key():
"""Check if the OpenAI API key is set in config.py or as an environment variable."""
if not cfg.openai_api_key:
print(
Fore.RED +
"Please set your OpenAI API key in config.py or as an environment variable."
"Please set your OpenAI API key in .env or as an environment variable."
)
print("You can get your key from https://beta.openai.com/account/api-keys")
exit(1)
def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
if cfg.speak_mode and cfg.debug_mode:
speak.say_text("I have received an invalid JSON response from the OpenAI API. Trying to fix it now.")
@@ -58,6 +60,7 @@ def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
return json_string
def print_assistant_thoughts(assistant_reply):
"""Prints the assistant's thoughts to the console"""
global ai_name
@@ -262,6 +265,7 @@ def prompt_user():
config = AIConfig(ai_name, ai_role, ai_goals)
return config
def parse_arguments():
"""Parses the arguments passed to the script"""
global cfg
@@ -316,27 +320,27 @@ def parse_arguments():
cfg.memory_backend = chosen
# TODO: fill in llm values here
check_openai_api_key()
parse_arguments()
logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO)
ai_name = ""
prompt = construct_prompt()
# print(prompt)
# Initialize variables
full_message_history = []
result = None
next_action_count = 0
# Make a constant:
user_input = "Determine which next command to use, and respond using the format specified above:"
# Initialize memory and make sure it is empty.
# this is particularly important for indexing and referencing pinecone memory
memory = get_memory(cfg, init=True)
print('Using memory of type: ' + memory.__class__.__name__)
# Interaction Loop
while True:
def main():
global ai_name, memory
# TODO: fill in llm values here
check_openai_api_key()
parse_arguments()
logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO)
ai_name = ""
prompt = construct_prompt()
# print(prompt)
# Initialize variables
full_message_history = []
result = None
next_action_count = 0
# Make a constant:
user_input = "Determine which next command to use, and respond using the format specified above:"
# Initialize memory and make sure it is empty.
# this is particularly important for indexing and referencing pinecone memory
memory = get_memory(cfg, init=True)
print('Using memory of type: ' + memory.__class__.__name__)
# Interaction Loop
while True:
# Send message to AI, get response
with Spinner("Thinking... "):
assistant_reply = chat.chat_with_ai(
@@ -351,7 +355,8 @@ while True:
# Get command name and arguments
try:
command_name, arguments = cmd.get_command(attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply))
command_name, arguments = cmd.get_command(
attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply))
if cfg.speak_mode:
speak.say_text(f"I want to execute {command_name}")
except Exception as e:
@@ -406,7 +411,7 @@ while True:
f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
# Execute command
if command_name is not None and command_name.lower().startswith( "error" ):
if command_name is not None and command_name.lower().startswith("error"):
result = f"Command {command_name} threw the following error: " + arguments
elif command_name == "human_feedback":
result = f"Human feedback: {user_input}"
@@ -431,3 +436,7 @@ while True:
chat.create_chat_message(
"system", "Unable to execute command"))
logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command")
if __name__ == "__main__":
main()
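The net effect of this refactor: the interaction loop now lives in `main()` behind an import guard, so importing the module (for tests or tooling) no longer starts the loop as a side effect. The pattern in isolation:
```
def main():
    print("interaction loop would run here")  # placeholder body

if __name__ == "__main__":
    main()  # runs only when executed directly, not on import
```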

View File

@@ -19,6 +19,7 @@ except ImportError:
print("Pinecone not installed. Skipping import.")
PineconeMemory = None
def get_memory(cfg, init=False):
memory = None
if cfg.memory_backend == "pinecone":
@@ -44,6 +45,7 @@ def get_memory(cfg, init=False):
memory.clear()
return memory
def get_supported_memory_backends():
return supported_memory
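A hypothetical use of the factory above, mirroring how `main()` initializes memory:
```
from config import Config
from memory import get_memory

cfg = Config()
memory = get_memory(cfg, init=True)  # init=True clears the chosen backend
print('Using memory of type: ' + memory.__class__.__name__)
```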

View File

@@ -2,6 +2,7 @@ from typing import Optional, List, Any
from memory.base import MemoryProviderSingleton
class NoMemory(MemoryProviderSingleton):
def __init__(self, cfg):
"""

View File

@@ -5,6 +5,7 @@ from memory.base import MemoryProviderSingleton, get_ada_embedding
from logger import logger
from colorama import Fore, Style
class PineconeMemory(MemoryProviderSingleton):
def __init__(self, cfg):
pinecone_api_key = cfg.pinecone_api_key

View File

@@ -31,6 +31,7 @@ tts_headers = {
mutex_lock = Lock() # Ensure only one sound is played at a time
queue_semaphore = Semaphore(1) # The amount of sounds to queue before blocking the main thread
def eleven_labs_speech(text, voice_index=0):
"""Speak text using elevenlabs.io's API"""
tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format(
@@ -51,6 +52,7 @@ def eleven_labs_speech(text, voice_index=0):
print("Response content:", response.content)
return False
def gtts_speech(text):
tts = gtts.gTTS(text)
with mutex_lock:
@@ -58,6 +60,7 @@ def gtts_speech(text):
playsound("speech.mp3", True)
os.remove("speech.mp3")
def macos_tts_speech(text, voice_index=0):
if voice_index == 0:
os.system(f'say "{text}"')
@@ -67,6 +70,7 @@ def macos_tts_speech(text, voice_index=0):
else:
os.system(f'say -v Samantha "{text}"')
def say_text(text, voice_index=0):
def speak():
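For reference, the public entry point is `say_text()`, which (per the helpers above) can route to ElevenLabs, gTTS, or the macOS `say` command. A hypothetical call:
```
say_text("Setup complete.", voice_index=0)
```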

View File

@@ -1,6 +1,7 @@
import tiktoken
from typing import List, Dict
def count_message_tokens(messages : List[Dict[str, str]], model : str = "gpt-3.5-turbo-0301") -> int:
"""
Returns the number of tokens used by a list of messages.
@@ -41,6 +42,7 @@ def count_message_tokens(messages : List[Dict[str, str]], model : str = "gpt-3.5
num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
return num_tokens
def count_string_tokens(string: str, model_name: str) -> int:
"""
Returns the number of tokens in a text string.
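A hypothetical usage of the two counters above, using the signatures shown in this diff:
```
messages = [
    {"role": "system", "content": "You are Auto-GPT."},
    {"role": "user", "content": "Summarize the repository."},
]
print(count_message_tokens(messages, model="gpt-3.5-turbo-0301"))
print(count_string_tokens("Hello world", model_name="gpt-3.5-turbo-0301"))
```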

View File

@@ -8,6 +8,7 @@ sys.path.append(str(Path(__file__).resolve().parent.parent.parent / 'scripts'))
from config import Config
from memory.local import LocalCache
class TestLocalCache(unittest.TestCase):
def random_string(self, length):

View File

@@ -4,6 +4,7 @@ import sys
sys.path.append(os.path.abspath('../scripts'))
from memory.local import LocalCache
def MockConfig():
return type('MockConfig', (object,), {
'debug_mode': False,
@@ -12,6 +13,7 @@ def MockConfig():
'memory_index': 'auto-gpt',
})
class TestLocalCache(unittest.TestCase):
def setUp(self):
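The `MockConfig()` helper above uses the three-argument form of `type()` to build a throwaway config class; a standalone sketch with the attributes shown in this diff:
```
MockConfig = type('MockConfig', (object,), {
    'debug_mode': False,
    'memory_index': 'auto-gpt',
})

cfg = MockConfig()
assert cfg.memory_index == 'auto-gpt'
```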

View File

@@ -1,6 +1,7 @@
import unittest
from scripts.config import Config
class TestConfig(unittest.TestCase):
def test_singleton(self):

View File

@@ -3,6 +3,7 @@ import tests.context
from scripts.json_parser import fix_and_parse_json
class TestParseJson(unittest.TestCase):
def test_valid_json(self):
# Test that a valid JSON string is parsed correctly
@@ -52,7 +53,7 @@ class TestParseJson(unittest.TestCase):
good_obj = {
"command": {
"name": "browse_website",
"args":{
"args": {
"url": "https://github.com/Torantulino/Auto-GPT"
}
},
@@ -91,7 +92,7 @@ class TestParseJson(unittest.TestCase):
good_obj = {
"command": {
"name": "browse_website",
"args":{
"args": {
"url": "https://github.com/Torantulino/Auto-GPT"
}
},

View File

@@ -5,6 +5,7 @@ import sys
sys.path.append(os.path.abspath('../scripts'))
from json_parser import fix_and_parse_json
class TestParseJson(unittest.TestCase):
def test_valid_json(self):
# Test that a valid JSON string is parsed correctly
@@ -52,7 +53,7 @@ class TestParseJson(unittest.TestCase):
good_obj = {
"command": {
"name": "browse_website",
"args":{
"args": {
"url": "https://github.com/Torantulino/Auto-GPT"
}
},
@@ -91,7 +92,7 @@ class TestParseJson(unittest.TestCase):
good_obj = {
"command": {
"name": "browse_website",
"args":{
"args": {
"url": "https://github.com/Torantulino/Auto-GPT"
}
},