Merge remote-tracking branch 'origin/master' into prompt-generator

Alrik Olson
2023-04-12 07:33:36 -07:00
17 changed files with 116 additions and 28 deletions

View File

@@ -2,6 +2,8 @@ PINECONE_API_KEY=your-pinecone-api-key
 PINECONE_ENV=your-pinecone-region
 OPENAI_API_KEY=your-openai-api-key
 ELEVENLABS_API_KEY=your-elevenlabs-api-key
+ELEVENLABS_VOICE_1_ID=your-voice-id
+ELEVENLABS_VOICE_2_ID=your-voice-id
 SMART_LLM_MODEL=gpt-4
 FAST_LLM_MODEL=gpt-3.5-turbo
 GOOGLE_API_KEY=
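Aside: a minimal sketch of how values from this template are consumed at runtime, assuming python-dotenv (the Config class changed later in this commit reads them with `os.getenv`); only the variable names are taken from the template above, everything else is illustrative.

```python
# Minimal sketch, not the repository's actual loader: read the new
# ElevenLabs voice IDs from a .env file created from this template.
import os
from dotenv import load_dotenv

load_dotenv()  # copies key=value pairs from .env into the process environment

elevenlabs_voice_1_id = os.getenv("ELEVENLABS_VOICE_1_ID")
elevenlabs_voice_2_id = os.getenv("ELEVENLABS_VOICE_2_ID")
```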

View File

@@ -23,10 +23,10 @@ By following these guidelines, your PRs are more likely to be merged quickly aft
 ### PR Quality Checklist
 - [ ] My pull request is atomic and focuses on a single change.
-- [ ] I have thouroughly tested my changes with multiple different prompts.
+- [ ] I have thoroughly tested my changes with multiple different prompts.
 - [ ] I have considered potential risks and mitigations for my changes.
 - [ ] I have documented my changes clearly and comprehensively.
-- [ ] I have not snuck in any "extra" small tweaks changes <!-- Submit these as seperate Pull Reqests, they are the easiest to merge! -->
+- [ ] I have not snuck in any "extra" small tweaks changes <!-- Submit these as separate Pull Reqests, they are the easiest to merge! -->
 <!-- If you haven't added tests, please explain why. If you have, check the appropriate box. If you've ensured your PR is atomic and well-documented, check the corresponding boxes. -->

.gitignore vendored
View File

@@ -7,9 +7,10 @@ package-lock.json
 auto_gpt_workspace/*
 *.mpeg
 .env
-venv/*
+*venv/*
 outputs/*
 ai_settings.yaml
 .vscode
+.idea/*
 auto-gpt.json
 log.txt

View File

@@ -6,7 +6,7 @@ To contribute to this GitHub project, you can follow these steps:
 2. Clone the repository to your local machine using the following command:
 ```
-git clone https://github.com/Torantulino/Auto-GPT
+git clone https://github.com/<YOUR-GITHUB-USERNAME>/Auto-GPT
 ```
 3. Create a new branch for your changes using the following command:

View File

@@ -3,7 +3,7 @@
 ![Twitter Follow](https://img.shields.io/twitter/follow/siggravitas?style=social)
 [![](https://dcbadge.vercel.app/api/server/PQ7VX6TY4t?style=flat)](https://discord.gg/PQ7VX6TY4t)
-Auto-GPT is an experimental open-source application showcasing the capabilities of the GPT-4 language model. This program, driven by GPT-4, autonomously develops and manages businesses to increase net worth. As one of the first examples of GPT-4 running fully autonomously, Auto-GPT pushes the boundaries of what is possible with AI.
+Auto-GPT is an experimental open-source application showcasing the capabilities of the GPT-4 language model. This program, driven by GPT-4, chains together LLM "thoughts", to autonomously achieve whatever goal you set. As one of the first examples of GPT-4 running fully autonomously, Auto-GPT pushes the boundaries of what is possible with AI.
 ### Demo (30/03/2023):
 https://user-images.githubusercontent.com/22963551/228855501-2f5777cf-755b-4407-a643-c7299e5b6419.mp4
@@ -58,7 +58,7 @@ Your support is greatly appreciated
 ## 📋 Requirements
 - [Python 3.8 or later](https://www.tutorialspoint.com/how-to-install-python-in-windows)
-- OpenAI API key
+- [OpenAI API key](https://platform.openai.com/account/api-keys)
 - [PINECONE API key](https://www.pinecone.io/)
 Optional:
@@ -267,3 +267,8 @@ Stay up-to-date with the latest news, updates, and insights about Auto-GPT by fo
 We look forward to connecting with you and hearing your thoughts, ideas, and experiences with Auto-GPT. Join us on Twitter and let's explore the future of AI together!
+<p align="center">
+<a href="https://star-history.com/#Torantulino/auto-gpt&Date">
+  <img src="https://api.star-history.com/svg?repos=Torantulino/auto-gpt&type=Date" alt="Star History Chart">
+</a>
+</p>

View File

@@ -13,7 +13,7 @@ def create_agent(task, prompt, model):
     messages = [{"role": "user", "content": prompt}, ]

-    # Start GTP3 instance
+    # Start GPT instance
     agent_reply = create_chat_completion(
         model=model,
         messages=messages,
@@ -41,7 +41,7 @@ def message_agent(key, message):
     # Add user message to message history before sending to agent
     messages.append({"role": "user", "content": message})

-    # Start GTP3 instance
+    # Start GPT instance
     agent_reply = create_chat_completion(
         model=model,
         messages=messages,

View File

@@ -42,7 +42,7 @@ class AIConfig:
             config_file (int): The path to the config yaml file. DEFAULT: "../ai_settings.yaml"
         Returns:
-            cls (object): A instance of given cls object
+            cls (object): An instance of given cls object
         """
         try:
@@ -80,7 +80,7 @@ class AIConfig:
             None
         Returns:
-            full_prompt (str): A string containing the intitial prompt for the user including the ai_name, ai_role and ai_goals.
+            full_prompt (str): A string containing the initial prompt for the user including the ai_name, ai_role and ai_goals.
         """
         prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""

View File

@@ -64,14 +64,14 @@ def chat_with_ai(
         model = cfg.fast_llm_model  # TODO: Change model from hardcode to argument
         # Reserve 1000 tokens for the response
-        if cfg.debug:
+        if cfg.debug_mode:
             print(f"Token limit: {token_limit}")
         send_token_limit = token_limit - 1000

         relevant_memory = permanent_memory.get_relevant(str(full_message_history[-5:]), 10)

-        if cfg.debug:
+        if cfg.debug_mode:
             print('Memory Stats: ', permanent_memory.get_stats())

         next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context(
@@ -110,7 +110,7 @@ def chat_with_ai(
         # assert tokens_remaining >= 0, "Tokens remaining is negative. This should never happen, please submit a bug report at https://www.github.com/Torantulino/Auto-GPT"

         # Debug print the current context
-        if cfg.debug:
+        if cfg.debug_mode:
             print(f"Token limit: {token_limit}")
             print(f"Send Token Count: {current_tokens_used}")
             print(f"Tokens remaining for response: {tokens_remaining}")
@@ -141,6 +141,6 @@ def chat_with_ai(
             return assistant_reply
         except openai.error.RateLimitError:
-            # TODO: WHen we switch to langchain, this is built in
+            # TODO: When we switch to langchain, this is built in
             print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
             time.sleep(10)
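The rate-limit branch above sleeps for ten seconds and lets the surrounding loop retry. A standalone sketch of the same pattern, with a hypothetical wrapper name and the pre-1.0 `openai` client API that this codebase targets:

```python
# Sketch of retry-on-rate-limit, as in the except branch above.
import time
import openai

def create_chat_completion_with_retry(model, messages, max_retries=5):
    for _ in range(max_retries):
        try:
            return openai.ChatCompletion.create(model=model, messages=messages)
        except openai.error.RateLimitError:
            print("Error: API Rate Limit Reached. Waiting 10 seconds...")
            time.sleep(10)
    raise RuntimeError("API rate limit persisted after retries")
```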

View File

@@ -110,7 +110,7 @@ def execute_command(command_name, arguments):
         elif command_name == "task_complete":
             shutdown()
         else:
-            return f"Unknown command '{command_name}'. Please refer to the 'COMMANDS' list for availabe commands and only respond in the specified JSON format."
+            return f"Unknown command '{command_name}'. Please refer to the 'COMMANDS' list for available commands and only respond in the specified JSON format."
     # All errors, return "Error: + error message"
     except Exception as e:
         return "Error: " + str(e)

View File

@@ -54,6 +54,8 @@ class Config(metaclass=Singleton):
             openai.api_version = self.openai_api_version

         self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
+        self.elevenlabs_voice_1_id = os.getenv("ELEVENLABS_VOICE_1_ID")
+        self.elevenlabs_voice_2_id = os.getenv("ELEVENLABS_VOICE_2_ID")

         self.use_mac_os_tts = False
         self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS")
@@ -75,7 +77,7 @@ class Config(metaclass=Singleton):
         self.redis_password = os.getenv("REDIS_PASSWORD", "")
         self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == 'True'
         self.memory_index = os.getenv("MEMORY_INDEX", 'auto-gpt')
-        # Note that indexes must be created on db 0 in redis, this is not configureable.
+        # Note that indexes must be created on db 0 in redis, this is not configurable.

         self.memory_backend = os.getenv("MEMORY_BACKEND", 'local')
         # Initialize the OpenAI API client
@@ -113,6 +115,14 @@ class Config(metaclass=Singleton):
"""Set the ElevenLabs API key value."""
self.elevenlabs_api_key = value
def set_elevenlabs_voice_1_id(self, value: str):
"""Set the ElevenLabs Voice 1 ID value."""
self.elevenlabs_voice_1_id = value
def set_elevenlabs_voice_2_id(self, value: str):
"""Set the ElevenLabs Voice 2 ID value."""
self.elevenlabs_voice_2_id = value
def set_google_api_key(self, value: str):
"""Set the Google API key value."""
self.google_api_key = value
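These setters take effect globally because `Config` uses a `Singleton` metaclass: every `Config()` call returns the same instance, so values set during argument parsing are visible everywhere else, including the new test file at the end of this commit. A minimal sketch of how such a metaclass typically works; the repository's actual implementation may differ in details:

```python
# Minimal Singleton metaclass sketch; illustration only.
class Singleton(type):
    _instances = {}

    def __call__(cls, *args, **kwargs):
        # Build the instance on first call, return the cached one after that.
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]


class Config(metaclass=Singleton):
    def __init__(self):
        self.elevenlabs_voice_1_id = None


a, b = Config(), Config()
a.elevenlabs_voice_1_id = "some-voice-id"
assert a is b and b.elevenlabs_voice_1_id == "some-voice-id"
```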

View File

@@ -53,7 +53,7 @@ def fix_and_parse_json(
         last_brace_index = json_str.rindex("}")
         json_str = json_str[:last_brace_index+1]
         return json.loads(json_str)
-    except json.JSONDecodeError as e:  # noqa: F841
+    except (json.JSONDecodeError, ValueError) as e:  # noqa: F841
         if try_to_fix_with_gpt:
             print("Warning: Failed to parse AI output, attempting to fix."
                   "\n If you see this warning frequently, it's likely that"
@@ -67,22 +67,22 @@ def fix_and_parse_json(
             else:
                 # This allows the AI to react to the error message,
                 # which usually results in it correcting its ways.
-                print("Failed to fix ai output, telling the AI.")
+                print("Failed to fix AI output, telling the AI.")
                 return json_str
         else:
             raise e


 def fix_json(json_str: str, schema: str) -> str:
-    """Fix the given JSON string to make it parseable and fully complient with the provided schema."""
+    """Fix the given JSON string to make it parseable and fully compliant with the provided schema."""

-    # Try to fix the JSON using gpt:
+    # Try to fix the JSON using GPT:
     function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
     args = [f"'''{json_str}'''", f"'''{schema}'''"]
     description_string = "Fixes the provided JSON string to make it parseable"\
-        " and fully complient with the provided schema.\n If an object or"\
+        " and fully compliant with the provided schema.\n If an object or"\
         " field specified in the schema isn't contained within the correct"\
-        " JSON, it is ommited.\n This function is brilliant at guessing"\
+        " JSON, it is omitted.\n This function is brilliant at guessing"\
         " when the format is incorrect."

     # If it doesn't already start with a "`", add one:
@@ -91,7 +91,7 @@ def fix_json(json_str: str, schema: str) -> str:
     result_string = call_ai_function(
         function_string, args, description_string, model=cfg.fast_llm_model
     )
-    if cfg.debug:
+    if cfg.debug_mode:
         print("------------ JSON FIX ATTEMPT ---------------")
         print(f"Original JSON: {json_str}")
         print("-----------")

View File

@@ -266,6 +266,7 @@ def prompt_user():
 def parse_arguments():
     """Parses the arguments passed to the script"""
     global cfg
+    cfg.set_debug_mode(False)
     cfg.set_continuous_mode(False)
     cfg.set_speak_mode(False)
@@ -274,6 +275,7 @@ def parse_arguments():
     parser.add_argument('--speak', action='store_true', help='Enable Speak Mode')
     parser.add_argument('--debug', action='store_true', help='Enable Debug Mode')
     parser.add_argument('--gpt3only', action='store_true', help='Enable GPT3.5 Only Mode')
+    parser.add_argument('--gpt4only', action='store_true', help='Enable GPT4 Only Mode')
     args = parser.parse_args()

     if args.continuous:
@@ -291,7 +293,14 @@ def parse_arguments():
     if args.gpt3only:
         print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
         cfg.set_smart_llm_model(cfg.fast_llm_model)

+    if args.gpt4only:
+        print_to_console("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
+        cfg.set_fast_llm_model(cfg.smart_llm_model)
+
+    if args.debug:
+        print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED")
+        cfg.set_debug_mode(True)

 # TODO: fill in llm values here
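One subtlety in the new flag handling: `--gpt3only` is applied before `--gpt4only`, so passing both flags leaves both model slots pointing at the original fast model. A hypothetical trace of the two assignments above:

```python
# Hypothetical trace of passing --gpt3only and --gpt4only together,
# following the order of the if-blocks above.
smart_llm_model, fast_llm_model = "gpt-4", "gpt-3.5-turbo"
smart_llm_model = fast_llm_model  # --gpt3only: smart -> gpt-3.5-turbo
fast_llm_model = smart_llm_model  # --gpt4only: fast stays gpt-3.5-turbo
assert smart_llm_model == fast_llm_model == "gpt-3.5-turbo"
```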
@@ -383,7 +392,7 @@ while True:
f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
# Execute command
if command_name.lower().startswith( "error" ):
if command_name is not None and command_name.lower().startswith( "error" ):
result = f"Command {command_name} threw the following error: " + arguments
elif command_name == "human_feedback":
result = f"Human feedback: {user_input}"

View File

@@ -54,8 +54,8 @@ class LocalCache(MemoryProviderSingleton):
         vector = vector[np.newaxis, :]
         self.data.embeddings = np.concatenate(
             [
-                vector,
                 self.data.embeddings,
+                vector,
             ],
             axis=0,
         )
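Reordering the `np.concatenate` arguments appends the new embedding as the last row instead of the first, so row *i* of the matrix stays aligned with the *i*-th text added to the cache. Illustrated with hypothetical shapes:

```python
# Appending (new order) vs. prepending (old order), hypothetical shapes.
import numpy as np

embeddings = np.zeros((2, 3))   # two rows for two previously added texts
vector = np.ones((1, 3))        # embedding of the newly added text

appended = np.concatenate([embeddings, vector], axis=0)
assert (appended[-1] == 1).all()   # new text lands in the last row

prepended = np.concatenate([vector, embeddings], axis=0)
assert (prepended[0] == 1).all()   # old order shifted every existing row
```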

View File

@@ -7,9 +7,21 @@ import gtts
 import threading
 from threading import Lock, Semaphore

-# TODO: Nicer names for these ids
-voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"]
+# Default voice IDs
+default_voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"]
+
+# Retrieve custom voice IDs from the Config class
+custom_voice_1 = cfg.elevenlabs_voice_1_id
+custom_voice_2 = cfg.elevenlabs_voice_2_id
+
+# Placeholder values that should be treated as empty
+placeholders = {"your-voice-id"}
+
+# Use custom voice IDs if provided and not placeholders, otherwise use default voice IDs
+voices = [
+    custom_voice_1 if custom_voice_1 and custom_voice_1 not in placeholders else default_voices[0],
+    custom_voice_2 if custom_voice_2 and custom_voice_2 not in placeholders else default_voices[1]
+]

 tts_headers = {
     "Content-Type": "application/json",

View File

@@ -20,7 +20,7 @@ class Spinner:
             sys.stdout.write(next(self.spinner) + " " + self.message + "\r")
             sys.stdout.flush()
             time.sleep(self.delay)
-        sys.stdout.write('\b' * (len(self.message) + 2))
+        sys.stdout.write('\r' + ' ' * (len(self.message) + 2) + '\r')

     def __enter__(self):
         """Start the spinner"""

View File

@@ -0,0 +1,49 @@
+import unittest
+import random
+import string
+import sys
+from pathlib import Path
+
+# Add the parent directory of the 'scripts' folder to the Python path
+sys.path.append(str(Path(__file__).resolve().parent.parent.parent / 'scripts'))
+
+from config import Config
+from memory.local import LocalCache
+
+
+class TestLocalCache(unittest.TestCase):
+
+    def random_string(self, length):
+        return ''.join(random.choice(string.ascii_letters) for _ in range(length))
+
+    def setUp(self):
+        cfg = Config()
+        self.cache = LocalCache(cfg)
+        self.cache.clear()
+
+        # Add example texts to the cache
+        self.example_texts = [
+            'The quick brown fox jumps over the lazy dog',
+            'I love machine learning and natural language processing',
+            'The cake is a lie, but the pie is always true',
+            'ChatGPT is an advanced AI model for conversation'
+        ]
+
+        for text in self.example_texts:
+            self.cache.add(text)
+
+        # Add some random strings to test noise
+        for _ in range(5):
+            self.cache.add(self.random_string(10))
+
+    def test_get_relevant(self):
+        query = "I'm interested in artificial intelligence and NLP"
+        k = 3
+        relevant_texts = self.cache.get_relevant(query, k)
+
+        print(f"Top {k} relevant texts for the query '{query}':")
+        for i, text in enumerate(relevant_texts, start=1):
+            print(f"{i}. {text}")
+
+        self.assertEqual(len(relevant_texts), k)
+        self.assertIn(self.example_texts[1], relevant_texts)
+
+
+if __name__ == '__main__':
+    unittest.main()
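Because of the `__main__` guard, this test file can be run directly from any working directory; the `sys.path` line resolves the `scripts` folder relative to the file itself. A hypothetical invocation (the file's actual name is not shown in this diff):

```python
# Hypothetical invocation; the real file name is not shown in this diff.
#   python tests/local_cache_test.py
# or, via unittest's discovery CLI from the repository root:
#   python -m unittest discover -s tests -p "*_test.py"
```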