Merge branch 'master' of github.com:Torantulino/Auto-GPT into shellcommands
@@ -2,6 +2,8 @@ PINECONE_API_KEY=your-pinecone-api-key
 PINECONE_ENV=your-pinecone-region
 OPENAI_API_KEY=your-openai-api-key
 ELEVENLABS_API_KEY=your-elevenlabs-api-key
+ELEVENLABS_VOICE_1_ID=your-voice-id
+ELEVENLABS_VOICE_2_ID=your-voice-id
 SMART_LLM_MODEL=gpt-4
 FAST_LLM_MODEL=gpt-3.5-turbo
 GOOGLE_API_KEY=
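Note: the two new ELEVENLABS_VOICE_* keys feed the Config changes further down in this commit. A minimal sketch of how such template keys are typically consumed (the fallback handling here is illustrative, not part of the diff):

```python
import os

# A key left blank or still set to the template placeholder should count as
# "not configured" so the code can fall back to a default voice.
voice_1_id = os.getenv("ELEVENLABS_VOICE_1_ID")
if not voice_1_id or voice_1_id == "your-voice-id":
    voice_1_id = None  # caller substitutes a default voice ID
```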
.github/PULL_REQUEST_TEMPLATE.md (vendored, 4 lines changed)
@@ -23,10 +23,10 @@ By following these guidelines, your PRs are more likely to be merged quickly aft
 
 ### PR Quality Checklist
 - [ ] My pull request is atomic and focuses on a single change.
-- [ ] I have thouroughly tested my changes with multiple different prompts.
+- [ ] I have thoroughly tested my changes with multiple different prompts.
 - [ ] I have considered potential risks and mitigations for my changes.
 - [ ] I have documented my changes clearly and comprehensively.
-- [ ] I have not snuck in any "extra" small tweaks changes <!-- Submit these as seperate Pull Reqests, they are the easiest to merge! -->
+- [ ] I have not snuck in any "extra" small tweaks changes <!-- Submit these as separate Pull Reqests, they are the easiest to merge! -->
 
 <!-- If you haven't added tests, please explain why. If you have, check the appropriate box. If you've ensured your PR is atomic and well-documented, check the corresponding boxes. -->
 
.gitignore (vendored, 3 lines changed)
@@ -7,9 +7,10 @@ package-lock.json
 auto_gpt_workspace/*
 *.mpeg
 .env
-venv/*
+*venv/*
 outputs/*
 ai_settings.yaml
 .vscode
+.idea/*
 auto-gpt.json
 log.txt
@@ -6,7 +6,7 @@ To contribute to this GitHub project, you can follow these steps:
 2. Clone the repository to your local machine using the following command:
 
 ```
-git clone https://github.com/Torantulino/Auto-GPT
+git clone https://github.com/<YOUR-GITHUB-USERNAME>/Auto-GPT
 ```
 3. Create a new branch for your changes using the following command:
 
@@ -3,7 +3,7 @@
 
 [](https://discord.gg/PQ7VX6TY4t)
 
-Auto-GPT is an experimental open-source application showcasing the capabilities of the GPT-4 language model. This program, driven by GPT-4, autonomously develops and manages businesses to increase net worth. As one of the first examples of GPT-4 running fully autonomously, Auto-GPT pushes the boundaries of what is possible with AI.
+Auto-GPT is an experimental open-source application showcasing the capabilities of the GPT-4 language model. This program, driven by GPT-4, chains together LLM "thoughts", to autonomously achieve whatever goal you set. As one of the first examples of GPT-4 running fully autonomously, Auto-GPT pushes the boundaries of what is possible with AI.
 
 ### Demo (30/03/2023):
 https://user-images.githubusercontent.com/22963551/228855501-2f5777cf-755b-4407-a643-c7299e5b6419.mp4
@@ -58,7 +58,7 @@ Your support is greatly appreciated
 
 ## 📋 Requirements
 - [Python 3.8 or later](https://www.tutorialspoint.com/how-to-install-python-in-windows)
-- OpenAI API key
+- [OpenAI API key](https://platform.openai.com/account/api-keys)
 - [PINECONE API key](https://www.pinecone.io/)
 
 Optional:
@@ -267,3 +267,8 @@ Stay up-to-date with the latest news, updates, and insights about Auto-GPT by fo
 
 We look forward to connecting with you and hearing your thoughts, ideas, and experiences with Auto-GPT. Join us on Twitter and let's explore the future of AI together!
 
+<p align="center">
+<a href="https://star-history.com/#Torantulino/auto-gpt&Date">
+<img src="https://api.star-history.com/svg?repos=Torantulino/auto-gpt&type=Date" alt="Star History Chart">
+</a>
+</p>
@@ -13,7 +13,7 @@ def create_agent(task, prompt, model):
 
     messages = [{"role": "user", "content": prompt}, ]
 
-    # Start GTP3 instance
+    # Start GPT instance
     agent_reply = create_chat_completion(
         model=model,
         messages=messages,
@@ -41,7 +41,7 @@ def message_agent(key, message):
     # Add user message to message history before sending to agent
     messages.append({"role": "user", "content": message})
 
-    # Start GTP3 instance
+    # Start GPT instance
     agent_reply = create_chat_completion(
         model=model,
         messages=messages,
@@ -42,7 +42,7 @@ class AIConfig:
            config_file (int): The path to the config yaml file. DEFAULT: "../ai_settings.yaml"
 
         Returns:
-            cls (object): A instance of given cls object
+            cls (object): An instance of given cls object
         """
 
         try:
@@ -80,7 +80,7 @@ class AIConfig:
            None
 
         Returns:
-            full_prompt (str): A string containing the intitial prompt for the user including the ai_name, ai_role and ai_goals.
+            full_prompt (str): A string containing the initial prompt for the user including the ai_name, ai_role and ai_goals.
         """
 
         prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""
@@ -92,4 +92,3 @@ class AIConfig:
 
         full_prompt += f"\n\n{data.load_prompt()}"
         return full_prompt
-
@@ -15,7 +15,7 @@ def scrape_text(url):
     # Most basic check if the URL is valid:
     if not url.startswith('http'):
         return "Error: Invalid URL"
 
     # Restrict access to local files
     if check_local_file_access(url):
         return "Error: Access to local files is restricted"
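This hunk only shows the call site of check_local_file_access; its body is outside the excerpt. A purely hypothetical sketch of what such a guard might look like (not the committed implementation):

```python
def check_local_file_access(url: str) -> bool:
    # Block URL schemes/hosts that could expose files on the machine running the agent.
    local_prefixes = ("file://", "http://localhost", "https://localhost", "http://127.0.0.1")
    return any(url.startswith(prefix) for prefix in local_prefixes)
```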
@@ -63,15 +63,15 @@ def chat_with_ai(
             """
             model = cfg.fast_llm_model  # TODO: Change model from hardcode to argument
             # Reserve 1000 tokens for the response
 
-            if cfg.debug:
+            if cfg.debug_mode:
                 print(f"Token limit: {token_limit}")
 
             send_token_limit = token_limit - 1000
 
             relevant_memory = permanent_memory.get_relevant(str(full_message_history[-5:]), 10)
 
-            if cfg.debug:
+            if cfg.debug_mode:
                 print('Memory Stats: ', permanent_memory.get_stats())
 
             next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context(
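For context on the budget arithmetic around the renamed flag: chat_with_ai reserves a flat 1000 tokens for the model's reply and sends at most token_limit - 1000 tokens of context. A minimal sketch of that bookkeeping (the function name is illustrative, not from the repo):

```python
def response_headroom(token_limit: int, tokens_used_by_prompt: int) -> int:
    send_token_limit = token_limit - 1000      # reserve 1000 tokens for the reply
    return send_token_limit - tokens_used_by_prompt

print(response_headroom(4096, 2500))  # 596 tokens of context budget still free
```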
@@ -110,7 +110,7 @@ def chat_with_ai(
             # assert tokens_remaining >= 0, "Tokens remaining is negative. This should never happen, please submit a bug report at https://www.github.com/Torantulino/Auto-GPT"
 
             # Debug print the current context
-            if cfg.debug:
+            if cfg.debug_mode:
                 print(f"Token limit: {token_limit}")
                 print(f"Send Token Count: {current_tokens_used}")
                 print(f"Tokens remaining for response: {tokens_remaining}")
@@ -141,6 +141,6 @@ def chat_with_ai(
 
             return assistant_reply
         except openai.error.RateLimitError:
-            # TODO: WHen we switch to langchain, this is built in
+            # TODO: When we switch to langchain, this is built in
             print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
             time.sleep(10)
@@ -115,7 +115,7 @@ def execute_command(command_name, arguments):
         elif command_name == "task_complete":
             shutdown()
         else:
-            return f"Unknown command '{command_name}'. Please refer to the 'COMMANDS' list for availabe commands and only respond in the specified JSON format."
+            return f"Unknown command '{command_name}'. Please refer to the 'COMMANDS' list for available commands and only respond in the specified JSON format."
     # All errors, return "Error: + error message"
     except Exception as e:
         return "Error: " + str(e)
@@ -33,7 +33,7 @@ class Config(metaclass=Singleton):
 
     def __init__(self):
         """Initialize the Config class"""
-        self.debug = False
+        self.debug_mode = False
         self.continuous_mode = False
         self.speak_mode = False
 
@@ -56,6 +56,8 @@ class Config(metaclass=Singleton):
         openai.api_version = self.openai_api_version
 
         self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
+        self.elevenlabs_voice_1_id = os.getenv("ELEVENLABS_VOICE_1_ID")
+        self.elevenlabs_voice_2_id = os.getenv("ELEVENLABS_VOICE_2_ID")
 
         self.use_mac_os_tts = False
         self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS")
@@ -77,7 +79,7 @@ class Config(metaclass=Singleton):
         self.redis_password = os.getenv("REDIS_PASSWORD", "")
         self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == 'True'
         self.memory_index = os.getenv("MEMORY_INDEX", 'auto-gpt')
-        # Note that indexes must be created on db 0 in redis, this is not configureable.
+        # Note that indexes must be created on db 0 in redis, this is not configurable.
 
         self.memory_backend = os.getenv("MEMORY_BACKEND", 'local')
         # Initialize the OpenAI API client
@@ -91,9 +93,6 @@ class Config(metaclass=Singleton):
         """Set the speak mode value."""
         self.speak_mode = value
 
-    def set_debug_mode(self, value: bool):
-        self.debug_mode = value
-
     def set_fast_llm_model(self, value: str):
         """Set the fast LLM model value."""
         self.fast_llm_model = value
@@ -118,6 +117,14 @@ class Config(metaclass=Singleton):
         """Set the ElevenLabs API key value."""
         self.elevenlabs_api_key = value
 
+    def set_elevenlabs_voice_1_id(self, value: str):
+        """Set the ElevenLabs Voice 1 ID value."""
+        self.elevenlabs_voice_1_id = value
+
+    def set_elevenlabs_voice_2_id(self, value: str):
+        """Set the ElevenLabs Voice 2 ID value."""
+        self.elevenlabs_voice_2_id = value
+
     def set_google_api_key(self, value: str):
         """Set the Google API key value."""
         self.google_api_key = value
@@ -136,4 +143,4 @@ class Config(metaclass=Singleton):
 
     def set_debug_mode(self, value: bool):
         """Set the debug mode value."""
-        self.debug = value
+        self.debug_mode = value
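Taken together, the config.py hunks leave the singleton with one canonical debug_mode attribute and a single setter, where before an unused self.debug coexisted with a duplicate set_debug_mode. A stripped-down sketch of the resulting shape (Singleton metaclass omitted):

```python
class Config:
    def __init__(self):
        self.debug_mode = False  # the old self.debug attribute is gone

    def set_debug_mode(self, value: bool):
        """Set the debug mode value."""
        self.debug_mode = value

cfg = Config()
cfg.set_debug_mode(True)
assert cfg.debug_mode  # matches the cfg.debug -> cfg.debug_mode renames above
```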
@@ -53,7 +53,7 @@ def fix_and_parse_json(
         last_brace_index = json_str.rindex("}")
         json_str = json_str[:last_brace_index+1]
         return json.loads(json_str)
-    except json.JSONDecodeError as e:  # noqa: F841
+    except (json.JSONDecodeError, ValueError) as e:  # noqa: F841
         if try_to_fix_with_gpt:
             print("Warning: Failed to parse AI output, attempting to fix."
                   "\n If you see this warning frequently, it's likely that"
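The widened except clause matters because str.rindex raises ValueError, not json.JSONDecodeError, when the string contains no closing brace, so the old clause let that exception escape. A quick reproduction of the case the new clause catches:

```python
json_str = "no closing brace here"
try:
    last_brace_index = json_str.rindex("}")  # raises ValueError: substring not found
except ValueError as e:
    print(f"rindex failed, so the fixer path can now handle this too: {e}")
```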
@@ -67,22 +67,22 @@ def fix_and_parse_json(
             else:
                 # This allows the AI to react to the error message,
                 # which usually results in it correcting its ways.
-                print("Failed to fix ai output, telling the AI.")
+                print("Failed to fix AI output, telling the AI.")
                 return json_str
         else:
             raise e
 
 
 def fix_json(json_str: str, schema: str) -> str:
-    """Fix the given JSON string to make it parseable and fully complient with the provided schema."""
+    """Fix the given JSON string to make it parseable and fully compliant with the provided schema."""
 
-    # Try to fix the JSON using gpt:
+    # Try to fix the JSON using GPT:
     function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
     args = [f"'''{json_str}'''", f"'''{schema}'''"]
     description_string = "Fixes the provided JSON string to make it parseable"\
-        " and fully complient with the provided schema.\n If an object or"\
+        " and fully compliant with the provided schema.\n If an object or"\
         " field specified in the schema isn't contained within the correct"\
-        " JSON, it is ommited.\n This function is brilliant at guessing"\
+        " JSON, it is omitted.\n This function is brilliant at guessing"\
         " when the format is incorrect."
 
     # If it doesn't already start with a "`", add one:
@@ -91,7 +91,7 @@ def fix_json(json_str: str, schema: str) -> str:
     result_string = call_ai_function(
         function_string, args, description_string, model=cfg.fast_llm_model
     )
-    if cfg.debug:
+    if cfg.debug_mode:
         print("------------ JSON FIX ATTEMPT ---------------")
         print(f"Original JSON: {json_str}")
         print("-----------")
@@ -88,7 +88,7 @@ def fix_invalid_escape(json_str: str, error_message: str) -> str:
             json.loads(json_str)
             return json_str
         except json.JSONDecodeError as e:
-            if cfg.debug:
+            if cfg.debug_mode:
                 print('json loads error - fix invalid escape', e)
             error_message = str(e)
     return json_str
@@ -103,12 +103,12 @@ def correct_json(json_str: str) -> str:
     """
 
     try:
-        if cfg.debug:
+        if cfg.debug_mode:
            print("json", json_str)
        json.loads(json_str)
        return json_str
    except json.JSONDecodeError as e:
-        if cfg.debug:
+        if cfg.debug_mode:
            print('json loads error', e)
        error_message = str(e)
        if error_message.startswith('Invalid \\escape'):
@@ -119,7 +119,7 @@ def correct_json(json_str: str) -> str:
             json.loads(json_str)
             return json_str
         except json.JSONDecodeError as e:
-            if cfg.debug:
+            if cfg.debug_mode:
                 print('json loads error - add quotes', e)
             error_message = str(e)
         if balanced_str := balance_braces(json_str):
@@ -9,8 +9,6 @@ from colorama import Fore, Style
 from spinner import Spinner
 import time
 import speak
-from enum import Enum, auto
-import sys
 from config import Config
 from json_parser import fix_and_parse_json
 from ai_config import AIConfig
@@ -172,7 +170,7 @@ def load_variables(config_file="config.yaml"):
         documents = yaml.dump(config, file)
 
     prompt = data.load_prompt()
-    prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""
+    prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as a LLM and pursue simple strategies with no legal complications."""
 
     # Construct full prompt
     full_prompt = f"You are {ai_name}, {ai_role}\n{prompt_start}\n\nGOALS:\n\n"
@@ -268,6 +266,7 @@ def prompt_user():
 def parse_arguments():
     """Parses the arguments passed to the script"""
     global cfg
+    cfg.set_debug_mode(False)
     cfg.set_continuous_mode(False)
     cfg.set_speak_mode(False)
 
@@ -276,6 +275,7 @@ def parse_arguments():
     parser.add_argument('--speak', action='store_true', help='Enable Speak Mode')
     parser.add_argument('--debug', action='store_true', help='Enable Debug Mode')
     parser.add_argument('--gpt3only', action='store_true', help='Enable GPT3.5 Only Mode')
+    parser.add_argument('--gpt4only', action='store_true', help='Enable GPT4 Only Mode')
     args = parser.parse_args()
 
     if args.continuous:
@@ -290,13 +290,13 @@ def parse_arguments():
         print_to_console("Speak Mode: ", Fore.GREEN, "ENABLED")
         cfg.set_speak_mode(True)
 
-    if args.debug:
-        print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED")
-        cfg.set_debug_mode(True)
-
     if args.gpt3only:
         print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
         cfg.set_smart_llm_model(cfg.fast_llm_model)
 
+    if args.gpt4only:
+        print_to_console("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
+        cfg.set_fast_llm_model(cfg.smart_llm_model)
+
     if args.debug:
         print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED")
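Both flags work by collapsing the fast/smart model pair onto one model: --gpt3only copies the fast model into the smart slot, and the new --gpt4only does the inverse. A condensed sketch of the effect, using the defaults from the env template above:

```python
fast_llm_model, smart_llm_model = "gpt-3.5-turbo", "gpt-4"

gpt4only = True
if gpt4only:
    fast_llm_model = smart_llm_model  # "fast" calls now also go to GPT-4

print(fast_llm_model, smart_llm_model)  # gpt-4 gpt-4
```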
@@ -392,7 +392,7 @@ while True:
         f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
 
     # Execute command
-    if command_name.lower().startswith( "error" ):
+    if command_name is not None and command_name.lower().startswith( "error" ):
         result = f"Command {command_name} threw the following error: " + arguments
     elif command_name == "human_feedback":
         result = f"Human feedback: {user_input}"
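The added None guard avoids an AttributeError that would crash the main loop whenever the JSON parser yields no command name at all. Minimal reproduction of the old failure mode:

```python
command_name = None  # what a failed parse can produce
try:
    command_name.lower().startswith("error")
except AttributeError as e:
    print(e)  # 'NoneType' object has no attribute 'lower'
```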
@@ -417,4 +417,3 @@ while True:
             chat.create_chat_message(
                 "system", "Unable to execute command"))
         print_to_console("SYSTEM: ", Fore.YELLOW, "Unable to execute command")
-
@@ -54,8 +54,8 @@ class LocalCache(MemoryProviderSingleton):
         vector = vector[np.newaxis, :]
         self.data.embeddings = np.concatenate(
             [
-                vector,
                 self.data.embeddings,
+                vector,
             ],
             axis=0,
         )
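Reversing the concatenation order appends each new embedding below the existing rows rather than prepending it, so row i of the matrix keeps corresponding to the i-th text stored in the cache. A tiny numpy illustration:

```python
import numpy as np

embeddings = np.array([[1.0, 0.0]])  # row 0: embedding of the first stored text
vector = np.array([[0.0, 1.0]])      # embedding of the text being added now

embeddings = np.concatenate([embeddings, vector], axis=0)
print(embeddings)  # the new vector lands in row 1, preserving insertion order
```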
@@ -4,16 +4,33 @@ import requests
 from config import Config
 cfg = Config()
 import gtts
+import threading
+from threading import Lock, Semaphore
+
+# Default voice IDs
+default_voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"]
 
-# TODO: Nicer names for these ids
-voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"]
+# Retrieve custom voice IDs from the Config class
+custom_voice_1 = cfg.elevenlabs_voice_1_id
+custom_voice_2 = cfg.elevenlabs_voice_2_id
+
+# Placeholder values that should be treated as empty
+placeholders = {"your-voice-id"}
+
+# Use custom voice IDs if provided and not placeholders, otherwise use default voice IDs
+voices = [
+    custom_voice_1 if custom_voice_1 and custom_voice_1 not in placeholders else default_voices[0],
+    custom_voice_2 if custom_voice_2 and custom_voice_2 not in placeholders else default_voices[1]
+]
 
 tts_headers = {
     "Content-Type": "application/json",
     "xi-api-key": cfg.elevenlabs_api_key
 }
 
+mutex_lock = Lock()  # Ensure only one sound is played at a time
+queue_semaphore = Semaphore(1)  # The amount of sounds to queue before blocking the main thread
+
 def eleven_labs_speech(text, voice_index=0):
     """Speak text using elevenlabs.io's API"""
     tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format(
@@ -23,10 +40,11 @@ def eleven_labs_speech(text, voice_index=0):
         tts_url, headers=tts_headers, json=formatted_message)
 
     if response.status_code == 200:
-        with open("speech.mpeg", "wb") as f:
-            f.write(response.content)
-        playsound("speech.mpeg")
-        os.remove("speech.mpeg")
+        with mutex_lock:
+            with open("speech.mpeg", "wb") as f:
+                f.write(response.content)
+            playsound("speech.mpeg", True)
+            os.remove("speech.mpeg")
         return True
     else:
         print("Request failed with status code:", response.status_code)
@@ -35,21 +53,29 @@ def eleven_labs_speech(text, voice_index=0):
 
 def gtts_speech(text):
     tts = gtts.gTTS(text)
-    tts.save("speech.mp3")
-    playsound("speech.mp3")
-    os.remove("speech.mp3")
+    with mutex_lock:
+        tts.save("speech.mp3")
+        playsound("speech.mp3", True)
+        os.remove("speech.mp3")
 
 def macos_tts_speech(text):
     os.system(f'say "{text}"')
 
 def say_text(text, voice_index=0):
-    if not cfg.elevenlabs_api_key:
-        if cfg.use_mac_os_tts == 'True':
-            macos_tts_speech(text)
-        else:
-            gtts_speech(text)
-    else:
-        success = eleven_labs_speech(text, voice_index)
-        if not success:
-            gtts_speech(text)
+
+    def speak():
+        if not cfg.elevenlabs_api_key:
+            if cfg.use_mac_os_tts == 'True':
+                macos_tts_speech(text)
+            else:
+                gtts_speech(text)
+        else:
+            success = eleven_labs_speech(text, voice_index)
+            if not success:
+                gtts_speech(text)
+
+        queue_semaphore.release()
+
+    queue_semaphore.acquire(True)
+    thread = threading.Thread(target=speak)
+    thread.start()
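say_text now plays audio off the main thread: the semaphore (initial count 1) admits one queued utterance at a time and is released by the worker when playback finishes, while the mutex serializes the playback itself inside each backend. A self-contained sketch of the same pattern with a dummy playback step:

```python
import threading
import time
from threading import Lock, Semaphore

mutex_lock = Lock()             # only one sound plays at a time
queue_semaphore = Semaphore(1)  # how many utterances may be queued up

def say_text(text):
    def speak():
        with mutex_lock:
            time.sleep(0.1)     # stand-in for playsound(..., True)
            print("played:", text)
        queue_semaphore.release()

    queue_semaphore.acquire(True)  # blocks if the queue is already full
    threading.Thread(target=speak).start()

for phrase in ("thinking...", "done"):
    say_text(phrase)  # the caller keeps running while audio plays
```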
@@ -20,7 +20,7 @@ class Spinner:
             sys.stdout.write(next(self.spinner) + " " + self.message + "\r")
             sys.stdout.flush()
             time.sleep(self.delay)
-        sys.stdout.write('\b' * (len(self.message) + 2))
+        sys.stdout.write('\r' + ' ' * (len(self.message) + 2) + '\r')
 
     def __enter__(self):
         """Start the spinner"""
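The spinner fix replaces backspaces with a carriage-return overwrite: '\b' only moves the cursor left and can leave stale glyphs visible in some terminals, whereas '\r' plus spaces actively blanks the line before returning the cursor to column 0. Quick demonstration:

```python
import sys
import time

message = "Thinking..."
sys.stdout.write("- " + message + "\r")
sys.stdout.flush()
time.sleep(0.5)
# Blank the whole line (spinner char + space + message), then rewind the cursor.
sys.stdout.write('\r' + ' ' * (len(message) + 2) + '\r')
sys.stdout.flush()
```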
tests/integration/memory_tests.py (new file, 49 lines)
@@ -0,0 +1,49 @@
+import unittest
+import random
+import string
+import sys
+from pathlib import Path
+# Add the parent directory of the 'scripts' folder to the Python path
+sys.path.append(str(Path(__file__).resolve().parent.parent.parent / 'scripts'))
+from config import Config
+from memory.local import LocalCache
+
+class TestLocalCache(unittest.TestCase):
+
+    def random_string(self, length):
+        return ''.join(random.choice(string.ascii_letters) for _ in range(length))
+
+    def setUp(self):
+        cfg = cfg = Config()
+        self.cache = LocalCache(cfg)
+        self.cache.clear()
+
+        # Add example texts to the cache
+        self.example_texts = [
+            'The quick brown fox jumps over the lazy dog',
+            'I love machine learning and natural language processing',
+            'The cake is a lie, but the pie is always true',
+            'ChatGPT is an advanced AI model for conversation'
+        ]
+
+        for text in self.example_texts:
+            self.cache.add(text)
+
+        # Add some random strings to test noise
+        for _ in range(5):
+            self.cache.add(self.random_string(10))
+
+    def test_get_relevant(self):
+        query = "I'm interested in artificial intelligence and NLP"
+        k = 3
+        relevant_texts = self.cache.get_relevant(query, k)
+
+        print(f"Top {k} relevant texts for the query '{query}':")
+        for i, text in enumerate(relevant_texts, start=1):
+            print(f"{i}. {text}")
+
+        self.assertEqual(len(relevant_texts), k)
+        self.assertIn(self.example_texts[1], relevant_texts)
+
+if __name__ == '__main__':
+    unittest.main()
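Since the new test prepends scripts/ to sys.path itself, it runs standalone via its __main__ block. One way to load and run it programmatically from the repo root (a sketch; plain `python tests/integration/memory_tests.py` is equivalent):

```python
import importlib.util
import unittest

spec = importlib.util.spec_from_file_location(
    "memory_tests", "tests/integration/memory_tests.py")
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
unittest.main(module=module, exit=False, verbosity=2)
```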