Merge remote-tracking branch 'upstream/master' into fix-user-feedback-json-error

Author: Wlad
Date: 2023-04-11 23:32:31 +02:00

10 changed files with 46 additions and 45 deletions

.gitignore

@@ -12,3 +12,4 @@ outputs/*
 ai_settings.yaml
 .vscode
 auto-gpt.json
+log.txt


@@ -92,4 +92,3 @@ class AIConfig:
         full_prompt += f"\n\n{data.load_prompt()}"
         return full_prompt
-


@@ -15,7 +15,7 @@ def scrape_text(url):
     # Most basic check if the URL is valid:
     if not url.startswith('http'):
         return "Error: Invalid URL"
 
     # Restrict access to local files
     if check_local_file_access(url):
         return "Error: Access to local files is restricted"


@@ -63,10 +63,10 @@ def chat_with_ai(
""" """
model = cfg.fast_llm_model # TODO: Change model from hardcode to argument model = cfg.fast_llm_model # TODO: Change model from hardcode to argument
# Reserve 1000 tokens for the response # Reserve 1000 tokens for the response
if cfg.debug: if cfg.debug:
print(f"Token limit: {token_limit}") print(f"Token limit: {token_limit}")
send_token_limit = token_limit - 1000 send_token_limit = token_limit - 1000
relevant_memory = permanent_memory.get_relevant(str(full_message_history[-5:]), 10) relevant_memory = permanent_memory.get_relevant(str(full_message_history[-5:]), 10)
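
A worked example of the 1000-token reservation above, assuming a 4,096-token context window (e.g. gpt-3.5-turbo); the numbers are illustrative:

token_limit = 4096                     # assumed context window size
send_token_limit = token_limit - 1000  # tokens left for prompt + history
print(send_token_limit)                # 3096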


@@ -33,7 +33,7 @@ class Config(metaclass=Singleton):
     def __init__(self):
         """Initialize the Config class"""
-        self.debug = False
+        self.debug_mode = False
         self.continuous_mode = False
         self.speak_mode = False
@@ -89,9 +89,6 @@ class Config(metaclass=Singleton):
"""Set the speak mode value.""" """Set the speak mode value."""
self.speak_mode = value self.speak_mode = value
def set_debug_mode(self, value: bool):
self.debug_mode = value
def set_fast_llm_model(self, value: str): def set_fast_llm_model(self, value: str):
"""Set the fast LLM model value.""" """Set the fast LLM model value."""
self.fast_llm_model = value self.fast_llm_model = value
@@ -134,4 +131,4 @@ class Config(metaclass=Singleton):
     def set_debug_mode(self, value: bool):
         """Set the debug mode value."""
-        self.debug = value
+        self.debug_mode = value
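
After this change a single set_debug_mode writes to a single debug_mode attribute, and because Config uses a Singleton metaclass, every module that toggles or reads the flag sees the same value. A minimal runnable sketch of that behaviour (the metaclass below is a stand-in, not the project's exact implementation):

class Singleton(type):
    """Stand-in for the project's Singleton metaclass."""
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]

class Config(metaclass=Singleton):
    def __init__(self):
        self.debug_mode = False

    def set_debug_mode(self, value: bool):
        """Set the debug mode value."""
        self.debug_mode = value

a, b = Config(), Config()
a.set_debug_mode(True)
print(b.debug_mode)  # True -- both names refer to the same instance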


@@ -24,7 +24,7 @@ def read_file(filename):
"""Read a file and return the contents""" """Read a file and return the contents"""
try: try:
filepath = safe_join(working_directory, filename) filepath = safe_join(working_directory, filename)
with open(filepath, "r") as f: with open(filepath, "r", encoding='utf-8') as f:
content = f.read() content = f.read()
return content return content
except Exception as e: except Exception as e:
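
The explicit encoding matters because open() otherwise falls back to the platform's locale encoding (e.g. cp1252 on Windows), so UTF-8 files with non-ASCII text can raise UnicodeDecodeError. A small self-contained demonstration:

with open("example.txt", "w", encoding="utf-8") as f:
    f.write("naïve café")

with open("example.txt", "r", encoding="utf-8") as f:
    print(f.read())  # decodes correctly regardless of the OS locale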


@@ -71,11 +71,11 @@ def fix_and_parse_json(
             return json_str
         else:
             raise e
 
 
 def fix_json(json_str: str, schema: str) -> str:
     """Fix the given JSON string to make it parseable and fully complient with the provided schema."""
     # Try to fix the JSON using gpt:
     function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
     args = [f"'''{json_str}'''", f"'''{schema}'''"]


@@ -76,7 +76,7 @@ def balance_braces(json_string: str) -> str:
         json.loads(json_string)
         return json_string
     except json.JSONDecodeError as e:
-        raise e
+        pass
 
 
 def fix_invalid_escape(json_str: str, error_message: str) -> str:
@@ -88,7 +88,7 @@ def fix_invalid_escape(json_str: str, error_message: str) -> str:
             json.loads(json_str)
             return json_str
         except json.JSONDecodeError as e:
-            if cfg.debug:
+            if cfg.debug_mode:
                 print('json loads error - fix invalid escape', e)
             error_message = str(e)
     return json_str
@@ -103,12 +103,12 @@ def correct_json(json_str: str) -> str:
""" """
try: try:
if cfg.debug: if cfg.debug_mode:
print("json", json_str) print("json", json_str)
json.loads(json_str) json.loads(json_str)
return json_str return json_str
except json.JSONDecodeError as e: except json.JSONDecodeError as e:
if cfg.debug: if cfg.debug_mode:
print('json loads error', e) print('json loads error', e)
error_message = str(e) error_message = str(e)
if error_message.startswith('Invalid \\escape'): if error_message.startswith('Invalid \\escape'):
@@ -119,7 +119,7 @@ def correct_json(json_str: str) -> str:
             json.loads(json_str)
             return json_str
         except json.JSONDecodeError as e:
-            if cfg.debug:
+            if cfg.debug_mode:
                 print('json loads error - add quotes', e)
             error_message = str(e)
     if balanced_str := balance_braces(json_str):
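
The raise-to-pass change means balance_braces now returns None on still-invalid JSON instead of propagating the decode error, so the walrus check `if balanced_str := balance_braces(json_str):` takes its fallback branch rather than crashing. A simplified, self-contained sketch of that behaviour (the brace-counting body is an assumption about code outside this hunk):

import json

def balance_braces_sketch(json_string):
    # Pad missing closing braces (simplified stand-in for balance_braces).
    json_string += '}' * (json_string.count('{') - json_string.count('}'))
    try:
        json.loads(json_string)
        return json_string
    except json.JSONDecodeError:
        pass  # falls through and implicitly returns None

print(balance_braces_sketch('{"a": 1'))  # {"a": 1}
print(balance_braces_sketch('{"a": '))   # None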


@@ -9,8 +9,6 @@ from colorama import Fore, Style
 from spinner import Spinner
 import time
 import speak
-from enum import Enum, auto
-import sys
 from config import Config
 from json_parser import fix_and_parse_json
 from ai_config import AIConfig
@@ -207,7 +205,7 @@ def load_variables(config_file="config.yaml"):
         documents = yaml.dump(config, file)
 
     prompt = data.load_prompt()
-    prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""
+    prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as a LLM and pursue simple strategies with no legal complications."""
 
     # Construct full prompt
     full_prompt = f"You are {ai_name}, {ai_role}\n{prompt_start}\n\nGOALS:\n\n"
@@ -325,17 +323,10 @@ def parse_arguments():
print_to_console("Speak Mode: ", Fore.GREEN, "ENABLED") print_to_console("Speak Mode: ", Fore.GREEN, "ENABLED")
cfg.set_speak_mode(True) cfg.set_speak_mode(True)
if args.debug:
print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED")
cfg.set_debug_mode(True)
if args.gpt3only: if args.gpt3only:
print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED") print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
cfg.set_smart_llm_model(cfg.fast_llm_model) cfg.set_smart_llm_model(cfg.fast_llm_model)
if args.debug:
print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED")
cfg.set_debug_mode(True)
# TODO: fill in llm values here # TODO: fill in llm values here
@@ -454,4 +445,3 @@ while True:
                 chat.create_chat_message(
                     "system", "Unable to execute command"))
             print_to_console("SYSTEM: ", Fore.YELLOW, "Unable to execute command")
-


@@ -4,6 +4,8 @@ import requests
 from config import Config
 cfg = Config()
 import gtts
+import threading
+from threading import Lock, Semaphore
 
 # TODO: Nicer names for these ids
@@ -14,6 +16,9 @@ tts_headers = {
"xi-api-key": cfg.elevenlabs_api_key "xi-api-key": cfg.elevenlabs_api_key
} }
mutex_lock = Lock() # Ensure only one sound is played at a time
queue_semaphore = Semaphore(1) # The amount of sounds to queue before blocking the main thread
def eleven_labs_speech(text, voice_index=0): def eleven_labs_speech(text, voice_index=0):
"""Speak text using elevenlabs.io's API""" """Speak text using elevenlabs.io's API"""
tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format( tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format(
@@ -23,10 +28,11 @@ def eleven_labs_speech(text, voice_index=0):
         tts_url, headers=tts_headers, json=formatted_message)
 
     if response.status_code == 200:
-        with open("speech.mpeg", "wb") as f:
-            f.write(response.content)
-        playsound("speech.mpeg")
-        os.remove("speech.mpeg")
+        with mutex_lock:
+            with open("speech.mpeg", "wb") as f:
+                f.write(response.content)
+            playsound("speech.mpeg", True)
+            os.remove("speech.mpeg")
         return True
     else:
         print("Request failed with status code:", response.status_code)
@@ -35,21 +41,29 @@ def eleven_labs_speech(text, voice_index=0):
 
 def gtts_speech(text):
     tts = gtts.gTTS(text)
-    tts.save("speech.mp3")
-    playsound("speech.mp3")
-    os.remove("speech.mp3")
+    with mutex_lock:
+        tts.save("speech.mp3")
+        playsound("speech.mp3", True)
+        os.remove("speech.mp3")
 
 def macos_tts_speech(text):
     os.system(f'say "{text}"')
 
 def say_text(text, voice_index=0):
-    if not cfg.elevenlabs_api_key:
-        if cfg.use_mac_os_tts == 'True':
-            macos_tts_speech(text)
-        else:
-            gtts_speech(text)
-    else:
-        success = eleven_labs_speech(text, voice_index)
-        if not success:
-            gtts_speech(text)
+
+    def speak():
+        if not cfg.elevenlabs_api_key:
+            if cfg.use_mac_os_tts == 'True':
+                macos_tts_speech(text)
+            else:
+                gtts_speech(text)
+        else:
+            success = eleven_labs_speech(text, voice_index)
+            if not success:
+                gtts_speech(text)
+
+        queue_semaphore.release()
+
+    queue_semaphore.acquire(True)
+    thread = threading.Thread(target=speak)
+    thread.start()
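
As a standalone illustration of the concurrency pattern this diff introduces, here is a minimal runnable sketch: Semaphore(1) bounds how many speech jobs can be queued before the caller blocks, and the Lock serializes actual playback so clips never overlap (time.sleep stands in for playsound):

import threading
import time
from threading import Lock, Semaphore

mutex_lock = Lock()
queue_semaphore = Semaphore(1)

def play(clip):
    with mutex_lock:           # only one sound plays at a time
        time.sleep(0.1)        # stand-in for playsound(...)
        print("played", clip)

def say(clip):
    def worker():
        play(clip)
        queue_semaphore.release()  # free the queue slot when done

    queue_semaphore.acquire(True)  # block the caller if the queue is full
    threading.Thread(target=worker).start()

for i in range(3):
    say(f"clip-{i}")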