add my fixes

Author: Wlad
Date: 2023-04-12 10:56:54 +02:00
Parent: edf364efe8
Commit: 4063483b87

3 changed files with 15 additions and 9 deletions


@@ -64,14 +64,14 @@ def chat_with_ai(
     model = cfg.fast_llm_model # TODO: Change model from hardcode to argument
     # Reserve 1000 tokens for the response
-    if cfg.debug:
+    if cfg.debug_mode:
         print(f"Token limit: {token_limit}")
     send_token_limit = token_limit - 1000
-    relevant_memory = permanent_memory.get_relevant(str(full_message_history[-5:]), 10)
+    relevant_memory = permanent_memory.get_relevant(str(full_message_history[-9:]), 10)
-    if cfg.debug:
+    if cfg.debug_mode:
         print('Memory Stats: ', permanent_memory.get_stats())
     next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context(
@@ -110,7 +110,7 @@ def chat_with_ai(
     # assert tokens_remaining >= 0, "Tokens remaining is negative. This should never happen, please submit a bug report at https://www.github.com/Torantulino/Auto-GPT"
     # Debug print the current context
-    if cfg.debug:
+    if cfg.debug_mode:
         print(f"Token limit: {token_limit}")
         print(f"Send Token Count: {current_tokens_used}")
         print(f"Tokens remaining for response: {tokens_remaining}")


@@ -67,7 +67,7 @@ def print_to_console(
     print()
 
 def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
-    if cfg.speak_mode and cfg.debug:
+    if cfg.speak_mode and cfg.debug_mode:
         speak.say_text("I have received an invalid JSON response from the OpenAI API. Trying to fix it now.")
         print_to_console("Attempting to fix JSON by finding outermost brackets\n", Fore.RED, "")
@@ -81,7 +81,7 @@ def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
             # Extract the valid JSON object from the string
             json_string = json_match.group(0)
             print_to_console("Apparently json was fixed.", Fore.GREEN,"")
-            if cfg.speak_mode and cfg.debug:
+            if cfg.speak_mode and cfg.debug_mode:
                 speak.say_text("Apparently json was fixed.")
         else:
             raise ValueError("No valid JSON object found")


@@ -46,15 +46,21 @@ def gtts_speech(text):
playsound("speech.mp3", True) playsound("speech.mp3", True)
os.remove("speech.mp3") os.remove("speech.mp3")
def macos_tts_speech(text): def macos_tts_speech(text, voice_index=0):
if voice_index == 0:
os.system(f'say "{text}"') os.system(f'say "{text}"')
else:
if voice_index == 1:
os.system(f'say -v "Ava (Premium)" "{text}"')
else:
os.system(f'say -v Samantha "{text}"')
def say_text(text, voice_index=0): def say_text(text, voice_index=0):
def speak(): def speak():
if not cfg.elevenlabs_api_key: if not cfg.elevenlabs_api_key:
if cfg.use_mac_os_tts == 'True': if cfg.use_mac_os_tts == 'True':
macos_tts_speech(text) macos_tts_speech(text, voice_index)
else: else:
gtts_speech(text) gtts_speech(text)
else: else:
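Behaviour of the new voice_index parameter, as a standalone sketch (macOS only; the "Ava (Premium)" voice must be installed for index 1, and the patch's nested if/else is flattened to elif here for brevity):

    import os

    def macos_tts_speech(text, voice_index=0):
        # Mirrors the patched function: 0 = system default voice,
        # 1 = "Ava (Premium)", anything else falls back to Samantha.
        if voice_index == 0:
            os.system(f'say "{text}"')
        elif voice_index == 1:
            os.system(f'say -v "Ava (Premium)" "{text}"')
        else:
            os.system(f'say -v Samantha "{text}"')

    # Usage, matching how say_text now forwards its voice_index:
    macos_tts_speech("Autonomous agent online.", voice_index=1)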