Merge remote-tracking branch 'upstream/master'

Itamar Friedman
2023-04-11 14:47:28 +03:00
6 changed files with 11 additions and 23 deletions

View File

@@ -92,4 +92,3 @@ class AIConfig:
        full_prompt += f"\n\n{data.load_prompt()}"
        return full_prompt

View File

@@ -63,10 +63,10 @@ def chat_with_ai(
""" """
model = cfg.fast_llm_model # TODO: Change model from hardcode to argument model = cfg.fast_llm_model # TODO: Change model from hardcode to argument
# Reserve 1000 tokens for the response # Reserve 1000 tokens for the response
if cfg.debug: if cfg.debug:
print(f"Token limit: {token_limit}") print(f"Token limit: {token_limit}")
send_token_limit = token_limit - 1000 send_token_limit = token_limit - 1000
relevant_memory = permanent_memory.get_relevant(str(full_message_history[-5:]), 10) relevant_memory = permanent_memory.get_relevant(str(full_message_history[-5:]), 10)
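The context above keeps chat_with_ai's existing budgeting: reserve 1000 tokens for the model's reply and pull memory relevant to the last five messages. A minimal sketch of that reservation step, assuming only the names visible in the hunk; reserve_response_tokens is a hypothetical helper, not code from this diff:

def reserve_response_tokens(token_limit: int, response_budget: int = 1000) -> int:
    # Keep a fixed budget free for the model's reply; the remainder can be spent on the prompt.
    return token_limit - response_budget

# Usage mirroring the hunk:
# send_token_limit = reserve_response_tokens(token_limit)
# relevant_memory = permanent_memory.get_relevant(str(full_message_history[-5:]), 10)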

View File

@@ -33,7 +33,7 @@ class Config(metaclass=Singleton):
    def __init__(self):
        """Initialize the Config class"""
-        self.debug = False
+        self.debug_mode = False
        self.continuous_mode = False
        self.speak_mode = False
@@ -89,9 +89,6 @@ class Config(metaclass=Singleton):
"""Set the speak mode value.""" """Set the speak mode value."""
self.speak_mode = value self.speak_mode = value
def set_debug_mode(self, value: bool):
self.debug_mode = value
def set_fast_llm_model(self, value: str): def set_fast_llm_model(self, value: str):
"""Set the fast LLM model value.""" """Set the fast LLM model value."""
self.fast_llm_model = value self.fast_llm_model = value
@@ -134,4 +131,4 @@ class Config(metaclass=Singleton):
    def set_debug_mode(self, value: bool):
        """Set the debug mode value."""
-        self.debug = value
+        self.debug_mode = value
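Taken together, the three config.py hunks rename the flag from debug to debug_mode and drop the duplicate set_debug_mode definition, leaving one setter that writes the renamed attribute. A minimal sketch of the resulting shape, assuming a standard Singleton metaclass (the stub below is an assumption; other Config attributes are omitted):

class Singleton(type):
    # Stand-in for the project's Singleton metaclass; not part of this diff.
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]


class Config(metaclass=Singleton):
    """Debug-related members as they stand after the merge."""

    def __init__(self):
        self.debug_mode = False  # renamed from self.debug
        self.continuous_mode = False
        self.speak_mode = False

    def set_debug_mode(self, value: bool):
        """Set the debug mode value."""
        self.debug_mode = value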

View File

@@ -71,11 +71,11 @@ def fix_and_parse_json(
            return json_str
        else:
            raise e


def fix_json(json_str: str, schema: str) -> str:
    """Fix the given JSON string to make it parseable and fully complient with the provided schema."""
    # Try to fix the JSON using gpt:
    function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
    args = [f"'''{json_str}'''", f"'''{schema}'''"]

View File

@@ -88,7 +88,7 @@ def fix_invalid_escape(json_str: str, error_message: str) -> str:
            json.loads(json_str)
            return json_str
        except json.JSONDecodeError as e:
-            if cfg.debug:
+            if cfg.debug_mode:
                print('json loads error - fix invalid escape', e)
            error_message = str(e)
    return json_str
@@ -103,12 +103,12 @@ def correct_json(json_str: str) -> str:
""" """
try: try:
if cfg.debug: if cfg.debug_mode:
print("json", json_str) print("json", json_str)
json.loads(json_str) json.loads(json_str)
return json_str return json_str
except json.JSONDecodeError as e: except json.JSONDecodeError as e:
if cfg.debug: if cfg.debug_mode:
print('json loads error', e) print('json loads error', e)
error_message = str(e) error_message = str(e)
if error_message.startswith('Invalid \\escape'): if error_message.startswith('Invalid \\escape'):
@@ -119,7 +119,7 @@ def correct_json(json_str: str) -> str:
                json.loads(json_str)
                return json_str
            except json.JSONDecodeError as e:
-                if cfg.debug:
+                if cfg.debug_mode:
                    print('json loads error - add quotes', e)
                error_message = str(e)
        if balanced_str := balance_braces(json_str):
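All three json_utils hunks make the same one-line change: the verbose-logging guard now checks cfg.debug_mode instead of cfg.debug, matching the renamed Config attribute. A minimal, self-contained sketch of the pattern; _Cfg and try_parse are hypothetical stand-ins, not code from this diff:

import json


class _Cfg:
    # Stand-in for the project's Config singleton; only the flag this sketch needs.
    debug_mode = False


cfg = _Cfg()


def try_parse(json_str: str) -> str:
    """Return json_str either way, logging decode errors only when debug mode is on."""
    try:
        json.loads(json_str)
    except json.JSONDecodeError as e:
        if cfg.debug_mode:
            print('json loads error', e)
    return json_str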

View File

@@ -288,17 +288,10 @@ def parse_arguments():
print_to_console("Speak Mode: ", Fore.GREEN, "ENABLED") print_to_console("Speak Mode: ", Fore.GREEN, "ENABLED")
cfg.set_speak_mode(True) cfg.set_speak_mode(True)
if args.debug:
print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED")
cfg.set_debug_mode(True)
if args.gpt3only: if args.gpt3only:
print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED") print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
cfg.set_smart_llm_model(cfg.fast_llm_model) cfg.set_smart_llm_model(cfg.fast_llm_model)
if args.debug:
print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED")
cfg.set_debug_mode(True)
# TODO: fill in llm values here # TODO: fill in llm values here
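This hunk removes both copies of the args.debug handling that ended up in parse_arguments, presumably because the merge brought in one copy from each branch. The removed block followed the usual argparse wiring; a hypothetical sketch of that pattern (parse_debug_flag and its signature are assumptions, not code from this diff):

import argparse


def parse_debug_flag(cfg, argv=None):
    # Map a --debug CLI flag onto the Config setter, as the removed block did.
    parser = argparse.ArgumentParser()
    parser.add_argument('--debug', action='store_true', help='Enable debug mode')
    args = parser.parse_args(argv)
    if args.debug:
        cfg.set_debug_mode(True)
    return args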
@@ -415,4 +408,3 @@ while True:
        chat.create_chat_message(
            "system", "Unable to execute command"))
    print_to_console("SYSTEM: ", Fore.YELLOW, "Unable to execute command")