From 546d8783e78096d737351fca00d2cd701b9b72e5 Mon Sep 17 00:00:00 2001
From: Alexander Nikulin
Date: Sun, 9 Apr 2023 14:33:30 +0400
Subject: [PATCH] put debug setting into cfg and use it when calling
 chat.chat_with_ai and fix_json

---
 scripts/config.py      | 4 ++++
 scripts/json_parser.py | 2 +-
 scripts/main.py        | 6 +++++-
 3 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/scripts/config.py b/scripts/config.py
index d5f1a3f0..4d7adec1 100644
--- a/scripts/config.py
+++ b/scripts/config.py
@@ -31,6 +31,7 @@ class Config(metaclass=Singleton):
     """
 
     def __init__(self):
+        self.debug = False
         self.continuous_mode = False
         self.speak_mode = False
         # TODO - make these models be self-contained, using langchain, so we can configure them once and call it good
@@ -110,3 +111,6 @@ class Config(metaclass=Singleton):
 
     def set_pinecone_region(self, value: str):
         self.pinecone_region = value
+
+    def set_debug_mode(self, value: bool):
+        self.debug = value
diff --git a/scripts/json_parser.py b/scripts/json_parser.py
index 8ec9238b..c863ccdb 100644
--- a/scripts/json_parser.py
+++ b/scripts/json_parser.py
@@ -40,7 +40,7 @@ def fix_and_parse_json(json_str: str, try_to_fix_with_gpt: bool = True):
         if try_to_fix_with_gpt:
             print(f"Warning: Failed to parse AI output, attempting to fix.\n If you see this warning frequently, it's likely that your prompt is confusing the AI. Try changing it up slightly.")
             # Now try to fix this up using the ai_functions
-            ai_fixed_json = fix_json(json_str, json_schema, False)
+            ai_fixed_json = fix_json(json_str, json_schema, cfg.debug)
             if ai_fixed_json != "failed":
                 return json.loads(ai_fixed_json)
             else:
diff --git a/scripts/main.py b/scripts/main.py
index a0a1898c..f96afeb1 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -266,6 +266,10 @@ def parse_arguments():
         print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
         cfg.set_smart_llm_model(cfg.fast_llm_model)
 
+    if args.debug:
+        print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED")
+        cfg.set_debug_mode(True)
+
     # TODO: fill in llm values here
 
 
@@ -295,7 +299,7 @@ while True:
             user_input,
             full_message_history,
             memory,
-            cfg.fast_token_limit) # TODO: This hardcodes the model to use GPT3.5. Make this an argument
+            cfg.fast_token_limit, cfg.debug) # TODO: This hardcodes the model to use GPT3.5. Make this an argument
 
     # Print Assistant thoughts
     print_assistant_thoughts(assistant_reply)
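-- 
A minimal sketch of how the new flag threads through once this patch is
applied. It is illustrative only, not part of the patch: the `--debug` CLI
option name is assumed (the hunks call `args.debug` but do not show where
the argparse option is registered), and `scripts/` is assumed to be on
sys.path:

    from config import Config  # scripts/config.py as patched above

    cfg = Config()              # Singleton, so the same instance everywhere
    print(cfg.debug)            # False by default (new attribute)
    cfg.set_debug_mode(True)    # what parse_arguments() does for --debug
    print(cfg.debug)            # True

    # fix_and_parse_json() now forwards this flag instead of hardcoding
    # False, i.e. fix_json(json_str, json_schema, cfg.debug), and the main
    # loop passes it as the trailing argument to chat.chat_with_ai(), so a
    # single --debug switch controls verbosity in both places.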