From 5b2d6010dc59bab1026d13bfcd75b37618e573b9 Mon Sep 17 00:00:00 2001
From: kinance
Date: Mon, 10 Apr 2023 20:10:11 +0900
Subject: [PATCH 1/3] Resolve the conflict around debug mode flag after pull merge

---
 scripts/config.py     | 7 ++-----
 scripts/json_utils.py | 8 ++++----
 scripts/main.py       | 5 +----
 3 files changed, 7 insertions(+), 13 deletions(-)

diff --git a/scripts/config.py b/scripts/config.py
index 1eb74b2b..50432c42 100644
--- a/scripts/config.py
+++ b/scripts/config.py
@@ -33,7 +33,7 @@ class Config(metaclass=Singleton):
 
     def __init__(self):
         """Initialize the Config class"""
-        self.debug = False
+        self.debug_mode = False
         self.continuous_mode = False
         self.speak_mode = False
 
@@ -86,9 +86,6 @@ class Config(metaclass=Singleton):
         """Set the speak mode value."""
         self.speak_mode = value
 
-    def set_debug_mode(self, value: bool):
-        self.debug_mode = value
-
     def set_fast_llm_model(self, value: str):
         """Set the fast LLM model value."""
         self.fast_llm_model = value
@@ -131,4 +128,4 @@
 
     def set_debug_mode(self, value: bool):
         """Set the debug mode value."""
-        self.debug = value
+        self.debug_mode = value
diff --git a/scripts/json_utils.py b/scripts/json_utils.py
index b3ffe4b9..9f26970e 100644
--- a/scripts/json_utils.py
+++ b/scripts/json_utils.py
@@ -88,7 +88,7 @@ def fix_invalid_escape(json_str: str, error_message: str) -> str:
             json.loads(json_str)
             return json_str
         except json.JSONDecodeError as e:
-            if cfg.debug:
+            if cfg.debug_mode:
                 print('json loads error - fix invalid escape', e)
             error_message = str(e)
     return json_str
@@ -103,12 +103,12 @@ def correct_json(json_str: str) -> str:
     """
 
     try:
-        if cfg.debug:
+        if cfg.debug_mode:
             print("json", json_str)
         json.loads(json_str)
         return json_str
     except json.JSONDecodeError as e:
-        if cfg.debug:
+        if cfg.debug_mode:
             print('json loads error', e)
         error_message = str(e)
         if error_message.startswith('Invalid \\escape'):
@@ -119,7 +119,7 @@ def correct_json(json_str: str) -> str:
             json.loads(json_str)
             return json_str
         except json.JSONDecodeError as e:
-            if cfg.debug:
+            if cfg.debug_mode:
                 print('json loads error - add quotes', e)
             error_message = str(e)
         if balanced_str := balance_braces(json_str):
diff --git a/scripts/main.py b/scripts/main.py
index 844c2375..34750fa0 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -285,9 +285,6 @@ def parse_arguments():
         print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
         cfg.set_smart_llm_model(cfg.fast_llm_model)
 
-    if args.debug:
-        print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED")
-        cfg.set_debug_mode(True)
 
 
 # TODO: fill in llm values here
@@ -318,7 +315,7 @@
             user_input,
             full_message_history,
             memory,
-            cfg.fast_token_limit, cfg.debug) # TODO: This hardcodes the model to use GPT3.5. Make this an argument
+            cfg.fast_token_limit) # TODO: This hardcodes the model to use GPT3.5. Make this an argument
 
     # Print Assistant thoughts
     print_assistant_thoughts(assistant_reply)

From d12da33e55ec026be8cc5efdcaab4172e8d5631e Mon Sep 17 00:00:00 2001
From: Andy Melnikov
Date: Mon, 10 Apr 2023 18:46:40 +0200
Subject: [PATCH 2/3] Fix flake8 W293 and W391

---
 scripts/ai_config.py   | 1 -
 scripts/browse.py      | 2 +-
 scripts/chat.py        | 4 ++--
 scripts/json_parser.py | 6 +++---
 scripts/main.py        | 1 -
 5 files changed, 6 insertions(+), 8 deletions(-)

diff --git a/scripts/ai_config.py b/scripts/ai_config.py
index 1d5832c1..2a4854cb 100644
--- a/scripts/ai_config.py
+++ b/scripts/ai_config.py
@@ -92,4 +92,3 @@ class AIConfig:
 
         full_prompt += f"\n\n{data.load_prompt()}"
         return full_prompt
-
diff --git a/scripts/browse.py b/scripts/browse.py
index 09f376a7..b0c745ef 100644
--- a/scripts/browse.py
+++ b/scripts/browse.py
@@ -15,7 +15,7 @@ def scrape_text(url):
     # Most basic check if the URL is valid:
     if not url.startswith('http'):
         return "Error: Invalid URL"
-    
+
     # Restrict access to local files
     if check_local_file_access(url):
         return "Error: Access to local files is restricted"
diff --git a/scripts/chat.py b/scripts/chat.py
index c00e4d4a..23e5b501 100644
--- a/scripts/chat.py
+++ b/scripts/chat.py
@@ -63,10 +63,10 @@ def chat_with_ai(
     """
     model = cfg.fast_llm_model # TODO: Change model from hardcode to argument
     # Reserve 1000 tokens for the response
-    
+
     if cfg.debug:
         print(f"Token limit: {token_limit}")
-    
+
     send_token_limit = token_limit - 1000
 
     relevant_memory = permanent_memory.get_relevant(str(full_message_history[-5:]), 10)
diff --git a/scripts/json_parser.py b/scripts/json_parser.py
index 1fd68244..8c17dfa2 100644
--- a/scripts/json_parser.py
+++ b/scripts/json_parser.py
@@ -71,11 +71,11 @@ def fix_and_parse_json(
             return json_str
         else:
             raise e
-
-
+
+
 def fix_json(json_str: str, schema: str) -> str:
     """Fix the given JSON string to make it parseable and fully complient with the provided schema."""
-
+
     # Try to fix the JSON using gpt:
     function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
     args = [f"'''{json_str}'''", f"'''{schema}'''"]
diff --git a/scripts/main.py b/scripts/main.py
index 8661bfad..21746118 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -415,4 +415,3 @@ while True:
             chat.create_chat_message(
                 "system", "Unable to execute command"))
         print_to_console("SYSTEM: ", Fore.YELLOW, "Unable to execute command")
-

From b06974904c5ce48da1681ef2d8a362cde59e90de Mon Sep 17 00:00:00 2001
From: kinance
Date: Tue, 11 Apr 2023 19:26:23 +0900
Subject: [PATCH 3/3] Remove duplicates of set debug mode func

---
 scripts/config.py | 3 ---
 scripts/main.py   | 4 ----
 2 files changed, 7 deletions(-)

diff --git a/scripts/config.py b/scripts/config.py
index 27cc946c..c9a285ac 100644
--- a/scripts/config.py
+++ b/scripts/config.py
@@ -89,9 +89,6 @@ class Config(metaclass=Singleton):
         """Set the speak mode value."""
         self.speak_mode = value
 
-    def set_debug_mode(self, value: bool):
-        self.debug_mode = value
-
     def set_fast_llm_model(self, value: str):
         """Set the fast LLM model value."""
         self.fast_llm_model = value
diff --git a/scripts/main.py b/scripts/main.py
index 8661bfad..6afcdf55 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -288,10 +288,6 @@ def parse_arguments():
         print_to_console("Speak Mode: ", Fore.GREEN, "ENABLED")
         cfg.set_speak_mode(True)
 
-    if args.debug:
-        print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED")
-        cfg.set_debug_mode(True)
-
     if args.gpt3only:
         print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
         cfg.set_smart_llm_model(cfg.fast_llm_model)