Revised to support debug mode from command line

Author: kinance
Date: 2023-04-08 12:39:57 +09:00
Commit: 91fe21e64d
Parent: c0aacac72e

5 changed files with 14 additions and 5 deletions

.gitignore

@@ -9,3 +9,4 @@ auto_gpt_workspace/*
 .env
 outputs/*
 ai_settings.yaml
+.venv/*


@@ -62,13 +62,13 @@ def chat_with_ai(
     """
     model = cfg.fast_llm_model # TODO: Change model from hardcode to argument
     # Reserve 1000 tokens for the response
-    if debug:
+    if cfg.debug_mode:
         print(f"Token limit: {token_limit}")
     send_token_limit = token_limit - 1000

     relevant_memory = permanent_memory.get_relevant(str(full_message_history[-5:]), 10)

-    if debug:
+    if cfg.debug_mode:
         print('Memory Stats: ', permanent_memory.get_stats())

     next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context(
@@ -107,7 +107,7 @@ def chat_with_ai(
     # assert tokens_remaining >= 0, "Tokens remaining is negative. This should never happen, please submit a bug report at https://www.github.com/Torantulino/Auto-GPT"

     # Debug print the current context
-    if debug:
+    if cfg.debug_mode:
         print(f"Token limit: {token_limit}")
         print(f"Send Token Count: {current_tokens_used}")
         print(f"Tokens remaining for response: {tokens_remaining}")


@@ -28,6 +28,7 @@ class Config(metaclass=Singleton):
     def __init__(self):
         self.continuous_mode = False
         self.speak_mode = False
+        self.debug_mode = False
         # TODO - make these models be self-contained, using langchain, so we can configure them once and call it good
         self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
         self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
@@ -66,6 +67,9 @@ class Config(metaclass=Singleton):
     def set_speak_mode(self, value: bool):
         self.speak_mode = value

+    def set_debug_mode(self, value: bool):
+        self.debug_mode = value
+
     def set_fast_llm_model(self, value: str):
         self.fast_llm_model = value
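Config is declared with metaclass=Singleton, which is what makes set_debug_mode(True) visible everywhere: every Config() call returns the same instance. The Singleton metaclass itself is not part of this diff; the sketch below is an illustrative reconstruction, not the repo's exact code:

    # Illustrative Singleton metaclass (assumed, not shown in this diff).
    class Singleton(type):
        _instances = {}

        def __call__(cls, *args, **kwargs):
            # Create the instance once, then hand back the cached one.
            if cls not in cls._instances:
                cls._instances[cls] = super().__call__(*args, **kwargs)
            return cls._instances[cls]

    class Config(metaclass=Singleton):
        def __init__(self):
            self.debug_mode = False

        def set_debug_mode(self, value: bool):
            self.debug_mode = value

    a, b = Config(), Config()
    a.set_debug_mode(True)
    assert b.debug_mode  # same object, so the flag is seen globally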


@@ -61,7 +61,7 @@ def fix_json(json_str: str, schema: str, debug=False) -> str:
     result_string = call_ai_function(
         function_string, args, description_string, model=cfg.fast_llm_model
     )
-    if debug:
+    if cfg.debug_mode:
        print("------------ JSON FIX ATTEMPT ---------------")
        print(f"Original JSON: {json_str}")
        print("-----------")


@@ -262,6 +262,10 @@ def parse_arguments():
         print_to_console("Speak Mode: ", Fore.GREEN, "ENABLED")
         cfg.set_speak_mode(True)

+    if args.debug:
+        print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED")
+        cfg.set_debug_mode(True)
+
     if args.gpt3only:
         print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
         cfg.set_smart_llm_model(cfg.fast_llm_model)
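This hunk consumes args.debug, but the diff does not show where the flag itself is declared. A plausible argparse declaration would look like the following (a hedged sketch, not code from the commit):

    # Hypothetical declaration of the --debug flag that parse_arguments
    # reads as args.debug; the actual definition is outside this diff.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--debug", action="store_true",
                        help="Enable debug mode (verbose token/memory logging)")

    args = parser.parse_args(["--debug"])
    assert args.debug is True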