Rename cfg.debug to cfg.debug_mode consistently across files

This commit is contained in:
kinance
2023-04-11 23:01:49 +09:00
parent d7b747600d
commit e0cb648858
2 changed files with 4 additions and 4 deletions

View File

@@ -64,14 +64,14 @@ def chat_with_ai(
model = cfg.fast_llm_model # TODO: Change model from hardcode to argument model = cfg.fast_llm_model # TODO: Change model from hardcode to argument
# Reserve 1000 tokens for the response # Reserve 1000 tokens for the response
if cfg.debug: if cfg.debug_mode:
print(f"Token limit: {token_limit}") print(f"Token limit: {token_limit}")
send_token_limit = token_limit - 1000 send_token_limit = token_limit - 1000
relevant_memory = permanent_memory.get_relevant(str(full_message_history[-5:]), 10) relevant_memory = permanent_memory.get_relevant(str(full_message_history[-5:]), 10)
if cfg.debug: if cfg.debug_mode:
print('Memory Stats: ', permanent_memory.get_stats()) print('Memory Stats: ', permanent_memory.get_stats())
next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context( next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context(
@@ -110,7 +110,7 @@ def chat_with_ai(
# assert tokens_remaining >= 0, "Tokens remaining is negative. This should never happen, please submit a bug report at https://www.github.com/Torantulino/Auto-GPT" # assert tokens_remaining >= 0, "Tokens remaining is negative. This should never happen, please submit a bug report at https://www.github.com/Torantulino/Auto-GPT"
# Debug print the current context # Debug print the current context
if cfg.debug: if cfg.debug_mode:
print(f"Token limit: {token_limit}") print(f"Token limit: {token_limit}")
print(f"Send Token Count: {current_tokens_used}") print(f"Send Token Count: {current_tokens_used}")
print(f"Tokens remaining for response: {tokens_remaining}") print(f"Tokens remaining for response: {tokens_remaining}")

View File

@@ -91,7 +91,7 @@ def fix_json(json_str: str, schema: str) -> str:
result_string = call_ai_function( result_string = call_ai_function(
function_string, args, description_string, model=cfg.fast_llm_model function_string, args, description_string, model=cfg.fast_llm_model
) )
if cfg.debug: if cfg.debug_mode:
print("------------ JSON FIX ATTEMPT ---------------") print("------------ JSON FIX ATTEMPT ---------------")
print(f"Original JSON: {json_str}") print(f"Original JSON: {json_str}")
print("-----------") print("-----------")