Mirror of https://github.com/aljazceru/Auto-GPT.git, synced 2025-12-17 22:14:28 +01:00
Changed occurrences of 'cfg.debug_mode' to 'cfg.debug'
@@ -64,14 +64,14 @@ def chat_with_ai(
             model = cfg.fast_llm_model  # TODO: Change model from hardcode to argument
             # Reserve 1000 tokens for the response

-            if cfg.debug_mode:
+            if cfg.debug:
                 print(f"Token limit: {token_limit}")

             send_token_limit = token_limit - 1000

             relevant_memory = permanent_memory.get_relevant(str(full_message_history[-5:]), 10)

-            if cfg.debug_mode:
+            if cfg.debug:
                 print('Memory Stats: ', permanent_memory.get_stats())

             next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context(
@@ -110,7 +110,7 @@ def chat_with_ai(
             # assert tokens_remaining >= 0, "Tokens remaining is negative. This should never happen, please submit a bug report at https://www.github.com/Torantulino/Auto-GPT"

             # Debug print the current context
-            if cfg.debug_mode:
+            if cfg.debug:
                 print(f"Token limit: {token_limit}")
                 print(f"Send Token Count: {current_tokens_used}")
                 print(f"Tokens remaining for response: {tokens_remaining}")
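For context, this rename only works if the shared Config object actually exposes a boolean `debug` attribute. The snippet below is a minimal sketch, not the project's real config module: the `Config` class, its default value, and `token_limit` are assumptions made purely to illustrate the debug-gated printing pattern used in the hunks above.

# Minimal sketch of the pattern the diff relies on; this is NOT Auto-GPT's
# actual config module. The Config class, its default, and token_limit are
# illustrative assumptions only.
class Config:
    def __init__(self) -> None:
        # Hypothetical default; the real project would set this from CLI flags or env vars.
        self.debug = False


cfg = Config()
token_limit = 4000

# Same gating style as in the diff: verbose prints only when debugging is enabled.
if cfg.debug:
    print(f"Token limit: {token_limit}")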