Fixes #803

- Brings back debug mode
- Replaces every reference to cfg.debug with cfg.debug_mode, the name it was given in 5b2d6010dc
- Removes an unnecessary config instance in main.py
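For context, the renamed flag lives on the shared Config object as debug_mode, with a set_debug_mode() setter that the hunks below call. A minimal sketch of that shape (only debug_mode and set_debug_mode are confirmed by this commit; the rest of the class is assumed for illustration):

```python
class Config:
    """Illustrative sketch of the config object this commit touches."""

    def __init__(self):
        self.debug_mode = False  # formerly `cfg.debug`, renamed in 5b2d6010dc

    def set_debug_mode(self, value: bool) -> None:
        # Setter used by parse_arguments() in the hunks below.
        self.debug_mode = value


cfg = Config()
cfg.set_debug_mode(True)
assert cfg.debug_mode
```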
```diff
@@ -64,14 +64,14 @@ def chat_with_ai(
         model = cfg.fast_llm_model  # TODO: Change model from hardcode to argument
         # Reserve 1000 tokens for the response

-        if cfg.debug:
+        if cfg.debug_mode:
             print(f"Token limit: {token_limit}")

         send_token_limit = token_limit - 1000

         relevant_memory = permanent_memory.get_relevant(str(full_message_history[-5:]), 10)

-        if cfg.debug:
+        if cfg.debug_mode:
             print('Memory Stats: ', permanent_memory.get_stats())

         next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context(
```
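The hunk above also shows the prompt budget: 1000 tokens are reserved for the model's reply, and everything else must fit in the remainder. A quick worked example (the 4096 limit is an assumed illustrative value, not taken from this diff):

```python
token_limit = 4096                     # assumed example context window
send_token_limit = token_limit - 1000  # reserve 1000 tokens for the response
print(send_token_limit)                # 3096 tokens left for prompt, history, and memory
```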
```diff
@@ -110,7 +110,7 @@ def chat_with_ai(
             # assert tokens_remaining >= 0, "Tokens remaining is negative. This should never happen, please submit a bug report at https://www.github.com/Torantulino/Auto-GPT"

             # Debug print the current context
-            if cfg.debug:
+            if cfg.debug_mode:
                 print(f"Token limit: {token_limit}")
                 print(f"Send Token Count: {current_tokens_used}")
                 print(f"Tokens remaining for response: {tokens_remaining}")
```
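The printed numbers presumably relate by simple arithmetic on the same budget; the derivation of tokens_remaining is not shown in this hunk, so the sketch below is an assumption consistent with the commented-out assert:

```python
token_limit = 4096            # assumed example context window
current_tokens_used = 2500    # tokens consumed by the assembled context (example)
tokens_remaining = token_limit - current_tokens_used
assert tokens_remaining >= 0  # the invariant the commented-out assert would enforce
```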
```diff
@@ -91,7 +91,7 @@ def fix_json(json_str: str, schema: str) -> str:
     result_string = call_ai_function(
         function_string, args, description_string, model=cfg.fast_llm_model
     )
-    if cfg.debug:
+    if cfg.debug_mode:
         print("------------ JSON FIX ATTEMPT ---------------")
         print(f"Original JSON: {json_str}")
         print("-----------")
```
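For readers unfamiliar with call_ai_function, the hunk shows its call shape: a function-signature string, an argument list, a description, and a model keyword. A hedged usage sketch (the literal values are placeholders; only the argument order and the model= keyword come from this diff):

```python
# Placeholder values; only the argument order and `model=` keyword
# are confirmed by the hunk above.
function_string = "def fix_json(json_str: str, schema: str) -> str:"
args = [json_str, schema]
description_string = "Fixes the provided JSON string to make it parseable."
result_string = call_ai_function(
    function_string, args, description_string, model=cfg.fast_llm_model
)
```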
```diff
@@ -266,6 +266,7 @@ def prompt_user():
 def parse_arguments():
     """Parses the arguments passed to the script"""
     global cfg
+    cfg.set_debug_mode(False)
     cfg.set_continuous_mode(False)
     cfg.set_speak_mode(False)

```
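Resetting the flag to False here makes parse_arguments() start from known defaults before reading the CLI. The flag consumed as args.debug in the next hunk is presumably registered with argparse along these lines (the add_argument call itself is outside this diff, so this wiring is an assumption):

```python
import argparse

# Assumed registration of the flag read as `args.debug` below.
parser = argparse.ArgumentParser(description="Auto-GPT")
parser.add_argument("--debug", action="store_true", help="Enable debug mode")
args = parser.parse_args()
```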
```diff
@@ -292,6 +293,9 @@ def parse_arguments():
         print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
         cfg.set_smart_llm_model(cfg.fast_llm_model)

+    if args.debug:
+        print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED")
+        cfg.set_debug_mode(True)


 # TODO: fill in llm values here
```
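Taken together, a quick sanity check of the restored flag (a sketch assuming the argparse wiring above and the global cfg from the hunks):

```python
import sys

sys.argv = ["main.py", "--debug"]  # simulate `python main.py --debug`
parse_arguments()                  # from the hunks above
assert cfg.debug_mode is True      # debug prints are now enabled everywhere
```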