Mirror of https://github.com/aljazceru/Auto-GPT.git (synced 2026-01-31 20:04:28 +01:00)
refactor(agent): Reduce log spam in Agent Protocol mode
- Removed unnecessary print_attribute calls in configurators.py and configurator.py
- Consolidated printing of configuration attributes in main.py for improved readability and reduced log spam in Agent Protocol mode
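For context, print_attribute is the logging helper whose call sites this commit relocates. The snippet below is only a hedged sketch of what such a helper plausibly does, based on how it is called in the diff; the real implementation lives in autogpt.logs.helpers and may differ.

# Hypothetical sketch -- not the actual autogpt.logs.helpers implementation.
import logging

logger = logging.getLogger(__name__)

def print_attribute(title: str, value, title_color: str = "") -> None:
    # Log one "Title: value" line, optionally coloring the title with an
    # ANSI escape such as colorama's Fore.YELLOW, then resetting the color.
    reset = "\x1b[0m" if title_color else ""
    logger.info(f"{title_color}{title}:{reset} {value}")

Each call emits one console line, which is why moving these calls out of the agent-configuration path and into main.py cuts down repeated output in Agent Protocol mode.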
configurators.py
@@ -6,7 +6,6 @@ from autogpt.commands import COMMAND_CATEGORIES
 from autogpt.config import AIDirectives, AIProfile, Config
 from autogpt.core.resource.model_providers import ChatModelProvider
 from autogpt.logs.config import configure_chat_plugins
-from autogpt.logs.helpers import print_attribute
 from autogpt.models.command_registry import CommandRegistry
 from autogpt.plugins import scan_plugins
 
@@ -79,8 +78,6 @@ def _configure_agent(
 
     # TODO: configure memory
 
-    print_attribute("Configured Browser", app_config.selenium_web_browser)
-
     return Agent(
         settings=agent_state,
         llm_provider=llm_provider,
configurator.py
@@ -13,7 +13,7 @@ from autogpt.config import Config
 from autogpt.config.config import GPT_3_MODEL, GPT_4_MODEL
 from autogpt.llm.api_manager import ApiManager
 from autogpt.logs.config import LogFormatName
-from autogpt.logs.helpers import print_attribute, request_user_double_check
+from autogpt.logs.helpers import request_user_double_check
 from autogpt.memory.vector import get_supported_memory_backends
 
 if TYPE_CHECKING:
@@ -78,7 +78,6 @@ def apply_overrides_to_config(
         config.logging.log_file_format = LogFormatName(log_file_format)
 
     if continuous:
-        print_attribute("Continuous Mode", "ENABLED", title_color=Fore.YELLOW)
         logger.warning(
             "Continuous mode is not recommended. It is potentially dangerous and may"
             " cause your AI to run forever or carry out actions you would not usually"
@@ -87,7 +86,6 @@ def apply_overrides_to_config(
         config.continuous_mode = True
 
     if continuous_limit:
-        print_attribute("Continuous Limit", continuous_limit)
         config.continuous_limit = continuous_limit
 
     # Check if continuous limit is used without continuous mode
@@ -95,12 +93,10 @@ def apply_overrides_to_config(
         raise click.UsageError("--continuous-limit can only be used with --continuous")
 
     if speak:
-        print_attribute("Speak Mode", "ENABLED")
         config.tts_config.speak_mode = True
 
     # Set the default LLM models
     if gpt3only:
-        print_attribute("GPT3.5 Only Mode", "ENABLED")
         # --gpt3only should always use gpt-3.5-turbo, despite user's FAST_LLM config
         config.fast_llm = GPT_3_MODEL
         config.smart_llm = GPT_3_MODEL
@@ -113,7 +109,6 @@ def apply_overrides_to_config(
         )
         == GPT_4_MODEL
     ):
-        print_attribute("GPT4 Only Mode", "ENABLED")
         # --gpt4only should always use gpt-4, despite user's SMART_LLM config
         config.fast_llm = GPT_4_MODEL
         config.smart_llm = GPT_4_MODEL
@@ -136,14 +131,10 @@ def apply_overrides_to_config(
                 },
                 msg=f"{supported_memory}",
             )
-            print_attribute(
-                "Defaulting to", config.memory_backend, title_color=Fore.YELLOW
-            )
         else:
             config.memory_backend = chosen
 
     if skip_reprompt:
-        print_attribute("Skip Re-prompt", "ENABLED")
         config.skip_reprompt = True
 
     if ai_settings_file:
@@ -156,7 +147,6 @@ def apply_overrides_to_config(
             request_user_double_check()
             exit(1)
 
-        print_attribute("Using AI Settings File", file)
         config.ai_settings_file = config.project_root / file
         config.skip_reprompt = True
 
@@ -170,14 +160,12 @@ def apply_overrides_to_config(
             request_user_double_check()
             exit(1)
 
-        print_attribute("Using Prompt Settings File", file)
         config.prompt_settings_file = config.project_root / file
 
     if browser_name:
         config.selenium_web_browser = browser_name
 
     if allow_downloads:
-        print_attribute("Native Downloading", "ENABLED")
         logger.warning(
             msg=f"{Back.LIGHTYELLOW_EX}"
             "AutoGPT will now be able to download and save files to your machine."
main.py
@@ -125,6 +125,21 @@ async def run_auto_gpt(
     print_motd(config, logger)
     print_git_branch_info(logger)
     print_python_version_info(logger)
+    print_attribute("Smart LLM", config.smart_llm)
+    print_attribute("Fast LLM", config.fast_llm)
+    print_attribute("Browser", config.selenium_web_browser)
+    if config.continuous_mode:
+        print_attribute("Continuous Mode", "ENABLED", title_color=Fore.YELLOW)
+        if continuous_limit:
+            print_attribute("Continuous Limit", config.continuous_limit)
+    if config.tts_config.speak_mode:
+        print_attribute("Speak Mode", "ENABLED")
+    if ai_settings:
+        print_attribute("Using AI Settings File", ai_settings)
+    if prompt_settings:
+        print_attribute("Using Prompt Settings File", prompt_settings)
+    if config.allow_downloads:
+        print_attribute("Native Downloading", "ENABLED")
 
     if install_plugin_deps:
         install_plugin_dependencies()