Clean up logging

This commit is contained in:
Reinier van der Leer
2023-08-22 07:29:56 +02:00
parent 3fe2246468
commit 4e761b49f3
49 changed files with 611 additions and 488 deletions

View File

@@ -1,6 +1,7 @@
from __future__ import annotations
import json
import logging
import time
from datetime import datetime
from typing import TYPE_CHECKING, Optional
@@ -21,7 +22,6 @@ from autogpt.json_utils.utilities import extract_dict_from_response, validate_di
from autogpt.llm.api_manager import ApiManager
from autogpt.llm.base import Message
from autogpt.llm.utils import count_string_tokens
from autogpt.logs import logger
from autogpt.logs.log_cycle import (
CURRENT_CONTEXT_FILE_NAME,
FULL_MESSAGE_HISTORY_FILE_NAME,
@@ -41,6 +41,8 @@ from autogpt.workspace import Workspace
from .base import BaseAgent
logger = logging.getLogger(__name__)
class Agent(BaseAgent):
"""Agent class for interacting with Auto-GPT."""

View File

@@ -1,23 +1,24 @@
from __future__ import annotations
import logging
import re
from abc import ABCMeta, abstractmethod
from typing import TYPE_CHECKING, Any, Literal, Optional
if TYPE_CHECKING:
from autogpt.config import AIConfig, Config
from autogpt.models.command_registry import CommandRegistry
from autogpt.agents.utils.exceptions import InvalidAgentResponseError
from autogpt.llm.base import ChatModelResponse, ChatSequence, Message
from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS, get_openai_command_specs
from autogpt.llm.utils import count_message_tokens, create_chat_completion
from autogpt.logs import logger
from autogpt.memory.message_history import MessageHistory
from autogpt.models.agent_actions import ActionResult
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
logger = logging.getLogger(__name__)
CommandName = str
CommandArgs = dict[str, str]
AgentThoughts = dict[str, Any]

View File

@@ -1,5 +1,6 @@
from __future__ import annotations
import logging
import re
from datetime import datetime
from typing import TYPE_CHECKING, Literal, Optional
@@ -14,7 +15,6 @@ from autogpt.agents.utils.exceptions import AgentException, InvalidAgentResponse
from autogpt.json_utils.utilities import extract_dict_from_response, validate_dict
from autogpt.llm.base import Message
from autogpt.llm.utils import count_string_tokens
from autogpt.logs import logger
from autogpt.logs.log_cycle import (
CURRENT_CONTEXT_FILE_NAME,
NEXT_ACTION_FILE_NAME,
@@ -35,6 +35,8 @@ from .agent import execute_command, extract_command
from .base import BaseAgent
from .utils.context import AgentContext
logger = logging.getLogger(__name__)
class PlanningAgent(BaseAgent):
"""Agent class for interacting with Auto-GPT."""

View File

@@ -1,6 +1,7 @@
"""Configurator module."""
from __future__ import annotations
import logging
from typing import Literal
import click
@@ -10,9 +11,11 @@ from autogpt import utils
from autogpt.config import Config
from autogpt.config.config import GPT_3_MODEL, GPT_4_MODEL
from autogpt.llm.api_manager import ApiManager
from autogpt.logs import logger
from autogpt.logs.helpers import print_attribute, request_user_double_check
from autogpt.memory.vector import get_supported_memory_backends
logger = logging.getLogger(__name__)
def create_config(
config: Config,
@@ -52,14 +55,12 @@ def create_config(
config.speak_mode = False
if debug:
logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED")
print_attribute("Debug mode", "ENABLED")
config.debug_mode = True
if continuous:
logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED")
logger.typewriter_log(
"WARNING: ",
Fore.RED,
print_attribute("Continuous Mode", "ENABLED", title_color=Fore.YELLOW)
logger.warning(
"Continuous mode is not recommended. It is potentially dangerous and may"
" cause your AI to run forever or carry out actions you would not usually"
" authorise. Use at your own risk.",
@@ -67,9 +68,7 @@ def create_config(
config.continuous_mode = True
if continuous_limit:
logger.typewriter_log(
"Continuous Limit: ", Fore.GREEN, f"{continuous_limit}"
)
print_attribute("Continuous Limit", continuous_limit)
config.continuous_limit = continuous_limit
# Check if continuous limit is used without continuous mode
@@ -77,12 +76,12 @@ def create_config(
raise click.UsageError("--continuous-limit can only be used with --continuous")
if speak:
logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED")
print_attribute("Speak Mode", "ENABLED")
config.speak_mode = True
# Set the default LLM models
if gpt3only:
logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
print_attribute("GPT3.5 Only Mode", "ENABLED")
# --gpt3only should always use gpt-3.5-turbo, despite user's FAST_LLM config
config.fast_llm = GPT_3_MODEL
config.smart_llm = GPT_3_MODEL
@@ -91,7 +90,7 @@ def create_config(
and check_model(GPT_4_MODEL, model_type="smart_llm", config=config)
== GPT_4_MODEL
):
logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
print_attribute("GPT4 Only Mode", "ENABLED")
# --gpt4only should always use gpt-4, despite user's SMART_LLM config
config.fast_llm = GPT_4_MODEL
config.smart_llm = GPT_4_MODEL
@@ -103,17 +102,21 @@ def create_config(
supported_memory = get_supported_memory_backends()
chosen = memory_type
if chosen not in supported_memory:
logger.typewriter_log(
"ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED: ",
Fore.RED,
f"{supported_memory}",
logger.warning(
extra={
"title": "ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED:",
"title_color": Fore.RED,
},
msg=f"{supported_memory}",
)
print_attribute(
"Defaulting to", config.memory_backend, title_color=Fore.YELLOW
)
logger.typewriter_log("Defaulting to: ", Fore.YELLOW, config.memory_backend)
else:
config.memory_backend = chosen
if skip_reprompt:
logger.typewriter_log("Skip Re-prompt: ", Fore.GREEN, "ENABLED")
print_attribute("Skip Re-prompt", "ENABLED")
config.skip_reprompt = True
if ai_settings_file:
@@ -122,11 +125,11 @@ def create_config(
# Validate file
(validated, message) = utils.validate_yaml_file(file)
if not validated:
logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
logger.double_check()
logger.fatal(extra={"title": "FAILED FILE VALIDATION:"}, msg=message)
request_user_double_check()
exit(1)
logger.typewriter_log("Using AI Settings File:", Fore.GREEN, file)
print_attribute("Using AI Settings File", file)
config.ai_settings_file = file
config.skip_reprompt = True
@@ -136,28 +139,24 @@ def create_config(
# Validate file
(validated, message) = utils.validate_yaml_file(file)
if not validated:
logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
logger.double_check()
logger.fatal(extra={"title": "FAILED FILE VALIDATION:"}, msg=message)
request_user_double_check()
exit(1)
logger.typewriter_log("Using Prompt Settings File:", Fore.GREEN, file)
print_attribute("Using Prompt Settings File", file)
config.prompt_settings_file = file
if browser_name:
config.selenium_web_browser = browser_name
if allow_downloads:
logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED")
logger.typewriter_log(
"WARNING: ",
Fore.YELLOW,
f"{Back.LIGHTYELLOW_EX}Auto-GPT will now be able to download and save files to your machine.{Back.RESET} "
+ "It is recommended that you monitor any files it downloads carefully.",
print_attribute("Native Downloading", "ENABLED")
logger.warn(
msg=f"{Back.LIGHTYELLOW_EX}Auto-GPT will now be able to download and save files to your machine.{Back.RESET}"
" It is recommended that you monitor any files it downloads carefully.",
)
logger.typewriter_log(
"WARNING: ",
Fore.YELLOW,
f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}",
logger.warn(
msg=f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}",
)
config.allow_downloads = True
@@ -178,10 +177,7 @@ def check_model(
if any(model_name in m["id"] for m in models):
return model_name
logger.typewriter_log(
"WARNING: ",
Fore.YELLOW,
f"You do not have access to {model_name}. Setting {model_type} to "
f"gpt-3.5-turbo.",
logger.warn(
f"You do not have access to {model_name}. Setting {model_type} to gpt-3.5-turbo."
)
return "gpt-3.5-turbo"

View File

@@ -13,7 +13,7 @@ from colorama import Fore, Style
from autogpt.agents import Agent, AgentThoughts, CommandArgs, CommandName
from autogpt.agents.utils.exceptions import InvalidAgentResponseError
from autogpt.app.configurator import create_config
from autogpt.app.setup import prompt_user
from autogpt.app.setup import interactive_ai_config_setup
from autogpt.app.spinner import Spinner
from autogpt.app.utils import (
clean_input,
@@ -25,7 +25,8 @@ from autogpt.app.utils import (
from autogpt.commands import COMMAND_CATEGORIES
from autogpt.config import AIConfig, Config, ConfigBuilder, check_openai_api_key
from autogpt.llm.api_manager import ApiManager
from autogpt.logs import logger
from autogpt.logs.config import configure_chat_plugins, configure_logging
from autogpt.logs.helpers import print_attribute
from autogpt.memory.vector import get_memory
from autogpt.models.command_registry import CommandRegistry
from autogpt.plugins import scan_plugins
@@ -56,15 +57,8 @@ def run_auto_gpt(
ai_role: Optional[str] = None,
ai_goals: tuple[str] = tuple(),
):
# Configure logging before we do anything else.
logger.set_level(logging.DEBUG if debug else logging.INFO)
config = ConfigBuilder.build_config_from_env(workdir=working_directory)
# HACK: This is a hack to allow the config into the logger without having to pass it around everywhere
# or import it directly.
logger.config = config
# TODO: fill in llm values here
check_openai_api_key(config)
@@ -85,16 +79,35 @@ def run_auto_gpt(
skip_news,
)
# Set up logging module
configure_logging(config)
logger = logging.getLogger(__name__)
if config.continuous_mode:
for line in get_legal_warning().split("\n"):
logger.warn(markdown_to_ansi_style(line), "LEGAL:", Fore.RED)
logger.warn(
extra={
"title": "LEGAL:",
"title_color": Fore.RED,
"preserve_color": True,
},
msg=markdown_to_ansi_style(line),
)
if not config.skip_news:
motd, is_new_motd = get_latest_bulletin()
if motd:
motd = markdown_to_ansi_style(motd)
for motd_line in motd.split("\n"):
logger.info(motd_line, "NEWS:", Fore.GREEN)
logger.info(
extra={
"title": "NEWS:",
"title_color": Fore.GREEN,
"preserve_color": True,
},
msg=motd_line,
)
if is_new_motd and not config.chat_messages_enabled:
input(
Fore.MAGENTA
@@ -105,17 +118,13 @@ def run_auto_gpt(
git_branch = get_current_git_branch()
if git_branch and git_branch != "stable":
logger.typewriter_log(
"WARNING: ",
Fore.RED,
f"You are running on `{git_branch}` branch "
"- this is not a supported branch.",
logger.warn(
f"You are running on `{git_branch}` branch"
" - this is not a supported branch."
)
if sys.version_info < (3, 10):
logger.typewriter_log(
"WARNING: ",
Fore.RED,
"You are running on an older version of Python. "
logger.error(
"WARNING: You are running on an older version of Python. "
"Some people have observed problems with certain "
"parts of Auto-GPT with this version. "
"Please consider upgrading to Python 3.10 or higher.",
@@ -135,6 +144,7 @@ def run_auto_gpt(
config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path)
config.plugins = scan_plugins(config, config.debug_mode)
configure_chat_plugins(config)
# Create a CommandRegistry instance and scan default folder
command_registry = CommandRegistry.with_command_modules(COMMAND_CATEGORIES, config)
@@ -148,21 +158,13 @@ def run_auto_gpt(
ai_config.command_registry = command_registry
# print(prompt)
# add chat plugins capable of report to logger
if config.chat_messages_enabled:
for plugin in config.plugins:
if hasattr(plugin, "can_handle_report") and plugin.can_handle_report():
logger.info(f"Loaded plugin into logger: {plugin.__class__.__name__}")
logger.chat_plugins.append(plugin)
# Initialize memory and make sure it is empty.
# this is particularly important for indexing and referencing pinecone memory
memory = get_memory(config)
memory.clear()
logger.typewriter_log(
"Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
)
logger.typewriter_log("Using Browser:", Fore.GREEN, config.selenium_web_browser)
print_attribute("Configured Memory", memory.__class__.__name__)
print_attribute("Configured Browser", config.selenium_web_browser)
agent = Agent(
memory=memory,
@@ -209,6 +211,8 @@ def run_interaction_loop(
# These contain both application config and agent config, so grab them here.
config = agent.config
ai_config = agent.ai_config
logger = logging.getLogger(__name__)
logger.debug(f"{ai_config.ai_name} System Prompt: {agent.system_prompt}")
cycle_budget = cycles_remaining = _get_cycle_budget(
@@ -219,19 +223,15 @@ def run_interaction_loop(
def graceful_agent_interrupt(signum: int, frame: Optional[FrameType]) -> None:
nonlocal cycle_budget, cycles_remaining, spinner
if cycles_remaining in [0, 1]:
logger.typewriter_log(
"Interrupt signal received. Stopping Auto-GPT immediately.",
Fore.RED,
)
logger.error("Interrupt signal received. Stopping Auto-GPT immediately.")
sys.exit()
else:
restart_spinner = spinner.running
if spinner.running:
spinner.stop()
logger.typewriter_log(
"Interrupt signal received. Stopping continuous command execution.",
Fore.RED,
logger.error(
"Interrupt signal received. Stopping continuous command execution."
)
cycles_remaining = 1
if restart_spinner:
@@ -296,31 +296,32 @@ def run_interaction_loop(
else:
# Case 1: Continuous iteration was interrupted -> resume
if cycle_budget > 1:
logger.typewriter_log(
"RESUMING CONTINUOUS EXECUTION: ",
Fore.MAGENTA,
logger.info(
f"The cycle budget is {cycle_budget}.",
extra={
"title": "RESUMING CONTINUOUS EXECUTION",
"title_color": Fore.MAGENTA,
},
)
# Case 2: The agent used up its cycle budget -> reset
cycles_remaining = cycle_budget + 1
logger.typewriter_log(
logger.info(
"-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
Fore.MAGENTA,
"",
extra={"color": Fore.MAGENTA},
)
elif user_feedback == UserFeedback.EXIT:
logger.typewriter_log("Exiting...", Fore.YELLOW)
logger.warn("Exiting...")
exit()
else: # user_feedback == UserFeedback.TEXT
command_name = "human_feedback"
else:
user_input = ""
# First log new-line so user can differentiate sections better in console
logger.typewriter_log("\n")
print()
if cycles_remaining != math.inf:
# Print authorized commands left value
logger.typewriter_log(
"AUTHORISED COMMANDS LEFT: ", Fore.CYAN, f"{cycles_remaining}"
print_attribute(
"AUTHORIZED_COMMANDS_LEFT", cycles_remaining, title_color=Fore.CYAN
)
###################
@@ -338,7 +339,10 @@ def run_interaction_loop(
result = agent.execute(command_name, command_args, user_input)
if result.status == "success":
logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result.results)
logger.info(
str(result.results),
extra={"title": "SYSTEM:", "title_color": Fore.YELLOW},
)
elif result.status == "error":
logger.warn(
f"Command {command_name} returned an error: {result.error or result.reason}"
@@ -361,6 +365,7 @@ def update_user(
command_args: The arguments for the command.
assistant_reply_dict: The assistant's reply.
"""
logger = logging.getLogger(__name__)
print_assistant_thoughts(ai_config.ai_name, assistant_reply_dict, config)
@@ -368,12 +373,15 @@ def update_user(
say_text(f"I want to execute {command_name}", config)
# First log new-line so user can differentiate sections better in console
logger.typewriter_log("\n")
logger.typewriter_log(
"NEXT ACTION: ",
Fore.CYAN,
print()
logger.info(
f"COMMAND = {Fore.CYAN}{remove_ansi_escape(command_name)}{Style.RESET_ALL} "
f"ARGUMENTS = {Fore.CYAN}{command_args}{Style.RESET_ALL}",
extra={
"title": "NEXT ACTION:",
"title_color": Fore.CYAN,
"preserve_color": True,
},
)
@@ -391,6 +399,8 @@ def get_user_feedback(
A tuple of the user's feedback, the user's input, and the number of
cycles remaining if the user has initiated a continuous cycle.
"""
logger = logging.getLogger(__name__)
# ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
# Get key press: Prompt the user to press enter to continue or escape
# to exit
@@ -449,6 +459,8 @@ def construct_main_ai_config(
Returns:
str: The prompt string
"""
logger = logging.getLogger(__name__)
ai_config = AIConfig.load(config.workdir / config.ai_settings_file)
# Apply overrides
@@ -464,20 +476,17 @@ def construct_main_ai_config(
or config.skip_reprompt
and all([ai_config.ai_name, ai_config.ai_role, ai_config.ai_goals])
):
logger.typewriter_log("Name :", Fore.GREEN, ai_config.ai_name)
logger.typewriter_log("Role :", Fore.GREEN, ai_config.ai_role)
logger.typewriter_log("Goals:", Fore.GREEN, f"{ai_config.ai_goals}")
logger.typewriter_log(
print_attribute("Name :", ai_config.ai_name)
print_attribute("Role :", ai_config.ai_role)
print_attribute("Goals:", ai_config.ai_goals)
print_attribute(
"API Budget:",
Fore.GREEN,
"infinite" if ai_config.api_budget <= 0 else f"${ai_config.api_budget}",
)
elif all([ai_config.ai_name, ai_config.ai_role, ai_config.ai_goals]):
logger.typewriter_log(
"Welcome back! ",
Fore.GREEN,
f"Would you like me to return to being {ai_config.ai_name}?",
speak_text=True,
logger.info(
extra={"title": f"{Fore.GREEN}Welcome back!{Fore.RESET}"},
msg=f"Would you like me to return to being {ai_config.ai_name}?",
)
should_continue = clean_input(
config,
@@ -492,36 +501,31 @@ Continue ({config.authorise_key}/{config.exit_key}): """,
ai_config = AIConfig()
if any([not ai_config.ai_name, not ai_config.ai_role, not ai_config.ai_goals]):
ai_config = prompt_user(config)
ai_config = interactive_ai_config_setup(config)
ai_config.save(config.workdir / config.ai_settings_file)
if config.restrict_to_workspace:
logger.typewriter_log(
"NOTE:All files/directories created by this agent can be found inside its workspace at:",
Fore.YELLOW,
f"{config.workspace_path}",
logger.info(
f"{Fore.YELLOW}NOTE: All files/directories created by this agent"
f" can be found inside its workspace at:{Fore.RESET} {config.workspace_path}",
extra={"preserve_color": True},
)
# set the total api budget
api_manager = ApiManager()
api_manager.set_total_budget(ai_config.api_budget)
# Agent Created, print message
logger.typewriter_log(
ai_config.ai_name,
Fore.LIGHTBLUE_EX,
"has been created with the following details:",
speak_text=True,
logger.info(
f"{Fore.LIGHTBLUE_EX}{ai_config.ai_name}{Fore.RESET} has been created with the following details:",
extra={"preserve_color": True},
)
# Print the ai_config details
# Name
logger.typewriter_log("Name:", Fore.GREEN, ai_config.ai_name, speak_text=False)
# Role
logger.typewriter_log("Role:", Fore.GREEN, ai_config.ai_role, speak_text=False)
# Goals
logger.typewriter_log("Goals:", Fore.GREEN, "", speak_text=False)
print_attribute("Name :", ai_config.ai_name)
print_attribute("Role :", ai_config.ai_role)
print_attribute("Goals:", "")
for goal in ai_config.ai_goals:
logger.typewriter_log("-", Fore.GREEN, goal, speak_text=False)
logger.info(f"- {goal}")
return ai_config
@@ -533,6 +537,8 @@ def print_assistant_thoughts(
) -> None:
from autogpt.speech import say_text
logger = logging.getLogger(__name__)
assistant_thoughts_reasoning = None
assistant_thoughts_plan = None
assistant_thoughts_speak = None
@@ -551,12 +557,12 @@ def print_assistant_thoughts(
assistant_thoughts_speak = remove_ansi_escape(
assistant_thoughts.get("speak", "")
)
logger.typewriter_log(
f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text
print_attribute(
f"{ai_name.upper()} THOUGHTS", assistant_thoughts_text, title_color=Fore.YELLOW
)
logger.typewriter_log("REASONING:", Fore.YELLOW, str(assistant_thoughts_reasoning))
print_attribute("REASONING", assistant_thoughts_reasoning, title_color=Fore.YELLOW)
if assistant_thoughts_plan:
logger.typewriter_log("PLAN:", Fore.YELLOW, "")
print_attribute("PLAN", "", title_color=Fore.YELLOW)
# If it's a list, join it into a string
if isinstance(assistant_thoughts_plan, list):
assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
@@ -567,14 +573,17 @@ def print_assistant_thoughts(
lines = assistant_thoughts_plan.split("\n")
for line in lines:
line = line.lstrip("- ")
logger.typewriter_log("- ", Fore.GREEN, line.strip())
logger.typewriter_log("CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}")
logger.info(line.strip(), extra={"title": "- ", "title_color": Fore.GREEN})
print_attribute(
"CRITICISM", f"{assistant_thoughts_criticism}", title_color=Fore.YELLOW
)
# Speak the assistant's thoughts
if assistant_thoughts_speak:
if config.speak_mode:
say_text(assistant_thoughts_speak, config)
else:
logger.typewriter_log("SPEAK:", Fore.YELLOW, f"{assistant_thoughts_speak}")
print_attribute("SPEAK", assistant_thoughts_speak, title_color=Fore.YELLOW)
def remove_ansi_escape(s: str) -> str:

View File

@@ -1,4 +1,5 @@
"""Set up the AI and its goals"""
import logging
import re
from typing import Optional
@@ -10,15 +11,17 @@ from autogpt.config import Config
from autogpt.config.ai_config import AIConfig
from autogpt.llm.base import ChatSequence, Message
from autogpt.llm.utils import create_chat_completion
from autogpt.logs import logger
from autogpt.logs.helpers import user_friendly_output
from autogpt.prompts.default_prompts import (
DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC,
DEFAULT_TASK_PROMPT_AICONFIG_AUTOMATIC,
DEFAULT_USER_DESIRE_PROMPT,
)
logger = logging.getLogger(__name__)
def prompt_user(
def interactive_ai_config_setup(
config: Config, ai_config_template: Optional[AIConfig] = None
) -> AIConfig:
"""Prompt the user for input
@@ -32,11 +35,10 @@ def prompt_user(
"""
# Construct the prompt
logger.typewriter_log(
"Welcome to Auto-GPT! ",
Fore.GREEN,
"run with '--help' for more information.",
speak_text=True,
user_friendly_output(
title="Welcome to Auto-GPT! ",
message="run with '--help' for more information.",
title_color=Fore.GREEN,
)
ai_config_template_provided = ai_config_template is not None and any(
@@ -50,11 +52,10 @@ def prompt_user(
user_desire = ""
if not ai_config_template_provided:
# Get user desire if command line overrides have not been passed in
logger.typewriter_log(
"Create an AI-Assistant:",
Fore.GREEN,
"input '--manual' to enter manual mode.",
speak_text=True,
user_friendly_output(
title="Create an AI-Assistant:",
message="input '--manual' to enter manual mode.",
title_color=Fore.GREEN,
)
user_desire = utils.clean_input(
@@ -66,10 +67,10 @@ def prompt_user(
# If user desire contains "--manual" or we have overridden any of the AI configuration
if "--manual" in user_desire or ai_config_template_provided:
logger.typewriter_log(
"Manual Mode Selected",
Fore.GREEN,
speak_text=True,
user_friendly_output(
"",
title="Manual Mode Selected",
title_color=Fore.GREEN,
)
return generate_aiconfig_manual(config, ai_config_template)
@@ -77,11 +78,10 @@ def prompt_user(
try:
return generate_aiconfig_automatic(user_desire, config)
except Exception as e:
logger.typewriter_log(
"Unable to automatically generate AI Config based on user desire.",
Fore.RED,
"Falling back to manual mode.",
speak_text=True,
user_friendly_output(
title="Unable to automatically generate AI Config based on user desire.",
message="Falling back to manual mode.",
title_color=Fore.RED,
)
logger.debug(f"Error during AIConfig generation: {e}")
@@ -107,12 +107,11 @@ def generate_aiconfig_manual(
"""
# Manual Setup Intro
logger.typewriter_log(
"Create an AI-Assistant:",
Fore.GREEN,
"Enter the name of your AI and its role below. Entering nothing will load"
user_friendly_output(
title="Create an AI-Assistant:",
message="Enter the name of your AI and its role below. Entering nothing will load"
" defaults.",
speak_text=True,
title_color=Fore.GREEN,
)
if ai_config_template and ai_config_template.ai_name:
@@ -120,26 +119,30 @@ def generate_aiconfig_manual(
else:
ai_name = ""
# Get AI Name from User
logger.typewriter_log(
"Name your AI: ", Fore.GREEN, "For example, 'Entrepreneur-GPT'"
user_friendly_output(
title="Name your AI:",
message="For example, 'Entrepreneur-GPT'",
title_color=Fore.GREEN,
)
ai_name = utils.clean_input(config, "AI Name: ")
if ai_name == "":
ai_name = "Entrepreneur-GPT"
logger.typewriter_log(
f"{ai_name} here!", Fore.LIGHTBLUE_EX, "I am at your service.", speak_text=True
user_friendly_output(
title=f"{ai_name} here!",
message="I am at your service.",
title_color=Fore.LIGHTBLUE_EX,
)
if ai_config_template and ai_config_template.ai_role:
ai_role = ai_config_template.ai_role
else:
# Get AI Role from User
logger.typewriter_log(
"Describe your AI's role: ",
Fore.GREEN,
"For example, 'an AI designed to autonomously develop and run businesses with"
user_friendly_output(
title="Describe your AI's role:",
message="For example, 'an AI designed to autonomously develop and run businesses with"
" the sole goal of increasing your net worth.'",
title_color=Fore.GREEN,
)
ai_role = utils.clean_input(config, f"{ai_name} is: ")
if ai_role == "":
@@ -150,11 +153,11 @@ def generate_aiconfig_manual(
ai_goals = ai_config_template.ai_goals
else:
# Enter up to 5 goals for the AI
logger.typewriter_log(
"Enter up to 5 goals for your AI: ",
Fore.GREEN,
"For example: \nIncrease net worth, Grow Twitter Account, Develop and manage"
user_friendly_output(
title="Enter up to 5 goals for your AI:",
message="For example: \nIncrease net worth, Grow Twitter Account, Develop and manage"
" multiple businesses autonomously'",
title_color=Fore.GREEN,
)
logger.info("Enter nothing to load defaults, enter nothing when finished.")
ai_goals = []
@@ -173,10 +176,10 @@ def generate_aiconfig_manual(
]
# Get API Budget from User
logger.typewriter_log(
"Enter your budget for API calls: ",
Fore.GREEN,
"For example: $1.50",
user_friendly_output(
title="Enter your budget for API calls:",
message="For example: $1.50",
title_color=Fore.GREEN,
)
logger.info("Enter nothing to let the AI run without monetary limit")
api_budget_input = utils.clean_input(
@@ -188,8 +191,11 @@ def generate_aiconfig_manual(
try:
api_budget = float(api_budget_input.replace("$", ""))
except ValueError:
logger.typewriter_log(
"Invalid budget input. Setting budget to unlimited.", Fore.RED
user_friendly_output(
level=logging.WARNING,
title="Invalid budget input.",
message="Setting budget to unlimited.",
title_color=Fore.RED,
)
api_budget = 0.0

View File

@@ -1,3 +1,4 @@
import logging
import os
import re
@@ -8,12 +9,12 @@ from prompt_toolkit import ANSI, PromptSession
from prompt_toolkit.history import InMemoryHistory
from autogpt.config import Config
from autogpt.logs import logger
logger = logging.getLogger(__name__)
session = PromptSession(history=InMemoryHistory())
def clean_input(config: Config, prompt: str = "", talk=False):
def clean_input(config: Config, prompt: str = ""):
try:
if config.chat_messages_enabled:
for plugin in config.plugins:
@@ -44,7 +45,7 @@ def clean_input(config: Config, prompt: str = "", talk=False):
return plugin_response
# ask for input, default when just pressing Enter is y
logger.info("Asking user via keyboard...")
logger.debug("Asking user via keyboard...")
# handle_sigint must be set to False, so the signal handler in the
# autogpt/main.py could be employed properly. This refers to

View File

@@ -1,9 +1,11 @@
import functools
import logging
from pathlib import Path
from typing import Callable
from autogpt.agents.agent import Agent
from autogpt.logs import logger
logger = logging.getLogger(__name__)
def sanitize_path_arg(arg_name: str):

View File

@@ -3,6 +3,7 @@
COMMAND_CATEGORY = "execute_code"
COMMAND_CATEGORY_TITLE = "Execute Code"
import logging
import os
import subprocess
from pathlib import Path
@@ -21,10 +22,11 @@ from autogpt.agents.utils.exceptions import (
)
from autogpt.command_decorator import command
from autogpt.config import Config
from autogpt.logs import logger
from .decorators import sanitize_path_arg
logger = logging.getLogger(__name__)
ALLOWLIST_CONTROL = "allowlist"
DENYLIST_CONTROL = "denylist"

View File

@@ -7,6 +7,7 @@ COMMAND_CATEGORY_TITLE = "File Operations"
import contextlib
import hashlib
import logging
import os
import os.path
from pathlib import Path
@@ -15,12 +16,13 @@ from typing import Generator, Literal
from autogpt.agents.agent import Agent
from autogpt.agents.utils.exceptions import DuplicateOperationError
from autogpt.command_decorator import command
from autogpt.logs import logger
from autogpt.memory.vector import MemoryItem, VectorMemory
from .decorators import sanitize_path_arg
from .file_operations_utils import read_textual_file
logger = logging.getLogger(__name__)
Operation = Literal["write", "append", "delete"]

View File

@@ -1,4 +1,5 @@
import json
import logging
import os
import charset_normalizer
@@ -9,8 +10,8 @@ import yaml
from bs4 import BeautifulSoup
from pylatexenc.latex2text import LatexNodes2Text
from autogpt import logs
from autogpt.logs import logger
logger = logging.getLogger(__name__)
class ParserStrategy:
@@ -97,7 +98,7 @@ class LaTeXParser(ParserStrategy):
class FileContext:
def __init__(self, parser: ParserStrategy, logger: logs.Logger):
def __init__(self, parser: ParserStrategy, logger: logging.Logger):
self.parser = parser
self.logger = logger
@@ -144,7 +145,7 @@ def is_file_binary_fn(file_path: str):
return False
def read_textual_file(file_path: str, logger: logs.Logger) -> str:
def read_textual_file(file_path: str, logger: logging.Logger) -> str:
if not os.path.isfile(file_path):
raise FileNotFoundError(
f"read_file {file_path} failed: no such file or directory"

View File

@@ -5,6 +5,7 @@ COMMAND_CATEGORY_TITLE = "Text to Image"
import io
import json
import logging
import time
import uuid
from base64 import b64decode
@@ -15,7 +16,8 @@ from PIL import Image
from autogpt.agents.agent import Agent
from autogpt.command_decorator import command
from autogpt.logs import logger
logger = logging.getLogger(__name__)
@command(

View File

@@ -5,9 +5,12 @@ from __future__ import annotations
COMMAND_CATEGORY = "system"
COMMAND_CATEGORY_TITLE = "System"
import logging
from autogpt.agents.agent import Agent
from autogpt.command_decorator import command
from autogpt.logs import logger
logger = logging.getLogger(__name__)
@command(
@@ -31,5 +34,5 @@ def task_complete(reason: str, agent: Agent) -> None:
A result string from create chat completion. A list of suggestions to
improve the code.
"""
logger.info(title="Shutting down...\n", message=reason)
logger.info(reason, extra={"title": "Shutting down...\n"})
quit()

View File

@@ -40,11 +40,12 @@ if TYPE_CHECKING:
from autogpt.agents.utils.exceptions import CommandExecutionError
from autogpt.command_decorator import command
from autogpt.llm.utils import count_string_tokens
from autogpt.logs import logger
from autogpt.memory.vector import MemoryItem, get_memory
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
from autogpt.url_utils.validators import validate_url
logger = logging.getLogger(__name__)
FILE_DIR = Path(__file__).parent.parent
TOKENS_TO_TRIGGER_SUMMARY = 50
LINKS_TO_RETURN = 20

View File

@@ -2,11 +2,14 @@
"""
A module that contains the PromptConfig class object that contains the configuration
"""
import logging
import yaml
from colorama import Fore
from autogpt import utils
from autogpt.logs import logger
from autogpt.logs.helpers import request_user_double_check
logger = logging.getLogger(__name__)
class PromptConfig:
@@ -35,8 +38,8 @@ class PromptConfig:
# Validate file
(validated, message) = utils.validate_yaml_file(prompt_settings_file)
if not validated:
logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
logger.double_check()
logger.error(message, extra={"title": "FAILED FILE VALIDATION"})
request_user_double_check()
exit(1)
with open(prompt_settings_file, encoding="utf-8") as file:

View File

@@ -32,7 +32,7 @@ when you clone it.
:star2: **This is the reference application I'm working with for now** :star2:
The first app is a straight CLI application. I have not done anything yet to port all the friendly display stuff from the `logger.typewriter_log` logic.
The first app is a straight CLI application. I have not done anything yet to port all the friendly display stuff from the ~~`logger.typewriter_log`~~`user_friendly_output` logic.
- [Entry Point](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_app/cli.py)
- [Client Application](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_app/main.py)

View File

@@ -1,13 +1,13 @@
import logging
from pathlib import Path
from agent_protocol import StepHandler, StepResult
from colorama import Fore
from autogpt.agents import Agent
from autogpt.app.main import UserFeedback
from autogpt.commands import COMMAND_CATEGORIES
from autogpt.config import AIConfig, ConfigBuilder
from autogpt.logs import logger
from autogpt.logs.helpers import user_friendly_output
from autogpt.memory.vector import get_memory
from autogpt.models.command_registry import CommandRegistry
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
@@ -64,7 +64,9 @@ async def interaction_step(
if command_name is not None:
result = agent.execute(command_name, command_args, user_input)
if result is None:
logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command")
user_friendly_output(
title="SYSTEM:", message="Unable to execute command", level=logging.WARN
)
return
next_command_name, next_command_args, assistant_reply_dict = agent.think()

View File

@@ -1,13 +1,15 @@
"""Utilities for the json_fixes package."""
import ast
import json
import logging
import os.path
from typing import Any, Literal
from jsonschema import Draft7Validator
from autogpt.config import Config
from autogpt.logs import logger
logger = logging.getLogger(__name__)
LLM_DEFAULT_RESPONSE_FORMAT = "llm_response_format_1"

View File

@@ -1,14 +1,16 @@
from __future__ import annotations
import logging
from typing import List, Optional
import openai
from openai import Model
from autogpt.llm.base import CompletionModelInfo
from autogpt.logs import logger
from autogpt.singleton import Singleton
logger = logging.getLogger(__name__)
class ApiManager(metaclass=Singleton):
def __init__(self):

View File

@@ -1,6 +1,7 @@
from __future__ import annotations
import functools
import logging
import time
from dataclasses import dataclass
from typing import Callable, List, Optional
@@ -19,9 +20,11 @@ from autogpt.llm.base import (
TextModelInfo,
TText,
)
from autogpt.logs import logger
from autogpt.logs.helpers import request_user_double_check
from autogpt.models.command_registry import CommandRegistry
logger = logging.getLogger(__name__)
OPEN_AI_CHAT_MODELS = {
info.name: info
for info in [
@@ -197,7 +200,7 @@ def retry_api(
error_msg = error_messages[type(e)]
logger.warn(error_msg)
if not user_warned:
logger.double_check(api_key_error_msg)
request_user_double_check(api_key_error_msg)
logger.debug(f"Status: {e.http_status}")
logger.debug(f"Response body: {e.json_body}")
logger.debug(f"Response headers: {e.headers}")

View File

@@ -1,12 +1,14 @@
"""Functions for counting the number of tokens in a message or string."""
from __future__ import annotations
import logging
from typing import List, overload
import tiktoken
from autogpt.llm.base import Message
from autogpt.logs import logger
logger = logging.getLogger(__name__)
@overload

View File

@@ -1,5 +1,4 @@
from .formatters import AutoGptFormatter, JsonFormatter, remove_color_codes
from .handlers import ConsoleHandler, JsonFileHandler, TypingConsoleHandler
from .helpers import log_json, user_friendly_output
from .log_cycle import (
CURRENT_CONTEXT_FILE_NAME,
FULL_MESSAGE_HISTORY_FILE_NAME,
@@ -11,4 +10,3 @@ from .log_cycle import (
USER_INPUT_FILE_NAME,
LogCycleHandler,
)
from .logger import Logger, logger

110
autogpt/logs/config.py Normal file
View File

@@ -0,0 +1,110 @@
"""Logging module for Auto-GPT."""
from __future__ import annotations
import logging
import sys
from pathlib import Path
from typing import TYPE_CHECKING
from auto_gpt_plugin_template import AutoGPTPluginTemplate
if TYPE_CHECKING:
from autogpt.config import Config
from .formatters import AutoGptFormatter
from .handlers import TTSHandler, TypingConsoleHandler
LOG_DIR = Path(__file__).parent.parent.parent / "logs"
LOG_FILE = "activity.log"
DEBUG_LOG_FILE = "debug.log"
ERROR_LOG_FILE = "error.log"
SIMPLE_LOG_FORMAT = "%(asctime)s %(levelname)s %(title)s%(message)s"
DEBUG_LOG_FORMAT = (
"%(asctime)s.%(msecs)03d %(levelname)s %(filename)s:%(lineno)d"
" %(title)s%(message)s"
)
USER_FRIENDLY_OUTPUT_LOGGER = "USER_FRIENDLY_OUTPUT"
_chat_plugins: list[AutoGPTPluginTemplate] = []
def configure_logging(config: Config, log_dir: Path = LOG_DIR) -> None:
    """Configure the native logging module.

    Sets up:
    1. The root logger, with a console handler plus file handlers for
       ``activity.log``, ``error.log`` and (in debug mode) ``debug.log``.
    2. The user-friendly output logger, which simulates typing on the console
       and speaks messages through TTS when configured.
    3. The JSON logger used by ``log_json``.

    Args:
        config: Application config; ``debug_mode`` and ``plain_output`` control
            verbosity and whether typing simulation is used.
        log_dir: Directory for log files; created if it does not exist.
    """
    # create log directory if it doesn't exist
    if not log_dir.exists():
        log_dir.mkdir()

    # Debug mode raises verbosity and switches to the more detailed format.
    log_level = logging.DEBUG if config.debug_mode else logging.INFO
    log_format = DEBUG_LOG_FORMAT if config.debug_mode else SIMPLE_LOG_FORMAT
    console_formatter = AutoGptFormatter(log_format)

    # Console output handler
    console_handler = logging.StreamHandler(stream=sys.stdout)
    console_handler.setLevel(log_level)
    console_handler.setFormatter(console_formatter)

    # INFO log file handler
    activity_log_handler = logging.FileHandler(log_dir / LOG_FILE, "a", "utf-8")
    activity_log_handler.setLevel(logging.INFO)
    activity_log_handler.setFormatter(AutoGptFormatter(SIMPLE_LOG_FORMAT))

    if config.debug_mode:
        # DEBUG log file handler
        debug_log_handler = logging.FileHandler(log_dir / DEBUG_LOG_FILE, "a", "utf-8")
        debug_log_handler.setLevel(logging.DEBUG)
        debug_log_handler.setFormatter(AutoGptFormatter(DEBUG_LOG_FORMAT))

    # ERROR log file handler (uses the detailed format for easier debugging)
    error_log_handler = logging.FileHandler(log_dir / ERROR_LOG_FILE, "a", "utf-8")
    error_log_handler.setLevel(logging.ERROR)
    error_log_handler.setFormatter(AutoGptFormatter(DEBUG_LOG_FORMAT))

    # Configure the root logger
    logging.basicConfig(
        format=log_format,
        level=log_level,
        handlers=(
            [console_handler, activity_log_handler, error_log_handler]
            + ([debug_log_handler] if config.debug_mode else [])
        ),
    )

    ## Set up user-friendly loggers

    # Console output handler which simulates typing
    typing_console_handler = TypingConsoleHandler(stream=sys.stdout)
    typing_console_handler.setLevel(logging.INFO)
    typing_console_handler.setFormatter(console_formatter)

    # plain_output disables the typing simulation in favor of plain console output
    user_friendly_output_logger = logging.getLogger(USER_FRIENDLY_OUTPUT_LOGGER)
    user_friendly_output_logger.addHandler(
        typing_console_handler if not config.plain_output else console_handler
    )
    user_friendly_output_logger.addHandler(TTSHandler(config))
    user_friendly_output_logger.addHandler(activity_log_handler)
    user_friendly_output_logger.addHandler(error_log_handler)
    user_friendly_output_logger.setLevel(logging.DEBUG)

    # JSON logger: file-only (no console); log_json() attaches a per-call
    # JsonFileHandler for the actual JSON payload.
    json_logger = logging.getLogger("JSON_LOGGER")
    json_logger.addHandler(activity_log_handler)
    json_logger.addHandler(error_log_handler)
    json_logger.setLevel(logging.DEBUG)
def configure_chat_plugins(config: Config) -> None:
    """Configure chat plugins for use by the logging module."""
    if not config.chat_messages_enabled:
        return

    log = logging.getLogger(__name__)

    # Start from a clean slate of registered reporting plugins
    if _chat_plugins:
        _chat_plugins.clear()

    # Register every plugin that is capable of reporting messages
    for plugin in config.plugins:
        can_report = getattr(plugin, "can_handle_report", None)
        if can_report is not None and can_report():
            log.debug(f"Loaded plugin into logger: {plugin.__class__.__name__}")
            _chat_plugins.append(plugin)

View File

@@ -1,7 +1,8 @@
import logging
import re
from colorama import Style
from colorama import Fore, Style
from .utils import remove_color_codes
class AutoGptFormatter(logging.Formatter):
@@ -10,32 +11,46 @@ class AutoGptFormatter(logging.Formatter):
To use this formatter, make sure to pass 'color', 'title' as log extras.
"""
# level -> (level & text color, title color)
LEVEL_COLOR_MAP = {
logging.DEBUG: Fore.LIGHTBLACK_EX,
logging.INFO: Fore.BLUE,
logging.WARNING: Fore.YELLOW,
logging.ERROR: Fore.RED,
logging.CRITICAL: Fore.RED + Style.BRIGHT,
}
def format(self, record: logging.LogRecord) -> str:
if hasattr(record, "color"):
record.title_color = (
getattr(record, "color")
+ getattr(record, "title", "")
+ " "
+ Style.RESET_ALL
)
else:
record.title_color = getattr(record, "title", "")
# Make sure `msg` is a string
if not hasattr(record, "msg"):
record.msg = ""
elif not type(record.msg) == str:
record.msg = str(record.msg)
# Add this line to set 'title' to an empty string if it doesn't exist
record.title = getattr(record, "title", "")
# Strip color from the message to prevent color spoofing
if record.msg and not getattr(record, "preserve_color", False):
record.msg = remove_color_codes(record.msg)
# Determine default color based on error level
level_color = ""
if record.levelno in self.LEVEL_COLOR_MAP:
level_color = self.LEVEL_COLOR_MAP[record.levelno]
record.levelname = f"{level_color}{record.levelname}{Style.RESET_ALL}"
# Determine color for message
color = getattr(record, "color", level_color)
color_is_specified = hasattr(record, "color")
# Determine color for title
title = getattr(record, "title", "")
title_color = getattr(record, "title_color", level_color)
if title and title_color:
title = f"{title_color + Style.BRIGHT}{title}{Style.RESET_ALL}"
# Make sure record.title is set, and padded with a space if not empty
record.title = f"{title} " if title else ""
# Don't color INFO messages unless the color is explicitly specified.
if color and (record.levelno != logging.INFO or color_is_specified):
record.msg = f"{color}{record.msg}{Style.RESET_ALL}"
if hasattr(record, "msg"):
record.message_no_color = remove_color_codes(getattr(record, "msg"))
else:
record.message_no_color = ""
return super().format(record)
def remove_color_codes(s: str) -> str:
ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
return ansi_escape.sub("", s)
class JsonFormatter(logging.Formatter):
def format(self, record: logging.LogRecord):
return record.msg

View File

@@ -1,48 +1,77 @@
from __future__ import annotations
import json
import logging
import random
import re
import time
from pathlib import Path
from typing import TYPE_CHECKING
from autogpt.logs.utils import remove_color_codes
from autogpt.speech.say import say_text
class ConsoleHandler(logging.StreamHandler):
def emit(self, record: logging.LogRecord) -> None:
msg = self.format(record)
try:
print(msg)
except Exception:
self.handleError(record)
if TYPE_CHECKING:
from autogpt.config import Config
class TypingConsoleHandler(logging.StreamHandler):
"""Output stream to console using simulated typing"""
def emit(self, record: logging.LogRecord):
min_typing_speed = 0.05
max_typing_speed = 0.01
# Typing speed settings in WPS (Words Per Second)
MIN_WPS = 25
MAX_WPS = 100
def emit(self, record: logging.LogRecord) -> None:
min_typing_interval = 1 / TypingConsoleHandler.MAX_WPS
max_typing_interval = 1 / TypingConsoleHandler.MIN_WPS
msg = self.format(record)
try:
words = msg.split()
# Split without discarding whitespace
words = re.findall(r"\S+\s*", msg)
for i, word in enumerate(words):
print(word, end="", flush=True)
if i < len(words) - 1:
print(" ", end="", flush=True)
typing_speed = random.uniform(min_typing_speed, max_typing_speed)
time.sleep(typing_speed)
self.stream.write(word)
self.flush()
if i >= len(words) - 1:
break
interval = random.uniform(min_typing_interval, max_typing_interval)
# type faster after each word
min_typing_speed = min_typing_speed * 0.95
max_typing_speed = max_typing_speed * 0.95
print()
min_typing_interval = min_typing_interval * 0.95
max_typing_interval = max_typing_interval * 0.95
time.sleep(interval)
except Exception:
self.handleError(record)
class JsonFileHandler(logging.FileHandler):
def __init__(self, filename: str | Path, mode="a", encoding=None, delay=False):
super().__init__(filename, mode, encoding, delay)
class TTSHandler(logging.Handler):
"""Output messages to the configured TTS engine (if any)"""
def emit(self, record: logging.LogRecord):
json_data = json.loads(self.format(record))
def __init__(self, config: Config):
self.config = config
def format(self, record: logging.LogRecord) -> str:
if getattr(record, "title", ""):
msg = f"{getattr(record, 'title')} {record.msg}"
else:
msg = f"{record.msg}"
return remove_color_codes(msg)
def emit(self, record: logging.LogRecord) -> None:
if not self.config.speak_mode:
return
message = self.format(record)
say_text(message, self.config)
class JsonFileHandler(logging.FileHandler):
def format(self, record: logging.LogRecord) -> str:
record.json_data = json.loads(record.getMessage())
return json.dumps(getattr(record, "json_data"), ensure_ascii=False, indent=4)
def emit(self, record: logging.LogRecord) -> None:
with open(self.baseFilename, "w", encoding="utf-8") as f:
json.dump(json_data, f, ensure_ascii=False, indent=4)
f.write(self.format(record))

70
autogpt/logs/helpers.py Normal file
View File

@@ -0,0 +1,70 @@
import logging
from pathlib import Path
from typing import Any, Optional
from colorama import Fore
from .config import USER_FRIENDLY_OUTPUT_LOGGER, _chat_plugins
from .handlers import JsonFileHandler
def user_friendly_output(
    message: str,
    level: int = logging.INFO,
    title: str = "",
    title_color: str = "",
) -> None:
    """Outputs a message to the user in a user-friendly way.

    This function outputs on up to two channels:
    1. The console, in typewriter style
    2. Text To Speech, if configured
    """
    output_logger = logging.getLogger(USER_FRIENDLY_OUTPUT_LOGGER)

    # Relay the message to any registered chat plugins first
    for plugin in _chat_plugins:
        plugin.report(f"{title}: {message}")

    output_logger.log(
        level, message, extra={"title": title, "title_color": title_color}
    )
def print_attribute(
    title: str, value: Any, title_color: str = Fore.GREEN, value_color: str = ""
) -> None:
    """Log a ``title: value`` attribute line through the root logger."""
    # Normalize the title so it ends in exactly one colon
    normalized_title = f"{title.rstrip(':')}:"
    extra_fields = {
        "title": normalized_title,
        "title_color": title_color,
        "color": value_color,
    }
    logging.getLogger().info(str(value), extra=extra_fields)
def request_user_double_check(additionalText: Optional[str] = None) -> None:
    """Warn the user to double-check their configuration.

    Falls back to a generic setup-help message when no text is given.
    """
    default_text = (
        "Please ensure you've setup and configured everything"
        " correctly. Read https://github.com/Torantulino/Auto-GPT#readme to "
        "double check. You can also create a github issue or join the discord"
        " and ask there!"
    )
    user_friendly_output(
        additionalText or default_text,
        level=logging.WARN,
        title="DOUBLE CHECK CONFIGURATION",
    )
def log_json(data: Any, file_name: str | Path, log_dir: Path) -> None:
    """Write *data* as JSON to ``log_dir / file_name`` via the JSON logger.

    Args:
        data: The (JSON-serializable) payload to log.
        file_name: Name of the target file inside ``log_dir``.
        log_dir: Directory in which the JSON file is created.
    """
    json_logger = logging.getLogger("JSON_LOGGER")

    # Create a handler for the target JSON file
    json_data_handler = JsonFileHandler(log_dir / file_name)

    # Log the JSON data using the custom file handler. Detach and close the
    # temporary handler even if logging raises, so the shared JSON_LOGGER
    # doesn't accumulate stale handlers or leak open file handles.
    json_logger.addHandler(json_data_handler)
    try:
        json_logger.debug(data)
    finally:
        json_logger.removeHandler(json_data_handler)
        json_data_handler.close()

View File

@@ -3,7 +3,9 @@ import os
from pathlib import Path
from typing import Any, Dict, Union
from .logger import logger
from autogpt.logs.helpers import log_json
from .config import LOG_DIR
DEFAULT_PREFIX = "agent"
FULL_MESSAGE_HISTORY_FILE_NAME = "full_message_history.json"
@@ -31,7 +33,7 @@ class LogCycleHandler:
ai_name_short = self.get_agent_short_name(ai_name)
outer_folder_name = f"{created_at}_{ai_name_short}"
outer_folder_path = logger.log_dir / "DEBUG" / outer_folder_name
outer_folder_path = LOG_DIR / "DEBUG" / outer_folder_name
if not outer_folder_path.exists():
outer_folder_path.mkdir(parents=True)
@@ -76,5 +78,5 @@ class LogCycleHandler:
json_data = json.dumps(data, ensure_ascii=False, indent=4)
log_file_path = cycle_log_dir / f"{self.log_count_within_cycle}_{file_name}"
logger.log_json(json_data, log_file_path)
log_json(json_data, log_file_path, LOG_DIR)
self.log_count_within_cycle += 1

View File

@@ -1,190 +0,0 @@
"""Logging module for Auto-GPT."""
from __future__ import annotations
import logging
from pathlib import Path
from typing import TYPE_CHECKING, Any, Optional
from colorama import Fore
if TYPE_CHECKING:
from autogpt.config import Config
from autogpt.singleton import Singleton
from .formatters import AutoGptFormatter, JsonFormatter
from .handlers import ConsoleHandler, JsonFileHandler, TypingConsoleHandler
class Logger(metaclass=Singleton):
    """Singleton logger that handles titles in different colors.

    Outputs logs to the console, activity.log, and error.log.
    For the console handler: simulates typing.
    """

    def __init__(self):
        # create log directory if it doesn't exist
        # TODO: use workdir from config
        self.log_dir = Path(__file__).parent.parent.parent / "logs"
        if not self.log_dir.exists():
            self.log_dir.mkdir()
        log_file = "activity.log"
        error_file = "error.log"

        console_formatter = AutoGptFormatter("%(title_color)s %(message)s")

        # Create a handler for console which simulates typing
        self.typing_console_handler = TypingConsoleHandler()
        self.typing_console_handler.setLevel(logging.INFO)
        self.typing_console_handler.setFormatter(console_formatter)

        # Create a handler for console without typing simulation
        self.console_handler = ConsoleHandler()
        self.console_handler.setLevel(logging.DEBUG)
        self.console_handler.setFormatter(console_formatter)

        # Info handler in activity.log
        self.file_handler = logging.FileHandler(self.log_dir / log_file, "a", "utf-8")
        self.file_handler.setLevel(logging.DEBUG)
        info_formatter = AutoGptFormatter(
            "%(asctime)s %(levelname)s %(title)s %(message_no_color)s"
        )
        self.file_handler.setFormatter(info_formatter)

        # Error handler error.log
        error_handler = logging.FileHandler(self.log_dir / error_file, "a", "utf-8")
        error_handler.setLevel(logging.ERROR)
        error_formatter = AutoGptFormatter(
            "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s"
            " %(message_no_color)s"
        )
        error_handler.setFormatter(error_formatter)

        # "TYPER": console (typing simulation) + activity.log + error.log
        self.typing_logger = logging.getLogger("TYPER")
        self.typing_logger.addHandler(self.typing_console_handler)
        self.typing_logger.addHandler(self.file_handler)
        self.typing_logger.addHandler(error_handler)
        self.typing_logger.setLevel(logging.DEBUG)

        # "LOGGER": plain console + activity.log + error.log
        self.logger = logging.getLogger("LOGGER")
        self.logger.addHandler(self.console_handler)
        self.logger.addHandler(self.file_handler)
        self.logger.addHandler(error_handler)
        self.logger.setLevel(logging.DEBUG)

        # "JSON_LOGGER": file-only; log_json() attaches a per-call JSON handler
        self.json_logger = logging.getLogger("JSON_LOGGER")
        self.json_logger.addHandler(self.file_handler)
        self.json_logger.addHandler(error_handler)
        self.json_logger.setLevel(logging.DEBUG)

        # Application config; set via the `config` property setter.
        self._config: Optional[Config] = None
        # Plugins that want to receive reported messages (see typewriter_log).
        self.chat_plugins = []

    @property
    def config(self) -> Config | None:
        return self._config

    @config.setter
    def config(self, config: Config):
        self._config = config
        # plain_output swaps the typing-simulation console handler for a
        # regular one on the TYPER logger.
        if config.plain_output:
            self.typing_logger.removeHandler(self.typing_console_handler)
            self.typing_logger.addHandler(self.console_handler)

    def typewriter_log(
        self,
        title: str = "",
        title_color: str = "",
        content: str = "",
        speak_text: bool = False,
        level: int = logging.INFO,
    ) -> None:
        """Log `content` with a colored `title` via the typing-simulation logger.

        Optionally speaks the message (TTS) and reports it to chat plugins.
        """
        # Imported here to avoid a circular import at module load time.
        from autogpt.speech import say_text

        if speak_text and self.config and self.config.speak_mode:
            say_text(f"{title}. {content}", self.config)

        for plugin in self.chat_plugins:
            plugin.report(f"{title}. {content}")

        if content:
            # A list of strings is flattened into a single space-joined string.
            if isinstance(content, list):
                content = " ".join(content)
        else:
            content = ""

        self.typing_logger.log(
            level, content, extra={"title": title, "color": title_color}
        )

    def debug(
        self,
        message: str,
        title: str = "",
        title_color: str = "",
    ) -> None:
        """Log `message` at DEBUG level via the plain logger."""
        self._log(title, title_color, message, logging.DEBUG)

    def info(
        self,
        message: str,
        title: str = "",
        title_color: str = "",
    ) -> None:
        """Log `message` at INFO level via the plain logger."""
        self._log(title, title_color, message, logging.INFO)

    def warn(
        self,
        message: str,
        title: str = "",
        title_color: str = "",
    ) -> None:
        """Log `message` at WARN level via the plain logger."""
        self._log(title, title_color, message, logging.WARN)

    def error(self, title: str, message: str = "") -> None:
        """Log `message` at ERROR level with a red title."""
        self._log(title, Fore.RED, message, logging.ERROR)

    def _log(
        self,
        title: str = "",
        title_color: str = "",
        message: str = "",
        level: int = logging.INFO,
    ) -> None:
        # NOTE: unlike typewriter_log, an empty `message` is passed through
        # as-is rather than normalized to "".
        if message:
            if isinstance(message, list):
                message = " ".join(message)
        self.logger.log(
            level, message, extra={"title": str(title), "color": str(title_color)}
        )

    def set_level(self, level: logging._Level) -> None:
        # NOTE(review): `logging._Level` is a private typeshed alias; harmless
        # here because annotations are lazy (`from __future__ import annotations`).
        self.logger.setLevel(level)
        self.typing_logger.setLevel(level)

    def double_check(self, additionalText: Optional[str] = None) -> None:
        """Warn the user to double-check their configuration."""
        if not additionalText:
            additionalText = (
                "Please ensure you've setup and configured everything"
                " correctly. Read https://github.com/Torantulino/Auto-GPT#readme to "
                "double check. You can also create a github issue or join the discord"
                " and ask there!"
            )

        self.typewriter_log("DOUBLE CHECK CONFIGURATION", Fore.YELLOW, additionalText)

    def log_json(self, data: Any, file_name: str | Path) -> None:
        # Create a handler for JSON files
        json_file_path = self.log_dir / file_name
        json_data_handler = JsonFileHandler(json_file_path)
        json_data_handler.setFormatter(JsonFormatter())

        # Log the JSON data using the custom file handler
        self.json_logger.addHandler(json_data_handler)
        self.json_logger.debug(data)
        self.json_logger.removeHandler(json_data_handler)


# Module-level singleton instance used throughout the codebase.
logger = Logger()

5
autogpt/logs/utils.py Normal file
View File

@@ -0,0 +1,5 @@
import re
# Pre-compiled ANSI escape-sequence pattern (CSI sequences and two-byte
# escapes), hoisted to module level so repeated calls skip pattern lookup.
_ANSI_ESCAPE_PATTERN = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")


def remove_color_codes(s: str) -> str:
    """Return *s* with all ANSI color/control escape sequences stripped."""
    return _ANSI_ESCAPE_PATTERN.sub("", s)

View File

@@ -2,6 +2,7 @@ from __future__ import annotations
import copy
import json
import logging
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterator, Optional
@@ -17,12 +18,9 @@ from autogpt.llm.utils import (
count_string_tokens,
create_chat_completion,
)
from autogpt.logs import (
PROMPT_SUMMARY_FILE_NAME,
SUMMARY_FILE_NAME,
LogCycleHandler,
logger,
)
from autogpt.logs import PROMPT_SUMMARY_FILE_NAME, SUMMARY_FILE_NAME, LogCycleHandler
logger = logging.getLogger(__name__)
@dataclass

View File

@@ -1,5 +1,6 @@
import logging
from autogpt.config import Config
from autogpt.logs import logger
from .memory_item import MemoryItem, MemoryItemRelevance
from .providers.base import VectorMemoryProvider as VectorMemory

View File

@@ -2,6 +2,7 @@ from __future__ import annotations
import dataclasses
import json
import logging
from typing import Literal
import ftfy
@@ -10,11 +11,12 @@ import numpy as np
from autogpt.config import Config
from autogpt.llm import Message
from autogpt.llm.utils import count_string_tokens
from autogpt.logs import logger
from autogpt.processing.text import chunk_content, split_text, summarize_text
from .utils import Embedding, get_embedding
logger = logging.getLogger(__name__)
MemoryDocType = Literal["webpage", "text_file", "code_file", "agent_history"]

View File

@@ -1,15 +1,17 @@
import abc
import functools
import logging
from typing import MutableSet, Sequence
import numpy as np
from autogpt.config.config import Config
from autogpt.logs import logger
from .. import MemoryItem, MemoryItemRelevance
from ..utils import Embedding, get_embedding
logger = logging.getLogger(__name__)
class VectorMemoryProvider(MutableSet[MemoryItem]):
@abc.abstractmethod

View File

@@ -1,16 +1,18 @@
from __future__ import annotations
import logging
from pathlib import Path
from typing import Iterator
import orjson
from autogpt.config import Config
from autogpt.logs import logger
from ..memory_item import MemoryItem
from .base import VectorMemoryProvider
logger = logging.getLogger(__name__)
class JSONFileMemory(VectorMemoryProvider):
"""Memory backend that stores memories in a JSON file"""

View File

@@ -1,3 +1,4 @@
import logging
from contextlib import suppress
from typing import Any, overload
@@ -6,7 +7,8 @@ import numpy as np
from autogpt.config import Config
from autogpt.llm.base import TText
from autogpt.llm.providers import openai as iopenai
from autogpt.logs import logger
logger = logging.getLogger(__name__)
Embedding = list[np.float32] | np.ndarray[Any, np.dtype[np.float32]]
"""Embedding vector"""

View File

@@ -2,6 +2,7 @@ from __future__ import annotations
import importlib
import inspect
import logging
from dataclasses import dataclass, field
from types import ModuleType
from typing import TYPE_CHECKING, Any
@@ -9,10 +10,12 @@ from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from autogpt.config import Config
from autogpt.command_decorator import AUTO_GPT_COMMAND_IDENTIFIER
from autogpt.logs import logger
from autogpt.models.command import Command
logger = logging.getLogger(__name__)
class CommandRegistry:
"""

View File

@@ -4,6 +4,7 @@ from __future__ import annotations
import importlib.util
import inspect
import json
import logging
import os
import sys
import zipfile
@@ -20,9 +21,10 @@ from openapi_python_client.config import Config as OpenAPIConfig
if TYPE_CHECKING:
from autogpt.config import Config
from autogpt.logs import logger
from autogpt.models.base_open_ai_plugin import BaseOpenAIPlugin
logger = logging.getLogger(__name__)
def inspect_zip_for_modules(zip_path: str, debug: bool = False) -> list[str]:
"""

View File

@@ -1,14 +1,16 @@
from __future__ import annotations
import logging
from pathlib import Path
from typing import Union
import yaml
from pydantic import BaseModel
from autogpt.logs import logger
from autogpt.plugins.plugin_config import PluginConfig
logger = logging.getLogger(__name__)
class PluginsConfig(BaseModel):
"""Class for holding configuration of all plugins"""

View File

@@ -1,6 +1,7 @@
"""Text processing functions"""
import logging
from math import ceil
from typing import Optional
from typing import Iterator, Optional, Sequence
import spacy
import tiktoken
@@ -9,10 +10,11 @@ from autogpt.config import Config
from autogpt.llm.base import ChatSequence
from autogpt.llm.providers.openai import OPEN_AI_MODELS
from autogpt.llm.utils import count_string_tokens, create_chat_completion
from autogpt.logs import logger
logger = logging.getLogger(__name__)
def batch(iterable, max_batch_length: int, overlap: int = 0):
def batch(iterable: Sequence, max_batch_length: int, overlap: int = 0):
"""Batch data from iterable into slices of length N. The last batch may be shorter."""
# batched('ABCDEFG', 3) --> ABC DEF G
if max_batch_length < 1:
@@ -41,7 +43,7 @@ def chunk_content(
for_model: str,
max_chunk_length: Optional[int] = None,
with_overlap=True,
):
) -> Iterator[tuple[str, int]]:
"""Split content into chunks of approximately equal token length."""
MAX_OVERLAP = 200 # limit overlap to save tokens
@@ -156,7 +158,7 @@ def split_text(
config: Config,
with_overlap=True,
max_chunk_length: Optional[int] = None,
):
) -> Iterator[tuple[str, int]]:
"""Split text into chunks of sentences, with each chunk not exceeding the maximum length
Args:

View File

@@ -1,6 +1,7 @@
"""ElevenLabs speech module"""
from __future__ import annotations
import logging
import os
from typing import TYPE_CHECKING
@@ -11,6 +12,8 @@ if TYPE_CHECKING:
from autogpt.config import Config
from .base import VoiceBase
logger = logging.getLogger(__name__)
PLACEHOLDERS = {"your-voice-id"}
@@ -72,8 +75,6 @@ class ElevenLabsSpeech(VoiceBase):
Returns:
bool: True if the request was successful, False otherwise
"""
from autogpt.logs import logger
tts_url = (
f"https://api.elevenlabs.io/v1/text-to-speech/{self._voices[voice_index]}"
)

View File

@@ -1,10 +1,15 @@
""" GTTS Voice. """
from __future__ import annotations
import os
from typing import TYPE_CHECKING
import gtts
from playsound import playsound
from autogpt.config import Config
if TYPE_CHECKING:
from autogpt.config import Config
from autogpt.speech.base import VoiceBase

View File

@@ -1,7 +1,12 @@
""" MacOS TTS Voice. """
import os
from __future__ import annotations
import os
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from autogpt.config import Config
from autogpt.config import Config
from autogpt.speech.base import VoiceBase

View File

@@ -1,12 +1,19 @@
from __future__ import annotations
import logging
import os
from typing import TYPE_CHECKING
import requests
from playsound import playsound
from autogpt.config import Config
if TYPE_CHECKING:
from autogpt.config import Config
from autogpt.speech.base import VoiceBase
logger = logging.getLogger(__name__)
class StreamElementsSpeech(VoiceBase):
"""Streamelements speech module for autogpt"""
@@ -36,7 +43,7 @@ class StreamElementsSpeech(VoiceBase):
os.remove("speech.mp3")
return True
else:
logging.error(
logger.error(
"Request failed with status code: %s, response content: %s",
response.status_code,
response.content,

View File

@@ -9,11 +9,13 @@ agent.
"""
from __future__ import annotations
import logging
from pathlib import Path
from typing import Optional
from autogpt.config import Config
from autogpt.logs import logger
logger = logging.getLogger(__name__)
class Workspace:

View File

@@ -1,3 +1,4 @@
import logging
import os
import subprocess
import sys
@@ -5,7 +6,7 @@ import zipfile
from glob import glob
from pathlib import Path
from autogpt.logs import logger
logger = logging.getLogger(__name__)
def install_plugin_dependencies():

View File

@@ -9,7 +9,7 @@ from pytest_mock import MockerFixture
from autogpt.agents import Agent
from autogpt.config import AIConfig, Config, ConfigBuilder
from autogpt.llm.api_manager import ApiManager
from autogpt.logs import logger
from autogpt.logs.config import configure_logging
from autogpt.memory.vector import get_memory
from autogpt.models.command_registry import CommandRegistry
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
@@ -54,12 +54,13 @@ def config(
config.workspace_path = workspace.root
# HACK: this is necessary to ensure PLAIN_OUTPUT takes effect
logger.config = config
config.plugins_dir = "tests/unit/data/test_plugins"
config.plugins_config_file = temp_plugins_config_file
# HACK: this is necessary to ensure PLAIN_OUTPUT takes effect
config.plain_output = True
configure_logging(config, Path(__file__).parent / "logs")
# avoid circular dependency
from autogpt.plugins.plugins_config import PluginsConfig

View File

@@ -2,7 +2,7 @@ from unittest.mock import patch
import pytest
from autogpt.app.setup import generate_aiconfig_automatic, prompt_user
from autogpt.app.setup import generate_aiconfig_automatic, interactive_ai_config_setup
from autogpt.config.ai_config import AIConfig
@@ -11,7 +11,7 @@ from autogpt.config.ai_config import AIConfig
def test_generate_aiconfig_automatic_default(patched_api_requestor, config):
user_inputs = [""]
with patch("autogpt.app.utils.session.prompt", side_effect=user_inputs):
ai_config = prompt_user(config)
ai_config = interactive_ai_config_setup(config)
assert isinstance(ai_config, AIConfig)
assert ai_config.ai_name is not None
@@ -44,7 +44,7 @@ def test_generate_aiconfig_automatic_fallback(patched_api_requestor, config):
"",
]
with patch("autogpt.app.utils.session.prompt", side_effect=user_inputs):
ai_config = prompt_user(config)
ai_config = interactive_ai_config_setup(config)
assert isinstance(ai_config, AIConfig)
assert ai_config.ai_name == "Chef-GPT"
@@ -65,7 +65,7 @@ def test_prompt_user_manual_mode(patched_api_requestor, config):
"",
]
with patch("autogpt.app.utils.session.prompt", side_effect=user_inputs):
ai_config = prompt_user(config)
ai_config = interactive_ai_config_setup(config)
assert isinstance(ai_config, AIConfig)
assert ai_config.ai_name == "Chef-GPT"

View File

@@ -1,6 +1,6 @@
import pytest
from autogpt.logs import remove_color_codes
from autogpt.logs.utils import remove_color_codes
@pytest.mark.parametrize(

View File

@@ -1,4 +1,5 @@
import json
import logging
import tempfile
from unittest import TestCase
from xml.etree import ElementTree
@@ -8,7 +9,8 @@ import yaml
from bs4 import BeautifulSoup
from autogpt.commands.file_operations_utils import is_file_binary_fn, read_textual_file
from autogpt.logs import logger
logger = logging.getLogger(__name__)
plain_text_str = "Hello, world!"