Mirror of https://github.com/aljazceru/Auto-GPT.git
Synced 2025-12-18 22:44:21 +01:00

Restructure logs.py into a module; include log_cycle (#4921)

* Consolidate all logging stuff into one module
* Merge import statement for `logs` and `logs.log_cycle`

---------

Co-authored-by: James Collins <collijk@uw.edu>
committed by GitHub
parent bfdfeff1b3
commit e8b6676b22

.gitignore (vendored), 2 changes

@@ -12,7 +12,7 @@ last_run_ai_settings.yaml
 auto-gpt.json
 log.txt
 log-ingestion.txt
-logs
+/logs
 *.log
 *.mp3
 mem.sqlite3

@@ -12,13 +12,15 @@ from autogpt.json_utils.utilities import extract_json_from_response, validate_js
 from autogpt.llm.chat import chat_with_ai
 from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS
 from autogpt.llm.utils import count_string_tokens
-from autogpt.log_cycle.log_cycle import (
+from autogpt.logs import (
     FULL_MESSAGE_HISTORY_FILE_NAME,
     NEXT_ACTION_FILE_NAME,
     USER_INPUT_FILE_NAME,
     LogCycleHandler,
+    logger,
+    print_assistant_thoughts,
+    remove_ansi_escape,
 )
-from autogpt.logs import logger, print_assistant_thoughts, remove_ansi_escape
 from autogpt.memory.message_history import MessageHistory
 from autogpt.memory.vector import VectorMemory
 from autogpt.models.command_registry import CommandRegistry

@@ -12,8 +12,7 @@ from autogpt.config import Config
 from autogpt.llm.api_manager import ApiManager
 from autogpt.llm.base import ChatSequence, Message
 from autogpt.llm.utils import count_message_tokens, create_chat_completion
-from autogpt.log_cycle.log_cycle import CURRENT_CONTEXT_FILE_NAME
-from autogpt.logs import logger
+from autogpt.logs import CURRENT_CONTEXT_FILE_NAME, logger
 
 
 # TODO: Change debug from hardcode to argument

@@ -1,20 +0,0 @@
-import json
-import logging
-
-
-class JsonFileHandler(logging.FileHandler):
-    def __init__(self, filename, mode="a", encoding=None, delay=False):
-        super().__init__(filename, mode, encoding, delay)
-
-    def emit(self, record):
-        json_data = json.loads(self.format(record))
-        with open(self.baseFilename, "w", encoding="utf-8") as f:
-            json.dump(json_data, f, ensure_ascii=False, indent=4)
-
-
-import logging
-
-
-class JsonFormatter(logging.Formatter):
-    def format(self, record):
-        return record.msg

autogpt/logs/__init__.py (new file), 15 lines

@@ -0,0 +1,15 @@
+from .formatters import AutoGptFormatter, JsonFormatter, remove_color_codes
+from .handlers import ConsoleHandler, JsonFileHandler, TypingConsoleHandler
+from .log_cycle import (
+    CURRENT_CONTEXT_FILE_NAME,
+    FULL_MESSAGE_HISTORY_FILE_NAME,
+    NEXT_ACTION_FILE_NAME,
+    PROMPT_SUMMARY_FILE_NAME,
+    PROMPT_SUPERVISOR_FEEDBACK_FILE_NAME,
+    SUMMARY_FILE_NAME,
+    SUPERVISOR_FEEDBACK_FILE_NAME,
+    USER_INPUT_FILE_NAME,
+    LogCycleHandler,
+)
+from .logger import Logger, logger
+from .utils import print_assistant_thoughts, remove_ansi_escape
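
With the package __init__.py above re-exporting the handlers, formatters, log-cycle constants, and the logger singleton, callers need only a single import. A minimal sketch of the consolidated style (the import hunks earlier in the diff show the same pattern in real call sites; the snippet assumes the autogpt package from this commit is importable, and the logged message is made up):

    from colorama import Fore

    from autogpt.logs import FULL_MESSAGE_HISTORY_FILE_NAME, LogCycleHandler, logger

    # A single module now exposes the logger singleton, LogCycleHandler, and the
    # cycle-file-name constants that previously required a second import from
    # autogpt.log_cycle.log_cycle.
    logger.typewriter_log(
        "SETUP:", Fore.CYAN, f"cycle logs will include {FULL_MESSAGE_HISTORY_FILE_NAME}"
    )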

autogpt/logs/formatters.py (new file), 41 lines

@@ -0,0 +1,41 @@
+import logging
+import re
+
+from colorama import Style
+
+
+class AutoGptFormatter(logging.Formatter):
+    """
+    Allows to handle custom placeholders 'title_color' and 'message_no_color'.
+    To use this formatter, make sure to pass 'color', 'title' as log extras.
+    """
+
+    def format(self, record: logging.LogRecord) -> str:
+        if hasattr(record, "color"):
+            record.title_color = (
+                getattr(record, "color")
+                + getattr(record, "title", "")
+                + " "
+                + Style.RESET_ALL
+            )
+        else:
+            record.title_color = getattr(record, "title", "")
+
+        # Add this line to set 'title' to an empty string if it doesn't exist
+        record.title = getattr(record, "title", "")
+
+        if hasattr(record, "msg"):
+            record.message_no_color = remove_color_codes(getattr(record, "msg"))
+        else:
+            record.message_no_color = ""
+        return super().format(record)
+
+
+def remove_color_codes(s: str) -> str:
+    ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
+    return ansi_escape.sub("", s)
+
+
+class JsonFormatter(logging.Formatter):
+    def format(self, record: logging.LogRecord):
+        return record.msg
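
AutoGptFormatter depends on 'title' and 'color' being attached to each record as logging extras. A rough sketch of wiring it to a standard library logger follows; the logger name, format string, and example message are illustrative rather than taken from the diff, and it assumes the autogpt package from this commit plus colorama are installed:

    import logging

    from colorama import Fore

    from autogpt.logs import AutoGptFormatter, ConsoleHandler

    handler = ConsoleHandler()
    # AutoGptFormatter.format() fills in the synthetic 'title_color' and
    # 'message_no_color' fields before this format string is applied.
    handler.setFormatter(AutoGptFormatter("%(title_color)s %(message)s"))

    demo_logger = logging.getLogger("demo")
    demo_logger.setLevel(logging.INFO)
    demo_logger.addHandler(handler)

    # 'title' and 'color' are passed as extras, per the class docstring.
    demo_logger.info(
        "hello from the new formatter",
        extra={"title": "DEMO:", "color": Fore.GREEN},
    )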

autogpt/logs/handlers.py (new file), 47 lines

@@ -0,0 +1,47 @@
+import json
+import logging
+import random
+import time
+
+
+class ConsoleHandler(logging.StreamHandler):
+    def emit(self, record: logging.LogRecord) -> None:
+        msg = self.format(record)
+        try:
+            print(msg)
+        except Exception:
+            self.handleError(record)
+
+
+class TypingConsoleHandler(logging.StreamHandler):
+    """Output stream to console using simulated typing"""
+
+    def emit(self, record: logging.LogRecord):
+        min_typing_speed = 0.05
+        max_typing_speed = 0.01
+
+        msg = self.format(record)
+        try:
+            words = msg.split()
+            for i, word in enumerate(words):
+                print(word, end="", flush=True)
+                if i < len(words) - 1:
+                    print(" ", end="", flush=True)
+                typing_speed = random.uniform(min_typing_speed, max_typing_speed)
+                time.sleep(typing_speed)
+                # type faster after each word
+                min_typing_speed = min_typing_speed * 0.95
+                max_typing_speed = max_typing_speed * 0.95
+            print()
+        except Exception:
+            self.handleError(record)
+
+
+class JsonFileHandler(logging.FileHandler):
+    def __init__(self, filename: str, mode="a", encoding=None, delay=False):
+        super().__init__(filename, mode, encoding, delay)
+
+    def emit(self, record: logging.LogRecord):
+        json_data = json.loads(self.format(record))
+        with open(self.baseFilename, "w", encoding="utf-8") as f:
+            json.dump(json_data, f, ensure_ascii=False, indent=4)
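
JsonFileHandler rewrites its target file on every emit and expects the formatted record to be a JSON string it can parse, so pairing it with JsonFormatter (which simply returns record.msg) gives that behaviour. A small sketch under that assumption; the logger name and file name are placeholders, not from the diff:

    import json
    import logging

    from autogpt.logs import JsonFileHandler, JsonFormatter

    json_logger = logging.getLogger("json_demo")
    json_logger.setLevel(logging.DEBUG)

    # Each emit re-parses the message and rewrites the file with an indented
    # dump, so the file always contains only the most recent payload.
    handler = JsonFileHandler("cycle_snapshot.json")
    handler.setFormatter(JsonFormatter())
    json_logger.addHandler(handler)

    json_logger.debug(json.dumps({"cycle": 1, "status": "ok"}))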

@@ -2,7 +2,7 @@ import json
 import os
 from typing import Any, Dict, Union
 
-from autogpt.logs import logger
+from .logger import logger
 
 DEFAULT_PREFIX = "agent"
 FULL_MESSAGE_HISTORY_FILE_NAME = "full_message_history.json"

@@ -42,7 +42,7 @@ class LogCycleHandler:
 
         return outer_folder_path
 
-    def get_agent_short_name(self, ai_name):
+    def get_agent_short_name(self, ai_name: str) -> str:
         return ai_name[:15].rstrip() if ai_name else DEFAULT_PREFIX
 
     def create_inner_directory(self, outer_folder_path: str, cycle_count: int) -> str:

@@ -3,20 +3,18 @@ from __future__ import annotations
 
 import logging
 import os
-import random
-import re
-import time
-from logging import LogRecord
 from typing import TYPE_CHECKING, Any, Optional
 
-from colorama import Fore, Style
+from colorama import Fore
 
 if TYPE_CHECKING:
     from autogpt.config import Config
 
-from autogpt.log_cycle.json_handler import JsonFileHandler, JsonFormatter
 from autogpt.singleton import Singleton
 
+from .formatters import AutoGptFormatter, JsonFormatter
+from .handlers import ConsoleHandler, JsonFileHandler, TypingConsoleHandler
+
 
 class Logger(metaclass=Singleton):
     """

@@ -100,8 +98,13 @@ class Logger(metaclass=Singleton):
         self.typing_logger.addHandler(self.console_handler)
 
     def typewriter_log(
-        self, title="", title_color="", content="", speak_text=False, level=logging.INFO
-    ):
+        self,
+        title: str = "",
+        title_color: str = "",
+        content: str = "",
+        speak_text: bool = False,
+        level: int = logging.INFO,
+    ) -> None:
         from autogpt.speech import say_text
 
         if speak_text and self.config and self.config.speak_mode:
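
The reflowed typewriter_log signature keeps the existing positional call style working while making keyword calls and type checking cleaner. A hedged usage sketch (the messages are made up; speech output only happens when speak_text=True and the attached config enables speak_mode):

    from colorama import Fore

    from autogpt.logs import logger

    # Positional style, as used by print_assistant_thoughts() elsewhere in this diff.
    logger.typewriter_log("THOUGHTS:", Fore.YELLOW, "Inspect the new logs module next.")

    # Keyword style enabled by the expanded parameter list.
    logger.typewriter_log(
        title="WARNING:",
        title_color=Fore.RED,
        content="disk is nearly full",
        speak_text=False,
    )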

@@ -122,29 +125,29 @@ class Logger(metaclass=Singleton):
 
     def debug(
         self,
-        message,
-        title="",
-        title_color="",
-    ):
+        message: str,
+        title: str = "",
+        title_color: str = "",
+    ) -> None:
         self._log(title, title_color, message, logging.DEBUG)
 
     def info(
         self,
-        message,
-        title="",
-        title_color="",
-    ):
+        message: str,
+        title: str = "",
+        title_color: str = "",
+    ) -> None:
         self._log(title, title_color, message, logging.INFO)
 
     def warn(
         self,
-        message,
-        title="",
-        title_color="",
-    ):
+        message: str,
+        title: str = "",
+        title_color: str = "",
+    ) -> None:
         self._log(title, title_color, message, logging.WARN)
 
-    def error(self, title, message=""):
+    def error(self, title: str, message: str = "") -> None:
         self._log(title, Fore.RED, message, logging.ERROR)
 
     def _log(

@@ -152,8 +155,8 @@ class Logger(metaclass=Singleton):
         title: str = "",
         title_color: str = "",
         message: str = "",
-        level=logging.INFO,
-    ):
+        level: int = logging.INFO,
+    ) -> None:
         if message:
             if isinstance(message, list):
                 message = " ".join(message)

@@ -161,11 +164,11 @@ class Logger(metaclass=Singleton):
             level, message, extra={"title": str(title), "color": str(title_color)}
         )
 
-    def set_level(self, level):
+    def set_level(self, level: logging._Level) -> None:
         self.logger.setLevel(level)
         self.typing_logger.setLevel(level)
 
-    def double_check(self, additionalText=None):
+    def double_check(self, additionalText: Optional[str] = None) -> None:
         if not additionalText:
             additionalText = (
                 "Please ensure you've setup and configured everything"

@@ -191,131 +194,10 @@ class Logger(metaclass=Singleton):
         self.json_logger.debug(data)
         self.json_logger.removeHandler(json_data_handler)
 
-    def get_log_directory(self):
+    def get_log_directory(self) -> str:
         this_files_dir_path = os.path.dirname(__file__)
-        log_dir = os.path.join(this_files_dir_path, "../logs")
+        log_dir = os.path.join(this_files_dir_path, "../../logs")
         return os.path.abspath(log_dir)
-
-
-"""
-Output stream to console using simulated typing
-"""
-
-
-class TypingConsoleHandler(logging.StreamHandler):
-    def emit(self, record):
-        min_typing_speed = 0.05
-        max_typing_speed = 0.01
-
-        msg = self.format(record)
-        try:
-            words = msg.split()
-            for i, word in enumerate(words):
-                print(word, end="", flush=True)
-                if i < len(words) - 1:
-                    print(" ", end="", flush=True)
-                typing_speed = random.uniform(min_typing_speed, max_typing_speed)
-                time.sleep(typing_speed)
-                # type faster after each word
-                min_typing_speed = min_typing_speed * 0.95
-                max_typing_speed = max_typing_speed * 0.95
-            print()
-        except Exception:
-            self.handleError(record)
-
-
-class ConsoleHandler(logging.StreamHandler):
-    def emit(self, record) -> None:
-        msg = self.format(record)
-        try:
-            print(msg)
-        except Exception:
-            self.handleError(record)
-
-
-class AutoGptFormatter(logging.Formatter):
-    """
-    Allows to handle custom placeholders 'title_color' and 'message_no_color'.
-    To use this formatter, make sure to pass 'color', 'title' as log extras.
-    """
-
-    def format(self, record: LogRecord) -> str:
-        if hasattr(record, "color"):
-            record.title_color = (
-                getattr(record, "color")
-                + getattr(record, "title", "")
-                + " "
-                + Style.RESET_ALL
-            )
-        else:
-            record.title_color = getattr(record, "title", "")
-
-        # Add this line to set 'title' to an empty string if it doesn't exist
-        record.title = getattr(record, "title", "")
-
-        if hasattr(record, "msg"):
-            record.message_no_color = remove_color_codes(getattr(record, "msg"))
-        else:
-            record.message_no_color = ""
-        return super().format(record)
-
-
-def remove_color_codes(s: str) -> str:
-    ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
-    return ansi_escape.sub("", s)
-
-
-def remove_ansi_escape(s: str) -> str:
-    return s.replace("\x1B", "")
 
 
 logger = Logger()
-
-
-def print_assistant_thoughts(
-    ai_name: object,
-    assistant_reply_json_valid: object,
-    config: Config,
-) -> None:
-    from autogpt.speech import say_text
-
-    assistant_thoughts_reasoning = None
-    assistant_thoughts_plan = None
-    assistant_thoughts_speak = None
-    assistant_thoughts_criticism = None
-
-    assistant_thoughts = assistant_reply_json_valid.get("thoughts", {})
-    assistant_thoughts_text = remove_ansi_escape(assistant_thoughts.get("text", ""))
-    if assistant_thoughts:
-        assistant_thoughts_reasoning = remove_ansi_escape(
-            assistant_thoughts.get("reasoning")
-        )
-        assistant_thoughts_plan = remove_ansi_escape(assistant_thoughts.get("plan"))
-        assistant_thoughts_criticism = remove_ansi_escape(
-            assistant_thoughts.get("criticism")
-        )
-        assistant_thoughts_speak = remove_ansi_escape(assistant_thoughts.get("speak"))
-    logger.typewriter_log(
-        f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}"
-    )
-    logger.typewriter_log("REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}")
-    if assistant_thoughts_plan:
-        logger.typewriter_log("PLAN:", Fore.YELLOW, "")
-        # If it's a list, join it into a string
-        if isinstance(assistant_thoughts_plan, list):
-            assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
-        elif isinstance(assistant_thoughts_plan, dict):
-            assistant_thoughts_plan = str(assistant_thoughts_plan)
-
-        # Split the input_string using the newline character and dashes
-        lines = assistant_thoughts_plan.split("\n")
-        for line in lines:
-            line = line.lstrip("- ")
-            logger.typewriter_log("- ", Fore.GREEN, line.strip())
-    logger.typewriter_log("CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}")
-    # Speak the assistant's thoughts
-    if assistant_thoughts_speak:
-        if config.speak_mode:
-            say_text(assistant_thoughts_speak, config)
-        else:
-            logger.typewriter_log("SPEAK:", Fore.YELLOW, f"{assistant_thoughts_speak}")

autogpt/logs/utils.py (new file), 65 lines

@@ -0,0 +1,65 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from colorama import Fore
+
+if TYPE_CHECKING:
+    from autogpt.config import Config
+
+from .logger import logger
+
+
+def print_assistant_thoughts(
+    ai_name: str,
+    assistant_reply_json_valid: dict,
+    config: Config,
+) -> None:
+    from autogpt.speech import say_text
+
+    assistant_thoughts_reasoning = None
+    assistant_thoughts_plan = None
+    assistant_thoughts_speak = None
+    assistant_thoughts_criticism = None
+
+    assistant_thoughts = assistant_reply_json_valid.get("thoughts", {})
+    assistant_thoughts_text = remove_ansi_escape(assistant_thoughts.get("text", ""))
+    if assistant_thoughts:
+        assistant_thoughts_reasoning = remove_ansi_escape(
+            assistant_thoughts.get("reasoning", "")
+        )
+        assistant_thoughts_plan = remove_ansi_escape(assistant_thoughts.get("plan", ""))
+        assistant_thoughts_criticism = remove_ansi_escape(
+            assistant_thoughts.get("criticism", "")
+        )
+        assistant_thoughts_speak = remove_ansi_escape(
+            assistant_thoughts.get("speak", "")
+        )
+    logger.typewriter_log(
+        f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text
+    )
+    logger.typewriter_log("REASONING:", Fore.YELLOW, str(assistant_thoughts_reasoning))
+    if assistant_thoughts_plan:
+        logger.typewriter_log("PLAN:", Fore.YELLOW, "")
+        # If it's a list, join it into a string
+        if isinstance(assistant_thoughts_plan, list):
+            assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
+        elif isinstance(assistant_thoughts_plan, dict):
+            assistant_thoughts_plan = str(assistant_thoughts_plan)
+
+        # Split the input_string using the newline character and dashes
+        lines = assistant_thoughts_plan.split("\n")
+        for line in lines:
+            line = line.lstrip("- ")
+            logger.typewriter_log("- ", Fore.GREEN, line.strip())
+    logger.typewriter_log("CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}")
+    # Speak the assistant's thoughts
+    if assistant_thoughts_speak:
+        if config.speak_mode:
+            say_text(assistant_thoughts_speak, config)
+        else:
+            logger.typewriter_log("SPEAK:", Fore.YELLOW, f"{assistant_thoughts_speak}")
+
+
+def remove_ansi_escape(s: str) -> str:
+    return s.replace("\x1B", "")

@@ -17,8 +17,7 @@ from autogpt.llm.utils import (
     count_string_tokens,
     create_chat_completion,
 )
-from autogpt.log_cycle.log_cycle import PROMPT_SUMMARY_FILE_NAME, SUMMARY_FILE_NAME
-from autogpt.logs import logger
+from autogpt.logs import PROMPT_SUMMARY_FILE_NAME, SUMMARY_FILE_NAME, logger
 
 
 @dataclass

@@ -6,7 +6,7 @@ from typing import Any, Generator
 
 import pytest
 
-from autogpt.log_cycle.log_cycle import LogCycleHandler
+from autogpt.logs import LogCycleHandler
 from autogpt.workspace import Workspace
 from benchmarks import run_task
 from tests.challenges.schema import Task