feat(global): error logs are logged at debug level and activated with the program argument '--debug'

This commit is contained in:
Fabrice Hong
2023-04-08 01:05:08 +02:00
parent 0e004f5c14
commit 43c006d71c
5 changed files with 148 additions and 26 deletions

View File

@@ -112,13 +112,20 @@ python scripts/main.py
2. After each of AUTO-GPT's actions, type "NEXT COMMAND" to authorise them to continue. 2. After each of AUTO-GPT's actions, type "NEXT COMMAND" to authorise them to continue.
3. To exit the program, type "exit" and press Enter. 3. To exit the program, type "exit" and press Enter.
### Logs
You will find activity and error logs in the folder ```./logs```
To output debug logs:
```
python scripts/main.py --debug
```
## 🗣️ Speech Mode ## 🗣️ Speech Mode
Use this to use TTS for Auto-GPT Use this to use TTS for Auto-GPT
``` ```
python scripts/main.py --speak python scripts/main.py --speak
``` ```
## 🔍 Google API Keys Configuration ## 🔍 Google API Keys Configuration

View File

@@ -4,6 +4,8 @@ from dotenv import load_dotenv
from config import Config from config import Config
import token_counter import token_counter
from llm_utils import create_chat_completion from llm_utils import create_chat_completion
from logger import logger
import logging
cfg = Config() cfg = Config()
@@ -64,15 +66,12 @@ def chat_with_ai(
model = cfg.fast_llm_model # TODO: Change model from hardcode to argument model = cfg.fast_llm_model # TODO: Change model from hardcode to argument
# Reserve 1000 tokens for the response # Reserve 1000 tokens for the response
if cfg.debug_mode: logger.log(content=f"Token limit: {token_limit}", level=logging.DEBUG)
print(f"Token limit: {token_limit}")
send_token_limit = token_limit - 1000 send_token_limit = token_limit - 1000
relevant_memory = permanent_memory.get_relevant(str(full_message_history[-9:]), 10) relevant_memory = permanent_memory.get_relevant(str(full_message_history[-9:]), 10)
if cfg.debug_mode: logger.log(content=f'Memory Stats: {permanent_memory.get_stats()}', level=logging.DEBUG)
print('Memory Stats: ', permanent_memory.get_stats())
next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context( next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context(
prompt, relevant_memory, full_message_history, model) prompt, relevant_memory, full_message_history, model)
@@ -110,19 +109,17 @@ def chat_with_ai(
# assert tokens_remaining >= 0, "Tokens remaining is negative. This should never happen, please submit a bug report at https://www.github.com/Torantulino/Auto-GPT" # assert tokens_remaining >= 0, "Tokens remaining is negative. This should never happen, please submit a bug report at https://www.github.com/Torantulino/Auto-GPT"
# Debug print the current context # Debug print the current context
if cfg.debug_mode: logger.log(content=f"Token limit: {token_limit}", level=logging.DEBUG)
print(f"Token limit: {token_limit}") logger.log(content=f"Send Token Count: {current_tokens_used}", level=logging.DEBUG)
print(f"Send Token Count: {current_tokens_used}") logger.log(content=f"Tokens remaining for response: {tokens_remaining}", level=logging.DEBUG)
print(f"Tokens remaining for response: {tokens_remaining}") logger.log(content="------------ CONTEXT SENT TO AI ---------------", level=logging.DEBUG)
print("------------ CONTEXT SENT TO AI ---------------") for message in current_context:
for message in current_context: # Skip printing the prompt
# Skip printing the prompt if message["role"] == "system" and message["content"] == prompt:
if message["role"] == "system" and message["content"] == prompt: continue
continue logger.log(content=f"{message['role'].capitalize()}: {message['content']}", level=logging.DEBUG)
print( logger.log(content="", level=logging.DEBUG)
f"{message['role'].capitalize()}: {message['content']}") logger.log(content="----------- END OF CONTEXT ----------------", level=logging.DEBUG)
print()
print("----------- END OF CONTEXT ----------------")
# TODO: use a model defined elsewhere, so that model can contain temperature and other settings we care about # TODO: use a model defined elsewhere, so that model can contain temperature and other settings we care about
assistant_reply = create_chat_completion( assistant_reply = create_chat_completion(

View File

@@ -76,7 +76,6 @@ def fix_and_parse_json(
def fix_json(json_str: str, schema: str) -> str: def fix_json(json_str: str, schema: str) -> str:
"""Fix the given JSON string to make it parseable and fully compliant with the provided schema.""" """Fix the given JSON string to make it parseable and fully compliant with the provided schema."""
# Try to fix the JSON using GPT: # Try to fix the JSON using GPT:
function_string = "def fix_json(json_str: str, schema:str=None) -> str:" function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
args = [f"'''{json_str}'''", f"'''{schema}'''"] args = [f"'''{json_str}'''", f"'''{schema}'''"]
@@ -92,12 +91,11 @@ def fix_json(json_str: str, schema: str) -> str:
result_string = call_ai_function( result_string = call_ai_function(
function_string, args, description_string, model=cfg.fast_llm_model function_string, args, description_string, model=cfg.fast_llm_model
) )
if cfg.debug_mode: logger.log(content="------------ JSON FIX ATTEMPT ---------------", level=logging.DEBUG)
print("------------ JSON FIX ATTEMPT ---------------") logger.log(content=f"Original JSON: {json_str}", level=logging.DEBUG)
print(f"Original JSON: {json_str}") logger.log(content="-----------", level=logging.DEBUG)
print("-----------") logger.log(content=f"Fixed JSON: {result_string}", level=logging.DEBUG)
print(f"Fixed JSON: {result_string}") logger.log(content="----------- END OF FIX ATTEMPT ----------------", level=logging.DEBUG)
print("----------- END OF FIX ATTEMPT ----------------")
try: try:
json.loads(result_string) # just check the validity json.loads(result_string) # just check the validity

115
scripts/logger.py Normal file
View File

@@ -0,0 +1,115 @@
import logging
import os
import random
import time
from logging import LogRecord
from colorama import Style
import speak
from config import Config
from config import Singleton
import re
cfg = Config()
class Logger(metaclass=Singleton):
    """Process-wide logger (Singleton) with three sinks:

    - console: word-by-word "typing" output via TypingConsoleHandler (INFO+)
    - logs/activity.log: plain text, colors stripped (INFO+)
    - logs/error.log: plain text with module:function:lineno (ERROR+)
    """

    def __init__(self):
        # Bug fix: the log directory was os.path.join('..', 'logs'), which
        # resolves against the *current working directory* — running
        # `python scripts/main.py` from the repo root would create ../logs
        # outside the repository. Anchor it to this file instead so logs
        # always land in <repo>/logs as the README promises.
        this_dir = os.path.dirname(os.path.abspath(__file__))
        log_dir = os.path.join(this_dir, '..', 'logs')
        # exist_ok avoids the check-then-create race of the original.
        os.makedirs(log_dir, exist_ok=True)

        log_file = "activity.log"
        error_file = "error.log"

        # Console handler with the typing effect; INFO and up.
        self.console_handler = TypingConsoleHandler()
        self.console_handler.setLevel(logging.INFO)
        console_formatter = AutoGptFormatter('%(title_color)s %(message)s')
        self.console_handler.setFormatter(console_formatter)

        # Activity file handler; INFO and up, ANSI colors stripped.
        self.file_handler = logging.FileHandler(os.path.join(log_dir, log_file))
        self.file_handler.setLevel(logging.INFO)
        info_formatter = AutoGptFormatter(
            '%(asctime)s %(levelname)s %(title)s %(message_no_color)s')
        self.file_handler.setFormatter(info_formatter)

        # Error file handler; ERROR and up, with source location for triage.
        error_handler = logging.FileHandler(os.path.join(log_dir, error_file))
        error_handler.setLevel(logging.ERROR)
        error_formatter = AutoGptFormatter(
            '%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d'
            ' %(title)s %(message_no_color)s')
        error_handler.setFormatter(error_formatter)

        self.logger = logging.getLogger(__name__)
        self.logger.addHandler(self.console_handler)
        self.logger.addHandler(self.file_handler)
        self.logger.addHandler(error_handler)

    def log(
            self,
            title='',
            title_color='',
            content='',
            speak_text=False,
            level=logging.INFO):
        """Emit a record to all configured handlers.

        title/title_color are passed to the formatters via `extra`;
        a list `content` is joined with spaces, falsy content becomes ''.
        When speak_text is set and cfg.speak_mode is on, the message is
        also spoken aloud.
        """
        if speak_text and cfg.speak_mode:
            speak.say_text(f"{title}. {content}")

        if content:
            if isinstance(content, list):
                content = " ".join(content)
        else:
            content = ""

        self.logger.log(level, content,
                        extra={'title': title, 'color': title_color})

    def set_level(self, level):
        """Apply *level* to the logger, the console and the activity-file
        handlers (the error handler keeps its ERROR threshold)."""
        self.logger.setLevel(level)
        self.console_handler.setLevel(level)
        self.file_handler.setLevel(level)
class TypingConsoleHandler(logging.StreamHandler):
    """Console handler that prints each record word-by-word with a small
    random pause between words, simulating typing; the pauses shrink as the
    message progresses.

    NOTE(review): output goes through print() (stdout), not self.stream.
    """

    def emit(self, record):
        # Bug fix: the original bounds were named backwards
        # (min_typing_speed = 0.05 > max_typing_speed = 0.01).
        # random.uniform() accepts its arguments in either order, so the
        # drawn delays were unaffected; the names now match the values.
        min_delay = 0.01
        max_delay = 0.05
        msg = self.format(record)
        try:
            words = msg.split()
            for i, word in enumerate(words):
                print(word, end="", flush=True)
                if i < len(words) - 1:
                    print(" ", end="", flush=True)
                time.sleep(random.uniform(min_delay, max_delay))
                # Type faster after each word.
                min_delay *= 0.95
                max_delay *= 0.95
            print()
        except Exception:
            self.handleError(record)
class AutoGptFormatter(logging.Formatter):
    """Formatter that synthesizes two extra fields before delegating to
    logging.Formatter:

    - title_color: the record's 'color' + 'title' extras plus a color reset
    - message_no_color: the raw msg with ANSI escape codes stripped
    """

    def format(self, record: LogRecord) -> str:
        # Bug fix: getattr() without a default raised AttributeError for any
        # record logged without the 'title'/'color' extras (e.g. library
        # code routed through the same logger). Default both to ''.
        record.title_color = (
            getattr(record, 'color', '')
            + getattr(record, 'title', '')
            + " "
            + Style.RESET_ALL
        )
        if hasattr(record, 'msg'):
            record.message_no_color = remove_color_codes(getattr(record, 'msg'))
        else:
            record.message_no_color = ''
        return super().format(record)
def remove_color_codes(s: str) -> str:
    """Return *s* with every ANSI escape sequence (colors, cursor moves,
    CSI commands) removed."""
    return re.sub(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])', '', s)
logger = Logger()

View File

@@ -313,6 +313,10 @@ def parse_arguments():
parser.add_argument('--gpt4only', action='store_true', help='Enable GPT4 Only Mode') parser.add_argument('--gpt4only', action='store_true', help='Enable GPT4 Only Mode')
args = parser.parse_args() args = parser.parse_args()
if args.debug:
logger.log("Debug Mode: ", Fore.GREEN, "ENABLED")
cfg.set_debug_mode(True)
if args.continuous: if args.continuous:
print_to_console("Continuous Mode: ", Fore.RED, "ENABLED") print_to_console("Continuous Mode: ", Fore.RED, "ENABLED")
print_to_console( print_to_console(
@@ -343,6 +347,7 @@ check_openai_api_key()
cfg = Config() cfg = Config()
logger = configure_logging() logger = configure_logging()
parse_arguments() parse_arguments()
logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO)
ai_name = "" ai_name = ""
prompt = construct_prompt() prompt = construct_prompt()
# print(prompt) # print(prompt)