fix(logger): typewriter simulation doesn't work well with the Typing spinner

Fabrice Hong
2023-04-12 14:39:54 +02:00
parent 1f5049a9c4
commit 862d44ea39
5 changed files with 117 additions and 97 deletions

View File

@@ -66,12 +66,12 @@ def chat_with_ai(
             model = cfg.fast_llm_model  # TODO: Change model from hardcode to argument
             # Reserve 1000 tokens for the response
-            logger.log(content=f"Token limit: {token_limit}", level=logging.DEBUG)
+            logger.debug(f"Token limit: {token_limit}")
             send_token_limit = token_limit - 1000
             relevant_memory = permanent_memory.get_relevant(str(full_message_history[-9:]), 10)
-            logger.log(content=f'Memory Stats: {permanent_memory.get_stats()}', level=logging.DEBUG)
+            logger.debug(f'Memory Stats: {permanent_memory.get_stats()}')
             next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context(
                 prompt, relevant_memory, full_message_history, model)
@@ -109,17 +109,17 @@ def chat_with_ai(
             # assert tokens_remaining >= 0, "Tokens remaining is negative. This should never happen, please submit a bug report at https://www.github.com/Torantulino/Auto-GPT"
             # Debug print the current context
-            logger.log(content=f"Token limit: {token_limit}", level=logging.DEBUG)
-            logger.log(content=f"Send Token Count: {current_tokens_used}", level=logging.DEBUG)
-            logger.log(content=f"Tokens remaining for response: {tokens_remaining}", level=logging.DEBUG)
-            logger.log(content="------------ CONTEXT SENT TO AI ---------------", level=logging.DEBUG)
+            logger.debug(f"Token limit: {token_limit}")
+            logger.debug(f"Send Token Count: {current_tokens_used}")
+            logger.debug(f"Tokens remaining for response: {tokens_remaining}")
+            logger.debug("------------ CONTEXT SENT TO AI ---------------")
             for message in current_context:
                 # Skip printing the prompt
                 if message["role"] == "system" and message["content"] == prompt:
                     continue
-                logger.log(content=f"{message['role'].capitalize()}: {message['content']}", level=logging.DEBUG)
-                logger.log(content="", level=logging.DEBUG)
-            logger.log(content="----------- END OF CONTEXT ----------------", level=logging.DEBUG)
+                logger.debug(f"{message['role'].capitalize()}: {message['content']}")
+                logger.debug("")
+            logger.debug("----------- END OF CONTEXT ----------------")
             # TODO: use a model defined elsewhere, so that model can contain temperature and other settings we care about
             assistant_reply = create_chat_completion(
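
For orientation, these hunks only swap the keyword-style call for the new convenience method; both go through the shared Logger singleton, but as wired in logger.py further down, debug() is routed to the plain console handler rather than the typing one. A minimal sketch of the two call styles (the token_limit value is illustrative):

    from logger import logger

    token_limit = 4000  # illustrative value

    # before: explicit level passed to the shared log() helper
    # logger.log(content=f"Token limit: {token_limit}", level=logging.DEBUG)

    # after: the level is implied by the convenience method
    logger.debug(f"Token limit: {token_limit}")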

View File

@@ -3,6 +3,7 @@ from typing import Any, Dict, Union
 from call_ai_function import call_ai_function
 from config import Config
 from json_utils import correct_json
+from logger import logger
 cfg = Config()
@@ -56,7 +57,7 @@ def fix_and_parse_json(
     # Can throw a ValueError if there is no "{" or "}" in the json_str
     except (json.JSONDecodeError, ValueError) as e:  # noqa: F841
         if try_to_fix_with_gpt:
-            print("Warning: Failed to parse AI output, attempting to fix."
+            logger.warn("Warning: Failed to parse AI output, attempting to fix."
                 "\n If you see this warning frequently, it's likely that"
                 " your prompt is confusing the AI. Try changing it up"
                 " slightly.")
@@ -68,7 +69,7 @@ def fix_and_parse_json(
             else:
                 # This allows the AI to react to the error message,
                 # which usually results in it correcting its ways.
-                print("Failed to fix AI output, telling the AI.")
+                logger.error("Failed to fix AI output, telling the AI.")
                 return json_str
         else:
             raise e
@@ -91,11 +92,11 @@ def fix_json(json_str: str, schema: str) -> str:
     result_string = call_ai_function(
         function_string, args, description_string, model=cfg.fast_llm_model
     )
-    logger.log(content="------------ JSON FIX ATTEMPT ---------------", level=logging.DEBUG)
-    logger.log(content=f"Original JSON: {json_str}", level=logging.DEBUG)
-    logger.log(content="-----------", level=logging.DEBUG)
-    logger.log(content=f"Fixed JSON: {result_string}", level=logging.DEBUG)
-    logger.log(content="----------- END OF FIX ATTEMPT ----------------", level=logging.DEBUG)
+    logger.debug("------------ JSON FIX ATTEMPT ---------------")
+    logger.debug(f"Original JSON: {json_str}")
+    logger.debug("-----------")
+    logger.debug(f"Fixed JSON: {result_string}")
+    logger.debug("----------- END OF FIX ATTEMPT ----------------")
     try:
         json.loads(result_string)  # just check the validity
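
A note on the two helpers used in this file: in the new Logger API, warn() takes the message as its first positional argument, while error() takes the title first and colors it red internally, which is why the call sites above pass their strings positionally. An illustrative sketch (strings shortened):

    from logger import logger

    # warn(message, title='', title_color=''): the string becomes the log message
    logger.warn("Warning: Failed to parse AI output, attempting to fix.")

    # error(title, message=''): the string becomes a red title; details go second
    logger.error("Failed to fix AI output, telling the AI.")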

View File

@@ -1,8 +1,10 @@
 import logging
 import os
 import random
+import re
 import time
 from logging import LogRecord
+from colorama import Fore
 from colorama import Style
@@ -10,8 +12,6 @@ import speak
 from config import Config
 from config import Singleton
-import re
 cfg = Config()
 '''
@@ -31,10 +31,16 @@ class Logger(metaclass=Singleton):
         log_file = "activity.log"
         error_file = "error.log"
-        # Create a handler for INFO level logs
-        self.console_handler = TypingConsoleHandler()
-        self.console_handler.setLevel(logging.INFO)
         console_formatter = AutoGptFormatter('%(title_color)s %(message)s')
+        # Create a handler for console which simulate typing
+        self.typing_console_handler = TypingConsoleHandler()
+        self.typing_console_handler.setLevel(logging.INFO)
+        self.typing_console_handler.setFormatter(console_formatter)
+        # Create a handler for console without typing simulation
+        self.console_handler = ConsoleHandler()
+        self.console_handler.setLevel(logging.DEBUG)
         self.console_handler.setFormatter(console_formatter)
         # Info handler in activity.log
@@ -50,10 +56,17 @@ class Logger(metaclass=Singleton):
             '%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s %(message_no_color)s')
         error_handler.setFormatter(error_formatter)
-        self.logger = logging.getLogger(__name__)
+        self.typing_logger = logging.getLogger('TYPER')
+        self.typing_logger.addHandler(self.typing_console_handler)
+        self.typing_logger.addHandler(self.file_handler)
+        self.typing_logger.addHandler(error_handler)
+        self.typing_logger.setLevel(logging.DEBUG)
+        self.logger = logging.getLogger('LOGGER')
         self.logger.addHandler(self.console_handler)
         self.logger.addHandler(self.file_handler)
         self.logger.addHandler(error_handler)
+        self.logger.setLevel(logging.DEBUG)
 
     def log(
             self,
@@ -71,12 +84,45 @@ class Logger(metaclass=Singleton):
         else:
             content = ""
 
-        self.logger.log(level, content, extra={'title': title, 'color': title_color})
+        self.typing_logger.log(level, content, extra={'title': title, 'color': title_color})
+
+    def debug(
+            self,
+            message,
+            title='',
+            title_color='',
+    ):
+        self._logs(title, title_color, message, logging.DEBUG)
+
+    def warn(
+            self,
+            message,
+            title='',
+            title_color='',
+    ):
+        self._logs(title, title_color, message, logging.WARN)
+
+    def error(
+            self,
+            title,
+            message=''
+    ):
+        self._logs(title, Fore.RED, message, logging.ERROR)
+
+    def _logs(
+            self,
+            title='',
+            title_color='',
+            message='',
+            level=logging.INFO):
+        if message:
+            if isinstance(message, list):
+                message = " ".join(message)
+        self.logger.log(level, message, extra={'title': title, 'color': title_color})
 
     def set_level(self, level):
         self.logger.setLevel(level)
-        self.console_handler.setLevel(level)
+        self.typing_logger.setLevel(level)
+        self.file_handler.setLevel(level)
 '''
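
Taken together, the class now drives two named loggers: 'TYPER', whose console handler simulates typing, and 'LOGGER', whose plain console handler prints immediately; log() writes to the former and the debug/warn/error helpers to the latter. A rough usage sketch, with illustrative messages that are not taken from this diff:

    import logging

    from colorama import Fore
    from logger import logger

    # titled, colored output with the typewriter effect (TYPER logger)
    logger.log("NEXT ACTION: ", Fore.CYAN, "COMMAND = browse_website")

    # immediate output without the typing simulation (LOGGER logger)
    logger.debug("Token limit: 4000")
    logger.warn("model not found, falling back")
    logger.error("Error: Invalid JSON\n", "raw reply would go here")

    # debug mode lowers the level on both named loggers and the file handler
    logger.set_level(logging.DEBUG)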
@@ -105,6 +151,13 @@ class TypingConsoleHandler(logging.StreamHandler):
         except Exception:
             self.handleError(record)
 
+class ConsoleHandler(logging.StreamHandler):
+    def emit(self, record):
+        msg = self.format(record)
+        try:
+            print(msg)
+        except Exception:
+            self.handleError(record)
 
 '''
 Allows to handle custom placeholders 'title_color' and 'message_no_color'.
@@ -114,7 +167,10 @@ To use this formatter, make sure to pass 'color', 'title' as log extras.
 class AutoGptFormatter(logging.Formatter):
     def format(self, record: LogRecord) -> str:
-        record.title_color = getattr(record, 'color') + getattr(record, 'title') + " " + Style.RESET_ALL
+        if (hasattr(record, 'color')):
+            record.title_color = getattr(record, 'color') + getattr(record, 'title') + " " + Style.RESET_ALL
+        else:
+            record.title_color = getattr(record, 'title')
         if hasattr(record, 'msg'):
             record.message_no_color = remove_color_codes(getattr(record, 'msg'))
         else:
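
TypingConsoleHandler.emit sits mostly outside the hunks shown here; for orientation, it follows the same word-by-word loop that the removed print_to_console used, so a reconstruction under that assumption looks roughly like this (the speed constants are assumed, not taken from this diff):

    import logging
    import random
    import time

    class TypingConsoleHandler(logging.StreamHandler):
        def emit(self, record):
            min_typing_speed = 0.05  # assumed default
            max_typing_speed = 0.01  # assumed default
            msg = self.format(record)
            try:
                words = msg.split()
                for i, word in enumerate(words):
                    print(word, end="", flush=True)
                    if i < len(words) - 1:
                        print(" ", end="", flush=True)
                    typing_speed = random.uniform(min_typing_speed, max_typing_speed)
                    time.sleep(typing_speed)
                    # type faster after each word
                    min_typing_speed = min_typing_speed * 0.95
                    max_typing_speed = max_typing_speed * 0.95
                print()
            except Exception:
                self.handleError(record)

The new ConsoleHandler added above is the plain counterpart: same formatter, no sleeps, which is what lets DEBUG output coexist with the Typing spinner and the typewriter effect.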

View File

@@ -15,18 +15,11 @@ from ai_config import AIConfig
 import traceback
 import yaml
 import argparse
+from logger import logger
 import logging
 
 cfg = Config()
 
-def configure_logging():
-    logging.basicConfig(filename='log.txt',
-                        filemode='a',
-                        format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
-                        datefmt='%H:%M:%S',
-                        level=logging.DEBUG)
-    return logging.getLogger('AutoGPT')
 
 def check_openai_api_key():
     """Check if the OpenAI API key is set in config.py or as an environment variable."""
     if not cfg.openai_api_key:
@@ -37,39 +30,10 @@ def check_openai_api_key():
print("You can get your key from https://beta.openai.com/account/api-keys") print("You can get your key from https://beta.openai.com/account/api-keys")
exit(1) exit(1)
def print_to_console(
title,
title_color,
content,
speak_text=False,
min_typing_speed=0.05,
max_typing_speed=0.01):
"""Prints text to the console with a typing effect"""
global cfg
global logger
if speak_text and cfg.speak_mode:
speak.say_text(f"{title}. {content}")
print(title_color + title + " " + Style.RESET_ALL, end="")
if content:
logger.info(title + ': ' + content)
if isinstance(content, list):
content = " ".join(content)
words = content.split()
for i, word in enumerate(words):
print(word, end="", flush=True)
if i < len(words) - 1:
print(" ", end="", flush=True)
typing_speed = random.uniform(min_typing_speed, max_typing_speed)
time.sleep(typing_speed)
# type faster after each word
min_typing_speed = min_typing_speed * 0.95
max_typing_speed = max_typing_speed * 0.95
print()
def attempt_to_fix_json_by_finding_outermost_brackets(json_string): def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
if cfg.speak_mode and cfg.debug_mode: if cfg.speak_mode and cfg.debug_mode:
speak.say_text("I have received an invalid JSON response from the OpenAI API. Trying to fix it now.") speak.say_text("I have received an invalid JSON response from the OpenAI API. Trying to fix it now.")
print_to_console("Attempting to fix JSON by finding outermost brackets\n", Fore.RED, "") logger.debug(title="Attempting to fix JSON by finding outermost brackets\n", title_color=Fore.RED)
try: try:
# Use regex to search for JSON objects # Use regex to search for JSON objects
@@ -80,7 +44,7 @@ def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
         if json_match:
             # Extract the valid JSON object from the string
             json_string = json_match.group(0)
-            print_to_console("Apparently json was fixed.", Fore.GREEN,"")
+            logger.debug(title="Apparently json was fixed.", title_color=Fore.GREEN)
             if cfg.speak_mode and cfg.debug_mode:
                 speak.say_text("Apparently json was fixed.")
         else:
@@ -89,7 +53,7 @@ def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
     except (json.JSONDecodeError, ValueError) as e:
         if cfg.speak_mode:
             speak.say_text("Didn't work. I will have to ignore this response then.")
-        print_to_console("Error: Invalid JSON, setting it to empty JSON now.\n", Fore.RED, "")
+        logger.error("Error: Invalid JSON, setting it to empty JSON now.\n")
         json_string = {}
 
     return json_string
@@ -103,7 +67,7 @@ def print_assistant_thoughts(assistant_reply):
             # Parse and print Assistant response
             assistant_reply_json = fix_and_parse_json(assistant_reply)
         except json.JSONDecodeError as e:
-            print_to_console("Error: Invalid JSON in assistant thoughts\n", Fore.RED, assistant_reply)
+            logger.error("Error: Invalid JSON in assistant thoughts\n", assistant_reply)
             assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply)
             assistant_reply_json = fix_and_parse_json(assistant_reply_json)
@@ -112,7 +76,7 @@ def print_assistant_thoughts(assistant_reply):
             try:
                 assistant_reply_json = json.loads(assistant_reply_json)
             except json.JSONDecodeError as e:
-                print_to_console("Error: Invalid JSON in assistant thoughts\n", Fore.RED, assistant_reply)
+                logger.error("Error: Invalid JSON\n", assistant_reply)
                 assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply_json)
 
         assistant_thoughts_reasoning = None
@@ -128,11 +92,11 @@ def print_assistant_thoughts(assistant_reply):
         assistant_thoughts_criticism = assistant_thoughts.get("criticism")
         assistant_thoughts_speak = assistant_thoughts.get("speak")
 
-        print_to_console(f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text)
-        print_to_console("REASONING:", Fore.YELLOW, assistant_thoughts_reasoning)
+        logger.log(f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text)
+        logger.log("REASONING:", Fore.YELLOW, assistant_thoughts_reasoning)
 
         if assistant_thoughts_plan:
-            print_to_console("PLAN:", Fore.YELLOW, "")
+            logger.log("PLAN:", Fore.YELLOW, "")
             # If it's a list, join it into a string
             if isinstance(assistant_thoughts_plan, list):
                 assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
@@ -143,23 +107,23 @@ def print_assistant_thoughts(assistant_reply):
             lines = assistant_thoughts_plan.split('\n')
             for line in lines:
                 line = line.lstrip("- ")
-                print_to_console("- ", Fore.GREEN, line.strip())
+                logger.log("- ", Fore.GREEN, line.strip())
 
-        print_to_console("CRITICISM:", Fore.YELLOW, assistant_thoughts_criticism)
+        logger.log("CRITICISM:", Fore.YELLOW, assistant_thoughts_criticism)
         # Speak the assistant's thoughts
         if cfg.speak_mode and assistant_thoughts_speak:
             speak.say_text(assistant_thoughts_speak)
 
         return assistant_reply_json
     except json.decoder.JSONDecodeError as e:
-        print_to_console("Error: Invalid JSON\n", Fore.RED, assistant_reply)
+        logger.error("Error: Invalid JSON\n", assistant_reply)
         if cfg.speak_mode:
             speak.say_text("I have received an invalid JSON response from the OpenAI API. I cannot ignore this response.")
 
     # All other errors, return "Error: + error message"
     except Exception as e:
         call_stack = traceback.format_exc()
-        print_to_console("Error: \n", Fore.RED, call_stack)
+        logger.error("Error: \n", call_stack)
 
 def load_variables(config_file="config.yaml"):
@@ -220,7 +184,7 @@ def construct_prompt():
"""Construct the prompt for the AI to respond to""" """Construct the prompt for the AI to respond to"""
config = AIConfig.load() config = AIConfig.load()
if config.ai_name: if config.ai_name:
print_to_console( logger.log(
f"Welcome back! ", f"Welcome back! ",
Fore.GREEN, Fore.GREEN,
f"Would you like me to return to being {config.ai_name}?", f"Would you like me to return to being {config.ai_name}?",
@@ -249,14 +213,14 @@ def prompt_user():
"""Prompt the user for input""" """Prompt the user for input"""
ai_name = "" ai_name = ""
# Construct the prompt # Construct the prompt
print_to_console( logger.log(
"Welcome to Auto-GPT! ", "Welcome to Auto-GPT! ",
Fore.GREEN, Fore.GREEN,
"Enter the name of your AI and its role below. Entering nothing will load defaults.", "Enter the name of your AI and its role below. Entering nothing will load defaults.",
speak_text=True) speak_text=True)
# Get AI Name from User # Get AI Name from User
print_to_console( logger.log(
"Name your AI: ", "Name your AI: ",
Fore.GREEN, Fore.GREEN,
"For example, 'Entrepreneur-GPT'") "For example, 'Entrepreneur-GPT'")
@@ -264,14 +228,14 @@ def prompt_user():
if ai_name == "": if ai_name == "":
ai_name = "Entrepreneur-GPT" ai_name = "Entrepreneur-GPT"
print_to_console( logger.log(
f"{ai_name} here!", f"{ai_name} here!",
Fore.LIGHTBLUE_EX, Fore.LIGHTBLUE_EX,
"I am at your service.", "I am at your service.",
speak_text=True) speak_text=True)
# Get AI Role from User # Get AI Role from User
print_to_console( logger.log(
"Describe your AI's role: ", "Describe your AI's role: ",
Fore.GREEN, Fore.GREEN,
"For example, 'an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.'") "For example, 'an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.'")
@@ -280,7 +244,7 @@ def prompt_user():
ai_role = "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth." ai_role = "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth."
# Enter up to 5 goals for the AI # Enter up to 5 goals for the AI
print_to_console( logger.log(
"Enter up to 5 goals for your AI: ", "Enter up to 5 goals for your AI: ",
Fore.GREEN, Fore.GREEN,
"For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'") "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'")
@@ -318,34 +282,33 @@ def parse_arguments():
         cfg.set_debug_mode(True)
 
     if args.continuous:
-        print_to_console("Continuous Mode: ", Fore.RED, "ENABLED")
-        print_to_console(
+        logger.log("Continuous Mode: ", Fore.RED, "ENABLED")
+        logger.log(
             "WARNING: ",
             Fore.RED,
             "Continuous mode is not recommended. It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorise. Use at your own risk.")
         cfg.set_continuous_mode(True)
 
     if args.speak:
-        print_to_console("Speak Mode: ", Fore.GREEN, "ENABLED")
+        logger.log("Speak Mode: ", Fore.GREEN, "ENABLED")
         cfg.set_speak_mode(True)
 
     if args.gpt3only:
-        print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
+        logger.log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
         cfg.set_smart_llm_model(cfg.fast_llm_model)
 
     if args.gpt4only:
-        print_to_console("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
+        logger.log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
         cfg.set_fast_llm_model(cfg.smart_llm_model)
 
     if args.debug:
-        print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED")
+        logger.log("Debug Mode: ", Fore.GREEN, "ENABLED")
         cfg.set_debug_mode(True)
 
 # TODO: fill in llm values here
 check_openai_api_key()
 cfg = Config()
-logger = configure_logging()
 parse_arguments()
 logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO)
 ai_name = ""
@@ -383,14 +346,14 @@ while True:
         if cfg.speak_mode:
             speak.say_text(f"I want to execute {command_name}")
     except Exception as e:
-        print_to_console("Error: \n", Fore.RED, str(e))
+        logger.error("Error: \n", str(e))
 
     if not cfg.continuous_mode and next_action_count == 0:
         ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
         # Get key press: Prompt the user to press enter to continue or escape
         # to exit
         user_input = ""
-        print_to_console(
+        logger.log(
             "NEXT ACTION: ",
             Fore.CYAN,
             f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
@@ -419,7 +382,7 @@ while True:
                 break
 
         if user_input == "GENERATE NEXT COMMAND JSON":
-            print_to_console(
+            logger.log(
                 "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
                 Fore.MAGENTA,
                 "")
@@ -428,7 +391,7 @@ while True:
             break
     else:
         # Print command
-        print_to_console(
+        logger.log(
             "NEXT ACTION: ",
             Fore.CYAN,
             f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
@@ -453,9 +416,9 @@ while True:
# history
     if result is not None:
         full_message_history.append(chat.create_chat_message("system", result))
-        print_to_console("SYSTEM: ", Fore.YELLOW, result)
+        logger.log("SYSTEM: ", Fore.YELLOW, result)
     else:
         full_message_history.append(
             chat.create_chat_message(
                 "system", "Unable to execute command"))
-        print_to_console("SYSTEM: ", Fore.YELLOW, "Unable to execute command")
+        logger.log("SYSTEM: ", Fore.YELLOW, "Unable to execute command")

View File

@@ -15,7 +15,7 @@ def count_message_tokens(messages : List[Dict[str, str]], model : str = "gpt-3.5
     try:
         encoding = tiktoken.encoding_for_model(model)
     except KeyError:
-        print("Warning: model not found. Using cl100k_base encoding.")
+        logger.warn("Warning: model not found. Using cl100k_base encoding.")
         encoding = tiktoken.get_encoding("cl100k_base")
     if model == "gpt-3.5-turbo":
         # !Node: gpt-3.5-turbo may change over time. Returning num tokens assuming gpt-3.5-turbo-0301.")
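
One thing to note: the changed line relies on the shared logger, but no import for it is visible in this hunk, so the module presumably gains (or already has) one outside the lines shown. A minimal sketch under that assumption, with select_encoding as a hypothetical wrapper:

    import tiktoken
    from logger import logger  # assumed import; not visible in the hunk above

    def select_encoding(model: str = "gpt-3.5-turbo"):
        try:
            return tiktoken.encoding_for_model(model)
        except KeyError:
            # same fallback as the hunk, now reported through the shared logger
            logger.warn("Warning: model not found. Using cl100k_base encoding.")
            return tiktoken.get_encoding("cl100k_base")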