fix(logger): typewriter simulation does not work well with the Typing spinner

This commit is contained in:
Fabrice Hong
2023-04-12 14:39:54 +02:00
parent 1f5049a9c4
commit 862d44ea39
5 changed files with 117 additions and 97 deletions

View File

@@ -66,12 +66,12 @@ def chat_with_ai(
model = cfg.fast_llm_model # TODO: Change model from hardcode to argument
# Reserve 1000 tokens for the response
logger.log(content=f"Token limit: {token_limit}", level=logging.DEBUG)
logger.debug(f"Token limit: {token_limit}")
send_token_limit = token_limit - 1000
relevant_memory = permanent_memory.get_relevant(str(full_message_history[-9:]), 10)
logger.log(content=f'Memory Stats: {permanent_memory.get_stats()}', level=logging.DEBUG)
logger.debug(f'Memory Stats: {permanent_memory.get_stats()}')
next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context(
prompt, relevant_memory, full_message_history, model)
@@ -109,17 +109,17 @@ def chat_with_ai(
# assert tokens_remaining >= 0, "Tokens remaining is negative. This should never happen, please submit a bug report at https://www.github.com/Torantulino/Auto-GPT"
# Debug print the current context
logger.log(content=f"Token limit: {token_limit}", level=logging.DEBUG)
logger.log(content=f"Send Token Count: {current_tokens_used}", level=logging.DEBUG)
logger.log(content=f"Tokens remaining for response: {tokens_remaining}", level=logging.DEBUG)
logger.log(content="------------ CONTEXT SENT TO AI ---------------", level=logging.DEBUG)
logger.debug(f"Token limit: {token_limit}")
logger.debug(f"Send Token Count: {current_tokens_used}")
logger.debug(f"Tokens remaining for response: {tokens_remaining}")
logger.debug("------------ CONTEXT SENT TO AI ---------------")
for message in current_context:
# Skip printing the prompt
if message["role"] == "system" and message["content"] == prompt:
continue
logger.log(content=f"{message['role'].capitalize()}: {message['content']}", level=logging.DEBUG)
logger.log(content="", level=logging.DEBUG)
logger.log(content="----------- END OF CONTEXT ----------------", level=logging.DEBUG)
logger.debug(f"{message['role'].capitalize()}: {message['content']}")
logger.debug("")
logger.debug("----------- END OF CONTEXT ----------------")
# TODO: use a model defined elsewhere, so that model can contain temperature and other settings we care about
assistant_reply = create_chat_completion(