Fix flake8 W293 (whitespace on blank line) and W391 (blank line at end of file)

This commit is contained in:
Andy Melnikov
2023-04-10 18:46:40 +02:00
parent 5a6e565c52
commit d12da33e55
5 changed files with 6 additions and 8 deletions

View File

@@ -63,10 +63,10 @@ def chat_with_ai(
"""
model = cfg.fast_llm_model # TODO: Change model from hardcode to argument
# Reserve 1000 tokens for the response
if cfg.debug:
print(f"Token limit: {token_limit}")
send_token_limit = token_limit - 1000
relevant_memory = permanent_memory.get_relevant(str(full_message_history[-5:]), 10)