From 1d26f6b69720a561237f4d94d08bd9fae232aebb Mon Sep 17 00:00:00 2001
From: Reinier van der Leer
Date: Tue, 2 May 2023 02:48:27 +0200
Subject: [PATCH] Add warning for LLM to avoid context overflow (#3646)

---
 autogpt/agent/agent.py | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py
index bd105932..dbae1198 100644
--- a/autogpt/agent/agent.py
+++ b/autogpt/agent/agent.py
@@ -5,6 +5,7 @@ from autogpt.config import Config
 from autogpt.json_utils.json_fix_llm import fix_json_using_multiple_techniques
 from autogpt.json_utils.utilities import LLM_DEFAULT_RESPONSE_FORMAT, validate_json
 from autogpt.llm import chat_with_ai, create_chat_completion, create_chat_message
+from autogpt.llm.token_counter import count_string_tokens
 from autogpt.logs import logger, print_assistant_thoughts
 from autogpt.speech import say_text
 from autogpt.spinner import Spinner
@@ -233,6 +234,16 @@ class Agent:
                 )
                 result = f"Command {command_name} returned: " f"{command_result}"

+                result_tlength = count_string_tokens(
+                    str(command_result), cfg.fast_llm_model
+                )
+                memory_tlength = count_string_tokens(
+                    str(self.summary_memory), cfg.fast_llm_model
+                )
+                if result_tlength + memory_tlength + 600 > cfg.fast_token_limit:
+                    result = f"Failure: command {command_name} returned too much output. \
+                        Do not execute this command again with the same arguments."
+
                 for plugin in cfg.plugins:
                     if not plugin.can_handle_post_command():
                         continue
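
The guard this patch adds can be read in isolation as the following minimal sketch. It assumes `count_string_tokens` is a thin tiktoken wrapper (its location in `autogpt/llm/token_counter.py` is taken from the import above), and it substitutes stand-in constants `FAST_TOKEN_LIMIT` and `FAST_LLM_MODEL` for the config values `cfg.fast_token_limit` and `cfg.fast_llm_model`, which are not shown in the diff. The literal 600 is the headroom the patch reserves for the rest of the prompt.

```python
# Standalone sketch of the context-overflow guard added by this patch.
# Assumptions (not part of the diff): count_string_tokens wraps tiktoken,
# and the two module-level constants stand in for config values.
import tiktoken

PROMPT_RESERVE = 600              # headroom reserved for the rest of the prompt
FAST_TOKEN_LIMIT = 4000           # stand-in for cfg.fast_token_limit
FAST_LLM_MODEL = "gpt-3.5-turbo"  # stand-in for cfg.fast_llm_model


def count_string_tokens(string: str, model_name: str) -> int:
    """Count the tokens `string` occupies for `model_name` via tiktoken."""
    encoding = tiktoken.encoding_for_model(model_name)
    return len(encoding.encode(string))


def guard_command_result(
    command_name: str, command_result: str, summary_memory: str
) -> str:
    """Replace an oversized command result with a short failure notice.

    Budgets the result against the token limit together with the memory
    summary, since both are fed back to the LLM on the next cycle.
    """
    result_tlength = count_string_tokens(str(command_result), FAST_LLM_MODEL)
    memory_tlength = count_string_tokens(str(summary_memory), FAST_LLM_MODEL)
    if result_tlength + memory_tlength + PROMPT_RESERVE > FAST_TOKEN_LIMIT:
        return (
            f"Failure: command {command_name} returned too much output. "
            "Do not execute this command again with the same arguments."
        )
    return f"Command {command_name} returned: {command_result}"
```

Note the design choice visible in the diff: only the *result string* handed back to the LLM is replaced; the command itself has already executed, so any side effects persist. The failure message doubles as an instruction to the model not to retry the same call.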