From 7bf39cbb72a838fae5fc2ac70f6b8a5cf421e6ca Mon Sep 17 00:00:00 2001
From: Kinance
Date: Tue, 13 Jun 2023 08:29:11 +0900
Subject: [PATCH] Include the token length of the current summary (#4670)

Co-authored-by: merwanehamadi
---
 autogpt/memory/message_history.py | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/autogpt/memory/message_history.py b/autogpt/memory/message_history.py
index 7f307536..f4a2217f 100644
--- a/autogpt/memory/message_history.py
+++ b/autogpt/memory/message_history.py
@@ -174,6 +174,7 @@ class MessageHistory:
         # TODO make this default dynamic
         prompt_template_length = 100
         max_tokens = OPEN_AI_CHAT_MODELS.get(cfg.fast_llm_model).max_tokens
+        summary_tlength = count_string_tokens(str(self.summary), cfg.fast_llm_model)
         batch = []
         batch_tlength = 0
 
@@ -181,9 +182,15 @@ class MessageHistory:
         for event in new_events:
             event_tlength = count_string_tokens(str(event), cfg.fast_llm_model)
 
-            if batch_tlength + event_tlength > max_tokens - prompt_template_length:
+            if (
+                batch_tlength + event_tlength
+                > max_tokens - prompt_template_length - summary_tlength
+            ):
                 # The batch is full. Summarize it and start a new one.
                 self.summarize_batch(batch, cfg)
+                summary_tlength = count_string_tokens(
+                    str(self.summary), cfg.fast_llm_model
+                )
                 batch = [event]
                 batch_tlength = event_tlength
             else:
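
Note (not part of the patch): the following is a minimal standalone sketch of
the batching logic this change touches, illustrating why the running summary's
token length is subtracted from the batch budget and refreshed after each
summarization. The names count_tokens, summarize, and batch_and_summarize are
hypothetical stand-ins for AutoGPT's count_string_tokens and
MessageHistory.summarize_batch; the defaults are assumptions, not values from
the repository.

from typing import Callable, List

def batch_and_summarize(
    events: List[str],
    summary: str,
    count_tokens: Callable[[str], int],
    summarize: Callable[[List[str], str], str],
    max_tokens: int = 4096,
    prompt_template_length: int = 100,
) -> str:
    """Group events into batches that fit the remaining token budget,
    folding each full batch into the running summary."""
    # The summary accompanies every summarization prompt, so its tokens
    # must be reserved up front.
    summary_tlength = count_tokens(summary)
    batch: List[str] = []
    batch_tlength = 0

    for event in events:
        event_tlength = count_tokens(event)

        # Budget = model limit minus the prompt template and the current
        # summary, both of which are sent alongside the batch.
        if (
            batch_tlength + event_tlength
            > max_tokens - prompt_template_length - summary_tlength
        ):
            # The batch is full: summarize it, then refresh the summary's
            # token count because the summary has just grown.
            summary = summarize(batch, summary)
            summary_tlength = count_tokens(summary)
            batch = [event]
            batch_tlength = event_tlength
        else:
            batch.append(event)
            batch_tlength += event_tlength

    # Fold in any remaining events.
    if batch:
        summary = summarize(batch, summary)
    return summary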