From 09a5b3149d921cb18094be5c36aa27e25e9f06b4 Mon Sep 17 00:00:00 2001
From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com>
Date: Sat, 15 Apr 2023 23:01:01 -0500
Subject: [PATCH] Add on_planning hook.

---
 autogpt/agent/agent.py |  1 +
 autogpt/chat.py        | 24 ++++++++++++++++++++++--
 2 files changed, 23 insertions(+), 2 deletions(-)

diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py
index 55690119..934af42c 100644
--- a/autogpt/agent/agent.py
+++ b/autogpt/agent/agent.py
@@ -65,6 +65,7 @@ class Agent:
             # Send message to AI, get response
             with Spinner("Thinking... "):
                 assistant_reply = chat_with_ai(
+                    self,
                     self.prompt,
                     self.user_input,
                     self.full_message_history,
diff --git a/autogpt/chat.py b/autogpt/chat.py
index b0886967..7f63bd9e 100644
--- a/autogpt/chat.py
+++ b/autogpt/chat.py
@@ -51,7 +51,7 @@ def generate_context(prompt, relevant_memory, full_message_history, model):
 
 # TODO: Change debug from hardcode to argument
 def chat_with_ai(
-    prompt, user_input, full_message_history, permanent_memory, token_limit
+    agent, prompt, user_input, full_message_history, permanent_memory, token_limit
 ):
     """Interact with the OpenAI API, sending the prompt, user input, message history,
     and permanent memory."""
@@ -109,7 +109,7 @@ def chat_with_ai(
         current_tokens_used += token_counter.count_message_tokens(
             [create_chat_message("user", user_input)], model
         )  # Account for user input (appended later)
-
+
         while next_message_to_add_index >= 0:
             # print (f"CURRENT TOKENS USED: {current_tokens_used}")
             message_to_add = full_message_history[next_message_to_add_index]
@@ -135,6 +135,26 @@ def chat_with_ai(
         # Append user input, the length of this is accounted for above
         current_context.extend([create_chat_message("user", user_input)])
 
+        plugin_count = len(cfg.plugins)
+        for i, plugin in enumerate(cfg.plugins):
+            plugin_response = plugin.on_planning(
+                agent.prompt_generator, current_context
+            )
+            if not plugin_response or plugin_response == "":
+                continue
+            tokens_to_add = token_counter.count_message_tokens(
+                [create_chat_message("system", plugin_response)], model
+            )
+            if current_tokens_used + tokens_to_add > send_token_limit:
+                if cfg.debug_mode:
+                    print("Plugin response too long, skipping:",
+                          plugin_response)
+                    print("Plugins remaining at stop:", plugin_count - i)
+                break
+            current_context.append(
+                create_chat_message("system", plugin_response)
+            )
+
         # Calculate remaining tokens
         tokens_remaining = token_limit - current_tokens_used
         # assert tokens_remaining >= 0, "Tokens remaining is negative."
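
Note for reviewers: this patch adds only the call site for the new hook; the plugin interface that supplies on_planning is defined elsewhere. The sketch below is a hypothetical plugin illustrating the contract the loop in chat.py assumes. Only the method name, its arguments (agent.prompt_generator and the current_context message list), and the treatment of the return value are taken from the diff; the class name and the example logic are invented for illustration.

    class ExamplePlanningPlugin:
        """Hypothetical plugin showing the on_planning contract used above."""

        def on_planning(self, prompt_generator, messages):
            # prompt_generator: the agent's PromptGenerator instance
            #   (agent.prompt_generator at the call site).
            # messages: the chat context assembled so far; a list of
            #   {"role": ..., "content": ...} dicts built by create_chat_message.
            #
            # Returning a non-empty string makes chat_with_ai append it to the
            # context as a "system" message, budget permitting; returning None
            # or "" contributes nothing.
            if any(msg["role"] == "user" for msg in messages):
                return "Reminder: stay within the task constraints."
            return None

Design-wise, each plugin response is token-counted before it is appended, and the loop stops consulting further plugins as soon as one response would push the context past send_token_limit; with cfg.debug_mode set, the prints report the oversized response and how many plugins were left unconsulted.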