From ee42b4d06c2bd7aa99e77edda00e9bfb9f5e327f Mon Sep 17 00:00:00 2001
From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com>
Date: Sat, 15 Apr 2023 23:45:16 -0500
Subject: [PATCH] Add pre_instruction and on_instruction hooks.

---
 autogpt/agent/agent_manager.py | 31 +++++++++++++++++++++++++------
 autogpt/chat.py                |  9 +++------
 2 files changed, 28 insertions(+), 12 deletions(-)

diff --git a/autogpt/agent/agent_manager.py b/autogpt/agent/agent_manager.py
index 3467f8bf..32473750 100644
--- a/autogpt/agent/agent_manager.py
+++ b/autogpt/agent/agent_manager.py
@@ -1,7 +1,7 @@
 """Agent manager for managing GPT agents"""
 from typing import List, Tuple, Union
 from autogpt.llm_utils import create_chat_completion
-from autogpt.config.config import Singleton
+from autogpt.config.config import Singleton, Config
 
 
 class AgentManager(metaclass=Singleton):
@@ -10,6 +10,7 @@ class AgentManager(metaclass=Singleton):
     def __init__(self):
         self.next_key = 0
         self.agents = {}  # key, (task, full_message_history, model)
+        self.cfg = Config()
 
     # Create new GPT agent
     # TODO: Centralise use of create_chat_completion() to globally enforce token limit
@@ -28,6 +29,10 @@ class AgentManager(metaclass=Singleton):
         messages = [
             {"role": "user", "content": prompt},
         ]
+        for plugin in self.cfg.plugins:
+            plugin_messages = plugin.pre_instruction(messages)
+            if plugin_messages:
+                messages.extend(plugin_messages)
 
         # Start GPT instance
         agent_reply = create_chat_completion(
@@ -35,9 +40,13 @@
             messages=messages,
         )
 
-        # Update full message history
-        messages.append({"role": "assistant", "content": agent_reply})
+        plugins_reply = agent_reply
+        for plugin in self.cfg.plugins:
+            plugin_result = plugin.on_instruction(messages)
+            if plugin_result:
+                plugins_reply = f"{plugins_reply}\n{plugin_result}"
 
+        messages.append({"role": "assistant", "content": plugins_reply})
         key = self.next_key
         # This is done instead of len(agents) to make keys unique even if agents
         # are deleted
@@ -45,7 +54,7 @@
 
         self.agents[key] = (task, messages, model)
 
-        return key, agent_reply
+        return key, plugins_reply
 
     def message_agent(self, key: Union[str, int], message: str) -> str:
         """Send a message to an agent and return its response
@@ -62,16 +71,26 @@
         # Add user message to message history before sending to agent
         messages.append({"role": "user", "content": message})
 
+        for plugin in self.cfg.plugins:
+            plugin_messages = plugin.pre_instruction(messages)
+            if plugin_messages:
+                messages.extend(plugin_messages)
+
         # Start GPT instance
         agent_reply = create_chat_completion(
             model=model,
             messages=messages,
         )
 
+        plugins_reply = agent_reply
+        for plugin in self.cfg.plugins:
+            plugin_result = plugin.on_instruction(messages)
+            if plugin_result:
+                plugins_reply = f"{plugins_reply}\n{plugin_result}"
         # Update full message history
-        messages.append({"role": "assistant", "content": agent_reply})
+        messages.append({"role": "assistant", "content": plugins_reply})
 
-        return agent_reply
+        return plugins_reply
 
     def list_agents(self) -> List[Tuple[Union[str, int], str]]:
         """Return a list of all agents
diff --git a/autogpt/chat.py b/autogpt/chat.py
index 7f63bd9e..2844251f 100644
--- a/autogpt/chat.py
+++ b/autogpt/chat.py
@@ -109,7 +109,7 @@ def chat_with_ai(
         current_tokens_used += token_counter.count_message_tokens(
             [create_chat_message("user", user_input)], model
         )  # Account for user input (appended later)
-    
+
         while next_message_to_add_index >= 0:
             # print(f"CURRENT TOKENS USED: {current_tokens_used}")
             message_to_add = full_message_history[next_message_to_add_index]
@@ -147,13 +147,10 @@
             )
             if current_tokens_used + tokens_to_add > send_token_limit:
                 if cfg.debug_mode:
-                    print("Plugin response too long, skipping:",
-                          plugin_response)
+                    print("Plugin response too long, skipping:", plugin_response)
                     print("Plugins remaining at stop:", plugin_count - i)
                 break
-            current_context.append(
-                create_chat_message("system", plugin_response)
-            )
+            current_context.append(create_chat_message("system", plugin_response))
 
         # Calculate remaining tokens
         tokens_remaining = token_limit - current_tokens_used