Add pre_instruction and on_instruction hooks.

Author: BillSchumacher
Date:   2023-04-15 23:45:16 -05:00
Parent: 09a5b3149d
Commit: ee42b4d06c

2 changed files with 28 additions and 12 deletions
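
For orientation before the diff: the call sites below imply that a plugin's pre_instruction takes the outgoing message list and may return extra messages to send, while on_instruction runs after the completion and may return text to append to the agent's reply. A minimal sketch of a conforming plugin follows; the class name and behavior are hypothetical, and nothing in this commit requires a particular base class.

from datetime import datetime, timezone


class TimestampPlugin:
    """Hypothetical plugin illustrating the two new hooks."""

    def pre_instruction(self, messages: list) -> list:
        # Runs before create_chat_completion(); a truthy return value is
        # extend()-ed onto the outgoing message list.
        now = datetime.now(timezone.utc).isoformat()
        return [{"role": "system", "content": f"Current UTC time: {now}"}]

    def on_instruction(self, messages: list) -> str:
        # Runs after the completion; a truthy return value is appended to
        # the agent's reply, separated by a newline.
        return f"(history is now {len(messages)} messages long)"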


@@ -1,7 +1,7 @@
 """Agent manager for managing GPT agents"""
 from typing import List, Tuple, Union
 
 from autogpt.llm_utils import create_chat_completion
-from autogpt.config.config import Singleton
+from autogpt.config.config import Singleton, Config
 
 class AgentManager(metaclass=Singleton):
@@ -10,6 +10,7 @@ class AgentManager(metaclass=Singleton):
     def __init__(self):
         self.next_key = 0
         self.agents = {}  # key, (task, full_message_history, model)
+        self.cfg = Config()
 
     # Create new GPT agent
     # TODO: Centralise use of create_chat_completion() to globally enforce token limit
@@ -28,6 +29,10 @@ class AgentManager(metaclass=Singleton):
         messages = [
             {"role": "user", "content": prompt},
         ]
+        for plugin in self.cfg.plugins:
+            plugin_messages = plugin.pre_instruction(messages)
+            if plugin_messages:
+                messages.extend(plugin_messages)
 
         # Start GPT instance
         agent_reply = create_chat_completion(
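
Concretely, the pre_instruction loop above only grows the outgoing list when a plugin returns something truthy; a None or empty return leaves it untouched. The same loop run standalone against the hypothetical TimestampPlugin sketched earlier:

messages = [{"role": "user", "content": "You are writing a haiku."}]
plugins = [TimestampPlugin()]  # stand-in for self.cfg.plugins

for plugin in plugins:
    plugin_messages = plugin.pre_instruction(messages)
    if plugin_messages:
        messages.extend(plugin_messages)

print(len(messages))  # 2: the prompt plus one injected system message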
@@ -35,9 +40,13 @@ class AgentManager(metaclass=Singleton):
             messages=messages,
         )
 
-        # Update full message history
-        messages.append({"role": "assistant", "content": agent_reply})
+        plugins_reply = agent_reply
+        for plugin in self.cfg.plugins:
+            plugin_result = plugin.on_instruction(messages)
+            if plugin_result:
+                plugins_reply = f"{plugins_reply}\n{plugin_result}"
+        messages.append({"role": "assistant", "content": plugins_reply})
 
         key = self.next_key
         # This is done instead of len(agents) to make keys unique even if agents
         # are deleted
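
The recorded reply is no longer the raw model output: each truthy on_instruction result is concatenated below it with a newline, and that combined string is what lands in the history and gets returned. A toy trace with two made-up plugins:

class NoisyPlugin:
    def on_instruction(self, messages):
        return "NoisyPlugin: post-processed the reply"


class QuietPlugin:
    def on_instruction(self, messages):
        return None  # falsy results leave the reply untouched


agent_reply = "Here is your haiku."
plugins_reply = agent_reply
for plugin in [NoisyPlugin(), QuietPlugin()]:
    plugin_result = plugin.on_instruction([])
    if plugin_result:
        plugins_reply = f"{plugins_reply}\n{plugin_result}"

print(plugins_reply)
# Here is your haiku.
# NoisyPlugin: post-processed the reply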
@@ -45,7 +54,7 @@ class AgentManager(metaclass=Singleton):
         self.agents[key] = (task, messages, model)
 
-        return key, agent_reply
+        return key, plugins_reply
 
     def message_agent(self, key: Union[str, int], message: str) -> str:
         """Send a message to an agent and return its response
@@ -62,16 +71,26 @@ class AgentManager(metaclass=Singleton):
         # Add user message to message history before sending to agent
         messages.append({"role": "user", "content": message})
 
+        for plugin in self.cfg.plugins:
+            plugin_messages = plugin.pre_instruction(messages)
+            if plugin_messages:
+                messages.extend(plugin_messages)
+
         # Start GPT instance
         agent_reply = create_chat_completion(
             model=model,
             messages=messages,
         )
+        plugins_reply = agent_reply
+        for plugin in self.cfg.plugins:
+            plugin_result = plugin.on_instruction(messages)
+            if plugin_result:
+                plugins_reply = f"{plugins_reply}\n{plugin_result}"
 
         # Update full message history
-        messages.append({"role": "assistant", "content": agent_reply})
+        messages.append({"role": "assistant", "content": plugins_reply})
 
-        return agent_reply
+        return plugins_reply
 
     def list_agents(self) -> List[Tuple[Union[str, int], str]]:
         """Return a list of all agents


@@ -109,7 +109,7 @@ def chat_with_ai(
         current_tokens_used += token_counter.count_message_tokens(
             [create_chat_message("user", user_input)], model
         )  # Account for user input (appended later)
 
         while next_message_to_add_index >= 0:
             # print (f"CURRENT TOKENS USED: {current_tokens_used}")
             message_to_add = full_message_history[next_message_to_add_index]
@@ -147,13 +147,10 @@ def chat_with_ai(
             )
             if current_tokens_used + tokens_to_add > send_token_limit:
                 if cfg.debug_mode:
-                    print("Plugin response too long, skipping:",
-                          plugin_response)
+                    print("Plugin response too long, skipping:", plugin_response)
                     print("Plugins remaining at stop:", plugin_count - i)
                 break
-            current_context.append(
-                create_chat_message("system", plugin_response)
-            )
+            current_context.append(create_chat_message("system", plugin_response))
 
         # Calculate remaining tokens
         tokens_remaining = token_limit - current_tokens_used
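
Functionally this hunk only reflows two statements, but the logic it touches is worth spelling out: a plugin response is appended to the context only while its token cost still fits under send_token_limit, and the loop stops at the first one that does not. A self-contained sketch of that guard, with a whitespace splitter standing in for token_counter.count_message_tokens:

def count_tokens(text: str) -> int:
    # Crude stand-in for token_counter.count_message_tokens; the real code
    # counts chat-message tokens, not whitespace-separated words.
    return len(text.split())


send_token_limit = 50
current_tokens_used = 42
plugin_responses = ["short note", "a very long plugin response " * 20]

current_context = []
for i, plugin_response in enumerate(plugin_responses):
    tokens_to_add = count_tokens(plugin_response)
    if current_tokens_used + tokens_to_add > send_token_limit:
        print("Plugin response too long, skipping:", plugin_response[:30])
        print("Plugins remaining at stop:", len(plugin_responses) - i)
        break
    current_context.append({"role": "system", "content": plugin_response})
    current_tokens_used += tokens_to_add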