Remove dead agent manager (#4900)

* Remove dead agent manager

---------

Co-authored-by: Reinier van der Leer <github@pwuts.nl>
This commit is contained in:
James Collins
2023-07-08 14:50:12 -07:00
committed by GitHub
parent a38d400207
commit 89c1f2d0c0
4 changed files with 1 addition and 218 deletions

View File

@@ -1,4 +1,3 @@
from autogpt.agent.agent import Agent
from autogpt.agent.agent_manager import AgentManager
__all__ = ["Agent", "AgentManager"]
__all__ = ["Agent"]

View File

@@ -1,145 +0,0 @@
"""Agent manager for managing GPT agents"""
from __future__ import annotations
from autogpt.config import Config
from autogpt.llm.base import ChatSequence
from autogpt.llm.chat import Message, create_chat_completion
from autogpt.singleton import Singleton
class AgentManager(metaclass=Singleton):
    """Agent manager for managing GPT agents."""

    def __init__(self, config: Config):
        # Monotonic counter for agent keys; never reused (see create_agent).
        self.next_key = 0
        # key -> (task, full_message_history, model)
        self.agents: dict[int, tuple[str, list[Message], str]] = {}
        self.config = config

    # Create new GPT agent
    # TODO: Centralise use of create_chat_completion() to globally enforce token limit
    def create_agent(
        self, task: str, creation_prompt: str, model: str
    ) -> tuple[int, str]:
        """Create a new agent and return its key and first reply.

        Args:
            task: The task to perform
            creation_prompt: Prompt passed to the LLM at creation
            model: The model to use to run this agent

        Returns:
            A (key, agent_reply) tuple; the key identifies the new agent.
        """
        messages = ChatSequence.for_model(model, [Message("user", creation_prompt)])

        # Let plugins inject extra messages before the first completion.
        for plugin in self.config.plugins:
            if not plugin.can_handle_pre_instruction():
                continue
            if plugin_messages := plugin.pre_instruction(messages.raw()):
                messages.extend([Message(**raw_msg) for raw_msg in plugin_messages])

        # Start GPT instance
        agent_reply = create_chat_completion(
            prompt=messages, config=self.config
        ).content
        messages.add("assistant", agent_reply)

        # Collect any on-instruction plugin output into one assistant message.
        plugins_reply = ""
        for i, plugin in enumerate(self.config.plugins):
            if not plugin.can_handle_on_instruction():
                continue
            if plugin_result := plugin.on_instruction([m.raw() for m in messages]):
                sep = "\n" if i else ""
                plugins_reply = f"{plugins_reply}{sep}{plugin_result}"
        if plugins_reply:
            messages.add("assistant", plugins_reply)

        # A counter is used instead of len(self.agents) so keys stay unique
        # even after agents are deleted.
        key = self.next_key
        self.next_key += 1

        self.agents[key] = (task, list(messages), model)

        for plugin in self.config.plugins:
            if not plugin.can_handle_post_instruction():
                continue
            agent_reply = plugin.post_instruction(agent_reply)

        return key, agent_reply

    def message_agent(self, key: str | int, message: str) -> str:
        """Send a message to an agent and return its response.

        Args:
            key: The key of the agent to message
            message: The message to send to the agent

        Returns:
            The agent's response

        Raises:
            KeyError: if no agent exists under `key`
        """
        task, message_history, model = self.agents[int(key)]

        # Rebuild a ChatSequence from the stored history and append the
        # incoming user message before sending it to the agent.
        messages = ChatSequence.for_model(model, message_history)
        messages.add("user", message)

        for plugin in self.config.plugins:
            if not plugin.can_handle_pre_instruction():
                continue
            if plugin_messages := plugin.pre_instruction([m.raw() for m in messages]):
                messages.extend([Message(**raw_msg) for raw_msg in plugin_messages])

        # Start GPT instance
        agent_reply = create_chat_completion(
            prompt=messages, config=self.config
        ).content
        messages.add("assistant", agent_reply)

        # NOTE: unlike create_agent(), this seeds plugins_reply with the
        # agent's own reply, so plugin output is appended after it.
        plugins_reply = agent_reply
        for i, plugin in enumerate(self.config.plugins):
            if not plugin.can_handle_on_instruction():
                continue
            if plugin_result := plugin.on_instruction([m.raw() for m in messages]):
                sep = "\n" if i else ""
                plugins_reply = f"{plugins_reply}{sep}{plugin_result}"
        if plugins_reply:
            messages.add("assistant", plugins_reply)

        # Update full message history. Bug fix: the original rebound the local
        # `messages` without ever writing it back to self.agents, so the
        # conversation context was silently discarded between calls.
        self.agents[int(key)] = (task, list(messages), model)

        for plugin in self.config.plugins:
            if not plugin.can_handle_post_instruction():
                continue
            agent_reply = plugin.post_instruction(agent_reply)

        return agent_reply

    def list_agents(self) -> list[tuple[str | int, str]]:
        """Return a list of all agents.

        Returns:
            A list of tuples of the form (key, task)
        """
        return [(key, task) for key, (task, _, _) in self.agents.items()]

    def delete_agent(self, key: str | int) -> bool:
        """Delete an agent from the agent manager.

        Args:
            key: The key of the agent to delete

        Returns:
            True if successful, False otherwise
        """
        try:
            del self.agents[int(key)]
            return True
        except KeyError:
            return False

View File

@@ -7,7 +7,6 @@ constraints: [
resources: [
'Internet access for searches and information gathering.',
'Long Term memory management.',
'GPT-3.5 powered Agents for delegation of simple tasks.',
'File output.'
]
performance_evaluations: [

View File

@@ -1,70 +0,0 @@
import pytest
from autogpt.agent.agent_manager import AgentManager
from autogpt.llm import ChatModelResponse
from autogpt.llm.chat import create_chat_completion
from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS
@pytest.fixture
def agent_manager(config):
    """Yield a fresh AgentManager, then clear the singleton cache.

    AgentManager is a Singleton, so the cached instance must be dropped
    after each test to keep tests isolated. Gross, but unavoidable here.
    """
    manager = AgentManager(config)
    yield manager
    del AgentManager._instances[AgentManager]
@pytest.fixture
def task():
    """Sample task description given to the agents under test."""
    return "translate English to French"
@pytest.fixture
def prompt():
    """Sample creation prompt matching the `task` fixture."""
    return "Translate the following English text to French: 'Hello, how are you?'"
@pytest.fixture
def model():
    """Model name used by the agents under test."""
    return "gpt-3.5-turbo"
@pytest.fixture(autouse=True)
def mock_create_chat_completion(mocker, config):
    # Patch the completion call at its use site inside agent_manager so no
    # real LLM request is made; autouse=True applies it to every test here.
    mock_create_chat_completion = mocker.patch(
        "autogpt.agent.agent_manager.create_chat_completion",
        wraps=create_chat_completion,
    )
    # Every call returns the same canned reply regardless of input.
    mock_create_chat_completion.return_value = ChatModelResponse(
        model_info=OPEN_AI_CHAT_MODELS[config.fast_llm],
        content="irrelevant",
        function_call={},
    )
    return mock_create_chat_completion
def test_create_agent(agent_manager: AgentManager, task, prompt, model):
    """create_agent returns an int key plus a string reply and registers the agent."""
    new_key, reply = agent_manager.create_agent(task, prompt, model)
    assert isinstance(new_key, int)
    assert isinstance(reply, str)
    assert new_key in agent_manager.agents
def test_message_agent(agent_manager: AgentManager, task, prompt, model):
    """Messaging an existing agent yields a string reply."""
    new_key, _ = agent_manager.create_agent(task, prompt, model)
    reply = agent_manager.message_agent(
        new_key, "Please translate 'Good morning' to French."
    )
    assert isinstance(reply, str)
def test_list_agents(agent_manager: AgentManager, task, prompt, model):
    """list_agents returns a list containing the (key, task) pair of a created agent."""
    new_key, _ = agent_manager.create_agent(task, prompt, model)
    listing = agent_manager.list_agents()
    assert isinstance(listing, list)
    assert (new_key, task) in listing
def test_delete_agent(agent_manager: AgentManager, task, prompt, model):
    """delete_agent reports success and removes the agent from the registry."""
    new_key, _ = agent_manager.create_agent(task, prompt, model)
    assert agent_manager.delete_agent(new_key)
    assert new_key not in agent_manager.agents