Mirror of https://github.com/aljazceru/Auto-GPT.git (synced 2026-02-15 03:04:24 +01:00)
AutoGPT: Deprecate MessageHistory
@@ -18,12 +18,11 @@ from autogpt.llm.base import Message
from autogpt.llm.utils import count_string_tokens
from autogpt.logs.log_cycle import (
    CURRENT_CONTEXT_FILE_NAME,
    FULL_MESSAGE_HISTORY_FILE_NAME,
    NEXT_ACTION_FILE_NAME,
    USER_INPUT_FILE_NAME,
    LogCycleHandler,
)
from autogpt.models.agent_actions import (
from autogpt.models.action_history import (
    Action,
    ActionErrorResult,
    ActionInterruptedByHuman,
@@ -113,22 +112,12 @@ class Agent(ContextMixin, WorkspaceMixin, WatchdogMixin, BaseAgent):
kwargs["append_messages"] = []
kwargs["append_messages"].append(budget_msg)

# # Include message history in base prompt
# kwargs["with_message_history"] = True

return super().construct_base_prompt(*args, **kwargs)

def on_before_think(self, *args, **kwargs) -> ChatSequence:
    prompt = super().on_before_think(*args, **kwargs)

    self.log_cycle_handler.log_count_within_cycle = 0
    self.log_cycle_handler.log_cycle(
        self.ai_config.ai_name,
        self.created_at,
        self.cycle_count,
        self.message_history.raw(),
        FULL_MESSAGE_HISTORY_FILE_NAME,
    )
    self.log_cycle_handler.log_cycle(
        self.ai_config.ai_name,
        self.created_at,
@@ -148,11 +137,6 @@ class Agent(ContextMixin, WorkspaceMixin, WatchdogMixin, BaseAgent):

if command_name == "human_feedback":
    result = ActionInterruptedByHuman(feedback=user_input)
    self.message_history.add(
        "user",
        "I interrupted the execution of the command you proposed "
        f"to give you some feedback: {user_input}",
    )
    self.log_cycle_handler.log_cycle(
        self.ai_config.ai_name,
        self.created_at,
@@ -207,26 +191,6 @@ class Agent(ContextMixin, WorkspaceMixin, WatchdogMixin, BaseAgent):
elif result.status == "error":
    result.reason = plugin.post_command(command_name, result.reason)

# Check if there's a result from the command append it to the message
if result.status == "success":
    self.message_history.add(
        "system",
        f"Command {command_name} returned: {result.outputs}",
        "action_result",
    )
elif result.status == "error":
    message = f"Command {command_name} failed: {result.reason}"

    # Append hint to the error message if the exception has a hint
    if (
        result.error
        and isinstance(result.error, AgentException)
        and result.error.hint
    ):
        message = message.rstrip(".") + f". {result.error.hint}"

    self.message_history.add("system", message, "action_result")

# Update action history
self.event_history.register_result(result)
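The hunks above are the heart of the change on the Agent side: instead of serializing each command outcome into message_history as a "system" message, the agent now hands the structured result straight to self.event_history.register_result(result). For reference, a rough, self-contained sketch of the string formatting the deleted block performed; the Result and AgentException classes below are simplified stand-ins, not autogpt's real types.

# Stand-in sketch of the removed result-to-message formatting (not part of the commit).
from dataclasses import dataclass
from typing import Optional


class AgentException(Exception):
    # Simplified stand-in for autogpt's AgentException, which carries an optional hint
    def __init__(self, message: str, hint: str = ""):
        super().__init__(message)
        self.hint = hint


@dataclass
class Result:
    status: str                        # "success" or "error"
    outputs: str = ""                  # payload on success
    reason: str = ""                   # explanation on error
    error: Optional[Exception] = None  # original exception, if any


def format_result_message(command_name: str, result: Result) -> str:
    """Mirror of the deleted logic: success output, or failure reason plus hint."""
    if result.status == "success":
        return f"Command {command_name} returned: {result.outputs}"
    message = f"Command {command_name} failed: {result.reason}"
    # Append the hint, as the removed code did for AgentException errors
    if result.error and isinstance(result.error, AgentException) and result.error.hint:
        message = message.rstrip(".") + f". {result.error.hint}"
    return message


print(format_result_message("read_file", Result(status="success", outputs="12 lines of text")))
print(format_result_message(
    "read_file",
    Result(status="error", reason="File not found.",
           error=AgentException("File not found.", hint="Check the path.")),
))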
@@ -15,8 +15,7 @@ from autogpt.config.ai_directives import AIDirectives
from autogpt.llm.base import ChatSequence, Message
from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS, get_openai_command_specs
from autogpt.llm.utils import count_message_tokens, create_chat_completion
from autogpt.memory.message_history import MessageHistory
from autogpt.models.agent_actions import EpisodicActionHistory, ActionResult
from autogpt.models.action_history import EpisodicActionHistory, ActionResult
from autogpt.prompts.generator import PromptGenerator
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
@@ -92,11 +91,6 @@ class BaseAgent(metaclass=ABCMeta):

self.event_history = EpisodicActionHistory()

self.message_history = MessageHistory(
    model=self.llm,
    max_summary_tlength=summary_max_tlength or self.send_token_limit // 6,
)

# Support multi-inheritance and mixins for subclasses
super(BaseAgent, self).__init__()
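With this hunk, self.event_history (an EpisodicActionHistory) becomes BaseAgent's only running record of what the agent has done. A minimal stand-in sketch of the surface the rest of this diff relies on, namely .episodes, register_result(), fmt_paragraph(), and per-episode .action/.result; register_action() here is an assumption about how the action side gets filled in, and the real class in autogpt.models.action_history may differ.

# Stand-in sketch, not the real autogpt.models.action_history module.
from dataclasses import dataclass, field
from typing import Any


@dataclass
class Episode:
    action: Any = None
    result: Any = None


@dataclass
class EpisodicActionHistorySketch:
    episodes: list[Episode] = field(default_factory=list)

    def register_action(self, action: Any) -> None:
        # Assumed counterpart to register_result: start a new episode for a proposed action
        self.episodes.append(Episode(action=action))

    def register_result(self, result: Any) -> None:
        # Attach the result to the most recent episode (as the Agent hunk above calls it)
        if self.episodes:
            self.episodes[-1].result = result

    def fmt_paragraph(self) -> str:
        # Flatten the history into prose, e.g. for token counting before prompting
        return " ".join(
            f"Executed {e.action} -> {e.result}." for e in self.episodes if e.action
        )

    def __iter__(self):
        return iter(self.episodes)


history = EpisodicActionHistorySketch()
history.register_action("read_file('notes.txt')")
history.register_result("12 lines of text")
print(history.episodes)
print(history.fmt_paragraph())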
@@ -168,7 +162,6 @@ class BaseAgent(metaclass=ABCMeta):
prepend_messages: list[Message] = [],
append_messages: list[Message] = [],
reserve_tokens: int = 0,
with_message_history: bool = False,
) -> ChatSequence:
    """Constructs and returns a prompt with the following structure:
    1. System prompt
@@ -196,24 +189,6 @@ class BaseAgent(metaclass=ABCMeta):
    [Message("system", self.system_prompt)] + prepend_messages,
)

if with_message_history:
    # Reserve tokens for messages to be appended later, if any
    reserve_tokens += self.message_history.max_summary_tlength
    if append_messages:
        reserve_tokens += count_message_tokens(append_messages, self.llm.name)

    # Fill message history, up to a margin of reserved_tokens.
    # Trim remaining historical messages and add them to the running summary.
    history_start_index = len(prompt)
    trimmed_history = add_history_upto_token_limit(
        prompt, self.message_history, self.send_token_limit - reserve_tokens
    )
    if trimmed_history:
        new_summary_msg, _ = self.message_history.trim_messages(
            list(prompt), self.config
        )
        prompt.insert(history_start_index, new_summary_msg)

if append_messages:
    prompt.extend(append_messages)
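The deleted with_message_history branch budgeted the prompt in two steps: reserve room for the running summary and for any append_messages, then let raw history fill only what remains of send_token_limit. A worked sketch with illustrative numbers (not taken from the codebase):

# Illustrative budget arithmetic for the removed with_message_history branch.
send_token_limit = 4096           # tokens available for the whole prompt
summary_max_tlength = 4096 // 6   # space reserved for the running summary (682)
append_tokens = 150               # assumed size of messages appended after the history

reserve_tokens = 0
reserve_tokens += summary_max_tlength  # keep room for a new summary message
reserve_tokens += append_tokens        # keep room for append_messages

# Raw history may only fill the prompt up to this ceiling; older messages beyond it
# were trimmed and folded into the running summary by MessageHistory.
history_budget = send_token_limit - reserve_tokens
print(history_budget)  # 3264 tokens left for raw history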
@@ -372,24 +347,9 @@ class BaseAgent(metaclass=ABCMeta):
    The parsed command name and command args, if any, and the agent thoughts.
"""

# Save assistant reply to message history
self.message_history.append(prompt[-1])
self.message_history.add(
    "assistant", llm_response.content, "ai_response"
)  # FIXME: support function calls

try:
    return self.parse_and_process_response(
        llm_response, thought_process_id, prompt, instruction
    )
except InvalidAgentResponseError as e:
    # TODO: tune this message
    self.message_history.add(
        "system",
        f"Your response could not be parsed: {e}"
        "\n\nRemember to only respond using the specified format above!",
    )
    raise
return self.parse_and_process_response(
    llm_response, thought_process_id, prompt, instruction
)

# TODO: update memory/context
@@ -415,27 +375,3 @@ class BaseAgent(metaclass=ABCMeta):
    The parsed command name and command args, if any, and the agent thoughts.
"""
pass


def add_history_upto_token_limit(
    prompt: ChatSequence, history: MessageHistory, t_limit: int
) -> list[Message]:
    current_prompt_length = prompt.token_length
    insertion_index = len(prompt)
    limit_reached = False
    trimmed_messages: list[Message] = []
    for cycle in reversed(list(history.per_cycle())):
        messages_to_add = [msg for msg in cycle if msg is not None]
        tokens_to_add = count_message_tokens(messages_to_add, prompt.model.name)
        if current_prompt_length + tokens_to_add > t_limit:
            limit_reached = True

        if not limit_reached:
            # Add the most recent message to the start of the chain,
            # after the system prompts.
            prompt.insert(insertion_index, *messages_to_add)
            current_prompt_length += tokens_to_add
        else:
            trimmed_messages = messages_to_add + trimmed_messages

    return trimmed_messages
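add_history_upto_token_limit, deleted together with MessageHistory, walked the history newest-first, inserted whole cycles right after the existing prompt until the token budget ran out, and returned the older cycles so they could be summarized. A self-contained sketch of the same strategy using toy types and a whitespace token counter, not the real ChatSequence/MessageHistory API:

# Toy sketch of the newest-first fill strategy used by the deleted helper.
def count_tokens(text: str) -> int:
    # Crude whitespace counter standing in for count_message_tokens
    return len(text.split())


def fill_history_upto_token_limit(
    prompt: list[str], history_cycles: list[list[str]], t_limit: int
) -> list[str]:
    """Insert as many recent cycles as fit under t_limit; return the rest, oldest first."""
    current_length = sum(count_tokens(m) for m in prompt)
    insertion_index = len(prompt)  # history goes right after the existing prompt
    limit_reached = False
    trimmed: list[str] = []

    for cycle in reversed(history_cycles):  # newest cycle first
        tokens_to_add = sum(count_tokens(m) for m in cycle)
        if current_length + tokens_to_add > t_limit:
            limit_reached = True

        if not limit_reached:
            # Insert at a fixed index so the kept cycles stay in chronological order
            prompt[insertion_index:insertion_index] = cycle
            current_length += tokens_to_add
        else:
            trimmed = cycle + trimmed  # oldest trimmed cycles end up first

    return trimmed


prompt = ["system: You are Base"]
cycles = [["user: step one", "assistant: did one"],
          ["user: step two", "assistant: did two"],
          ["user: step three", "assistant: did three"]]
overflow = fill_history_upto_token_limit(prompt, cycles, t_limit=12)
print(prompt)    # system prompt followed by the most recent cycle that fits
print(overflow)  # older cycles that would have been summarized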
@@ -3,7 +3,7 @@ from __future__ import annotations
import logging
from contextlib import ExitStack

from autogpt.models.agent_actions import EpisodicActionHistory
from autogpt.models.action_history import EpisodicActionHistory

from ..base import BaseAgent
@@ -21,9 +21,8 @@ from autogpt.logs.log_cycle import (
    USER_INPUT_FILE_NAME,
    LogCycleHandler,
)
from autogpt.models.agent_actions import (
from autogpt.models.action_history import (
    ActionErrorResult,
    EpisodicActionHistory,
    ActionInterruptedByHuman,
    ActionResult,
    ActionSuccessResult,
@@ -69,8 +68,6 @@ class PlanningAgent(ContextMixin, WorkspaceMixin, BaseAgent):
self.log_cycle_handler = LogCycleHandler()
"""LogCycleHandler for structured debug logging."""

self.action_history = EpisodicActionHistory()

self.plan: list[str] = []
"""List of steps that the Agent plans to take"""
@@ -90,12 +87,12 @@ class PlanningAgent(ContextMixin, WorkspaceMixin, BaseAgent):
plan_section += [f"{i}. {s}" for i, s in enumerate(self.plan, 1)]

# Add the actions so far to the prompt
if self.action_history:
if self.event_history:
    plan_section += [
        "\n### Progress",
        "So far, you have executed the following actions based on the plan:",
    ]
    for i, cycle in enumerate(self.action_history, 1):
    for i, cycle in enumerate(self.event_history, 1):
        if not (cycle.action and cycle.result):
            logger.warn(f"Incomplete action in history: {cycle}")
            continue
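PlanningAgent now derives its "### Progress" listing from the shared self.event_history instead of its own, now-removed self.action_history, skipping episodes that lack an action or a result. A sketch with stand-in types; the per-episode line format below is a guess, since the hunk ends at the continue:

# Sketch of the Progress-section assembly; Episode and the item format are stand-ins.
import logging
from dataclasses import dataclass
from typing import Optional

logger = logging.getLogger(__name__)


@dataclass
class Episode:
    action: Optional[str] = None
    result: Optional[str] = None


def build_progress_section(event_history: list[Episode]) -> list[str]:
    section: list[str] = []
    if event_history:
        section += [
            "\n### Progress",
            "So far, you have executed the following actions based on the plan:",
        ]
        for i, cycle in enumerate(event_history, 1):
            if not (cycle.action and cycle.result):
                # Mirrors the warn-and-skip behavior shown in the hunk
                logger.warning(f"Incomplete action in history: {cycle}")
                continue
            # Hypothetical formatting; the real line is outside this hunk
            section.append(f"{i}. {cycle.action} -> {cycle.result}")
    return section


print(build_progress_section([Episode("read_file('plan.md')", "success")]))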
@@ -229,7 +226,7 @@ class PlanningAgent(ContextMixin, WorkspaceMixin, BaseAgent):
    self.ai_config.ai_name,
    self.created_at,
    self.cycle_count,
    self.action_history.episodes,
    self.event_history.episodes,
    "action_history.json",
)
self.log_cycle_handler.log_cycle(
@@ -285,7 +282,7 @@ class PlanningAgent(ContextMixin, WorkspaceMixin, BaseAgent):

result_tlength = count_string_tokens(str(result), self.llm.name)
memory_tlength = count_string_tokens(
    str(self.message_history.summary_message()), self.llm.name
    str(self.event_history.fmt_paragraph()), self.llm.name
)
if result_tlength + memory_tlength > self.send_token_limit:
    result = ActionErrorResult(
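The oversized-output guard stays, but it now measures event_history.fmt_paragraph() instead of the old message-history summary: if the command result plus the memory paragraph would overflow send_token_limit, the result is replaced with an error. A toy sketch with a whitespace token counter and dict stand-ins for the result types; the real error text is cut off in this hunk, so the reason below is a placeholder.

# Toy sketch of the oversized-output guard; not the real count_string_tokens or ActionErrorResult.
def count_string_tokens(text: str, model: str = "stand-in-model") -> int:
    return len(text.split())  # crude stand-in for the tokenizer-based count


def guard_result_size(result: str, memory_paragraph: str, send_token_limit: int) -> dict:
    result_tlength = count_string_tokens(result)
    memory_tlength = count_string_tokens(memory_paragraph)
    if result_tlength + memory_tlength > send_token_limit:
        # Placeholder reason; the commit builds an ActionErrorResult here
        return {"status": "error", "reason": "Command output was too large to process."}
    return {"status": "success", "outputs": result}


print(guard_result_size("a very long command output", "Executed one action.", send_token_limit=4))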
@@ -301,23 +298,6 @@ class PlanningAgent(ContextMixin, WorkspaceMixin, BaseAgent):
elif result.status == "error":
    result.reason = plugin.post_command(command_name, result.reason)

# Check if there's a result from the command append it to the message
if result.status == "success":
    self.message_history.add(
        "system",
        f"Command {command_name} returned: {result.outputs}",
        "action_result",
    )
elif result.status == "error":
    message = f"Command {command_name} failed: {result.reason}"
    if (
        result.error
        and isinstance(result.error, AgentException)
        and result.error.hint
    ):
        message = message.rstrip(".") + f". {result.error.hint}"
    self.message_history.add("system", message, "action_result")

return result

def parse_and_process_response(
@@ -6,7 +6,6 @@ from typing import Any, Dict, Union
from .config import LOG_DIR

DEFAULT_PREFIX = "agent"
FULL_MESSAGE_HISTORY_FILE_NAME = "full_message_history.json"
CURRENT_CONTEXT_FILE_NAME = "current_context.json"
NEXT_ACTION_FILE_NAME = "next_action.json"
PROMPT_SUMMARY_FILE_NAME = "prompt_summary.json"
@@ -3,7 +3,7 @@ from autogpt.agents.agent import Agent, execute_command

def test_agent_initialization(agent: Agent):
    assert agent.ai_config.ai_name == "Base"
    assert agent.message_history.messages == []
    assert agent.event_history.episodes == []
    assert agent.cycle_budget is None
    assert "You are Base" in agent.system_prompt