Auto-GPT/autogpts/autogpt/autogpt/components/event_history.py
Reinier van der Leer ada2e19829 refactor(agent)!: Use Pydantic models for Agent process output (#7116)
* Introduce `BaseAgentActionProposal`, `OneShotAgentActionProposal`, and `AssistantThoughts` models, replacing `ThoughtProcessResponse` and `DEFAULT_RESPONSE_SCHEMA`
* Refactor and clean up code that no longer needs heavy type checking
* Tweak `OneShot` response format instruction

Granular:

* `autogpt.agents.prompt_strategies.one_shot`
  * Replace `ThoughtProcessResponse`, `DEFAULT_RESPONSE_SCHEMA`, and the associated parsing logic with `AssistantThoughts` and `OneShotAgentActionProposal` (see the sketch after this list)
  * (TANGENTIAL) Move response format instruction into main system prompt message
  * (TANGENTIAL) Adjust response format instruction
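
  For orientation, a minimal sketch of what the replacement models might look like; field names other than `plan` and `use_tool` are assumptions inferred from the breaking-change notes below, not copied from the source:

  ```python
  from pydantic import BaseModel

  # Assumed location of the base model, per the `autogpt.agents.base` notes below
  from autogpt.agents.base import BaseAgentActionProposal


  class AssistantThoughts(BaseModel):
      reasoning: str
      plan: list[str]  # one entry per planned step; was a single `str` before
      self_criticism: str
      speak: str


  class OneShotAgentActionProposal(BaseAgentActionProposal):
      thoughts: AssistantThoughts
  ```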

* `autogpt.agents.base`
  * Add `BaseAgentActionProposal` base model, replacing `ThoughtProcessOutput`
  * Change signature of `execute` method to accept `BaseAgentActionProposal` instead of separate `command_name` and `command_args`
  * Add `do_not_execute(proposal, feedback)` abstract method, replacing `execute("human_feedback", ..., feedback)` (see the sketch after this list)
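
  A hedged sketch of the revised base-agent interface; the exact signatures and the `AP` bound are assumptions based on the bullets above:

  ```python
  from abc import ABC, abstractmethod
  from typing import Any, Generic, TypeVar

  from pydantic import BaseModel

  from autogpt.core.resource.model_providers.schema import AssistantFunctionCall
  from autogpt.models.action_history import ActionResult


  class BaseAgentActionProposal(BaseModel):
      thoughts: Any  # narrowed to `AssistantThoughts` in `OneShotAgentActionProposal`
      use_tool: AssistantFunctionCall  # replaces the old `command` dict


  AP = TypeVar("AP", bound=BaseAgentActionProposal)


  class BaseAgent(ABC, Generic[AP]):
      @abstractmethod
      async def execute(self, proposal: AP) -> ActionResult:
          """Was `execute(command_name, command_args)` before this change."""
          ...

      @abstractmethod
      async def do_not_execute(self, denied_proposal: AP, feedback: str) -> ActionResult:
          """Replaces the old `execute("human_feedback", ..., feedback)` path."""
          ...
  ```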

* Move `history` definition from `BaseAgentSettings` to `AgentSettings` (the only place where it's used anyway)

* `autogpt.models`
  * Add `ModelWithSummary` base model to `.utils`
  * Make the models in `.action_history` (`Episode`, `EpisodicActionHistory`) generic, with a type parameter for the type of `Episode.action` (sketched below)
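
  A minimal sketch of the generic history models, assuming Pydantic v2-style generics syntax (the project may use v1's `GenericModel` equivalent; field types beyond `action` are placeholders):

  ```python
  from typing import Generic, Optional, TypeVar

  from pydantic import BaseModel

  AP = TypeVar("AP")  # bound to the proposal type in the real code


  class Episode(BaseModel, Generic[AP]):
      action: AP                    # the proposal carried out in this step
      result: Optional[str] = None  # an ActionResult in the real code
      summary: Optional[str] = None # filled in by history compression


  class EpisodicActionHistory(BaseModel, Generic[AP]):
      episodes: list[Episode[AP]] = []
  ```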

* `autogpt.core.resource.model_providers.schema`
  * Add `__str__` to `AssistantFunctionCall`, which pretty-prints the function call (sketched below)
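
  A plausible shape for the pretty-printer; the real class lives in `autogpt.core.resource.model_providers.schema`, and the exact format string is an assumption:

  ```python
  import json
  from typing import Any

  from pydantic import BaseModel


  class AssistantFunctionCall(BaseModel):
      name: str
      arguments: dict[str, Any]

      def __str__(self) -> str:
          # e.g. web_search(query="latest AutoGPT release")
          args = ", ".join(f"{k}={json.dumps(v)}" for k, v in self.arguments.items())
          return f"{self.name}({args})"
  ```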

All other changes are a direct result of the changes above.

## BREAKING CHANGE:
* Due to the change in `autogpt.models.action_history`, the application can no longer load/resume agents saved before this change, and vice versa.
* The `additional_output` field in the response of `execute_step` has changed slightly (see the before/after example after this list):
  * Type of `.thoughts.plan` has changed from `str` to `list[str]`
  * `.command` -> `.use_tool`
  * `.command.args` -> `.use_tool.arguments`
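
For illustration, an `additional_output` payload might change shape like this (the values are made up):

```python
before = {
    "thoughts": {"plan": "1. Search the web\n2. Summarize findings"},
    "command": {"name": "web_search", "args": {"query": "..."}},
}

after = {
    "thoughts": {"plan": ["Search the web", "Summarize findings"]},
    "use_tool": {"name": "web_search", "arguments": {"query": "..."}},
}
```
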
2024-05-02 00:43:11 +02:00

83 lines
2.8 KiB
Python

from typing import Callable, Generic, Iterator, Optional

from autogpt.agents.features.watchdog import WatchdogComponent
from autogpt.agents.protocols import AfterExecute, AfterParse, MessageProvider
from autogpt.config.config import Config
from autogpt.core.resource.model_providers.schema import ChatMessage, ChatModelProvider
from autogpt.models.action_history import (
    AP,
    ActionResult,
    Episode,
    EpisodicActionHistory,
)
from autogpt.prompts.utils import indent


class EventHistoryComponent(MessageProvider, AfterParse, AfterExecute, Generic[AP]):
    """Keeps track of the event history and provides a summary of the steps."""

    run_after = [WatchdogComponent]

    def __init__(
        self,
        event_history: EpisodicActionHistory[AP],
        max_tokens: int,
        count_tokens: Callable[[str], int],
        legacy_config: Config,
        llm_provider: ChatModelProvider,
    ) -> None:
        self.event_history = event_history
        self.max_tokens = max_tokens
        self.count_tokens = count_tokens
        self.legacy_config = legacy_config
        self.llm_provider = llm_provider

    def get_messages(self) -> Iterator[ChatMessage]:
        if progress := self._compile_progress(
            self.event_history.episodes,
            self.max_tokens,
            self.count_tokens,
        ):
            yield ChatMessage.system(f"## Progress on your Task so far\n\n{progress}")

    def after_parse(self, result: AP) -> None:
        self.event_history.register_action(result)

    async def after_execute(self, result: ActionResult) -> None:
        self.event_history.register_result(result)
        await self.event_history.handle_compression(
            self.llm_provider, self.legacy_config
        )

    def _compile_progress(
        self,
        episode_history: list[Episode],
        max_tokens: Optional[int] = None,
        count_tokens: Optional[Callable[[str], int]] = None,
    ) -> str:
        if max_tokens and not count_tokens:
            raise ValueError("count_tokens is required if max_tokens is set")

        steps: list[str] = []
        tokens: int = 0
        n_episodes = len(episode_history)

        for i, episode in enumerate(reversed(episode_history)):
            # Use full format for the latest 4 steps, summary or format for older steps
            if i < 4 or episode.summary is None:
                step_content = indent(episode.format(), 2).strip()
            else:
                step_content = episode.summary

            step = f"* Step {n_episodes - i}: {step_content}"

            if max_tokens and count_tokens:
                step_tokens = count_tokens(step)
                if tokens + step_tokens > max_tokens:
                    break
                tokens += step_tokens

            steps.insert(0, step)

        return "\n\n".join(steps)
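
A hypothetical usage sketch of the component above; `config` and `llm_provider` are stand-ins for objects the application would already have:

```python
from autogpt.models.action_history import EpisodicActionHistory

history = EpisodicActionHistory()  # parametrize with a proposal type in real use
component = EventHistoryComponent(
    event_history=history,
    max_tokens=2048,
    count_tokens=lambda text: len(text) // 4,  # crude token estimate for the sketch
    legacy_config=config,       # stand-in: an autogpt Config instance
    llm_provider=llm_provider,  # stand-in: a ChatModelProvider
)

# During a step, the agent pipeline would call:
# - component.get_messages() to prepend the progress summary,
# - component.after_parse(proposal) once the LLM response is parsed,
# - await component.after_execute(result) after the tool runs.
```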