Optimize ActionHistory prompting
@@ -188,14 +188,9 @@ class Agent(ContextMixin, WorkspaceMixin, BaseAgent):
         except AgentException as e:
             result = ActionErrorResult(e.message, e)
-
         logger.debug(f"Command result: {result}")
-
         result_tlength = count_string_tokens(str(result), self.llm.name)
-        # history_tlength = count_string_tokens(
-        #     str(self.message_history.summary_message()), self.llm.name
-        # )
         history_tlength = count_string_tokens(
-            self.event_history.generate_list(), self.llm.name
+            self.event_history.fmt_paragraph(), self.llm.name
         )
         if result_tlength + history_tlength > self.send_token_limit:
             result = ActionErrorResult(
@@ -207,7 +202,7 @@ class Agent(ContextMixin, WorkspaceMixin, BaseAgent):
             if not plugin.can_handle_post_command():
                 continue
             if result.status == "success":
-                result.results = plugin.post_command(command_name, result.results)
+                result.outputs = plugin.post_command(command_name, result.outputs)
             elif result.status == "error":
                 result.reason = plugin.post_command(command_name, result.reason)

@@ -215,7 +210,7 @@ class Agent(ContextMixin, WorkspaceMixin, BaseAgent):
         if result.status == "success":
             self.message_history.add(
                 "system",
-                f"Command {command_name} returned: {result.results}",
+                f"Command {command_name} returned: {result.outputs}",
                 "action_result",
             )
         elif result.status == "error":
@@ -180,13 +180,12 @@ class BaseAgent(metaclass=ABCMeta):
         """

         if self.event_history:
-            prepend_messages.append(
+            prepend_messages.insert(
+                0,
                 Message(
                     "system",
-                    "# Progress\n"
-                    "So far, the following things have happened:\n"
-                    f"{self.event_history.generate_list()}",
-                )
+                    "## Progress\n\n" f"{self.event_history.fmt_paragraph()}",
+                ),
             )

         prompt = ChatSequence.for_model(
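The progress summary also moves from the end of `prepend_messages` to the front. A minimal stand-alone sketch of that ordering change, with `Message` stubbed out (the real class lives in autogpt; the clipboard message and the `list_files` command are purely hypothetical):

from dataclasses import dataclass


@dataclass
class Message:  # minimal stand-in for autogpt's Message(role, content)
    role: str
    content: str


prepend_messages = [Message("system", "## Clipboard\n...")]  # hypothetical earlier context
progress = Message("system", "## Progress\n\n### Step 1: Executed `list_files(...)`\n...")

# Previously the summary was append()ed after any other prepended context;
# insert(0, ...) now makes it the first message in that block.
prepend_messages.insert(0, progress)
assert prepend_messages[0] is progress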
@@ -297,7 +297,7 @@ class PlanningAgent(ContextMixin, WorkspaceMixin, BaseAgent):
             if not plugin.can_handle_post_command():
                 continue
             if result.status == "success":
-                result.results = plugin.post_command(command_name, result.results)
+                result.outputs = plugin.post_command(command_name, result.outputs)
             elif result.status == "error":
                 result.reason = plugin.post_command(command_name, result.reason)

@@ -305,7 +305,7 @@ class PlanningAgent(ContextMixin, WorkspaceMixin, BaseAgent):
         if result.status == "success":
             self.message_history.add(
                 "system",
-                f"Command {command_name} returned: {result.results}",
+                f"Command {command_name} returned: {result.outputs}",
                 "action_result",
             )
         elif result.status == "error":
@@ -11,7 +11,7 @@ import logging
 import os
 import os.path
 from pathlib import Path
-from typing import Generator, Literal
+from typing import Iterator, Literal

 from autogpt.agents.agent import Agent
 from autogpt.agents.utils.exceptions import DuplicateOperationError
@@ -34,7 +34,9 @@ def text_checksum(text: str) -> str:

 def operations_from_log(
     log_path: str | Path,
-) -> Generator[tuple[Operation, str, str | None], None, None]:
+) -> Iterator[
+    tuple[Literal["write", "append"], str, str] | tuple[Literal["delete"], str, None]
+]:
     """Parse the file operations log and return a tuple containing the log entries"""
     try:
         log = open(log_path, "r", encoding="utf-8")
@@ -48,11 +50,7 @@ def operations_from_log(
         operation, tail = line.split(": ", maxsplit=1)
         operation = operation.strip()
         if operation in ("write", "append"):
-            try:
-                path, checksum = (x.strip() for x in tail.rsplit(" #", maxsplit=1))
-            except ValueError:
-                logger.warn(f"File log entry lacks checksum: '{line}'")
-                path, checksum = tail.strip(), None
+            path, checksum = (x.strip() for x in tail.rsplit(" #", maxsplit=1))
             yield (operation, path, checksum)
         elif operation == "delete":
             yield (operation, tail.strip(), None)
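With the checksum fallback gone, a write/append entry that lacks a ` #<checksum>` suffix now raises ValueError instead of being tolerated, and the new return type spells out that only delete entries carry a None checksum. A self-contained sketch of that parsing rule (not the real autogpt function; the log lines below are made up):

from typing import Iterator, Literal

LogEntry = (
    tuple[Literal["write", "append"], str, str] | tuple[Literal["delete"], str, None]
)


def parse_lines(lines: list[str]) -> Iterator[LogEntry]:
    for line in lines:
        operation, tail = line.split(": ", maxsplit=1)
        if operation in ("write", "append"):
            # Raises ValueError if the " #<checksum>" suffix is missing.
            path, checksum = (x.strip() for x in tail.rsplit(" #", maxsplit=1))
            yield (operation, path, checksum)
        elif operation == "delete":
            yield ("delete", tail.strip(), None)


print(list(parse_lines(["write: notes.txt #3a5c9e01", "delete: old_notes.txt"])))
# [('write', 'notes.txt', '3a5c9e01'), ('delete', 'old_notes.txt', None)]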
@@ -228,7 +226,7 @@ def write_to_file(filename: Path, text: str, agent: Agent) -> str:
     with open(filename, "w", encoding="utf-8") as f:
         f.write(text)
     log_operation("write", filename, agent, checksum)
-    return "File written to successfully."
+    return f"File {filename.name} has been written successfully."


 @sanitize_path_arg("filename")
@@ -3,7 +3,7 @@ from __future__ import annotations
 from dataclasses import dataclass
 from typing import Any, Iterator, Literal, Optional

-from autogpt.prompts.utils import format_numbered_list
+from autogpt.prompts.utils import format_numbered_list, indent


 @dataclass
@@ -18,16 +18,13 @@ class Action:

 @dataclass
 class ActionSuccessResult:
-    results: Any
+    outputs: Any
     status: Literal["success"] = "success"

     def __str__(self) -> str:
-        results = (
-            f'"""{self.results}"""'
-            if type(self.results) == str and any(s in self.results for s in ("\n", '"'))
-            else f'"{self.results}"'
-        )
-        return f"Action succeeded, and returned: {results}"
+        outputs = str(self.outputs).replace("```", r"\```")
+        multiline = "\n" in outputs
+        return f"```\n{self.outputs}\n```" if multiline else str(self.outputs)


 @dataclass
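To see what the model receives now, here is a stand-alone mirror of the new `__str__` logic (the class below is a stub, not the real ActionSuccessResult; the example outputs are made up):

from dataclasses import dataclass
from typing import Any


@dataclass
class _SuccessResult:  # stand-in mirroring ActionSuccessResult.__str__ above
    outputs: Any

    def __str__(self) -> str:
        outputs = str(self.outputs).replace("```", r"\```")
        multiline = "\n" in outputs
        return f"```\n{self.outputs}\n```" if multiline else str(self.outputs)


print(_SuccessResult("3 files found"))    # 3 files found
print(_SuccessResult("line 1\nline 2"))   # wrapped in a fenced ``` block

Single-line outputs pass through untouched and multi-line outputs are fenced, which is shorter than the old `Action succeeded, and returned: "..."` wrapper.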
@@ -60,9 +57,9 @@ class ActionHistory:
         action: Action
         result: ActionResult | None

-        def __str__(self):
-            executed_action = f"You executed `{self.action.format_call()}`."
-            action_result = f" Result: {self.result}" if self.result else ""
+        def __str__(self) -> str:
+            executed_action = f"Executed `{self.action.format_call()}`"
+            action_result = f": {self.result}" if self.result else "."
             return executed_action + action_result

     cursor: int
@@ -106,5 +103,30 @@ class ActionHistory:
         self.current_record.result = result
         self.cursor = len(self.cycles)

-    def generate_list(self) -> str:
+    def fmt_list(self) -> str:
         return format_numbered_list(self.cycles)
+
+    def fmt_paragraph(self) -> str:
+        steps: list[str] = []
+
+        for i, c in enumerate(self.cycles, 1):
+            step = f"### Step {i}: Executed `{c.action.format_call()}`\n"
+            step += f'- **Reasoning:** "{c.action.reasoning}"\n'
+            step += (
+                f"- **Status:** `{c.result.status if c.result else 'did_not_finish'}`\n"
+            )
+            if c.result:
+                if c.result.status == "success":
+                    result = str(c.result)
+                    result = "\n" + indent(result) if "\n" in result else result
+                    step += f"- **Output:** {result}"
+                elif c.result.status == "error":
+                    step += f"- **Reason:** {c.result.reason}\n"
+                    if c.result.error:
+                        step += f"- **Error:** {c.result.error}\n"
+                elif c.result.status == "interrupted_by_human":
+                    step += f"- **Feedback:** {c.result.feedback}\n"
+
+            steps.append(step)
+
+        return "\n\n".join(steps)
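For a feel of the new prompt text: for a single cycle that successfully ran a hypothetical `web_search(query='python docs')` command (command, reasoning, and output invented for illustration), `fmt_paragraph()` would emit roughly the following Markdown, with multi-line outputs indented under the Output bullet:

### Step 1: Executed `web_search(query='python docs')`
- **Reasoning:** "I need the official documentation first."
- **Status:** `success`
- **Output:** https://docs.python.org/3/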
@@ -3,3 +3,9 @@ from typing import Any

 def format_numbered_list(items: list[Any], start_at: int = 1) -> str:
     return "\n".join(f"{i}. {str(item)}" for i, item in enumerate(items, start_at))
+
+
+def indent(content: str, indentation: int | str = 4) -> str:
+    if type(indentation) == int:
+        indentation = " " * indentation
+    return indentation + content.replace("\n", f"\n{indentation}")  # type: ignore
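A quick usage check for the two helpers (assuming `autogpt.prompts.utils` is importable; expected output shown in the comments):

from autogpt.prompts.utils import format_numbered_list, indent

print(format_numbered_list(["think", "act", "observe"]))
# 1. think
# 2. act
# 3. observe

print(indent("first line\nsecond line", 2))
#   first line
#   second line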