Rebase MessageHistory on ChatSequence (#4922)

* Rebase `MessageHistory` on `ChatSequence`

* Process feedback & make mypy happy

---------

Co-authored-by: James Collins <collijk@uw.edu>
Author: Reinier van der Leer
Date: 2023-07-09 19:52:59 +02:00
Committed by: GitHub
Parent: 7dc6d736c7
Commit: 1e1eff70bc
6 changed files with 145 additions and 108 deletions


@@ -38,8 +38,8 @@ def agent(config: Config):
     return agent
 
 
-def test_message_history_batch_summary(mocker, agent, config):
-    history = MessageHistory(agent)
+def test_message_history_batch_summary(mocker, agent: Agent, config: Config):
+    history = MessageHistory.for_model(agent.config.smart_llm, agent=agent)
     model = config.fast_llm
     message_tlength = 0
     message_count = 0
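
Note: the constructor change above is the heart of the rebase. MessageHistory is no longer built directly around an agent but on top of ChatSequence, via a for_model factory that takes the model name (here agent.config.smart_llm). A minimal sketch, assuming ChatSequence is roughly a model name plus an ordered message list; the field names below are illustrative, not the actual AutoGPT definitions:

from __future__ import annotations
from dataclasses import dataclass, field
from typing import Any


@dataclass
class ChatSequence:
    model: str                                    # which LLM the sequence targets
    messages: list[Any] = field(default_factory=list)

    def append(self, message: Any) -> None:
        self.messages.append(message)


@dataclass
class MessageHistory(ChatSequence):
    agent: Any = None                             # back-reference used when summarizing

    @classmethod
    def for_model(cls, model_name: str, *, agent: Any) -> "MessageHistory":
        # mirrors the test call: MessageHistory.for_model(agent.config.smart_llm, agent=agent)
        return cls(model=model_name, agent=agent)
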
@@ -48,7 +48,7 @@ def test_message_history_batch_summary(mocker, agent, config):
     mock_summary_response = ChatModelResponse(
         model_info=OPEN_AI_CHAT_MODELS[model],
         content="I executed browse_website command for each of the websites returned from Google search, but none of them have any job openings.",
-        function_call={},
+        function_call=None,
     )
     mock_summary = mocker.patch(
         "autogpt.memory.message_history.create_chat_completion",
@@ -105,7 +105,7 @@ def test_message_history_batch_summary(mocker, agent, config):
         result = (
             "Command browse_website returned: Answer gathered from website: The text in job"
             + str(i)
-            + " does not provide information on specific job requirements or a job URL.]",
+            + " does not provide information on specific job requirements or a job URL.]"
         )
         msg = Message("system", result, "action_result")
         history.append(msg)
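
Note: dropping the trailing comma is a genuine fix rather than style. Inside parentheses, a trailing comma turns the concatenated string into a one-element tuple. A quick standalone demonstration:

i = 0
as_tuple = (
    "The text in job"
    + str(i)
    + " does not provide information on specific job requirements or a job URL.]",
)
as_string = (
    "The text in job"
    + str(i)
    + " does not provide information on specific job requirements or a job URL.]"
)
print(type(as_tuple))   # <class 'tuple'>
print(type(as_string))  # <class 'str'>
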
@@ -117,7 +117,7 @@ def test_message_history_batch_summary(mocker, agent, config):
     history.append(user_input_msg)
 
     # only take the last cycle of the message history, trim the rest of previous messages, and generate a summary for them
-    for cycle in reversed(list(history.per_cycle(config))):
+    for cycle in reversed(list(history.per_cycle())):
         messages_to_add = [msg for msg in cycle if msg is not None]
         message_sequence.insert(insertion_index, *messages_to_add)
         break
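
Note: per_cycle() no longer takes the global config, presumably because the rebased history already carries what it needs (the model is fixed at construction via for_model). The test's "if msg is not None" filter suggests each cycle is a fixed-size group that may contain gaps. A rough, illustrative sketch of such a generator; the real grouping logic lives in autogpt.memory.message_history and may differ:

from __future__ import annotations
from typing import Iterator, Optional, Tuple

# Stand-in message as a (role, content, type) triple, for illustration only.
Msg = Tuple[str, str, str]


def per_cycle(messages: list[Msg]) -> Iterator[tuple[Optional[Msg], ...]]:
    cycle: list[Optional[Msg]] = []
    for msg in messages:
        cycle.append(msg)
        if msg[2] == "action_result":      # an action result closes one agent cycle
            yield tuple(cycle)
            cycle = []
    if cycle:                              # incomplete trailing cycle: pad with None
        yield tuple(cycle) + (None,) * max(0, 3 - len(cycle))
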
@@ -134,7 +134,7 @@ def test_message_history_batch_summary(mocker, agent, config):
     )
 
     expected_call_count = math.ceil(
-        message_tlength / (OPEN_AI_CHAT_MODELS.get(config.fast_llm).max_tokens)
+        message_tlength / (OPEN_AI_CHAT_MODELS[config.fast_llm].max_tokens)
    )
    # Expecting 2 batches because of over max token
    assert mock_summary.call_count == expected_call_count  # 2 at the time of writing
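
Note: swapping .get() for plain indexing is another typing fix. dict.get() returns an Optional value, so accessing .max_tokens on its result fails under mypy, whereas OPEN_AI_CHAT_MODELS[config.fast_llm] is typed as the value itself (and raises KeyError for an unknown model). The batch-count expectation itself is unchanged; a self-contained sketch of the arithmetic with made-up numbers:

import math

# Illustrative figures only; in the test, message_tlength is accumulated while
# appending history and max_tokens comes from OPEN_AI_CHAT_MODELS[config.fast_llm].
message_tlength = 6000    # total tokens in the history to be summarized
max_tokens = 4096         # context limit of the summarization model

# Each summarization call covers at most max_tokens of history,
# so the history is summarized in ceil(total / limit) batches.
expected_call_count = math.ceil(message_tlength / max_tokens)
print(expected_call_count)  # 2, matching the "Expecting 2 batches" comment in the test
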