fix(agent): Fix extract_dict_from_response flakiness

- The `extract_dict_from_response` function, which is supposed to reliably extract a JSON object from an LLM's response, used the regex `{.*}`, in which `.` does not match newlines. As a result it only matched JSON objects written on a single line, and extraction failed for multi-line (pretty-printed) objects. The pattern is changed to `{[\s\S]*}` so the match spans newlines.
This commit is contained in:
Reinier van der Leer
2024-01-19 15:49:32 +01:00
parent e4687e0f03
commit faf5f9e5a4
2 changed files with 13 additions and 1 deletions

View File

@@ -391,7 +391,19 @@ class OneShotAgentPromptStrategy(PromptStrategy):
if not response.content:
raise InvalidAgentResponseError("Assistant response has no text content")
self.logger.debug(
"LLM response content:"
+ (
f"\n{response.content}"
if "\n" in response.content
else f" '{response.content}'"
)
)
assistant_reply_dict = extract_dict_from_response(response.content)
self.logger.debug(
"Validating object extracted from LLM response:\n"
f"{json.dumps(assistant_reply_dict, indent=4)}"
)
_, errors = self.response_schema.validate_object(
object=assistant_reply_dict,

View File

@@ -18,7 +18,7 @@ def extract_dict_from_response(response_content: str) -> dict[str, Any]:
response_content = response_content.lstrip("json")
else:
# The string may contain JSON.
json_pattern = r"{.*}"
json_pattern = r"{[\s\S]*}"
match = re.search(json_pattern, response_content)
if match: