fix(autogpt/llm): AssistantChatMessage.tool_calls default [] instead of None

OpenAI ChatCompletion calls fail when `tool_calls = None`. This issue came to light after 22aba6d.
This commit is contained in:
Reinier van der Leer
2024-02-14 14:34:04 +01:00
parent 6017eefb32
commit 67bafa6302
2 changed files with 4 additions and 6 deletions

View File

@@ -423,7 +423,7 @@ class OpenAIProvider(
tool_calls=(
[AssistantToolCall(**tc.dict()) for tc in _assistant_msg.tool_calls]
if _assistant_msg.tool_calls
-                        else None
+                        else list()
),
)
response = ChatModelResponse(

View File

@@ -44,16 +44,14 @@ class ChatMessage(BaseModel):
SYSTEM = "system"
ASSISTANT = "assistant"
TOOL = "tool"
"""May be used for the result of tool calls"""
FUNCTION = "function"
"""May be used for the return value of function calls"""
role: Role
content: str
@staticmethod
def assistant(content: str) -> "ChatMessage":
return ChatMessage(role=ChatMessage.Role.ASSISTANT, content=content)
@staticmethod
def user(content: str) -> "ChatMessage":
return ChatMessage(role=ChatMessage.Role.USER, content=content)
@@ -93,7 +91,7 @@ class AssistantToolCallDict(TypedDict):
class AssistantChatMessage(ChatMessage):
role: Literal["assistant"] = "assistant"
content: Optional[str]
-    tool_calls: Optional[list[AssistantToolCall]]
+    tool_calls: list[AssistantToolCall] = Field(default_factory=list)
class AssistantChatMessageDict(TypedDict, total=False):