From d0db337af82f959fbbf3c9a9a1b445cbdb1b4234 Mon Sep 17 00:00:00 2001
From: SwiftyOS
Date: Thu, 21 Sep 2023 17:17:37 +0200
Subject: [PATCH] changed chat completion to async

---
 autogpts/forge/forge/sdk/llm.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/autogpts/forge/forge/sdk/llm.py b/autogpts/forge/forge/sdk/llm.py
index 9d853646..6980a3b8 100644
--- a/autogpts/forge/forge/sdk/llm.py
+++ b/autogpts/forge/forge/sdk/llm.py
@@ -9,7 +9,7 @@ LOG = ForgeLogger(__name__)


 @retry(wait=wait_random_exponential(min=1, max=40), stop=stop_after_attempt(3))
-def chat_completion_request(
+async def chat_completion_request(
     messages, functions=None, function_call=None, model=str, custom_labels=None
 ) -> typing.Union[typing.Dict[str, typing.Any], Exception]:
     """Generate a response to a list of messages using OpenAI's API"""
@@ -31,7 +31,7 @@ def chat_completion_request(
             # This is an example showing adding in the labels as helicone properties
             kwargs["headers"][f"Helicone-Property-{label}"] = custom_labels[label]

-        resp = openai.ChatCompletion.create(**kwargs)
+        resp = await openai.ChatCompletion.acreate(**kwargs)
         return resp

     except Exception as e:
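
After this patch, chat_completion_request returns a coroutine, so call sites must await it from an async context rather than calling it directly. Below is a minimal usage sketch, assuming the patched module is importable as forge.sdk.llm; the ask helper, the model name, and the prompt are illustrative and are not part of the patch.

import asyncio

from forge.sdk.llm import chat_completion_request  # the coroutine patched above


async def ask(prompt: str) -> str:
    """Illustrative helper: send one user message and return the reply text."""
    messages = [{"role": "user", "content": prompt}]
    # chat_completion_request is now async, so it must be awaited.
    resp = await chat_completion_request(messages, model="gpt-3.5-turbo")
    return resp["choices"][0]["message"]["content"]


if __name__ == "__main__":
    # asyncio.run drives the coroutine from ordinary synchronous code.
    print(asyncio.run(ask("Say hello in one sentence.")))

Any existing caller that still invokes the function synchronously will now receive a coroutine object instead of a response, which is the main migration cost of this change.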