Mirror of https://github.com/aljazceru/Auto-GPT.git (synced 2025-12-17 22:14:28 +01:00)
Reorg (#1537)
* Pi's message.
* Fix most everything.
* Blacked
* Add Typing, Docstrings everywhere, organize the code a bit.
* Black
* fix import
* Update message, dedupe.
* Increase backoff time.
* bump up retries
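(The last two items map onto the retry loop in the diff below: the retry count goes from 5, or 10 in debug mode, to a flat 10, and the fixed 20-second waits are replaced by an exponential backoff of 2 ** (attempt + 2) seconds, i.e. 4, 8, 16, ... up to 2048 seconds on the final attempt.)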
@@ -1,4 +1,6 @@
+from ast import List
 import time
+from typing import Dict, Optional
 
 import openai
 from openai.error import APIError, RateLimitError
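Note: the new `List` import comes from `ast`, where `List` is the AST node class for list literals rather than the `typing` generic. The module only uses it as a bare annotation, so nothing breaks at runtime, but the conventional spelling would come from `typing`. A purely illustrative sketch, not part of this commit:

    from typing import Dict, List, Optional
    import time

    import openai
    from openai.error import APIError, RateLimitError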
@@ -6,30 +8,79 @@ from colorama import Fore
 
 from autogpt.config import Config
 
-cfg = Config()
+CFG = Config()
 
-openai.api_key = cfg.openai_api_key
+openai.api_key = CFG.openai_api_key
 
 
+def call_ai_function(
+    function: str, args: List, description: str, model: Optional[str] = None
+) -> str:
+    """Call an AI function
+
+    This is a magic function that can do anything with no-code. See
+    https://github.com/Torantulino/AI-Functions for more info.
+
+    Args:
+        function (str): The function to call
+        args (list): The arguments to pass to the function
+        description (str): The description of the function
+        model (str, optional): The model to use. Defaults to None.
+
+    Returns:
+        str: The response from the function
+    """
+    if model is None:
+        model = CFG.smart_llm_model
+    # For each arg, if any are None, convert to "None":
+    args = [str(arg) if arg is not None else "None" for arg in args]
+    # parse args to comma separated string
+    args = ", ".join(args)
+    messages = [
+        {
+            "role": "system",
+            "content": f"You are now the following python function: ```# {description}"
+            f"\n{function}```\n\nOnly respond with your `return` value.",
+        },
+        {"role": "user", "content": args},
+    ]
+
+    return create_chat_completion(model=model, messages=messages, temperature=0)
+
+
 # Overly simple abstraction until we create something better
 # simple retry mechanism when getting a rate error or a bad gateway
 def create_chat_completion(
-    messages, model=None, temperature=cfg.temperature, max_tokens=None
+    messages: List,  # type: ignore
+    model: Optional[str] = None,
+    temperature: float = CFG.temperature,
+    max_tokens: Optional[int] = None,
 ) -> str:
-    """Create a chat completion using the OpenAI API"""
+    """Create a chat completion using the OpenAI API
+
+    Args:
+        messages (list[dict[str, str]]): The messages to send to the chat completion
+        model (str, optional): The model to use. Defaults to None.
+        temperature (float, optional): The temperature to use. Defaults to 0.9.
+        max_tokens (int, optional): The max tokens to use. Defaults to None.
+
+    Returns:
+        str: The response from the chat completion
+    """
     response = None
-    num_retries = 5
-    if cfg.debug_mode:
-        num_retries = 10
+    num_retries = 10
+    if CFG.debug_mode:
+        print(
+            Fore.GREEN
+            + f"Creating chat completion with model {model}, temperature {temperature},"
+            f" max_tokens {max_tokens}" + Fore.RESET
+        )
     for attempt in range(num_retries):
+        backoff = 2 ** (attempt + 2)
         try:
-            if cfg.use_azure:
+            if CFG.use_azure:
                 response = openai.ChatCompletion.create(
-                    deployment_id=cfg.get_azure_deployment_id_for_model(model),
+                    deployment_id=CFG.get_azure_deployment_id_for_model(model),
                     model=model,
                     messages=messages,
                     temperature=temperature,
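For context, `call_ai_function` (added above) implements the prompt-as-Python-function pattern from the linked AI-Functions project: the system message tells the model it *is* the given function, and the reply is treated as the function's return value. A minimal usage sketch, assuming the file lives at `autogpt/llm_utils.py` and a valid OpenAI API key is configured; the function string, arguments, and description below are made up for illustration:

    from autogpt.llm_utils import call_ai_function

    # Hypothetical "AI function": ask the model to act as a code reviewer.
    function = "def analyze_code(code: str) -> list[str]:"
    code = "def add(a, b):\n    return a + b"
    description = "Analyzes the given code and returns a list of suggestions for improvement."

    # Returns the model's raw reply (its `return` value) as a string.
    suggestions = call_ai_function(function, [code], description)
    print(suggestions)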
@@ -44,26 +95,21 @@ def create_chat_completion(
                 )
             break
         except RateLimitError:
-            if cfg.debug_mode:
-                print(
-                    Fore.RED + "Error: ",
-                    "API Rate Limit Reached. Waiting 20 seconds..." + Fore.RESET,
-                )
-            time.sleep(20)
             pass
         except APIError as e:
             if e.http_status == 502:
-                if cfg.debug_mode:
-                    print(
-                        Fore.RED + "Error: ",
-                        "API Bad gateway. Waiting 20 seconds..." + Fore.RESET,
-                    )
-                time.sleep(20)
                 pass
             else:
                 raise
             if attempt == num_retries - 1:
                 raise
+
+        if CFG.debug_mode:
+            print(
+                Fore.RED + "Error: ",
+                f"API Bad gateway. Waiting {backoff} seconds..." + Fore.RESET,
+            )
+        time.sleep(backoff)
     if response is None:
-        raise RuntimeError("Failed to get response after 5 retries")
+        raise RuntimeError(f"Failed to get response after {num_retries} retries")
+
     return response.choices[0].message["content"]
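The loop above is tied to the openai 0.x SDK (`openai.error.RateLimitError`, and `APIError` with `http_status == 502`). The underlying pattern it implements, retry transient failures with exponential backoff and re-raise everything else, can be sketched on its own; the helper below is illustrative and not part of the commit:

    import time


    class TransientError(Exception):
        """Stand-in for errors worth retrying (rate limits, bad gateways)."""


    def with_retries(make_request, num_retries: int = 10):
        """Call make_request(), retrying transient failures with exponential backoff."""
        for attempt in range(num_retries):
            backoff = 2 ** (attempt + 2)  # 4, 8, 16, ... seconds, as in the diff above
            try:
                return make_request()
            except TransientError:
                if attempt == num_retries - 1:
                    raise
            time.sleep(backoff)
        # Safety net, mirroring the RuntimeError in the committed code.
        raise RuntimeError(f"Failed to get response after {num_retries} retries")

In the committed code the transient cases are `RateLimitError` and a 502 `APIError`; any other `APIError` is re-raised immediately.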
||||