mirror of
https://github.com/aljazceru/Auto-GPT.git
synced 2026-01-22 23:44:31 +01:00
Co-authored-by: Reinier van der Leer <github@pwuts.nl> Co-authored-by: Nicholas Tindle <nick@ntindle.com> Co-authored-by: Nicholas Tindle <nicktindle@outlook.com> Co-authored-by: k-boikov <64261260+k-boikov@users.noreply.github.com> Co-authored-by: merwanehamadi <merwanehamadi@gmail.com> Co-authored-by: Merwane Hamadi <merwanehamadi@gmail.com> Co-authored-by: Richard Beales <rich@richbeales.net> Co-authored-by: Luke K <2609441+lc0rp@users.noreply.github.com> Co-authored-by: Luke K (pr-0f3t) <2609441+lc0rp@users.noreply.github.com> Co-authored-by: Erik Peterson <e@eriklp.com> Co-authored-by: Auto-GPT-Bot <github-bot@agpt.co> Co-authored-by: Benny van der Lans <49377421+bfalans@users.noreply.github.com> Co-authored-by: Jan <jan-github@phobia.de> Co-authored-by: Robin Richtsfeld <robin.richtsfeld@gmail.com> Co-authored-by: Marc Bornträger <marc.borntraeger@gmail.com> Co-authored-by: Stefan Ayala <stefanayala3266@gmail.com> Co-authored-by: javableu <45064273+javableu@users.noreply.github.com> Co-authored-by: DGdev91 <DGdev91@users.noreply.github.com> Co-authored-by: Kinance <kinance@gmail.com> Co-authored-by: digger yu <digger-yu@outlook.com> Co-authored-by: David <scenaristeur@gmail.com> Co-authored-by: gravelBridge <john.tian31@gmail.com> Fix Python CI "update cassettes" step (#4591) fix CI (#4596) Fix inverted logic for deny_command (#4563) fix current_score.json generation (#4601) Fix duckduckgo rate limiting (#4592) Fix debug code challenge (#4632) Fix issues with information retrieval challenge a (#4622) fix issues with env configuration and .env.template (#4630) Fix prompt issue causing 'No Command' issues and challenge to fail (#4623) Fix benchmark logs (#4653) Fix typo in docs/setup.md (#4613) Fix run.sh shebang (#4561) Fix autogpt docker image not working because missing prompt_settings (#4680) Fix execute_command coming from plugins (#4730)
156 lines
4.8 KiB
Python
156 lines
4.8 KiB
Python
from __future__ import annotations
|
|
|
|
from typing import List, Optional
|
|
|
|
import openai
|
|
from openai import Model
|
|
|
|
from autogpt.config import Config
|
|
from autogpt.llm.base import CompletionModelInfo, MessageDict
|
|
from autogpt.llm.providers.openai import OPEN_AI_MODELS
|
|
from autogpt.logs import logger
|
|
from autogpt.singleton import Singleton
|
|
|
|
|
|
class ApiManager(metaclass=Singleton):
    """Track OpenAI API usage: token counts, accumulated cost, and budget.

    Implemented as a singleton so usage accumulates process-wide across
    all call sites.
    """

    def __init__(self):
        # Running totals, accumulated by update_cost().
        self.total_prompt_tokens = 0
        self.total_completion_tokens = 0
        self.total_cost = 0.0
        # User-defined spending cap; 0.0 means no budget has been set.
        self.total_budget = 0.0
        # Cached result of get_models(); None until first fetch.
        self.models: Optional[list[Model]] = None

    def reset(self):
        """Reset all counters and the cached model list to their initial state."""
        self.total_prompt_tokens = 0
        self.total_completion_tokens = 0
        self.total_cost = 0.0
        self.total_budget = 0.0
        self.models = None

    def create_chat_completion(
        self,
        messages: list[MessageDict],
        model: str | None = None,
        temperature: float | None = None,
        max_tokens: int | None = None,
        deployment_id=None,
    ):
        """
        Create a chat completion and update the cost.

        Args:
            messages (list): The list of messages to send to the API.
            model (str): The model to use for the API call.
            temperature (float): The temperature to use for the API call.
                Defaults to the configured temperature when None.
            max_tokens (int): The maximum number of tokens for the API call.
            deployment_id: Azure deployment ID, when calling an Azure endpoint.

        Returns:
            The raw OpenAI API response object.
        """
        cfg = Config()
        if temperature is None:
            temperature = cfg.temperature

        # Azure endpoints require the deployment_id kwarg; omit it otherwise.
        extra_kwargs = {"deployment_id": deployment_id} if deployment_id is not None else {}
        response = openai.ChatCompletion.create(
            model=model,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
            api_key=cfg.openai_api_key,
            **extra_kwargs,
        )
        # Only count usage for successful responses.
        if not hasattr(response, "error"):
            logger.debug(f"Response: {response}")
            prompt_tokens = response.usage.prompt_tokens
            completion_tokens = response.usage.completion_tokens
            self.update_cost(prompt_tokens, completion_tokens, model)
        return response

    def update_cost(self, prompt_tokens, completion_tokens, model: str):
        """
        Update the total cost, prompt tokens, and completion tokens.

        Args:
            prompt_tokens (int): The number of tokens used in the prompt.
            completion_tokens (int): The number of tokens used in the completion.
            model (str): The model used for the API call.
        """
        # the .model property in API responses can contain version suffixes like -v2
        if model.endswith("-v2"):
            model = model[: -len("-v2")]
        model_info = OPEN_AI_MODELS[model]

        self.total_prompt_tokens += prompt_tokens
        self.total_completion_tokens += completion_tokens
        # Token costs are quoted per 1000 tokens.
        self.total_cost += prompt_tokens * model_info.prompt_token_cost / 1000
        # Only completion-capable models have a completion_token_cost.
        if isinstance(model_info, CompletionModelInfo):
            self.total_cost += (
                completion_tokens * model_info.completion_token_cost / 1000
            )

        logger.debug(f"Total running cost: ${self.total_cost:.3f}")

    def set_total_budget(self, total_budget):
        """
        Sets the total user-defined budget for API calls.

        Args:
            total_budget (float): The total budget for API calls.
        """
        self.total_budget = total_budget

    def get_total_prompt_tokens(self):
        """
        Get the total number of prompt tokens.

        Returns:
            int: The total number of prompt tokens.
        """
        return self.total_prompt_tokens

    def get_total_completion_tokens(self):
        """
        Get the total number of completion tokens.

        Returns:
            int: The total number of completion tokens.
        """
        return self.total_completion_tokens

    def get_total_cost(self):
        """
        Get the total cost of API calls.

        Returns:
            float: The total cost of API calls.
        """
        return self.total_cost

    def get_total_budget(self):
        """
        Get the total user-defined budget for API calls.

        Returns:
            float: The total budget for API calls.
        """
        return self.total_budget

    def get_models(self) -> list[Model]:
        """
        Get the list of available GPT models, fetching it once and caching.

        Returns:
            list: List of available GPT models.
        """
        if self.models is None:
            all_models = openai.Model.list()["data"]
            # Keep only chat-capable "gpt" models.
            self.models = [model for model in all_models if "gpt" in model["id"]]

        return self.models