mirror of
https://github.com/aljazceru/Auto-GPT.git
synced 2025-12-25 09:54:23 +01:00
148 lines
5.2 KiB
Python
148 lines
5.2 KiB
Python
from unittest.mock import MagicMock, patch
|
|
|
|
import pytest
|
|
from pytest_mock import MockerFixture
|
|
|
|
from autogpt.llm.api_manager import OPEN_AI_MODELS, ApiManager
|
|
|
|
# Module-level ApiManager shared by the tests below; the autouse
# reset_api_manager fixture clears its state before each test.
api_manager = ApiManager()
|
|
|
|
|
|
@pytest.fixture(autouse=True)
def reset_api_manager():
    """Reset the shared ApiManager before every test in this module.

    Clears any token counts, cost, and budget accumulated by a previous
    test so each test observes a clean slate.
    """
    api_manager.reset()
    yield
|
|
|
|
|
|
@pytest.fixture(autouse=True)
def mock_costs(mocker: MockerFixture):
    """Pin per-token prices so cost assertions below are deterministic.

    Patches the cost attributes on the shared OPEN_AI_MODELS entries for
    the chat model and the embedding model used by these tests.
    """
    # Chat model: both prompt and completion tokens carry a cost.
    mocker.patch.multiple(
        OPEN_AI_MODELS["gpt-3.5-turbo"],
        prompt_token_cost=0.0013,
        completion_token_cost=0.0025,
    )
    # Embedding model: only prompt tokens are billed.
    mocker.patch.multiple(
        OPEN_AI_MODELS["text-embedding-ada-002"],
        prompt_token_cost=0.0004,
    )
    yield
|
|
|
|
|
|
class TestApiManager:
    """Unit tests for ApiManager token accounting, cost tracking, and budget.

    All tests operate on the module-level ``api_manager`` (reset by the
    autouse ``reset_api_manager`` fixture) with per-token prices pinned by
    the autouse ``mock_costs`` fixture.
    """

    @staticmethod
    def test_create_chat_completion_debug_mode(caplog):
        """Test if debug mode logs response."""
        api_manager_debug = ApiManager(debug=True)
        messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Who won the world series in 2020?"},
        ]
        model = "gpt-3.5-turbo"

        with patch("openai.ChatCompletion.create") as mock_create:
            mock_response = MagicMock()
            # Remove the auto-created ``error`` attribute so the response
            # does not look like an API error to ApiManager.
            del mock_response.error
            mock_response.usage.prompt_tokens = 10
            mock_response.usage.completion_tokens = 20
            mock_create.return_value = mock_response

            api_manager_debug.create_chat_completion(messages, model=model)

            assert "Response" in caplog.text

    @staticmethod
    def test_create_chat_completion_empty_messages():
        """Test if empty messages result in zero tokens and cost."""
        messages = []
        model = "gpt-3.5-turbo"

        with patch("openai.ChatCompletion.create") as mock_create:
            mock_response = MagicMock()
            # See note in test_create_chat_completion_debug_mode.
            del mock_response.error
            mock_response.usage.prompt_tokens = 0
            mock_response.usage.completion_tokens = 0
            mock_create.return_value = mock_response

            api_manager.create_chat_completion(messages, model=model)

            assert api_manager.get_total_prompt_tokens() == 0
            assert api_manager.get_total_completion_tokens() == 0
            assert api_manager.get_total_cost() == 0

    @staticmethod
    def test_create_chat_completion_valid_inputs():
        """Test if valid inputs result in correct tokens and cost."""
        messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Who won the world series in 2020?"},
        ]
        model = "gpt-3.5-turbo"

        with patch("openai.ChatCompletion.create") as mock_create:
            mock_response = MagicMock()
            # See note in test_create_chat_completion_debug_mode.
            del mock_response.error
            mock_response.usage.prompt_tokens = 10
            mock_response.usage.completion_tokens = 20
            mock_create.return_value = mock_response

            api_manager.create_chat_completion(messages, model=model)

            assert api_manager.get_total_prompt_tokens() == 10
            assert api_manager.get_total_completion_tokens() == 20
            # Prices are per 1000 tokens (mocked: 0.0013 prompt, 0.0025 completion).
            assert api_manager.get_total_cost() == (10 * 0.0013 + 20 * 0.0025) / 1000

    # NOTE: converted from an instance method to @staticmethod for
    # consistency with every other test in this class; pytest collects and
    # runs it identically either way.
    @staticmethod
    def test_getter_methods():
        """Test the getter methods for total tokens, cost, and budget."""
        api_manager.update_cost(600, 1200, "gpt-3.5-turbo")
        api_manager.set_total_budget(10.0)
        assert api_manager.get_total_prompt_tokens() == 600
        assert api_manager.get_total_completion_tokens() == 1200
        assert api_manager.get_total_cost() == (600 * 0.0013 + 1200 * 0.0025) / 1000
        assert api_manager.get_total_budget() == 10.0

    @staticmethod
    def test_set_total_budget():
        """Test if setting the total budget works correctly."""
        total_budget = 10.0
        api_manager.set_total_budget(total_budget)

        assert api_manager.get_total_budget() == total_budget

    @staticmethod
    def test_update_cost_completion_model():
        """Test if updating the cost works correctly."""
        prompt_tokens = 50
        completion_tokens = 100
        model = "gpt-3.5-turbo"

        api_manager.update_cost(prompt_tokens, completion_tokens, model)

        assert api_manager.get_total_prompt_tokens() == prompt_tokens
        assert api_manager.get_total_completion_tokens() == completion_tokens
        assert (
            api_manager.get_total_cost()
            == (prompt_tokens * 0.0013 + completion_tokens * 0.0025) / 1000
        )

    @staticmethod
    def test_update_cost_embedding_model():
        """Test if updating the cost works correctly."""
        prompt_tokens = 1337
        model = "text-embedding-ada-002"

        api_manager.update_cost(prompt_tokens, 0, model)

        assert api_manager.get_total_prompt_tokens() == prompt_tokens
        assert api_manager.get_total_completion_tokens() == 0
        assert api_manager.get_total_cost() == (prompt_tokens * 0.0004) / 1000

    @staticmethod
    def test_get_models():
        """Test if getting models works correctly."""
        with patch("openai.Model.list") as mock_list_models:
            mock_list_models.return_value = {"data": [{"id": "gpt-3.5-turbo"}]}
            result = api_manager.get_models()

            assert result[0]["id"] == "gpt-3.5-turbo"
            # get_models should also cache the list on the manager itself.
            assert api_manager.models[0]["id"] == "gpt-3.5-turbo"
|