Files
Auto-GPT/tests/integration/test_provider_openai.py
James Collins 6e6e7fcc9a Extract openai API calls and retry at lowest level (#3696)
* Extract OpenAI API calls and retry at lowest level (see the retry sketch below the commit metadata)

* Forgot a test

* Gotta fix my local docker config so I can let pre-commit hooks run, ugh

* fix: merge artifact

* Fix linting

* Update memory.vector.utils

* feat: make sure resp exists

* fix: raise error message if created

* feat: rename file

* fix: partial test fix

* fix: update comments

* fix: linting

* fix: remove broken test

* fix: require a model to exist

* fix: BaseError issue

* fix: runtime error

* Fix mock response in test_make_agent

* add 429 as errors to retry

---------

Co-authored-by: k-boikov <64261260+k-boikov@users.noreply.github.com>
Co-authored-by: Nicholas Tindle <nick@ntindle.com>
Co-authored-by: Reinier van der Leer <github@pwuts.nl>
Co-authored-by: Nicholas Tindle <nicktindle@outlook.com>
Co-authored-by: Luke K (pr-0f3t) <2609441+lc0rp@users.noreply.github.com>
Co-authored-by: Merwane Hamadi <merwanehamadi@gmail.com>
2023-06-14 07:59:26 -07:00

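The commit above moves every OpenAI call behind a single low-level call site and retries there, treating HTTP 429 (rate limit) as retryable. Below is a minimal sketch of what such a wrapper could look like, assuming the pre-1.0 `openai` SDK; the decorator name `retry_api`, the set of retried status codes, and the backoff policy are illustrative assumptions, not the commit's exact implementation.

import functools
import time

import openai


def retry_api(num_retries: int = 10, backoff_base: float = 2.0):
    """Retry an OpenAI call on rate limits (HTTP 429) and transient API errors.

    Hypothetical sketch: names and retry policy are assumptions, not the
    implementation added by this commit.
    """

    def decorator(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            for attempt in range(1, num_retries + 1):
                try:
                    return func(*args, **kwargs)
                except openai.error.RateLimitError:
                    # 429 from the SDK's rate-limit exception: always retry.
                    pass
                except openai.error.APIError as err:
                    # Retry only transient statuses (per the commit, 429 counts).
                    if err.http_status not in (429, 502):
                        raise
                if attempt == num_retries:
                    raise RuntimeError("OpenAI API retries exhausted")
                # Exponential backoff between attempts.
                time.sleep(backoff_base**attempt)

        return wrapped

    return decorator


@retry_api()
def create_chat_completion(messages, model):
    """Single lowest-level call site for chat completions."""
    return openai.ChatCompletion.create(model=model, messages=messages)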

from unittest.mock import MagicMock, patch

import pytest

from autogpt.llm.api_manager import COSTS, ApiManager

# Note: this is Auto-GPT's provider wrapper, not the openai SDK itself; the
# tests below patch the underlying SDK call ("openai.ChatCompletion.create").
from autogpt.llm.providers import openai

api_manager = ApiManager()


@pytest.fixture(autouse=True)
def reset_api_manager():
    api_manager.reset()
    yield


@pytest.fixture(autouse=True)
def mock_costs():
    with patch.dict(
        COSTS,
        {
            "gpt-3.5-turbo": {"prompt": 0.002, "completion": 0.002},
            "text-embedding-ada-002": {"prompt": 0.0004, "completion": 0},
        },
        clear=True,
    ):
        yield


class TestProviderOpenAI:
    @staticmethod
    def test_create_chat_completion_debug_mode(caplog):
        """Test if debug mode logs the response."""
        messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Who won the world series in 2020?"},
        ]
        model = "gpt-3.5-turbo"
        with patch("openai.ChatCompletion.create") as mock_create:
            mock_response = MagicMock()
            # Remove the auto-created attribute so the response looks error-free.
            del mock_response.error
            mock_response.usage.prompt_tokens = 10
            mock_response.usage.completion_tokens = 20
            mock_create.return_value = mock_response

            openai.create_chat_completion(messages, model=model)

            assert "Response" in caplog.text

    @staticmethod
    def test_create_chat_completion_empty_messages():
        """Test if empty messages result in zero tokens and cost."""
        messages = []
        model = "gpt-3.5-turbo"
        with patch("openai.ChatCompletion.create") as mock_create:
            mock_response = MagicMock()
            del mock_response.error
            mock_response.usage.prompt_tokens = 0
            mock_response.usage.completion_tokens = 0
            mock_create.return_value = mock_response

            openai.create_chat_completion(messages, model=model)

            assert api_manager.get_total_prompt_tokens() == 0
            assert api_manager.get_total_completion_tokens() == 0
            assert api_manager.get_total_cost() == 0
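The final assertions depend on ApiManager accumulating usage and cost from each mocked response. Below is a rough sketch of that bookkeeping, assuming the COSTS prices are USD per 1,000 tokens (as the fixture values suggest); the class is illustrative, not the actual ApiManager source.

# Example per-1k-token prices, mirroring the mock_costs fixture above.
COSTS = {"gpt-3.5-turbo": {"prompt": 0.002, "completion": 0.002}}


class ApiManagerSketch:
    """Illustrative token/cost accounting; not the real ApiManager."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.total_prompt_tokens = 0
        self.total_completion_tokens = 0
        self.total_cost = 0.0

    def update_cost(self, prompt_tokens: int, completion_tokens: int, model: str):
        # Prices are assumed to be per 1,000 tokens, hence the division.
        self.total_prompt_tokens += prompt_tokens
        self.total_completion_tokens += completion_tokens
        self.total_cost += (
            prompt_tokens * COSTS[model]["prompt"]
            + completion_tokens * COSTS[model]["completion"]
        ) / 1000

    def get_total_prompt_tokens(self):
        return self.total_prompt_tokens

    def get_total_completion_tokens(self):
        return self.total_completion_tokens

    def get_total_cost(self):
        return self.total_cost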