Fixing circular imports
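Background: at module import time, autogpt.llm.api_manager pulled OPEN_AI_MODELS from autogpt.llm.providers.openai while autogpt.llm.providers.openai pulled ApiManager from autogpt.llm.api_manager, so each module needed the other before either had finished initializing. This commit removes both module-level cross-imports and re-imports the names inside the functions that actually use them, so resolution happens at call time instead of import time. A minimal, self-contained sketch of the failure mode and the deferred-import fix (module names here are illustrative, not Auto-GPT's):

```python
# pkg/provider.py (hypothetical) -- imports the manager at module level:
#     from pkg.manager import Manager
#     MODELS = {"gpt-3.5-turbo": {"prompt": 0.002}}
#
# pkg/manager.py (hypothetical) -- imports the provider back at module level:
#     from pkg.provider import MODELS
#     class Manager: ...
#
# Importing either module first then fails part-way through with an error
# along the lines of:
#     ImportError: cannot import name 'Manager' from partially initialized
#     module 'pkg.manager' (most likely due to a circular import)


# The fix applied in this commit: keep the definitions where they are and
# defer the cross-module import into the function that needs it, so it only
# runs once both modules are fully initialized.
def lookup_model_info(model: str) -> dict:
    from pkg.provider import MODELS  # deferred import, breaks the cycle

    return MODELS[model]
```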
@@ -5,7 +5,6 @@ from typing import List, Optional
 import openai
 from openai import Model
 from autogpt.llm.base import CompletionModelInfo
-from autogpt.llm.providers.openai import OPEN_AI_MODELS
 from autogpt.logs import logger
 from autogpt.singleton import Singleton
 
@@ -35,6 +34,7 @@ class ApiManager(metaclass=Singleton):
             model (str): The model used for the API call.
         """
         # the .model property in API responses can contain version suffixes like -v2
+        from autogpt.llm.providers.openai import OPEN_AI_MODELS
         model = model[:-3] if model.endswith("-v2") else model
         model_info = OPEN_AI_MODELS[model]
 
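The local import above works because the model lookup only happens at call time, after both modules have finished loading. As an aside (not part of this diff), a typing.TYPE_CHECKING guard is another common way to break such cycles, but it only helps for names used in annotations; OPEN_AI_MODELS is used at runtime here, so the function-local import is the right tool. A small sketch with hypothetical names:

```python
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Evaluated only by type checkers, never at runtime, so it cannot
    # re-introduce an import cycle. Hypothetical module and class names.
    from pkg.provider import ModelInfo


def lookup(model: str) -> ModelInfo:
    # Names needed at runtime still require the deferred, function-local import.
    from pkg.provider import MODELS

    return MODELS[model]
```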
@@ -9,7 +9,6 @@ from colorama import Fore, Style
 from openai.error import APIError, RateLimitError, Timeout
 from openai.openai_object import OpenAIObject
 
-from autogpt.llm.api_manager import ApiManager
 from autogpt.llm.base import (
     ChatModelInfo,
     EmbeddingModelInfo,
@@ -111,6 +110,7 @@ OPEN_AI_MODELS: dict[str, ChatModelInfo | EmbeddingModelInfo | TextModelInfo] =
 
 def meter_api(func):
     """Adds ApiManager metering to functions which make OpenAI API calls"""
+    from autogpt.llm.api_manager import ApiManager
     api_manager = ApiManager()
 
     openai_obj_processor = openai.util.convert_to_openai_object
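With the other half of the cycle also deferred (ApiManager is now imported inside meter_api), neither module needs the other at import time. A quick, illustrative way to confirm that, not part of the commit and assuming the autogpt package is importable, is to import the two modules in both orders in fresh interpreters:

```python
import subprocess
import sys

MODULES = ("autogpt.llm.api_manager", "autogpt.llm.providers.openai")

# Each order runs in a fresh interpreter so cached modules can't mask a cycle.
for first, second in (MODULES, MODULES[::-1]):
    code = f"import {first}; import {second}; print('{first} first: ok')"
    subprocess.run([sys.executable, "-c", code], check=True)
```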
@@ -2,7 +2,7 @@ from unittest.mock import MagicMock, patch
 
 import pytest
 
-from autogpt.llm.api_manager import COSTS, ApiManager
+from autogpt.llm.api_manager import ApiManager
 from autogpt.llm.providers import openai
 
 api_manager = ApiManager()
@@ -14,19 +14,6 @@ def reset_api_manager():
     yield
 
 
-@pytest.fixture(autouse=True)
-def mock_costs():
-    with patch.dict(
-        COSTS,
-        {
-            "gpt-3.5-turbo": {"prompt": 0.002, "completion": 0.002},
-            "text-embedding-ada-002": {"prompt": 0.0004, "completion": 0},
-        },
-        clear=True,
-    ):
-        yield
-
-
 class TestProviderOpenAI:
     @staticmethod
     def test_create_chat_completion_debug_mode(caplog):
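The removed mock_costs fixture patched a COSTS table that this diff no longer imports from autogpt.llm.api_manager; per-model pricing is now looked up through OPEN_AI_MODELS in autogpt.llm.providers.openai. If a test still wants deterministic prices, one option (a sketch, not taken from this commit; the cost attribute names on the stub are assumptions) is to patch that table instead:

```python
from unittest.mock import MagicMock, patch

import pytest


@pytest.fixture
def stub_gpt35_costs():
    # Stand-in model info with assumed cost attributes; patch.dict accepts a
    # dotted string naming the dict to patch and restores it afterwards.
    stub = MagicMock(prompt_token_cost=0.002, completion_token_cost=0.002)
    with patch.dict(
        "autogpt.llm.providers.openai.OPEN_AI_MODELS",
        {"gpt-3.5-turbo": stub},
    ):
        yield
```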
@@ -3,7 +3,8 @@ from unittest.mock import MagicMock, patch
 import pytest
 from pytest_mock import MockerFixture
 
-from autogpt.llm.api_manager import OPEN_AI_MODELS, ApiManager
+from autogpt.llm.api_manager import ApiManager
+from autogpt.llm.providers.openai import OPEN_AI_MODELS
 
 api_manager = ApiManager()
 
@@ -1,25 +0,0 @@
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
from pytest_mock import MockerFixture
|
||||
|
||||
from autogpt.agent.agent import Agent
|
||||
from autogpt.app import list_agents, start_agent
|
||||
|
||||
|
||||
def test_make_agent(agent: Agent, mocker: MockerFixture) -> None:
|
||||
"""Test that an agent can be created"""
|
||||
mock = mocker.patch("openai.ChatCompletion.create")
|
||||
|
||||
response = MagicMock()
|
||||
response.choices[0].message.content = "Test message"
|
||||
response.usage.prompt_tokens = 1
|
||||
response.usage.completion_tokens = 1
|
||||
del response.error
|
||||
|
||||
mock.return_value = response
|
||||
start_agent("Test Agent", "chat", "Hello, how are you?", agent, "gpt-3.5-turbo")
|
||||
agents = list_agents(agent)
|
||||
assert "List of agents:\n0: chat" == agents
|
||||
start_agent("Test Agent 2", "write", "Hello, how are you?", agent, "gpt-3.5-turbo")
|
||||
agents = list_agents(agent.config)
|
||||
assert "List of agents:\n0: chat\n1: write" == agents
|
||||