Auto-GPT/autogpts/autogpt/tests/unit/test_config.py
Reinier van der Leer 39c46ef6be feat(agent/core): Add Anthropic Claude 3 support (#7085)
- feat(agent/core): Add `AnthropicProvider`
  - Add `ANTHROPIC_API_KEY` to .env.template and docs

  Notable differences in logic compared to `OpenAIProvider` (see the sketch below):
  - Merges subsequent user messages in `AnthropicProvider._get_chat_completion_args`
  - Merges and extracts all system messages into `system` parameter in `AnthropicProvider._get_chat_completion_args`
  - Supports prefill; merges prefill content (if any) into generated response
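
  For illustration, the message normalization described above might look roughly like this (a simplified sketch, not the actual `AnthropicProvider._get_chat_completion_args` code):

  ```python
  # Simplified sketch: collect system messages into a single `system` string
  # and merge consecutive same-role messages so that turns strictly alternate.
  def prepare_messages(messages: list[dict]) -> tuple[str, list[dict]]:
      system_parts: list[str] = []
      merged: list[dict] = []
      for message in messages:
          if message["role"] == "system":
              # Anthropic accepts only one `system` parameter, so all system
              # messages are extracted and joined.
              system_parts.append(message["content"])
          elif merged and merged[-1]["role"] == message["role"]:
              # Anthropic rejects consecutive same-role messages: merge them.
              merged[-1]["content"] += "\n\n" + message["content"]
          else:
              merged.append(dict(message))
      return "\n\n".join(system_parts), merged
  ```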

- Prompt changes to improve compatibility with `AnthropicProvider`
  Anthropic's API differs slightly from OpenAI's and enforces much stricter input validation. For example, Anthropic supports only a single `system` prompt, where OpenAI allows multiple `system` messages; Anthropic also forbids sequences of multiple `user` or `assistant` messages, and requires that messages alternate between roles. (A prefill example follows the list below.)
  - Move response format instruction from separate message into main system prompt
  - Fix clock message format
  - Add pre-fill to `OneShot` generated prompt
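
  For reference, prefilling with Anthropic's SDK works by ending the message list with a partial `assistant` turn; the model continues from that text and does not echo it back, so the caller must stitch the prefix onto the output. A minimal sketch (the model name is just an example):

  ```python
  import anthropic  # pip install anthropic

  client = anthropic.Anthropic()  # reads ANTHROPIC_API_KEY from the environment
  response = client.messages.create(
      model="claude-3-opus-20240229",  # example model name
      max_tokens=1024,
      system="Reply with a single JSON object.",
      messages=[
          {"role": "user", "content": "Summarize the build status."},
          # Pre-fill: the reply will continue from this partial assistant turn.
          {"role": "assistant", "content": "{"},
      ],
  )
  # Merge the prefill content into the generated response.
  full_text = "{" + response.content[0].text
  ```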

- refactor(agent/core): Tweak `model_providers.schema` (see the sketch after this list)
  - Simplify `ModelProviderUsage`
     - Remove attribute `total_tokens` as it is always equal to `prompt_tokens + completion_tokens`
     - Modify signature of `update_usage(..)`; no longer requires a full `ModelResponse` object as input
  - Improve `ModelProviderBudget`
     - Change type of attribute `usage` to `defaultdict[str, ModelProviderUsage]` to allow per-model usage tracking
     - Modify signature of `update_usage_and_cost(..)`; no longer requires a full `ModelResponse` object as input
     - Allow `ModelProviderBudget` zero-argument instantiation
  - Fix type of `AssistantChatMessage.role` to match `ChatMessage.role` (str -> `ChatMessage.Role`)
  - Add shared attributes and constructor to `ModelProvider` base class
  - Add `max_output_tokens` parameter to `create_chat_completion` interface
  - Add pre-filling as a global feature
    - Add `prefill_response` field to `ChatPrompt` model
    - Add `prefill_response` parameter to `create_chat_completion` interface
  - Add `ChatModelProvider.get_available_models()` and remove `ApiManager`
  - Remove unused `OpenAIChatParser` typedef in openai.py
  - Remove redundant `budget` attribute definition on `OpenAISettings`
  - Remove unnecessary `usage` in `OpenAIProvider` > `default_settings` > `budget`
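
  A rough sketch of the resulting shape (plain dataclasses for brevity; the real classes are Pydantic models, and the method parameter names here are illustrative):

  ```python
  import math
  from collections import defaultdict
  from dataclasses import dataclass, field


  @dataclass
  class ModelProviderUsage:
      # `total_tokens` was dropped: it always equals prompt + completion tokens.
      prompt_tokens: int = 0
      completion_tokens: int = 0

      def update_usage(self, input_tokens: int, output_tokens: int) -> None:
          self.prompt_tokens += input_tokens
          self.completion_tokens += output_tokens


  @dataclass
  class ModelProviderBudget:
      total_budget: float = math.inf
      total_cost: float = 0.0
      remaining_budget: float = math.inf
      # Per-model usage tracking: unseen model names start at zero usage.
      usage: defaultdict[str, ModelProviderUsage] = field(
          default_factory=lambda: defaultdict(ModelProviderUsage)
      )

      def update_usage_and_cost(
          self, model_name: str, input_tokens: int, output_tokens: int, cost: float
      ) -> None:
          self.usage[model_name].update_usage(input_tokens, output_tokens)
          self.total_cost += cost
          self.remaining_budget -= cost
  ```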

- feat(agent): Allow use of any available LLM provider through `MultiProvider` (sketched below)
  - Add `MultiProvider` (`model_providers.multi`)
  - Replace all references to / uses of `OpenAIProvider` with `MultiProvider`
  - Change type of `Config.smart_llm` and `Config.fast_llm` from `str` to `ModelName`
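
  A `MultiProvider` along these lines looks up the owning provider from the model name and delegates to it. A minimal sketch (the dispatch rule and constructor shown here are assumptions, not the actual `model_providers.multi` implementation):

  ```python
  from typing import Any, Protocol


  class ChatModelProvider(Protocol):
      async def create_chat_completion(self, model_name: str, **kwargs: Any) -> Any: ...
      async def get_available_models(self) -> list[Any]: ...


  class MultiProvider:
      """Dispatches each request to the provider that serves the model."""

      def __init__(self, providers: dict[str, ChatModelProvider]):
          # e.g. {"openai": OpenAIProvider(...), "anthropic": AnthropicProvider(...)}
          self._providers = providers

      def _provider_for(self, model_name: str) -> ChatModelProvider:
          key = "anthropic" if model_name.startswith("claude-") else "openai"
          return self._providers[key]

      async def create_chat_completion(self, model_name: str, **kwargs: Any) -> Any:
          return await self._provider_for(model_name).create_chat_completion(
              model_name, **kwargs
          )

      async def get_available_models(self) -> list[Any]:
          models: list[Any] = []
          for provider in self._providers.values():
              models.extend(await provider.get_available_models())
          return models
  ```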

- feat(agent/core): Validate function call arguments in `create_chat_completion` (see the sketch below)
  - Add `validate_call` method to `CompletionModelFunction` in `model_providers.schema`
  - Add `validate_tool_calls` utility function in `model_providers.utils`
  - Add tool call validation step to `create_chat_completion` in `OpenAIProvider` and `AnthropicProvider`
  - Remove (now redundant) command argument validation logic in agent.py and models/command.py
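
  A minimal sketch of schema-based argument validation along these lines, using the third-party `jsonschema` package (the function spec format and return shape are illustrative, not the actual `validate_call` signature):

  ```python
  from jsonschema import Draft7Validator  # pip install jsonschema


  def validate_call(function_spec: dict, arguments: dict) -> tuple[bool, list[str]]:
      """Check tool call `arguments` against the function's parameter schema."""
      validator = Draft7Validator(function_spec["parameters"])
      errors = [error.message for error in validator.iter_errors(arguments)]
      return (not errors, errors)


  spec = {
      "name": "read_file",
      "parameters": {
          "type": "object",
          "properties": {"path": {"type": "string"}},
          "required": ["path"],
      },
  }
  ok, errors = validate_call(spec, {"path": 42})
  # ok is False; errors explain that 42 is not of type 'string'.
  ```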

- refactor(agent): Rename `get_openai_command_specs` to `function_specs_from_commands`
2024-05-04 20:33:25 +02:00


"""
Test cases for the config class, which handles the configuration settings
for the AI and ensures it behaves as a singleton.
"""
import asyncio
import os
from typing import Any
from unittest import mock

import pytest
from openai.pagination import AsyncPage
from openai.types import Model
from pydantic import SecretStr

from autogpt.app.configurator import (
    GPT_3_MODEL,
    GPT_4_MODEL,
    apply_overrides_to_config,
)
from autogpt.config import Config, ConfigBuilder
from autogpt.core.resource.model_providers.schema import (
    ChatModelInfo,
    ModelProviderName,
)

def test_initial_values(config: Config) -> None:
    """
    Test if the initial values of the config class attributes are set correctly.
    """
    assert config.continuous_mode is False
    assert config.tts_config.speak_mode is False
    assert config.fast_llm.startswith("gpt-3.5-turbo")
    assert config.smart_llm.startswith("gpt-4")

@pytest.mark.asyncio
@mock.patch("openai.resources.models.AsyncModels.list")
async def test_fallback_to_gpt3_if_gpt4_not_available(
    mock_list_models: Any, config: Config
) -> None:
    """
    Test if models update to gpt-3.5-turbo if gpt-4 is not available.
    """
    config.fast_llm = GPT_4_MODEL
    config.smart_llm = GPT_4_MODEL

    # models.list() is async, so the mock must return an awaitable;
    # a pre-resolved Future does the job.
    mock_list_models.return_value = asyncio.Future()
    mock_list_models.return_value.set_result(
        AsyncPage(
            data=[Model(id=GPT_3_MODEL, created=0, object="model", owned_by="AutoGPT")],
            object="Models",  # no idea what this should be, but irrelevant
        )
    )

    await apply_overrides_to_config(
        config=config,
        gpt3only=False,
        gpt4only=False,
    )

    assert config.fast_llm == GPT_3_MODEL
    assert config.smart_llm == GPT_3_MODEL

def test_missing_azure_config(config: Config) -> None:
    assert config.openai_credentials is not None

    config_file = config.app_data_dir / "azure_config.yaml"
    # A nonexistent config file should raise FileNotFoundError...
    with pytest.raises(FileNotFoundError):
        config.openai_credentials.load_azure_config(config_file)

    # ...and an empty one should fail validation.
    config_file.write_text("")
    with pytest.raises(ValueError):
        config.openai_credentials.load_azure_config(config_file)

    assert config.openai_credentials.api_type != "azure"
    assert config.openai_credentials.api_version == ""
    assert config.openai_credentials.azure_model_to_deploy_id_map is None

@pytest.fixture
def config_with_azure(config: Config):
    config_file = config.app_data_dir / "azure_config.yaml"
    config_file.write_text(
        f"""
azure_api_type: azure
azure_api_version: 2023-06-01-preview
azure_endpoint: https://dummy.openai.azure.com
azure_model_map:
    {config.fast_llm}: FAST-LLM_ID
    {config.smart_llm}: SMART-LLM_ID
    {config.embedding_model}: embedding-deployment-id-for-azure
"""
    )
    os.environ["USE_AZURE"] = "True"
    os.environ["AZURE_CONFIG_FILE"] = str(config_file)
    config_with_azure = ConfigBuilder.build_config_from_env(
        project_root=config.project_root
    )
    yield config_with_azure
    del os.environ["USE_AZURE"]
    del os.environ["AZURE_CONFIG_FILE"]

def test_azure_config(config_with_azure: Config) -> None:
    assert (credentials := config_with_azure.openai_credentials) is not None
    assert credentials.api_type == "azure"
    assert credentials.api_version == "2023-06-01-preview"
    assert credentials.azure_endpoint == SecretStr("https://dummy.openai.azure.com")
    assert credentials.azure_model_to_deploy_id_map == {
        config_with_azure.fast_llm: "FAST-LLM_ID",
        config_with_azure.smart_llm: "SMART-LLM_ID",
        config_with_azure.embedding_model: "embedding-deployment-id-for-azure",
    }

    fast_llm = config_with_azure.fast_llm
    smart_llm = config_with_azure.smart_llm
    assert (
        credentials.get_model_access_kwargs(config_with_azure.fast_llm)["model"]
        == "FAST-LLM_ID"
    )
    assert (
        credentials.get_model_access_kwargs(config_with_azure.smart_llm)["model"]
        == "SMART-LLM_ID"
    )

    # Emulate --gpt4only
    config_with_azure.fast_llm = smart_llm
    assert (
        credentials.get_model_access_kwargs(config_with_azure.fast_llm)["model"]
        == "SMART-LLM_ID"
    )
    assert (
        credentials.get_model_access_kwargs(config_with_azure.smart_llm)["model"]
        == "SMART-LLM_ID"
    )

    # Emulate --gpt3only
    config_with_azure.fast_llm = config_with_azure.smart_llm = fast_llm
    assert (
        credentials.get_model_access_kwargs(config_with_azure.fast_llm)["model"]
        == "FAST-LLM_ID"
    )
    assert (
        credentials.get_model_access_kwargs(config_with_azure.smart_llm)["model"]
        == "FAST-LLM_ID"
    )

@pytest.mark.asyncio
async def test_create_config_gpt4only(config: Config) -> None:
    with mock.patch(
        "autogpt.core.resource.model_providers.multi.MultiProvider.get_available_models"
    ) as mock_get_models:
        mock_get_models.return_value = [
            ChatModelInfo(
                name=GPT_4_MODEL,
                provider_name=ModelProviderName.OPENAI,
                max_tokens=4096,
            )
        ]
        await apply_overrides_to_config(
            config=config,
            gpt4only=True,
        )
        assert config.fast_llm == GPT_4_MODEL
        assert config.smart_llm == GPT_4_MODEL


@pytest.mark.asyncio
async def test_create_config_gpt3only(config: Config) -> None:
    with mock.patch(
        "autogpt.core.resource.model_providers.multi.MultiProvider.get_available_models"
    ) as mock_get_models:
        mock_get_models.return_value = [
            ChatModelInfo(
                name=GPT_3_MODEL,
                provider_name=ModelProviderName.OPENAI,
                max_tokens=4096,
            )
        ]
        await apply_overrides_to_config(
            config=config,
            gpt3only=True,
        )
        assert config.fast_llm == GPT_3_MODEL
        assert config.smart_llm == GPT_3_MODEL