Co-authored-by: k-boikov <64261260+k-boikov@users.noreply.github.com>
Konrad
2023-05-20 19:45:27 -04:00
committed by GitHub
parent c30f5b7d5e
commit 57ea7b5216
7 changed files with 122 additions and 3 deletions

View File

@@ -1,9 +1,12 @@
"""Configurator module.""" """Configurator module."""
from __future__ import annotations
import click
from colorama import Back, Fore, Style
from autogpt import utils
from autogpt.config import Config
from autogpt.llm.llm_utils import check_model
from autogpt.logs import logger
from autogpt.memory import get_supported_memory_backends
@@ -45,6 +48,8 @@ def create_config(
CFG.set_debug_mode(False)
CFG.set_continuous_mode(False)
CFG.set_speak_mode(False)
CFG.set_fast_llm_model(check_model(CFG.fast_llm_model, "fast_llm_model"))
CFG.set_smart_llm_model(check_model(CFG.smart_llm_model, "smart_llm_model"))
if debug:
logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED")

View File

@@ -1,6 +1,9 @@
from __future__ import annotations
from typing import List, Optional
import openai
from openai import Model
from autogpt.config import Config
from autogpt.llm.modelsinfo import COSTS
@@ -14,12 +17,14 @@ class ApiManager(metaclass=Singleton):
self.total_completion_tokens = 0
self.total_cost = 0
self.total_budget = 0
self.models: Optional[list[Model]] = None
def reset(self):
self.total_prompt_tokens = 0
self.total_completion_tokens = 0
self.total_cost = 0
self.total_budget = 0.0
self.models = None
def create_chat_completion(
self,
@@ -127,3 +132,17 @@ class ApiManager(metaclass=Singleton):
float: The total budget for API calls.
"""
return self.total_budget
def get_models(self) -> List[Model]:
"""
Get list of available GPT models.
Returns:
list: List of available GPT models.
"""
if self.models is None:
all_models = openai.Model.list()["data"]
self.models = [model for model in all_models if "gpt" in model["id"]]
return self.models
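
A minimal usage sketch of the cached lookup above (the import path and model ids are assumed for illustration, not shown in this hunk):

from autogpt.llm.api_manager import ApiManager  # assumed module path

api_manager = ApiManager()
models = api_manager.get_models()   # first call queries openai.Model.list()
cached = api_manager.get_models()   # later calls reuse the cached self.models
print([model["id"] for model in models])  # e.g. ["gpt-3.5-turbo", "gpt-4"]

Because ApiManager is a Singleton, every caller shares the same cached list until reset() sets self.models back to None.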

View File

@@ -3,7 +3,7 @@ from __future__ import annotations
import functools
import time
from itertools import islice
from typing import List, Literal, Optional
import numpy as np
import openai
@@ -293,3 +293,22 @@ def create_embedding(
) # normalize the length to one
chunk_embeddings = chunk_embeddings.tolist()
return chunk_embeddings
def check_model(
model_name: str, model_type: Literal["smart_llm_model", "fast_llm_model"]
) -> str:
"""Check if model is available for use. If not, return gpt-3.5-turbo."""
api_manager = ApiManager()
models = api_manager.get_models()
if any(model_name in m["id"] for m in models):
return model_name
logger.typewriter_log(
"WARNING: ",
Fore.YELLOW,
f"You do not have access to {model_name}. Setting {model_type} to "
f"gpt-3.5-turbo.",
)
return "gpt-3.5-turbo"

View File

@@ -47,6 +47,7 @@ def run_auto_gpt(
cfg = Config()
# TODO: fill in llm values here
check_openai_api_key()
create_config(
continuous,
continuous_limit,

View File

@@ -118,3 +118,13 @@ class TestApiManager:
assert api_manager.get_total_prompt_tokens() == 50
assert api_manager.get_total_completion_tokens() == 100
assert api_manager.get_total_cost() == (50 * 0.002 + 100 * 0.002) / 1000
@staticmethod
def test_get_models():
"""Test if getting models works correctly."""
with patch("openai.Model.list") as mock_list_models:
mock_list_models.return_value = {"data": [{"id": "gpt-3.5-turbo"}]}
result = api_manager.get_models()
assert result[0]["id"] == "gpt-3.5-turbo"
assert api_manager.models[0]["id"] == "gpt-3.5-turbo"

View File

@@ -2,10 +2,11 @@
Test cases for the Config class, which handles the configuration settings
for the AI and ensures it behaves as a singleton.
"""
from unittest.mock import patch
from openai import InvalidRequestError
from autogpt.configurator import create_config
def test_initial_values(config):
@@ -117,3 +118,40 @@ def test_set_debug_mode(config):
# Reset debug mode
config.set_debug_mode(debug_mode)
@patch("openai.Model.list")
def test_smart_and_fast_llm_models_set_to_gpt4(mock_list_models, config):
"""
Test if models update to gpt-3.5-turbo if both are set to gpt-4.
"""
fast_llm_model = config.fast_llm_model
smart_llm_model = config.smart_llm_model
config.fast_llm_model = "gpt-4"
config.smart_llm_model = "gpt-4"
mock_list_models.return_value = {"data": [{"id": "gpt-3.5-turbo"}]}
create_config(
continuous=False,
continuous_limit=False,
ai_settings_file="",
prompt_settings_file="",
skip_reprompt=False,
speak=False,
debug=False,
gpt3only=False,
gpt4only=False,
memory_type="",
browser_name="",
allow_downloads=False,
skip_news=False,
)
assert config.fast_llm_model == "gpt-3.5-turbo"
assert config.smart_llm_model == "gpt-3.5-turbo"
# Reset config
config.set_fast_llm_model(fast_llm_model)
config.set_smart_llm_model(smart_llm_model)

View File

@@ -1,7 +1,11 @@
from unittest.mock import patch
import pytest
from openai import InvalidRequestError
from openai.error import APIError, RateLimitError
from autogpt.llm import llm_utils
from autogpt.llm.llm_utils import check_model
@pytest.fixture(params=[RateLimitError, APIError])
@@ -131,3 +135,26 @@ def test_chunked_tokens():
]
output = list(llm_utils.chunked_tokens(text, "cl100k_base", 8191))
assert output == expected_output
def test_check_model(api_manager):
"""
Test if check_model() returns original model when valid.
Test if check_model() returns gpt-3.5-turbo when model is invalid.
"""
with patch("openai.Model.list") as mock_list_models:
# Test when correct model is returned
mock_list_models.return_value = {"data": [{"id": "gpt-4"}]}
result = check_model("gpt-4", "smart_llm_model")
assert result == "gpt-4"
# Reset api manager models
api_manager.models = None
# Test when incorrect model is returned
mock_list_models.return_value = {"data": [{"id": "gpt-3.5-turbo"}]}
result = check_model("gpt-4", "fast_llm_model")
assert result == "gpt-3.5-turbo"
# Reset api manager models
api_manager.models = None