mirror of
https://github.com/aljazceru/Auto-GPT.git
synced 2026-01-03 14:24:24 +01:00
Co-authored-by: Reinier van der Leer <github@pwuts.nl> Co-authored-by: Nicholas Tindle <nick@ntindle.com> Co-authored-by: Nicholas Tindle <nicktindle@outlook.com> Co-authored-by: k-boikov <64261260+k-boikov@users.noreply.github.com> Co-authored-by: merwanehamadi <merwanehamadi@gmail.com> Co-authored-by: Merwane Hamadi <merwanehamadi@gmail.com> Co-authored-by: Richard Beales <rich@richbeales.net> Co-authored-by: Luke K <2609441+lc0rp@users.noreply.github.com> Co-authored-by: Luke K (pr-0f3t) <2609441+lc0rp@users.noreply.github.com> Co-authored-by: Erik Peterson <e@eriklp.com> Co-authored-by: Auto-GPT-Bot <github-bot@agpt.co> Co-authored-by: Benny van der Lans <49377421+bfalans@users.noreply.github.com> Co-authored-by: Jan <jan-github@phobia.de> Co-authored-by: Robin Richtsfeld <robin.richtsfeld@gmail.com> Co-authored-by: Marc Bornträger <marc.borntraeger@gmail.com> Co-authored-by: Stefan Ayala <stefanayala3266@gmail.com> Co-authored-by: javableu <45064273+javableu@users.noreply.github.com> Co-authored-by: DGdev91 <DGdev91@users.noreply.github.com> Co-authored-by: Kinance <kinance@gmail.com> Co-authored-by: digger yu <digger-yu@outlook.com> Co-authored-by: David <scenaristeur@gmail.com> Co-authored-by: gravelBridge <john.tian31@gmail.com> Fix Python CI "update cassettes" step (#4591) fix CI (#4596) Fix inverted logic for deny_command (#4563) fix current_score.json generation (#4601) Fix duckduckgo rate limiting (#4592) Fix debug code challenge (#4632) Fix issues with information retrieval challenge a (#4622) fix issues with env configuration and .env.template (#4630) Fix prompt issue causing 'No Command' issues and challenge to fail (#4623) Fix benchmark logs (#4653) Fix typo in docs/setup.md (#4613) Fix run.sh shebang (#4561) Fix autogpt docker image not working because missing prompt_settings (#4680) Fix execute_command coming from plugins (#4730)
204 lines
6.0 KiB
Python
204 lines
6.0 KiB
Python
"""
|
|
Test cases for the Config class, which handles the configuration settings
|
|
for the AI and ensures it behaves as a singleton.
|
|
"""
|
|
from unittest import mock
|
|
from unittest.mock import patch
|
|
|
|
import pytest
|
|
|
|
from autogpt.config.config import Config
|
|
from autogpt.configurator import GPT_3_MODEL, GPT_4_MODEL, create_config
|
|
from autogpt.workspace.workspace import Workspace
|
|
|
|
|
|
def test_initial_values(config: Config) -> None:
    """
    Test if the initial values of the Config class attributes are set correctly.
    """
    # PEP 8: compare to singletons with `is`, not `==` (E712)
    assert config.debug_mode is False
    assert config.continuous_mode is False
    assert config.speak_mode is False
    assert config.fast_llm_model == "gpt-3.5-turbo"
    assert config.smart_llm_model == "gpt-3.5-turbo"
|
|
|
|
|
|
def test_set_continuous_mode(config: Config) -> None:
    """
    Test if the set_continuous_mode() method updates the continuous_mode attribute.
    """
    # Store continuous mode to reset it after the test
    continuous_mode = config.continuous_mode

    try:
        config.set_continuous_mode(True)
        assert config.continuous_mode is True
    finally:
        # Reset even if the assertion fails, so state doesn't leak into other tests
        config.set_continuous_mode(continuous_mode)
|
|
|
|
|
|
def test_set_speak_mode(config: Config) -> None:
    """
    Test if the set_speak_mode() method updates the speak_mode attribute.
    """
    # Store speak mode to reset it after the test
    speak_mode = config.speak_mode

    try:
        config.set_speak_mode(True)
        assert config.speak_mode is True
    finally:
        # Reset even if the assertion fails, so state doesn't leak into other tests
        config.set_speak_mode(speak_mode)
|
|
|
|
|
|
def test_set_fast_llm_model(config: Config) -> None:
    """
    Test if the set_fast_llm_model() method updates the fast_llm_model attribute.
    """
    # Store model name to reset it after the test
    fast_llm_model = config.fast_llm_model

    try:
        config.set_fast_llm_model("gpt-3.5-turbo-test")
        assert config.fast_llm_model == "gpt-3.5-turbo-test"
    finally:
        # Reset even if the assertion fails, so state doesn't leak into other tests
        config.set_fast_llm_model(fast_llm_model)
|
|
|
|
|
|
def test_set_smart_llm_model(config: Config) -> None:
    """
    Test if the set_smart_llm_model() method updates the smart_llm_model attribute.
    """
    # Store model name to reset it after the test
    smart_llm_model = config.smart_llm_model

    try:
        config.set_smart_llm_model("gpt-4-test")
        assert config.smart_llm_model == "gpt-4-test"
    finally:
        # Reset even if the assertion fails, so state doesn't leak into other tests
        config.set_smart_llm_model(smart_llm_model)
|
|
|
|
|
|
def test_set_debug_mode(config: Config) -> None:
    """
    Test if the set_debug_mode() method updates the debug_mode attribute.
    """
    # Store debug mode to reset it after the test
    debug_mode = config.debug_mode

    try:
        config.set_debug_mode(True)
        assert config.debug_mode is True
    finally:
        # Reset even if the assertion fails, so state doesn't leak into other tests
        config.set_debug_mode(debug_mode)
|
|
|
|
|
|
@patch("openai.Model.list")
def test_smart_and_fast_llm_models_set_to_gpt4(mock_list_models, config: Config) -> None:
    """
    Test that both models fall back to gpt-3.5-turbo when they are set to gpt-4
    but the mocked model listing only reports gpt-3.5-turbo as available.
    """
    # Store model names to restore after the test
    fast_llm_model = config.fast_llm_model
    smart_llm_model = config.smart_llm_model

    config.fast_llm_model = "gpt-4"
    config.smart_llm_model = "gpt-4"

    # Only gpt-3.5-turbo is reported as available by the (mocked) API
    mock_list_models.return_value = {"data": [{"id": "gpt-3.5-turbo"}]}

    try:
        create_config(
            config=config,
            continuous=False,
            # NOTE(review): sibling tests pass None here; False is likely treated
            # the same (falsy) — confirm against create_config's signature
            continuous_limit=False,
            ai_settings_file="",
            prompt_settings_file="",
            skip_reprompt=False,
            speak=False,
            debug=False,
            gpt3only=False,
            gpt4only=False,
            memory_type="",
            browser_name="",
            allow_downloads=False,
            skip_news=False,
        )

        assert config.fast_llm_model == "gpt-3.5-turbo"
        assert config.smart_llm_model == "gpt-3.5-turbo"
    finally:
        # Reset config even if the assertions fail, so state doesn't leak
        config.set_fast_llm_model(fast_llm_model)
        config.set_smart_llm_model(smart_llm_model)
|
|
|
|
|
|
def test_missing_azure_config(config: Config, workspace: Workspace):
    """Loading a nonexistent Azure config raises; an empty file yields defaults."""
    azure_config_path = workspace.get_path("azure_config.yaml")

    # A path that doesn't exist on disk must raise
    with pytest.raises(FileNotFoundError):
        config.load_azure_config(str(azure_config_path))

    # An empty (but present) config file loads with default values
    azure_config_path.write_text("")
    config.load_azure_config(str(azure_config_path))

    assert config.openai_api_type == "azure"
    assert config.openai_api_base == ""
    assert config.openai_api_version == "2023-03-15-preview"
    assert config.azure_model_to_deployment_id_map == {}
|
|
|
|
|
|
def test_create_config_gpt4only(config: Config) -> None:
    """
    Test that gpt4only=True forces both fast and smart models to GPT-4
    when the (mocked) API reports GPT-4 as available.
    """
    # Store model names to restore after the test
    fast_llm_model = config.fast_llm_model
    smart_llm_model = config.smart_llm_model

    try:
        with mock.patch(
            "autogpt.llm.api_manager.ApiManager.get_models"
        ) as mock_get_models:
            # Simulate GPT-4 being available
            mock_get_models.return_value = [{"id": GPT_4_MODEL}]
            create_config(
                config=config,
                continuous=False,
                continuous_limit=None,
                ai_settings_file=None,
                prompt_settings_file=None,
                skip_reprompt=False,
                speak=False,
                debug=False,
                gpt3only=False,
                gpt4only=True,
                memory_type=None,
                browser_name=None,
                allow_downloads=False,
                skip_news=False,
            )
            assert config.fast_llm_model == GPT_4_MODEL
            assert config.smart_llm_model == GPT_4_MODEL
    finally:
        # Reset config even if the assertions fail, so state doesn't leak
        config.set_fast_llm_model(fast_llm_model)
        config.set_smart_llm_model(smart_llm_model)
|
|
|
|
|
|
def test_create_config_gpt3only(config: Config) -> None:
    """
    Test that gpt3only=True forces both fast and smart models to GPT-3.5.
    """
    # Store model names to restore after the test
    fast_llm_model = config.fast_llm_model
    smart_llm_model = config.smart_llm_model

    try:
        with mock.patch(
            "autogpt.llm.api_manager.ApiManager.get_models"
        ) as mock_get_models:
            # Simulate GPT-3.5 being available
            mock_get_models.return_value = [{"id": GPT_3_MODEL}]
            create_config(
                config=config,
                continuous=False,
                continuous_limit=None,
                ai_settings_file=None,
                prompt_settings_file=None,
                skip_reprompt=False,
                speak=False,
                debug=False,
                gpt3only=True,
                gpt4only=False,
                memory_type=None,
                browser_name=None,
                allow_downloads=False,
                skip_news=False,
            )
            assert config.fast_llm_model == GPT_3_MODEL
            assert config.smart_llm_model == GPT_3_MODEL
    finally:
        # Reset config even if the assertions fail, so state doesn't leak
        config.set_fast_llm_model(fast_llm_model)
        config.set_smart_llm_model(smart_llm_model)
|