Fix #942: Respect --gpt4only, --gpt3only cli args (#3144)

Co-authored-by: Luke K <KayLuke@users.noreply.github.com>
Co-authored-by: Nicholas Tindle <nick@ntindle.com>
Co-authored-by: Reinier van der Leer <github@pwuts.nl>
Co-authored-by: k-boikov <64261260+k-boikov@users.noreply.github.com>
Author: Luke K (pr-0f3t)
Date: 2023-05-29 10:10:51 -07:00
Committed by: GitHub
Parent: 1ddf2324ff
Commit: 1446ffddb0

4 changed files with 104 additions and 23 deletions

autogpt/agent/agent.py

@@ -128,11 +128,13 @@ class Agent:
             # Send message to AI, get response
             with Spinner("Thinking... ", plain_output=cfg.plain_output):
                 assistant_reply = chat_with_ai(
+                    cfg,
                     self,
                     self.system_prompt,
                     self.triggering_prompt,
                     cfg.fast_token_limit,
-                )  # TODO: This hardcodes the model to use GPT3.5. Make this an argument
+                    cfg.fast_llm_model,
+                )
             assistant_reply_json = fix_json_using_multiple_techniques(assistant_reply)

             for plugin in cfg.plugins:

autogpt/configurator.py

@@ -14,6 +14,9 @@ from autogpt.memory.vector import get_supported_memory_backends
 if TYPE_CHECKING:
     from autogpt.config import Config

+GPT_4_MODEL = "gpt-4"
+GPT_3_MODEL = "gpt-3.5-turbo"
+

 def create_config(
     config: Config,
@@ -51,8 +54,6 @@ def create_config(
     config.set_debug_mode(False)
     config.set_continuous_mode(False)
     config.set_speak_mode(False)
-    config.set_fast_llm_model(check_model(config.fast_llm_model, "fast_llm_model"))
-    config.set_smart_llm_model(check_model(config.smart_llm_model, "smart_llm_model"))

     if debug:
         logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED")
@@ -83,13 +84,26 @@ def create_config(
logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED")
config.set_speak_mode(True)
# Set the default LLM models
if gpt3only:
logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
config.set_smart_llm_model(config.fast_llm_model)
# --gpt3only should always use gpt-3.5-turbo, despite user's FAST_LLM_MODEL config
config.set_fast_llm_model(GPT_3_MODEL)
config.set_smart_llm_model(GPT_3_MODEL)
if gpt4only:
elif (
gpt4only
and check_model(GPT_4_MODEL, model_type="smart_llm_model") == GPT_4_MODEL
):
logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
config.set_fast_llm_model(config.smart_llm_model)
# --gpt4only should always use gpt-4, despite user's SMART_LLM_MODEL config
config.set_fast_llm_model(GPT_4_MODEL)
config.set_smart_llm_model(GPT_4_MODEL)
else:
config.set_fast_llm_model(check_model(config.fast_llm_model, "fast_llm_model"))
config.set_smart_llm_model(
check_model(config.smart_llm_model, "smart_llm_model")
)
if memory_type:
supported_memory = get_supported_memory_backends()
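Net effect of the configurator change: --gpt3only wins over --gpt4only, --gpt4only is honored only when gpt-4 is actually available, and otherwise both models fall back to the user's config, each validated by check_model. A minimal standalone sketch of that precedence (resolve_models and the available set are illustrative stand-ins, not part of this commit; the real check_model queries the OpenAI model list):

    GPT_4_MODEL = "gpt-4"
    GPT_3_MODEL = "gpt-3.5-turbo"


    def resolve_models(
        gpt3only: bool, gpt4only: bool, fast: str, smart: str, available: set[str]
    ) -> tuple[str, str]:
        # --gpt3only takes precedence, even if --gpt4only is also passed
        if gpt3only:
            return GPT_3_MODEL, GPT_3_MODEL
        # --gpt4only applies only when gpt-4 is actually available
        if gpt4only and GPT_4_MODEL in available:
            return GPT_4_MODEL, GPT_4_MODEL
        # Otherwise keep the configured models, downgrading unavailable ones
        fast = fast if fast in available else GPT_3_MODEL
        smart = smart if smart in available else GPT_3_MODEL
        return fast, smart


    # --gpt3only beats --gpt4only:
    assert resolve_models(True, True, "gpt-4", "gpt-4", {"gpt-4"}) == (GPT_3_MODEL, GPT_3_MODEL)
    # --gpt4only pins both models when gpt-4 is available:
    assert resolve_models(False, True, "gpt-3.5-turbo", "gpt-4", {"gpt-4"}) == (GPT_4_MODEL, GPT_4_MODEL)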

autogpt/llm/chat.py

@@ -13,29 +13,34 @@ from autogpt.llm.utils import count_message_tokens, create_chat_completion
 from autogpt.log_cycle.log_cycle import CURRENT_CONTEXT_FILE_NAME
 from autogpt.logs import logger

 cfg = Config()


-# TODO: Change debug from hardcode to argument
 def chat_with_ai(
+    config: Config,
     agent: Agent,
     system_prompt: str,
     user_input: str,
     token_limit: int,
+    model: str | None = None,
 ):
     """
     Interact with the OpenAI API, sending the prompt, user input,
     message history, and permanent memory.

     Args:
+        config (Config): The config to use.
         agent (Agent): The agent to use.
         system_prompt (str): The prompt explaining the rules to the AI.
         user_input (str): The input from the user.
         token_limit (int): The maximum number of tokens allowed in the API call.
+        model (str, optional): The model to use. If None, the config.fast_llm_model will be used. Defaults to None.

     Returns:
         str: The AI's response.
     """
-    model = cfg.fast_llm_model  # TODO: Change model from hardcode to argument
+    if model is None:
+        model = config.fast_llm_model
+
     # Reserve 1000 tokens for the response
     logger.debug(f"Token limit: {token_limit}")
     send_token_limit = token_limit - 1000
@@ -140,8 +145,8 @@ def chat_with_ai(
     # Append user input, the length of this is accounted for above
     message_sequence.append(user_input_msg)

-    plugin_count = len(cfg.plugins)
-    for i, plugin in enumerate(cfg.plugins):
+    plugin_count = len(config.plugins)
+    for i, plugin in enumerate(config.plugins):
         if not plugin.can_handle_on_planning():
             continue
         plugin_response = plugin.on_planning(
@@ -157,7 +162,6 @@ def chat_with_ai(
logger.debug(f"Plugins remaining at stop: {plugin_count - i}")
break
message_sequence.add("system", plugin_response)
# Calculate remaining tokens
tokens_remaining = token_limit - current_tokens_used
# assert tokens_remaining >= 0, "Tokens remaining is negative.
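The model parameter defaults to None and falls back to config.fast_llm_model, so existing callers keep their old behavior while the Agent can now pass cfg.fast_llm_model explicitly. The same default-fallback pattern in isolation (a hedged sketch; this Config dataclass is a stand-in for autogpt's real Config, not its actual definition):

    from __future__ import annotations

    from dataclasses import dataclass


    @dataclass
    class Config:
        fast_llm_model: str = "gpt-3.5-turbo"
        smart_llm_model: str = "gpt-4"


    def pick_model(config: Config, model: str | None = None) -> str:
        # Mirrors chat_with_ai's fallback: None means "use the fast model"
        if model is None:
            model = config.fast_llm_model
        return model


    cfg = Config()
    assert pick_model(cfg) == "gpt-3.5-turbo"               # old behavior preserved
    assert pick_model(cfg, cfg.smart_llm_model) == "gpt-4"  # explicit override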

tests/test_config.py

@@ -2,14 +2,17 @@
 Test cases for the Config class, which handles the configuration settings
 for the AI and ensures it behaves as a singleton.
 """
+from unittest import mock
 from unittest.mock import patch

 import pytest

-from autogpt.configurator import create_config
+from autogpt.config.config import Config
+from autogpt.configurator import GPT_3_MODEL, GPT_4_MODEL, create_config
+from autogpt.workspace.workspace import Workspace


-def test_initial_values(config):
+def test_initial_values(config: Config):
     """
     Test if the initial values of the Config class attributes are set correctly.
     """
@@ -22,7 +25,7 @@ def test_initial_values(config):
     assert config.smart_token_limit == 8000


-def test_set_continuous_mode(config):
+def test_set_continuous_mode(config: Config):
     """
     Test if the set_continuous_mode() method updates the continuous_mode attribute.
     """
@@ -36,7 +39,7 @@ def test_set_continuous_mode(config):
     config.set_continuous_mode(continuous_mode)


-def test_set_speak_mode(config):
+def test_set_speak_mode(config: Config):
     """
     Test if the set_speak_mode() method updates the speak_mode attribute.
     """
@@ -50,7 +53,7 @@ def test_set_speak_mode(config):
     config.set_speak_mode(speak_mode)


-def test_set_fast_llm_model(config):
+def test_set_fast_llm_model(config: Config):
     """
     Test if the set_fast_llm_model() method updates the fast_llm_model attribute.
     """
@@ -64,7 +67,7 @@ def test_set_fast_llm_model(config):
     config.set_fast_llm_model(fast_llm_model)


-def test_set_smart_llm_model(config):
+def test_set_smart_llm_model(config: Config):
     """
     Test if the set_smart_llm_model() method updates the smart_llm_model attribute.
     """
@@ -78,7 +81,7 @@ def test_set_smart_llm_model(config):
     config.set_smart_llm_model(smart_llm_model)


-def test_set_fast_token_limit(config):
+def test_set_fast_token_limit(config: Config):
     """
     Test if the set_fast_token_limit() method updates the fast_token_limit attribute.
     """
@@ -92,7 +95,7 @@ def test_set_fast_token_limit(config):
     config.set_fast_token_limit(fast_token_limit)


-def test_set_smart_token_limit(config):
+def test_set_smart_token_limit(config: Config):
     """
     Test if the set_smart_token_limit() method updates the smart_token_limit attribute.
     """
@@ -106,7 +109,7 @@ def test_set_smart_token_limit(config):
     config.set_smart_token_limit(smart_token_limit)


-def test_set_debug_mode(config):
+def test_set_debug_mode(config: Config):
     """
     Test if the set_debug_mode() method updates the debug_mode attribute.
     """
@@ -121,7 +124,7 @@ def test_set_debug_mode(config):
@patch("openai.Model.list")
def test_smart_and_fast_llm_models_set_to_gpt4(mock_list_models, config):
def test_smart_and_fast_llm_models_set_to_gpt4(mock_list_models, config: Config):
"""
Test if models update to gpt-3.5-turbo if both are set to gpt-4.
"""
@@ -158,7 +161,7 @@ def test_smart_and_fast_llm_models_set_to_gpt4(mock_list_models, config):
     config.set_smart_llm_model(smart_llm_model)


-def test_missing_azure_config(config, workspace):
+def test_missing_azure_config(config: Config, workspace: Workspace):
     config_file = workspace.get_path("azure_config.yaml")
     with pytest.raises(FileNotFoundError):
         config.load_azure_config(str(config_file))
@@ -170,3 +173,61 @@ def test_missing_azure_config(config, workspace):
     assert config.openai_api_base == ""
     assert config.openai_api_version == "2023-03-15-preview"
     assert config.azure_model_to_deployment_id_map == {}
+
+
+def test_create_config_gpt4only(config: Config) -> None:
+    fast_llm_model = config.fast_llm_model
+    smart_llm_model = config.smart_llm_model
+    with mock.patch("autogpt.llm.api_manager.ApiManager.get_models") as mock_get_models:
+        mock_get_models.return_value = [{"id": GPT_4_MODEL}]
+        create_config(
+            config=config,
+            continuous=False,
+            continuous_limit=None,
+            ai_settings_file=None,
+            prompt_settings_file=None,
+            skip_reprompt=False,
+            speak=False,
+            debug=False,
+            gpt3only=False,
+            gpt4only=True,
+            memory_type=None,
+            browser_name=None,
+            allow_downloads=False,
+            skip_news=False,
+        )
+        assert config.fast_llm_model == GPT_4_MODEL
+        assert config.smart_llm_model == GPT_4_MODEL
+
+    # Reset config
+    config.set_fast_llm_model(fast_llm_model)
+    config.set_smart_llm_model(smart_llm_model)
+
+
+def test_create_config_gpt3only(config: Config) -> None:
+    fast_llm_model = config.fast_llm_model
+    smart_llm_model = config.smart_llm_model
+    with mock.patch("autogpt.llm.api_manager.ApiManager.get_models") as mock_get_models:
+        mock_get_models.return_value = [{"id": GPT_3_MODEL}]
+        create_config(
+            config=config,
+            continuous=False,
+            continuous_limit=None,
+            ai_settings_file=None,
+            prompt_settings_file=None,
+            skip_reprompt=False,
+            speak=False,
+            debug=False,
+            gpt3only=True,
+            gpt4only=False,
+            memory_type=None,
+            browser_name=None,
+            allow_downloads=False,
+            skip_news=False,
+        )
+        assert config.fast_llm_model == GPT_3_MODEL
+        assert config.smart_llm_model == GPT_3_MODEL
+
+    # Reset config
+    config.set_fast_llm_model(fast_llm_model)
+    config.set_smart_llm_model(smart_llm_model)
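Both new tests snapshot and manually restore the model settings because the config fixture is a shared singleton. A hedged alternative, not part of this commit: a hypothetical pytest fixture that restores the models even when an assertion fails mid-test (it assumes the existing config fixture from the test suite's conftest):

    import pytest

    from autogpt.config.config import Config


    @pytest.fixture
    def preserve_llm_models(config: Config):
        # Snapshot before the test body runs
        fast, smart = config.fast_llm_model, config.smart_llm_model
        yield config
        # Teardown runs even if the test fails, unlike inline reset code
        config.set_fast_llm_model(fast)
        config.set_smart_llm_model(smart)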