fixed max tokens for gpt-3.5-turbo-1106

AlexHTW
2023-11-07 22:28:23 +01:00
parent cd287c8fe0
commit 1e835d7747

@@ -34,12 +34,12 @@ def default_max_tokens(model: str) -> int:
"""
base = 1200
if model in GPT_3_MODELS:
if model == "gpt-3.5-turbo-0301":
return 4096
return base
elif model in GPT_4_MODELS:
return base * 2
elif model in GPT_3_16K_MODELS:
elif model in GPT_3_16K_MODELS:
if model == "gpt-3.5-turbo-0301":
return 4096
return base * 4
elif model in GPT_4_32K_MODELS:
return base * 8
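
For reference, below is a sketch of how default_max_tokens reads after this commit. The GPT_3_MODELS, GPT_3_16K_MODELS, GPT_4_MODELS and GPT_4_32K_MODELS tuples are defined elsewhere in the module and are not part of this hunk, so the values shown for them here are placeholder assumptions. The special case exists because gpt-3.5-turbo-1106 accepts a 16k-token context but limits each completion to 4,096 tokens, so the 16k default of base * 4 (4800) would exceed the model's cap.

# Sketch, not the committed file verbatim: the model lists below are
# assumptions standing in for the tuples defined elsewhere in the module.
GPT_3_MODELS = ("gpt-3.5-turbo", "gpt-3.5-turbo-0613")
GPT_3_16K_MODELS = ("gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", "gpt-3.5-turbo-1106")
GPT_4_MODELS = ("gpt-4", "gpt-4-0613")
GPT_4_32K_MODELS = ("gpt-4-32k", "gpt-4-32k-0613")


def default_max_tokens(model: str) -> int:
    # Default completion token budget per model family.
    base = 1200
    if model in GPT_3_MODELS:
        return base
    elif model in GPT_4_MODELS:
        return base * 2
    elif model in GPT_3_16K_MODELS:
        # gpt-3.5-turbo-1106 has a 16k context window but caps completions
        # at 4096 tokens, so it cannot use the 16k default of base * 4.
        if model == "gpt-3.5-turbo-1106":
            return 4096
        return base * 4
    elif model in GPT_4_32K_MODELS:
        return base * 8
    # Fallback for models outside these lists (assumption; the hunk ends above).
    return base

With this change, default_max_tokens("gpt-3.5-turbo-1106") returns 4096 rather than 4800 (base * 4), while the other 16k models keep the base * 4 default.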