Mirror of https://github.com/aljazceru/chatgpt-telegram-bot.git
added 128k models
@@ -22,7 +22,8 @@ GPT_3_MODELS = ("gpt-3.5-turbo", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613")
 GPT_3_16K_MODELS = ("gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613")
 GPT_4_MODELS = ("gpt-4", "gpt-4-0314", "gpt-4-0613")
 GPT_4_32K_MODELS = ("gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613")
-GPT_ALL_MODELS = GPT_3_MODELS + GPT_3_16K_MODELS + GPT_4_MODELS + GPT_4_32K_MODELS
+GPT_4_128K_MODELS = ("gpt-4-1106-preview", "gpt-4-vision-preview")
+GPT_ALL_MODELS = GPT_3_MODELS + GPT_3_16K_MODELS + GPT_4_MODELS + GPT_4_32K_MODELS + GPT_4_128K_MODELS


 def default_max_tokens(model: str) -> int:
@@ -40,6 +41,8 @@ def default_max_tokens(model: str) -> int:
         return base * 4
     elif model in GPT_4_32K_MODELS:
         return base * 8
+    elif model in GPT_4_128K_MODELS:
+        return base * 100


 def are_functions_available(model: str) -> bool:
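
For reference, a minimal standalone sketch of what the updated default_max_tokens returns for the new 128k models. The base value is defined outside the hunk shown above, so the 1200 below is an assumption, as is the simplified single-branch structure:

    # Simplified sketch of the updated lookup; not the full helper.
    GPT_4_128K_MODELS = ("gpt-4-1106-preview", "gpt-4-vision-preview")

    def default_max_tokens(model: str) -> int:
        base = 1200  # assumed base value; the real one is defined outside this hunk
        if model in GPT_4_128K_MODELS:
            return base * 100  # 120,000 tokens for the 128k-context models
        return base

    print(default_max_tokens("gpt-4-1106-preview"))  # 120000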
@@ -50,7 +53,7 @@ def are_functions_available(model: str) -> bool:
     if model in ("gpt-3.5-turbo-0301", "gpt-4-0314", "gpt-4-32k-0314"):
         return False
     # Stable models will be updated to support functions on June 27, 2023
-    if model in ("gpt-3.5-turbo", "gpt-4", "gpt-4-32k"):
+    if model in ("gpt-3.5-turbo", "gpt-4", "gpt-4-32k","gpt-4-1106-preview", "gpt-4-vision-preview"):
         return datetime.date.today() > datetime.date(2023, 6, 27)
     return True

@@ -411,6 +414,8 @@ class OpenAIHelper:
             return base * 2
         if self.config['model'] in GPT_4_32K_MODELS:
             return base * 8
+        if self.config['model'] in GPT_4_128K_MODELS:
+            return base * 100
         raise NotImplementedError(
             f"Max tokens for model {self.config['model']} is not implemented yet."
         )
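
A quick, hypothetical usage check that ties the three changes together, assuming the file shown here is importable as openai_helper (the module path is not visible on this page):

    # Hypothetical sanity check for the new entries; names mirror the diff above.
    import datetime

    from openai_helper import GPT_ALL_MODELS, GPT_4_128K_MODELS, are_functions_available

    for model in GPT_4_128K_MODELS:
        # The new tuple is folded into GPT_ALL_MODELS by the first hunk.
        assert model in GPT_ALL_MODELS
        # The preview models go through the same date gate as the stable models,
        # so functions are reported available after 2023-06-27.
        assert are_functions_available(model) == (datetime.date.today() > datetime.date(2023, 6, 27))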