Mirror of https://github.com/aljazceru/chatgpt-telegram-bot.git, synced 2025-12-19 05:35:20 +01:00
@@ -28,8 +28,8 @@ GPT_4_MODELS = ("gpt-4", "gpt-4-0314", "gpt-4-0613", "gpt-4-turbo-preview")
 GPT_4_32K_MODELS = ("gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613")
 GPT_4_VISION_MODELS = ("gpt-4-vision-preview",)
 GPT_4_128K_MODELS = ("gpt-4-1106-preview","gpt-4-0125-preview","gpt-4-turbo-preview", "gpt-4-turbo", "gpt-4-turbo-2024-04-09")
-GPT_ALL_MODELS = GPT_3_MODELS + GPT_3_16K_MODELS + GPT_4_MODELS + GPT_4_32K_MODELS + GPT_4_VISION_MODELS + GPT_4_128K_MODELS
+GPT_4O_MODELS = ("gpt-4o",)
+GPT_ALL_MODELS = GPT_3_MODELS + GPT_3_16K_MODELS + GPT_4_MODELS + GPT_4_32K_MODELS + GPT_4_VISION_MODELS + GPT_4_128K_MODELS + GPT_4O_MODELS
 
 
 def default_max_tokens(model: str) -> int:
     """
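The new family only takes effect once it is folded into GPT_ALL_MODELS, since the aggregate tuple is presumably what the bot validates the configured model name against. A minimal sketch of that membership check, with hypothetical validation code standing in for the project's own:

# Hypothetical validation sketch; only the two tuple definitions mirror this commit.
GPT_4O_MODELS = ("gpt-4o",)
GPT_ALL_MODELS = (
    "gpt-3.5-turbo",  # ...the other families, elided here for brevity...
) + GPT_4O_MODELS

def validate_model(model: str) -> None:
    """Reject model names the bot has no handling for."""
    if model not in GPT_ALL_MODELS:
        raise ValueError(f"Unknown model: {model}")

validate_model("gpt-4o")  # only passes once GPT_4O_MODELS joins the aggregate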
@@ -42,7 +42,7 @@ def default_max_tokens(model: str) -> int:
         return base
     elif model in GPT_4_MODELS:
         return base * 2
     elif model in GPT_3_16K_MODELS:
         if model == "gpt-3.5-turbo-1106":
             return 4096
         return base * 4
@@ -52,6 +52,8 @@ def default_max_tokens(model: str) -> int:
         return 4096
     elif model in GPT_4_128K_MODELS:
         return 4096
+    elif model in GPT_4O_MODELS:
+        return 4096
 
 
 def are_functions_available(model: str) -> bool:
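Reading the default_max_tokens hunks together, the function after this commit looks roughly like the sketch below. The base value and the GPT_4_32K branch are assumptions about unchanged upstream code, not part of the diff shown here.

def default_max_tokens(model: str) -> int:
    """Default completion-token budget per model family (reconstruction)."""
    base = 1200  # assumed upstream default; not visible in these hunks
    if model in GPT_3_MODELS:
        return base
    elif model in GPT_4_MODELS:
        return base * 2
    elif model in GPT_3_16K_MODELS:
        if model == "gpt-3.5-turbo-1106":
            return 4096
        return base * 4
    elif model in GPT_4_32K_MODELS:
        return base * 8  # assumed branch, outside the hunks shown
    elif model in GPT_4_VISION_MODELS:
        return 4096
    elif model in GPT_4_128K_MODELS:
        return 4096
    elif model in GPT_4O_MODELS:
        return 4096  # the branch this commit adds: same cap as the 128k family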
@@ -634,6 +636,8 @@ class OpenAIHelper:
             return base * 31
         if self.config['model'] in GPT_4_128K_MODELS:
             return base * 31
+        if self.config['model'] in GPT_4O_MODELS:
+            return base * 31
         raise NotImplementedError(
             f"Max tokens for model {self.config['model']} is not implemented yet."
         )
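The base * 31 multiplier copies the existing 128K branch: assuming base = 4096 here (inferred from the neighbouring branch, not shown in this diff), gpt-4o is treated as a 4096 * 31 = 126,976-token context, just under the advertised 128k window so a completion still fits.

# Illustrative arithmetic only; base = 4096 is an assumption inferred from
# the GPT_4_128K branch this commit mirrors for gpt-4o.
base = 4096
assert base * 31 == 126_976  # under 131_072 (128k), leaving reply headroom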
@@ -654,7 +658,7 @@ class OpenAIHelper:
         if model in GPT_3_MODELS + GPT_3_16K_MODELS:
             tokens_per_message = 4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
             tokens_per_name = -1  # if there's a name, the role is omitted
-        elif model in GPT_4_MODELS + GPT_4_32K_MODELS + GPT_4_VISION_MODELS + GPT_4_128K_MODELS:
+        elif model in GPT_4_MODELS + GPT_4_32K_MODELS + GPT_4_VISION_MODELS + GPT_4_128K_MODELS + GPT_4O_MODELS:
             tokens_per_message = 3
             tokens_per_name = 1
         else:
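This branch feeds the usual tiktoken message-counting recipe: on GPT-4-class models each chat message costs a fixed 3-token wrapper, plus 1 token when a name field is present. A self-contained sketch of that counting loop, following OpenAI's cookbook example rather than the project's exact method:

import tiktoken

def count_message_tokens(messages: list[dict], model: str = "gpt-4o") -> int:
    """Cookbook-style token count for a chat transcript (sketch)."""
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        encoding = tiktoken.get_encoding("o200k_base")  # gpt-4o's encoding
    tokens_per_message = 3  # GPT-4-family wrapper, as in the hunk above
    tokens_per_name = 1
    num_tokens = 0
    for message in messages:
        num_tokens += tokens_per_message
        for key, value in message.items():
            num_tokens += len(encoding.encode(value))
            if key == "name":
                num_tokens += tokens_per_name
    return num_tokens + 3  # every reply is primed with <|start|>assistant<|message|>

print(count_message_tokens([{"role": "user", "content": "hello"}]))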
@@ -1,6 +1,6 @@
 python-dotenv~=1.0.0
 pydub~=0.25.1
-tiktoken==0.5.1
+tiktoken==0.7.0
 openai==1.3.3
 python-telegram-bot==20.3
 requests~=2.31.0
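The tiktoken bump in this requirements pin is what makes the counting above work at all: 0.7.0 is the first release that ships the o200k_base encoding gpt-4o uses, so on 0.5.1 the lookup below raises KeyError. A quick check:

import tiktoken

# Resolves on tiktoken >= 0.7.0; on 0.5.1 this raises KeyError because the
# gpt-4o -> o200k_base mapping did not exist yet.
enc = tiktoken.encoding_for_model("gpt-4o")
print(enc.name)                  # o200k_base
print(len(enc.encode("hello")))  # token count for a short string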