Mirror of https://github.com/aljazceru/chatgpt-telegram-bot.git
update gpt-4 token counting
@@ -209,15 +209,21 @@ class OpenAIHelper:
             encoding = tiktoken.encoding_for_model(model)
         except KeyError:
             encoding = tiktoken.get_encoding("gpt-3.5-turbo")
-        if model in GPT_ALL_MODELS:
-            num_tokens = 0
-            for message in messages:
-                num_tokens += 4  # every message follows <im_start>{role/name}\n{content}<im_end>\n
-                for key, value in message.items():
-                    num_tokens += len(encoding.encode(value))
-                    if key == "name":  # if there's a name, the role is omitted
-                        num_tokens += -1  # role is always required and always 1 token
-            num_tokens += 2  # every reply is primed with <im_start>assistant
-            return num_tokens
+
+        if model in GPT_3_MODELS:
+            tokens_per_message = 4  # every message follows <im_start>{role/name}\n{content}<im_end>\n
+            tokens_per_name = -1  # if there's a name, the role is omitted
+        elif model in GPT_4_MODELS + GPT_4_32K_MODELS:
+            tokens_per_message = 3
+            tokens_per_name = 1
         else:
-            raise NotImplementedError(f"__count_tokens() is not presently implemented for model {model}")
+            raise NotImplementedError(f"""num_tokens_from_messages() is not implemented for model {model}.""")
+        num_tokens = 0
+        for message in messages:
+            num_tokens += tokens_per_message
+            for key, value in message.items():
+                num_tokens += len(encoding.encode(value))
+                if key == "name":
+                    num_tokens += tokens_per_name
+        num_tokens += 2  # every reply is primed with <im_start>assistant
+        return num_tokens
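For reference, below is a minimal, standalone sketch of the counting scheme this commit introduces: GPT-3.5-style models add 4 tokens per message and subtract 1 when a name field is present, while GPT-4-style models add 3 tokens per message and 1 per name, and every reply is primed with 2 extra tokens. The MODEL_PARAMS keys, the count_chat_tokens name, and the cl100k_base fallback are illustrative assumptions, not code from the repository; tiktoken.get_encoding() expects an encoding name such as "cl100k_base" rather than a model name, so the sketch uses that for its fallback instead of the "gpt-3.5-turbo" string seen in the diff.

import tiktoken

# (tokens_per_message, tokens_per_name) per model family; these key names are
# assumptions for the sketch, not the GPT_3_MODELS / GPT_4_MODELS lists
# defined elsewhere in the repository.
MODEL_PARAMS = {
    "gpt-3.5-turbo": (4, -1),  # every message follows <im_start>{role/name}\n{content}<im_end>\n
    "gpt-4": (3, 1),
    "gpt-4-32k": (3, 1),
}


def count_chat_tokens(messages, model="gpt-3.5-turbo"):
    """Estimate the prompt tokens needed for a list of chat messages."""
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        # cl100k_base is a valid tiktoken encoding name; using it as the
        # fallback is an assumption made for this sketch.
        encoding = tiktoken.get_encoding("cl100k_base")
    try:
        tokens_per_message, tokens_per_name = MODEL_PARAMS[model]
    except KeyError:
        raise NotImplementedError(f"token counting is not implemented for model {model}")

    num_tokens = 0
    for message in messages:
        num_tokens += tokens_per_message
        for key, value in message.items():
            num_tokens += len(encoding.encode(value))
            if key == "name":  # a name field changes the per-message overhead
                num_tokens += tokens_per_name
    num_tokens += 2  # every reply is primed with the assistant role
    return num_tokens


if __name__ == "__main__":
    demo = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello!"},
    ]
    print(count_chat_tokens(demo, model="gpt-4"))

The per-model constants simply mirror the ones added in the diff above; the refactor moves the counting loop out of the model check so only the overhead values differ per model family.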