From ca0a244b0011fd2dda30622444252ebe3935f5ea Mon Sep 17 00:00:00 2001 From: mirmakhamat Date: Fri, 20 Oct 2023 14:35:24 +0500 Subject: [PATCH 01/19] added uzbek translation --- README.md | 4 ++-- translations.json | 43 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 34e23b3..5ff3a7b 100644 --- a/README.md +++ b/README.md @@ -30,7 +30,7 @@ A [Telegram bot](https://core.telegram.org/bots/api) that integrates with OpenAI - [x] GPT-4 support - If you have access to the GPT-4 API, simply change the `OPENAI_MODEL` parameter to `gpt-4` - [x] Localized bot language - - Available languages :gb: :de: :ru: :tr: :it: :finland: :es: :indonesia: :netherlands: :cn: :taiwan: :vietnam: :iran: :brazil: :ukraine: :malaysia: + - Available languages :gb: :de: :ru: :tr: :it: :finland: :es: :indonesia: :netherlands: :cn: :taiwan: :vietnam: :iran: :brazil: :ukraine: :malaysia: :uzbekistan: - [x] Improved inline queries support for group and private chats - by [@bugfloyd](https://github.com/bugfloyd) - To use this feature, enable inline queries for your bot in BotFather via the `/setinline` [command](https://core.telegram.org/bots/inline) - [x] (NEW!) Support *new models* [announced on June 13, 2023](https://openai.com/blog/function-calling-and-other-api-updates) @@ -98,7 +98,7 @@ Check out the [Budget Manual](https://github.com/n3d1117/chatgpt-telegram-bot/di | `IMAGE_SIZE` | The DALL路E generated image size. Allowed values: `256x256`, `512x512` or `1024x1024` | `512x512` | | `GROUP_TRIGGER_KEYWORD` | If set, the bot in group chats will only respond to messages that start with this keyword | - | | `IGNORE_GROUP_TRANSCRIPTIONS` | If set to true, the bot will not process transcriptions in group chats | `true` | -| `BOT_LANGUAGE` | Language of general bot messages. Currently available: `en`, `de`, `ru`, `tr`, `it`, `fi`, `es`, `id`, `nl`, `zh-cn`, `zh-tw`, `vi`, `fa`, `pt-br`, `uk`, `ms`. 
[Contribute with additional translations](https://github.com/n3d1117/chatgpt-telegram-bot/discussions/219) | `en` | +| `BOT_LANGUAGE` | Language of general bot messages. Currently available: `en`, `de`, `ru`, `tr`, `it`, `fi`, `es`, `id`, `nl`, `zh-cn`, `zh-tw`, `vi`, `fa`, `pt-br`, `uk`, `ms`, `uz`. [Contribute with additional translations](https://github.com/n3d1117/chatgpt-telegram-bot/discussions/219) | `en` | | `WHISPER_PROMPT` | To improve the accuracy of Whisper's transcription service, especially for specific names or terms, you can set up a custom message. [Speech to text - Prompting](https://platform.openai.com/docs/guides/speech-to-text/prompting) | `-` | Check out the [official API reference](https://platform.openai.com/docs/api-reference/chat) for more details. diff --git a/translations.json b/translations.json index f603efb..fba0371 100644 --- a/translations.json +++ b/translations.json @@ -686,5 +686,48 @@ "ask_chatgpt":"Tanya ChatGPT", "loading":"Memuatkan...", "function_unavailable_in_inline_mode": "Fungsi ini tidak tersedia dalam mod sebaris" + }, + "uz": { + "help_description": "Yordam xabarini ko'rsatish", + "reset_description": "Suhbatni qayta boshlang. Agar xohlasangiz, umumiy ko'rsatmalar bering (masalan, /reset siz foydali yordamchisiz)", + "image_description": "Tasvirni so'rov bo'yicha yaratish (masalan, /image mushuk)", + "stats_description": "Hozirgi foydalanilgan statistikani olish", + "resend_description": "Oxirgi xabarni qayta yuborish", + "chat_description": "Bot bilan suxbat!", + "disallowed": "Kechirasiz, sizga bu botdan foydalanish taqiqlangan. 
Siz manba kodini tekshirishingiz mumkin https://github.com/n3d1117/chatgpt-telegram-bot", + "budget_limit": "Kechirasiz, siz foydalanish chegarasiga yetdingiz.", + "help_text": ["Men ChatGPT botman, men bilan gaplashing!", "Menga ovozli xabar yoki fayl yuboring, men uni siz uchun transkripsiya qilaman", "Ochiq manba: https://github.com/n3d1117/chatgpt-telegram-bot"], + "stats_conversation": ["Hozirgi suhbat", "tarixdagi chat xabarlari", "tarixdagi suhbat tokenlari"], + "usage_today": "Bugungi foydalanish", + "usage_month": "Bu oydagi foydalanish", + "stats_tokens": "tokenlar", + "stats_images": "yaratilgan tasvirlar", + "stats_transcribe": ["minutlar va", "soniyalar transkripsiya qilingan"], + "stats_total": "馃挵 Jami miqdor $", + "stats_budget": "Qolgan budjetingiz", + "monthly": " bu oy uchun", + "daily": " bugun uchun", + "all-time": "", + "stats_openai": "Shu oyda OpenAI hisobingizdan to'lov amalga oshirildi $", + "resend_failed": "Sizda qayta yuborish uchun hech narsa yo'q", + "reset_done": "Bajarildi!", + "image_no_prompt": "Iltimos, so'rov yozing! (masalan, /image mushuk)", + "image_fail": "Tasvir yaratish amalga oshmadi", + "media_download_fail": ["Audio faylni yuklab olish amalga oshmadi", "Fayl hajmi katta emasligiga ishonch hosil qiling. 
(max 20MB)"], + "media_type_fail": "Qo'llab-quvvatlanmaydigan fayl turi", + "transcript": "Transkript", + "answer": "Javob", + "transcribe_fail": "Matnni transkripsiya qilib bo'lmadi", + "chat_fail": "Javob olish amalga oshmadi", + "prompt": "so'rov", + "completion": "yakunlash", + "openai_rate_limit": "OpenAI ta'rif chegarasidan oshib ketdi", + "openai_invalid": "OpenAI So'rov noto'g'ri", + "error": "Xatolik yuz berdi", + "try_again": "Birozdan keyin qayta urinib ko'ring", + "answer_with_chatgpt": "ChatGPT bilan javob berish", + "ask_chatgpt": "ChatGPTdan so'rash", + "loading": "Yuklanmoqda...", + "function_unavailable_in_inline_mode": "Bu funksiya inline rejimida mavjud emas" } } From 274e784e8e408add9f59ddda98d7f6781d56d7a6 Mon Sep 17 00:00:00 2001 From: AlexHTW Date: Mon, 6 Nov 2023 23:50:11 +0100 Subject: [PATCH 02/19] added 128k models --- bot/openai_helper.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/bot/openai_helper.py b/bot/openai_helper.py index 38b630c..e2b5e92 100644 --- a/bot/openai_helper.py +++ b/bot/openai_helper.py @@ -22,7 +22,8 @@ GPT_3_MODELS = ("gpt-3.5-turbo", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613") GPT_3_16K_MODELS = ("gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613") GPT_4_MODELS = ("gpt-4", "gpt-4-0314", "gpt-4-0613") GPT_4_32K_MODELS = ("gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613") -GPT_ALL_MODELS = GPT_3_MODELS + GPT_3_16K_MODELS + GPT_4_MODELS + GPT_4_32K_MODELS +GPT_4_128K_MODELS = ("gpt-4-1106-preview", "gpt-4-vision-preview") +GPT_ALL_MODELS = GPT_3_MODELS + GPT_3_16K_MODELS + GPT_4_MODELS + GPT_4_32K_MODELS + GPT_4_128K_MODELS def default_max_tokens(model: str) -> int: @@ -40,6 +41,8 @@ def default_max_tokens(model: str) -> int: return base * 4 elif model in GPT_4_32K_MODELS: return base * 8 + elif model in GPT_4_128K_MODELS: + return base * 100 def are_functions_available(model: str) -> bool: @@ -50,7 +53,7 @@ def are_functions_available(model: str) -> bool: if model in ("gpt-3.5-turbo-0301", 
"gpt-4-0314", "gpt-4-32k-0314"): return False # Stable models will be updated to support functions on June 27, 2023 - if model in ("gpt-3.5-turbo", "gpt-4", "gpt-4-32k"): + if model in ("gpt-3.5-turbo", "gpt-4", "gpt-4-32k","gpt-4-1106-preview", "gpt-4-vision-preview"): return datetime.date.today() > datetime.date(2023, 6, 27) return True @@ -411,6 +414,8 @@ class OpenAIHelper: return base * 2 if self.config['model'] in GPT_4_32K_MODELS: return base * 8 + if self.config['model'] in GPT_4_128K_MODELS: + return base * 100 raise NotImplementedError( f"Max tokens for model {self.config['model']} is not implemented yet." ) From 8a87563282fa4333ee02ff91267729a8790e4a5c Mon Sep 17 00:00:00 2001 From: AlexHTW Date: Mon, 6 Nov 2023 23:55:21 +0100 Subject: [PATCH 03/19] added new models to count_tokens --- bot/openai_helper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bot/openai_helper.py b/bot/openai_helper.py index e2b5e92..8bad4ec 100644 --- a/bot/openai_helper.py +++ b/bot/openai_helper.py @@ -436,7 +436,7 @@ class OpenAIHelper: if model in GPT_3_MODELS + GPT_3_16K_MODELS: tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n tokens_per_name = -1 # if there's a name, the role is omitted - elif model in GPT_4_MODELS + GPT_4_32K_MODELS: + elif model in GPT_4_MODELS + GPT_4_32K_MODELS + GPT_4_128K_MODELS: tokens_per_message = 3 tokens_per_name = 1 else: From 534cf3ae1a81145012bcaacfbda50d44846cbce7 Mon Sep 17 00:00:00 2001 From: AlexHTW Date: Mon, 6 Nov 2023 23:59:35 +0100 Subject: [PATCH 04/19] fixed name of vision model --- bot/openai_helper.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bot/openai_helper.py b/bot/openai_helper.py index 8bad4ec..e22a1df 100644 --- a/bot/openai_helper.py +++ b/bot/openai_helper.py @@ -22,7 +22,7 @@ GPT_3_MODELS = ("gpt-3.5-turbo", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613") GPT_3_16K_MODELS = ("gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613") GPT_4_MODELS = 
("gpt-4", "gpt-4-0314", "gpt-4-0613") GPT_4_32K_MODELS = ("gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613") -GPT_4_128K_MODELS = ("gpt-4-1106-preview", "gpt-4-vision-preview") +GPT_4_128K_MODELS = ("gpt-4-1106-preview", "gpt-4-1106-vision-preview") GPT_ALL_MODELS = GPT_3_MODELS + GPT_3_16K_MODELS + GPT_4_MODELS + GPT_4_32K_MODELS + GPT_4_128K_MODELS @@ -53,7 +53,7 @@ def are_functions_available(model: str) -> bool: if model in ("gpt-3.5-turbo-0301", "gpt-4-0314", "gpt-4-32k-0314"): return False # Stable models will be updated to support functions on June 27, 2023 - if model in ("gpt-3.5-turbo", "gpt-4", "gpt-4-32k","gpt-4-1106-preview", "gpt-4-vision-preview"): + if model in ("gpt-3.5-turbo", "gpt-4", "gpt-4-32k","gpt-4-1106-preview", "gpt-4-1106-vision-preview"): return datetime.date.today() > datetime.date(2023, 6, 27) return True From ad6e56a2199ae3c7af05643fc845c2b105fca0ed Mon Sep 17 00:00:00 2001 From: AlexHTW Date: Tue, 7 Nov 2023 00:02:49 +0100 Subject: [PATCH 05/19] fixed name of vision model --- bot/openai_helper.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bot/openai_helper.py b/bot/openai_helper.py index e22a1df..8bad4ec 100644 --- a/bot/openai_helper.py +++ b/bot/openai_helper.py @@ -22,7 +22,7 @@ GPT_3_MODELS = ("gpt-3.5-turbo", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613") GPT_3_16K_MODELS = ("gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613") GPT_4_MODELS = ("gpt-4", "gpt-4-0314", "gpt-4-0613") GPT_4_32K_MODELS = ("gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613") -GPT_4_128K_MODELS = ("gpt-4-1106-preview", "gpt-4-1106-vision-preview") +GPT_4_128K_MODELS = ("gpt-4-1106-preview", "gpt-4-vision-preview") GPT_ALL_MODELS = GPT_3_MODELS + GPT_3_16K_MODELS + GPT_4_MODELS + GPT_4_32K_MODELS + GPT_4_128K_MODELS @@ -53,7 +53,7 @@ def are_functions_available(model: str) -> bool: if model in ("gpt-3.5-turbo-0301", "gpt-4-0314", "gpt-4-32k-0314"): return False # Stable models will be updated to support functions on June 27, 2023 - if model in 
("gpt-3.5-turbo", "gpt-4", "gpt-4-32k","gpt-4-1106-preview", "gpt-4-1106-vision-preview"): + if model in ("gpt-3.5-turbo", "gpt-4", "gpt-4-32k","gpt-4-1106-preview", "gpt-4-vision-preview"): return datetime.date.today() > datetime.date(2023, 6, 27) return True From 900d40c65fcbea2d874f604f029644b4fca727ed Mon Sep 17 00:00:00 2001 From: AlexHTW Date: Tue, 7 Nov 2023 00:05:04 +0100 Subject: [PATCH 06/19] removed vision model --- bot/openai_helper.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bot/openai_helper.py b/bot/openai_helper.py index 8bad4ec..b24687d 100644 --- a/bot/openai_helper.py +++ b/bot/openai_helper.py @@ -22,7 +22,7 @@ GPT_3_MODELS = ("gpt-3.5-turbo", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613") GPT_3_16K_MODELS = ("gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613") GPT_4_MODELS = ("gpt-4", "gpt-4-0314", "gpt-4-0613") GPT_4_32K_MODELS = ("gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613") -GPT_4_128K_MODELS = ("gpt-4-1106-preview", "gpt-4-vision-preview") +GPT_4_128K_MODELS = ("gpt-4-1106-preview") GPT_ALL_MODELS = GPT_3_MODELS + GPT_3_16K_MODELS + GPT_4_MODELS + GPT_4_32K_MODELS + GPT_4_128K_MODELS @@ -53,7 +53,7 @@ def are_functions_available(model: str) -> bool: if model in ("gpt-3.5-turbo-0301", "gpt-4-0314", "gpt-4-32k-0314"): return False # Stable models will be updated to support functions on June 27, 2023 - if model in ("gpt-3.5-turbo", "gpt-4", "gpt-4-32k","gpt-4-1106-preview", "gpt-4-vision-preview"): + if model in ("gpt-3.5-turbo", "gpt-4", "gpt-4-32k","gpt-4-1106-preview"): return datetime.date.today() > datetime.date(2023, 6, 27) return True From db7b76357d7db352bb0cef3dbda81eb57e8255f0 Mon Sep 17 00:00:00 2001 From: AlexHTW Date: Tue, 7 Nov 2023 00:55:11 +0100 Subject: [PATCH 07/19] added dalle 3 and options --- .env.example | 3 +++ README.md | 5 ++++- bot/main.py | 3 +++ bot/openai_helper.py | 3 +++ 4 files changed, 13 insertions(+), 1 deletion(-) diff --git a/.env.example b/.env.example index d6eb078..c0caaa3 100644 
--- a/.env.example +++ b/.env.example @@ -34,6 +34,9 @@ ALLOWED_TELEGRAM_USER_IDS=USER_ID_1,USER_ID_2 # TEMPERATURE=1.0 # PRESENCE_PENALTY=0.0 # FREQUENCY_PENALTY=0.0 +# IMAGE_MODEL=dall-e-3 +# IMAGE_QUALITY=hd +# IMAGE_STYLE=natural # IMAGE_SIZE=512x512 # GROUP_TRIGGER_KEYWORD="" # IGNORE_GROUP_TRANSCRIPTIONS=true diff --git a/README.md b/README.md index 34e23b3..b896c59 100644 --- a/README.md +++ b/README.md @@ -95,7 +95,10 @@ Check out the [Budget Manual](https://github.com/n3d1117/chatgpt-telegram-bot/di | `TEMPERATURE` | Number between 0 and 2. Higher values will make the output more random | `1.0` | | `PRESENCE_PENALTY` | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far | `0.0` | | `FREQUENCY_PENALTY` | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far | `0.0` | -| `IMAGE_SIZE` | The DALL路E generated image size. Allowed values: `256x256`, `512x512` or `1024x1024` | `512x512` | +| `IMAGE_MODEL` | The DALL路E model to be used. Available models: `dall-e-2` and `dall-e-3`, find current available models [here](https://platform.openai.com/docs/models/dall-e) | `dall-e-2` | +| `IMAGE_QUALITY` | Quality of DALL路E images, only available for `dall-e-3`-model. Possible options: `standard` or `hd`, beware of [pricing differences](https://openai.com/pricing#image-models). | `standard` | +| `IMAGE_STYLE` | Style for DALL路E image generation, only available for `dall-e-3`-model. Possible options: `vivid` or `natural`. Check availbe styles [here](https://platform.openai.com/docs/api-reference/images/create). | `vivid` | +| `IMAGE_SIZE` | The DALL路E generated image size. Must be `256x256`, `512x512`, or `1024x1024` for dall-e-2. Must be `1024x1024`, `1792x1024`, or `1024x1792` for dall-e-3 models. 
| `512x512` | | `GROUP_TRIGGER_KEYWORD` | If set, the bot in group chats will only respond to messages that start with this keyword | - | | `IGNORE_GROUP_TRANSCRIPTIONS` | If set to true, the bot will not process transcriptions in group chats | `true` | | `BOT_LANGUAGE` | Language of general bot messages. Currently available: `en`, `de`, `ru`, `tr`, `it`, `fi`, `es`, `id`, `nl`, `zh-cn`, `zh-tw`, `vi`, `fa`, `pt-br`, `uk`, `ms`. [Contribute with additional translations](https://github.com/n3d1117/chatgpt-telegram-bot/discussions/219) | `en` | diff --git a/bot/main.py b/bot/main.py index d7605fd..3fe43af 100644 --- a/bot/main.py +++ b/bot/main.py @@ -41,6 +41,9 @@ def main(): 'max_tokens': int(os.environ.get('MAX_TOKENS', max_tokens_default)), 'n_choices': int(os.environ.get('N_CHOICES', 1)), 'temperature': float(os.environ.get('TEMPERATURE', 1.0)), + 'image_model': os.environ.get('IMAGE_MODEL', 'dall-e-2'), + 'image_quality': os.environ.get('IMAGE_QUALITY', 'standard'), + 'image_style': os.environ.get('IMAGE_STYLE', 'vivid'), 'image_size': os.environ.get('IMAGE_SIZE', '512x512'), 'model': model, 'enable_functions': os.environ.get('ENABLE_FUNCTIONS', str(functions_available)).lower() == 'true', diff --git a/bot/openai_helper.py b/bot/openai_helper.py index b24687d..40e6b4b 100644 --- a/bot/openai_helper.py +++ b/bot/openai_helper.py @@ -324,6 +324,9 @@ class OpenAIHelper: response = await openai.Image.acreate( prompt=prompt, n=1, + model=self.config['image_model'], + quality=self.config['image_quality'], + style=self.config['image_style'], size=self.config['image_size'] ) From 21d7f1f70572e00417af5b1c8ff77221905795c2 Mon Sep 17 00:00:00 2001 From: AlexHTW Date: Tue, 7 Nov 2023 01:11:44 +0100 Subject: [PATCH 08/19] changed available sizes for dall-e-3 --- README.md | 2 +- bot/openai_helper.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index b896c59..b89f046 100644 --- a/README.md +++ b/README.md @@ -98,7 +98,7 @@ Check 
out the [Budget Manual](https://github.com/n3d1117/chatgpt-telegram-bot/di | `IMAGE_MODEL` | The DALL路E model to be used. Available models: `dall-e-2` and `dall-e-3`, find current available models [here](https://platform.openai.com/docs/models/dall-e) | `dall-e-2` | | `IMAGE_QUALITY` | Quality of DALL路E images, only available for `dall-e-3`-model. Possible options: `standard` or `hd`, beware of [pricing differences](https://openai.com/pricing#image-models). | `standard` | | `IMAGE_STYLE` | Style for DALL路E image generation, only available for `dall-e-3`-model. Possible options: `vivid` or `natural`. Check availbe styles [here](https://platform.openai.com/docs/api-reference/images/create). | `vivid` | -| `IMAGE_SIZE` | The DALL路E generated image size. Must be `256x256`, `512x512`, or `1024x1024` for dall-e-2. Must be `1024x1024`, `1792x1024`, or `1024x1792` for dall-e-3 models. | `512x512` | +| `IMAGE_SIZE` | The DALL路E generated image size. Must be `256x256`, `512x512`, or `1024x1024` for dall-e-2. Must be `1024x1024` for dall-e-3 models. | `512x512` | | `GROUP_TRIGGER_KEYWORD` | If set, the bot in group chats will only respond to messages that start with this keyword | - | | `IGNORE_GROUP_TRANSCRIPTIONS` | If set to true, the bot will not process transcriptions in group chats | `true` | | `BOT_LANGUAGE` | Language of general bot messages. Currently available: `en`, `de`, `ru`, `tr`, `it`, `fi`, `es`, `id`, `nl`, `zh-cn`, `zh-tw`, `vi`, `fa`, `pt-br`, `uk`, `ms`. 
[Contribute with additional translations](https://github.com/n3d1117/chatgpt-telegram-bot/discussions/219) | `en` | diff --git a/bot/openai_helper.py b/bot/openai_helper.py index 40e6b4b..920a73e 100644 --- a/bot/openai_helper.py +++ b/bot/openai_helper.py @@ -22,7 +22,7 @@ GPT_3_MODELS = ("gpt-3.5-turbo", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613") GPT_3_16K_MODELS = ("gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613") GPT_4_MODELS = ("gpt-4", "gpt-4-0314", "gpt-4-0613") GPT_4_32K_MODELS = ("gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613") -GPT_4_128K_MODELS = ("gpt-4-1106-preview") +GPT_4_128K_MODELS = ("gpt-4-1106-preview",) GPT_ALL_MODELS = GPT_3_MODELS + GPT_3_16K_MODELS + GPT_4_MODELS + GPT_4_32K_MODELS + GPT_4_128K_MODELS From f38acff5d472cb085331249d19959a750ae9af7e Mon Sep 17 00:00:00 2001 From: AlexHTW Date: Tue, 7 Nov 2023 08:07:32 +0100 Subject: [PATCH 09/19] added gpt-3-turbo-1106 model --- bot/openai_helper.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bot/openai_helper.py b/bot/openai_helper.py index 920a73e..b75378a 100644 --- a/bot/openai_helper.py +++ b/bot/openai_helper.py @@ -19,7 +19,7 @@ from plugin_manager import PluginManager # Models can be found here: https://platform.openai.com/docs/models/overview GPT_3_MODELS = ("gpt-3.5-turbo", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613") -GPT_3_16K_MODELS = ("gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613") +GPT_3_16K_MODELS = ("gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", "gpt-3.5-turbo-1106") GPT_4_MODELS = ("gpt-4", "gpt-4-0314", "gpt-4-0613") GPT_4_32K_MODELS = ("gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613") GPT_4_128K_MODELS = ("gpt-4-1106-preview",) @@ -53,7 +53,7 @@ def are_functions_available(model: str) -> bool: if model in ("gpt-3.5-turbo-0301", "gpt-4-0314", "gpt-4-32k-0314"): return False # Stable models will be updated to support functions on June 27, 2023 - if model in ("gpt-3.5-turbo", "gpt-4", "gpt-4-32k","gpt-4-1106-preview"): + if model in ("gpt-3.5-turbo", 
"gpt-3.5-turbo-1106", "gpt-4", "gpt-4-32k","gpt-4-1106-preview"): return datetime.date.today() > datetime.date(2023, 6, 27) return True From 1f470f4e4abc6201e84af750b352c4a6bc0d9187 Mon Sep 17 00:00:00 2001 From: AlexHTW Date: Tue, 7 Nov 2023 08:14:58 +0100 Subject: [PATCH 10/19] fixed max tokens for gpt-4-128k --- bot/openai_helper.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bot/openai_helper.py b/bot/openai_helper.py index b75378a..a31ac3d 100644 --- a/bot/openai_helper.py +++ b/bot/openai_helper.py @@ -42,7 +42,7 @@ def default_max_tokens(model: str) -> int: elif model in GPT_4_32K_MODELS: return base * 8 elif model in GPT_4_128K_MODELS: - return base * 100 + return base * 31 def are_functions_available(model: str) -> bool: @@ -418,7 +418,7 @@ class OpenAIHelper: if self.config['model'] in GPT_4_32K_MODELS: return base * 8 if self.config['model'] in GPT_4_128K_MODELS: - return base * 100 + return base * 31 raise NotImplementedError( f"Max tokens for model {self.config['model']} is not implemented yet." ) From 31f4d78c3935fe72c83a60ac606a2d5c6a9ecfee Mon Sep 17 00:00:00 2001 From: kristaller486 <85458179+kristaller486@users.noreply.github.com> Date: Tue, 7 Nov 2023 12:15:56 +0300 Subject: [PATCH 11/19] Added docs for OpenAI-compatibles APIs support --- .env.example | 1 + README.md | 50 ++++++++++++++++++++++++++------------------------ 2 files changed, 27 insertions(+), 24 deletions(-) diff --git a/.env.example b/.env.example index d6eb078..a1a5d0b 100644 --- a/.env.example +++ b/.env.example @@ -22,6 +22,7 @@ ALLOWED_TELEGRAM_USER_IDS=USER_ID_1,USER_ID_2 # ENABLE_TRANSCRIPTION=true # PROXY=http://localhost:8080 # OPENAI_MODEL=gpt-3.5-turbo +# OPENAI_API_BASE=https://example.com/v1/ # ASSISTANT_PROMPT="You are a helpful assistant." 
# SHOW_USAGE=false # STREAM=true diff --git a/README.md b/README.md index 34e23b3..8e2164e 100644 --- a/README.md +++ b/README.md @@ -36,6 +36,7 @@ A [Telegram bot](https://core.telegram.org/bots/api) that integrates with OpenAI - [x] (NEW!) Support *new models* [announced on June 13, 2023](https://openai.com/blog/function-calling-and-other-api-updates) - [x] (NEW!) Support *functions* (plugins) to extend the bot's functionality with 3rd party services - Weather, Spotify, Web search, text-to-speech and more. See [here](#available-plugins) for a list of available plugins +- [x] Support unofficial OpenAI-compatible APIs ## Additional features - help needed! If you'd like to help, check out the [issues](https://github.com/n3d1117/chatgpt-telegram-bot/issues) section and contribute! @@ -76,30 +77,31 @@ The following parameters are optional and can be set in the `.env` file: Check out the [Budget Manual](https://github.com/n3d1117/chatgpt-telegram-bot/discussions/184) for possible budget configurations. #### Additional optional configuration options -| Parameter | Description | Default value | -|------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------| -| `ENABLE_QUOTING` | Whether to enable message quoting in private chats | `true` | -| `ENABLE_IMAGE_GENERATION` | Whether to enable image generation via the `/image` command | `true` | -| `ENABLE_TRANSCRIPTION` | Whether to enable transcriptions of audio and video messages | `true` | -| `PROXY` | Proxy to be used for OpenAI and Telegram bot (e.g. `http://localhost:8080`) | - | -| `OPENAI_MODEL` | The OpenAI model to use for generating responses. 
You can find all available models [here](https://platform.openai.com/docs/models/) | `gpt-3.5-turbo` | -| `ASSISTANT_PROMPT` | A system message that sets the tone and controls the behavior of the assistant | `You are a helpful assistant.` | -| `SHOW_USAGE` | Whether to show OpenAI token usage information after each response | `false` | -| `STREAM` | Whether to stream responses. **Note**: incompatible, if enabled, with `N_CHOICES` higher than 1 | `true` | -| `MAX_TOKENS` | Upper bound on how many tokens the ChatGPT API will return | `1200` for GPT-3, `2400` for GPT-4 | -| `MAX_HISTORY_SIZE` | Max number of messages to keep in memory, after which the conversation will be summarised to avoid excessive token usage | `15` | -| `MAX_CONVERSATION_AGE_MINUTES` | Maximum number of minutes a conversation should live since the last message, after which the conversation will be reset | `180` | -| `VOICE_REPLY_WITH_TRANSCRIPT_ONLY` | Whether to answer to voice messages with the transcript only or with a ChatGPT response of the transcript | `false` | -| `VOICE_REPLY_PROMPTS` | A semicolon separated list of phrases (i.e. `Hi bot;Hello chat`). If the transcript starts with any of them, it will be treated as a prompt even if `VOICE_REPLY_WITH_TRANSCRIPT_ONLY` is set to `true` | - | -| `N_CHOICES` | Number of answers to generate for each input message. **Note**: setting this to a number higher than 1 will not work properly if `STREAM` is enabled | `1` | -| `TEMPERATURE` | Number between 0 and 2. Higher values will make the output more random | `1.0` | -| `PRESENCE_PENALTY` | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far | `0.0` | -| `FREQUENCY_PENALTY` | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far | `0.0` | -| `IMAGE_SIZE` | The DALL路E generated image size. 
Allowed values: `256x256`, `512x512` or `1024x1024` | `512x512` | -| `GROUP_TRIGGER_KEYWORD` | If set, the bot in group chats will only respond to messages that start with this keyword | - | -| `IGNORE_GROUP_TRANSCRIPTIONS` | If set to true, the bot will not process transcriptions in group chats | `true` | -| `BOT_LANGUAGE` | Language of general bot messages. Currently available: `en`, `de`, `ru`, `tr`, `it`, `fi`, `es`, `id`, `nl`, `zh-cn`, `zh-tw`, `vi`, `fa`, `pt-br`, `uk`, `ms`. [Contribute with additional translations](https://github.com/n3d1117/chatgpt-telegram-bot/discussions/219) | `en` | -| `WHISPER_PROMPT` | To improve the accuracy of Whisper's transcription service, especially for specific names or terms, you can set up a custom message. [Speech to text - Prompting](https://platform.openai.com/docs/guides/speech-to-text/prompting) | `-` | +| Parameter | Description | Default value | +|------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------| +| `ENABLE_QUOTING` | Whether to enable message quoting in private chats | `true` | +| `ENABLE_IMAGE_GENERATION` | Whether to enable image generation via the `/image` command | `true` | +| `ENABLE_TRANSCRIPTION` | Whether to enable transcriptions of audio and video messages | `true` | +| `PROXY` | Proxy to be used for OpenAI and Telegram bot (e.g. `http://localhost:8080`) | - | +| `OPENAI_MODEL` | The OpenAI model to use for generating responses. 
You can find all available models [here](https://platform.openai.com/docs/models/) | `gpt-3.5-turbo` | +| `OPENAI_API_BASE` | Endpoint URL for unofficial OpenAI-compatible APIs (e.g., LocalAI or text-generation-webui) | Default OpenAI API URL | +| `ASSISTANT_PROMPT` | A system message that sets the tone and controls the behavior of the assistant | `You are a helpful assistant.` | +| `SHOW_USAGE` | Whether to show OpenAI token usage information after each response | `false` | +| `STREAM` | Whether to stream responses. **Note**: incompatible, if enabled, with `N_CHOICES` higher than 1 | `true` | +| `MAX_TOKENS` | Upper bound on how many tokens the ChatGPT API will return | `1200` for GPT-3, `2400` for GPT-4 | +| `MAX_HISTORY_SIZE` | Max number of messages to keep in memory, after which the conversation will be summarised to avoid excessive token usage | `15` | +| `MAX_CONVERSATION_AGE_MINUTES` | Maximum number of minutes a conversation should live since the last message, after which the conversation will be reset | `180` | +| `VOICE_REPLY_WITH_TRANSCRIPT_ONLY` | Whether to answer to voice messages with the transcript only or with a ChatGPT response of the transcript | `false` | +| `VOICE_REPLY_PROMPTS` | A semicolon separated list of phrases (i.e. `Hi bot;Hello chat`). If the transcript starts with any of them, it will be treated as a prompt even if `VOICE_REPLY_WITH_TRANSCRIPT_ONLY` is set to `true` | - | +| `N_CHOICES` | Number of answers to generate for each input message. **Note**: setting this to a number higher than 1 will not work properly if `STREAM` is enabled | `1` | +| `TEMPERATURE` | Number between 0 and 2. Higher values will make the output more random | `1.0` | +| `PRESENCE_PENALTY` | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far | `0.0` | +| `FREQUENCY_PENALTY` | Number between -2.0 and 2.0. 
Positive values penalize new tokens based on their existing frequency in the text so far | `0.0` | +| `IMAGE_SIZE` | The DALL路E generated image size. Allowed values: `256x256`, `512x512` or `1024x1024` | `512x512` | +| `GROUP_TRIGGER_KEYWORD` | If set, the bot in group chats will only respond to messages that start with this keyword | - | +| `IGNORE_GROUP_TRANSCRIPTIONS` | If set to true, the bot will not process transcriptions in group chats | `true` | +| `BOT_LANGUAGE` | Language of general bot messages. Currently available: `en`, `de`, `ru`, `tr`, `it`, `fi`, `es`, `id`, `nl`, `zh-cn`, `zh-tw`, `vi`, `fa`, `pt-br`, `uk`, `ms`. [Contribute with additional translations](https://github.com/n3d1117/chatgpt-telegram-bot/discussions/219) | `en` | +| `WHISPER_PROMPT` | To improve the accuracy of Whisper's transcription service, especially for specific names or terms, you can set up a custom message. [Speech to text - Prompting](https://platform.openai.com/docs/guides/speech-to-text/prompting) | `-` | Check out the [official API reference](https://platform.openai.com/docs/api-reference/chat) for more details. 
From f06051cfa025cf0f77b5ac2c18f5eb28a3d8ea14 Mon Sep 17 00:00:00 2001 From: gilcu3 <828241+gilcu3@users.noreply.github.com> Date: Tue, 7 Nov 2023 12:52:34 +0100 Subject: [PATCH 12/19] add image receive mode --- .env.example | 1 + README.md | 1 + bot/main.py | 1 + bot/telegram_bot.py | 16 ++++++++++++---- 4 files changed, 15 insertions(+), 4 deletions(-) diff --git a/.env.example b/.env.example index d6eb078..9b52332 100644 --- a/.env.example +++ b/.env.example @@ -35,6 +35,7 @@ ALLOWED_TELEGRAM_USER_IDS=USER_ID_1,USER_ID_2 # PRESENCE_PENALTY=0.0 # FREQUENCY_PENALTY=0.0 # IMAGE_SIZE=512x512 +# IMAGE_FORMAT=document # GROUP_TRIGGER_KEYWORD="" # IGNORE_GROUP_TRANSCRIPTIONS=true # BOT_LANGUAGE=en \ No newline at end of file diff --git a/README.md b/README.md index 34e23b3..96a6569 100644 --- a/README.md +++ b/README.md @@ -96,6 +96,7 @@ Check out the [Budget Manual](https://github.com/n3d1117/chatgpt-telegram-bot/di | `PRESENCE_PENALTY` | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far | `0.0` | | `FREQUENCY_PENALTY` | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far | `0.0` | | `IMAGE_SIZE` | The DALL路E generated image size. Allowed values: `256x256`, `512x512` or `1024x1024` | `512x512` | +| `IMAGE_RECEIVE_MODE` | The Telegram image receive mode. Allowed values: `document` or `photo` | `photo` | | `GROUP_TRIGGER_KEYWORD` | If set, the bot in group chats will only respond to messages that start with this keyword | - | | `IGNORE_GROUP_TRANSCRIPTIONS` | If set to true, the bot will not process transcriptions in group chats | `true` | | `BOT_LANGUAGE` | Language of general bot messages. Currently available: `en`, `de`, `ru`, `tr`, `it`, `fi`, `es`, `id`, `nl`, `zh-cn`, `zh-tw`, `vi`, `fa`, `pt-br`, `uk`, `ms`. 
[Contribute with additional translations](https://github.com/n3d1117/chatgpt-telegram-bot/discussions/219) | `en` | diff --git a/bot/main.py b/bot/main.py index d7605fd..5dc8589 100644 --- a/bot/main.py +++ b/bot/main.py @@ -81,6 +81,7 @@ def main(): 'group_trigger_keyword': os.environ.get('GROUP_TRIGGER_KEYWORD', ''), 'token_price': float(os.environ.get('TOKEN_PRICE', 0.002)), 'image_prices': [float(i) for i in os.environ.get('IMAGE_PRICES', "0.016,0.018,0.02").split(",")], + 'image_receive_mode': os.environ.get('IMAGE_RECEIVE_MODE', "photo"), 'transcription_price': float(os.environ.get('TRANSCRIPTION_PRICE', 0.006)), 'bot_language': os.environ.get('BOT_LANGUAGE', 'en'), } diff --git a/bot/telegram_bot.py b/bot/telegram_bot.py index 57a3f0a..52fee29 100644 --- a/bot/telegram_bot.py +++ b/bot/telegram_bot.py @@ -228,10 +228,18 @@ class ChatGPTTelegramBot: async def _generate(): try: image_url, image_size = await self.openai.generate_image(prompt=image_query) - await update.effective_message.reply_photo( - reply_to_message_id=get_reply_to_message_id(self.config, update), - photo=image_url - ) + if self.config['image_receive_mode'] == 'photo': + await update.effective_message.reply_photo( + reply_to_message_id=get_reply_to_message_id(self.config, update), + photo=image_url + ) + elif self.config['image_receive_mode'] == 'document': + await update.effective_message.reply_document( + reply_to_message_id=get_reply_to_message_id(self.config, update), + document=image_url + ) + else: + raise Exception(f"env variable IMAGE_RECEIVE_MODE has invalid value {self.config['image_receive_mode']}") # add image request to users usage tracker user_id = update.message.from_user.id self.usage[user_id].add_image_request(image_size, self.config['image_prices']) From cd287c8fe063e308ab06d7d98a9e780748b34c96 Mon Sep 17 00:00:00 2001 From: AlexHTW Date: Tue, 7 Nov 2023 14:53:25 +0100 Subject: [PATCH 13/19] fixed max response tokens for new models --- .env.example | 2 +- 
bot/openai_helper.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.env.example b/.env.example index c0caaa3..7520f91 100644 --- a/.env.example +++ b/.env.example @@ -37,7 +37,7 @@ ALLOWED_TELEGRAM_USER_IDS=USER_ID_1,USER_ID_2 # IMAGE_MODEL=dall-e-3 # IMAGE_QUALITY=hd # IMAGE_STYLE=natural -# IMAGE_SIZE=512x512 +# IMAGE_SIZE=1024x1024 # GROUP_TRIGGER_KEYWORD="" # IGNORE_GROUP_TRANSCRIPTIONS=true # BOT_LANGUAGE=en \ No newline at end of file diff --git a/bot/openai_helper.py b/bot/openai_helper.py index a31ac3d..26fad44 100644 --- a/bot/openai_helper.py +++ b/bot/openai_helper.py @@ -34,6 +34,8 @@ def default_max_tokens(model: str) -> int: """ base = 1200 if model in GPT_3_MODELS: + if model == "gpt-3.5-turbo-0301": + return 4096 return base elif model in GPT_4_MODELS: return base * 2 @@ -42,7 +44,7 @@ def default_max_tokens(model: str) -> int: elif model in GPT_4_32K_MODELS: return base * 8 elif model in GPT_4_128K_MODELS: - return base * 31 + return 4096 def are_functions_available(model: str) -> bool: From 1e835d7747735452ae0eff749dfa5e7e1901cde5 Mon Sep 17 00:00:00 2001 From: AlexHTW Date: Tue, 7 Nov 2023 22:28:23 +0100 Subject: [PATCH 14/19] fixed max tokens for gpt-3.5-turbo-1106 --- bot/openai_helper.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bot/openai_helper.py b/bot/openai_helper.py index 26fad44..9065d9d 100644 --- a/bot/openai_helper.py +++ b/bot/openai_helper.py @@ -34,12 +34,12 @@ def default_max_tokens(model: str) -> int: """ base = 1200 if model in GPT_3_MODELS: - if model == "gpt-3.5-turbo-0301": - return 4096 return base elif model in GPT_4_MODELS: return base * 2 - elif model in GPT_3_16K_MODELS: + elif model in GPT_3_16K_MODELS: + if model == "gpt-3.5-turbo-0301": + return 4096 return base * 4 elif model in GPT_4_32K_MODELS: return base * 8 From 968c3b94df7133e171b4d50439685ab697087133 Mon Sep 17 00:00:00 2001 From: AlexHTW Date: Tue, 7 Nov 2023 22:29:31 +0100 Subject: [PATCH 15/19] fixed 
model name --- bot/openai_helper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bot/openai_helper.py b/bot/openai_helper.py index 9065d9d..83836a7 100644 --- a/bot/openai_helper.py +++ b/bot/openai_helper.py @@ -38,7 +38,7 @@ def default_max_tokens(model: str) -> int: elif model in GPT_4_MODELS: return base * 2 elif model in GPT_3_16K_MODELS: - if model == "gpt-3.5-turbo-0301": + if model == "gpt-3.5-turbo-1106": return 4096 return base * 4 elif model in GPT_4_32K_MODELS: From 5ada35f93e2c0187beeb7943bd563b0a25ff2c9b Mon Sep 17 00:00:00 2001 From: gilcu3 <828241+gilcu3@users.noreply.github.com> Date: Wed, 8 Nov 2023 18:01:47 +0100 Subject: [PATCH 16/19] missing change --- bot/openai_helper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bot/openai_helper.py b/bot/openai_helper.py index b8a5f12..4fe39bf 100644 --- a/bot/openai_helper.py +++ b/bot/openai_helper.py @@ -398,7 +398,7 @@ class OpenAIHelper: messages=messages, temperature=0.4 ) - return response.choices[0]['message']['content'] + return response.choices[0].message.content def __max_model_tokens(self): base = 4096 From b91764cd7eee3c12969627e369f7ef06ca6ce86a Mon Sep 17 00:00:00 2001 From: ned Date: Sat, 18 Nov 2023 15:24:20 +0100 Subject: [PATCH 17/19] Update README.md --- README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index f780d24..2bb78f4 100644 --- a/README.md +++ b/README.md @@ -33,9 +33,10 @@ A [Telegram bot](https://core.telegram.org/bots/api) that integrates with OpenAI - Available languages :gb: :de: :ru: :tr: :it: :finland: :es: :indonesia: :netherlands: :cn: :taiwan: :vietnam: :iran: :brazil: :ukraine: :malaysia: :uzbekistan: - [x] Improved inline queries support for group and private chats - by [@bugfloyd](https://github.com/bugfloyd) - To use this feature, enable inline queries for your bot in BotFather via the `/setinline` [command](https://core.telegram.org/bots/inline) -- [x] (NEW!) 
Support *new models* [announced on June 13, 2023](https://openai.com/blog/function-calling-and-other-api-updates) -- [x] (NEW!) Support *functions* (plugins) to extend the bot's functionality with 3rd party services +- [x] Support *new models* [announced on June 13, 2023](https://openai.com/blog/function-calling-and-other-api-updates) +- [x] Support *functions* (plugins) to extend the bot's functionality with 3rd party services - Weather, Spotify, Web search, text-to-speech and more. See [here](#available-plugins) for a list of available plugins +- [x] (NEW!) Support GPT-4 Turbo and DALL·E 3 [announced on November 6, 2023](https://openai.com/blog/new-models-and-developer-products-announced-at-devday) - by [@AlexHTW](https://github.com/AlexHTW) ## Additional features - help needed! If you'd like to help, check out the [issues](https://github.com/n3d1117/chatgpt-telegram-bot/issues) section and contribute! From f3a7956fe18969cba158c60894ef9ef346d53906 Mon Sep 17 00:00:00 2001 From: ned <11541888+n3d1117@users.noreply.github.com> Date: Sat, 18 Nov 2023 15:33:24 +0100 Subject: [PATCH 18/19] Update main.py --- bot/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bot/main.py b/bot/main.py index a7ea575..85a2cb0 100644 --- a/bot/main.py +++ b/bot/main.py @@ -84,7 +84,7 @@ def main(): 'group_trigger_keyword': os.environ.get('GROUP_TRIGGER_KEYWORD', ''), 'token_price': float(os.environ.get('TOKEN_PRICE', 0.002)), 'image_prices': [float(i) for i in os.environ.get('IMAGE_PRICES', "0.016,0.018,0.02").split(",")], - 'image_receive_mode': os.environ.get('IMAGE_RECEIVE_MODE', "photo"), + 'image_receive_mode': os.environ.get('IMAGE_FORMAT', "photo"), 'transcription_price': float(os.environ.get('TRANSCRIPTION_PRICE', 0.006)), 'bot_language': os.environ.get('BOT_LANGUAGE', 'en'), } From fcc83a579cad6cb92127c0c5f3c0c3376b7f72f4 Mon Sep 17 00:00:00 2001 From: ned <11541888+n3d1117@users.noreply.github.com> Date: Sat, 18 Nov 2023 16:02:56 +0100 Subject: 
[PATCH 19/19] Bump to openai-python 1.3.3 --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index fa0570d..83b4367 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ python-dotenv~=1.0.0 pydub~=0.25.1 tiktoken==0.5.1 -openai==1.1.1 +openai==1.3.3 python-telegram-bot==20.3 requests~=2.31.0 tenacity==8.2.2 @@ -10,4 +10,4 @@ duckduckgo_search~=3.8.3 spotipy~=2.23.0 pytube~=15.0.0 gtts~=2.3.2 -whois~=0.9.27 \ No newline at end of file +whois~=0.9.27