diff --git a/.env.example b/.env.example index 9de40bc..31d1ba1 100644 --- a/.env.example +++ b/.env.example @@ -23,6 +23,7 @@ ALLOWED_TELEGRAM_USER_IDS=USER_ID_1,USER_ID_2 # ENABLE_TRANSCRIPTION=true # PROXY=http://localhost:8080 # OPENAI_MODEL=gpt-3.5-turbo +# OPENAI_API_BASE=https://example.com/v1/ # ASSISTANT_PROMPT="You are a helpful assistant." # SHOW_USAGE=false # STREAM=true @@ -35,7 +36,11 @@ ALLOWED_TELEGRAM_USER_IDS=USER_ID_1,USER_ID_2 # TEMPERATURE=1.0 # PRESENCE_PENALTY=0.0 # FREQUENCY_PENALTY=0.0 -# IMAGE_SIZE=512x512 +# IMAGE_MODEL=dall-e-3 +# IMAGE_QUALITY=hd +# IMAGE_STYLE=natural +# IMAGE_SIZE=1024x1024 +# IMAGE_FORMAT=document # GROUP_TRIGGER_KEYWORD="" # IGNORE_GROUP_TRANSCRIPTIONS=true # TTS_MODEL="tts-1" diff --git a/README.md b/README.md index b5bea20..ba363dc 100644 --- a/README.md +++ b/README.md @@ -30,12 +30,14 @@ A [Telegram bot](https://core.telegram.org/bots/api) that integrates with OpenAI - [x] GPT-4 support - If you have access to the GPT-4 API, simply change the `OPENAI_MODEL` parameter to `gpt-4` - [x] Localized bot language - - Available languages :gb: :de: :ru: :tr: :it: :finland: :es: :indonesia: :netherlands: :cn: :taiwan: :vietnam: :iran: :brazil: :ukraine: :malaysia: + - Available languages :gb: :de: :ru: :tr: :it: :finland: :es: :indonesia: :netherlands: :cn: :taiwan: :vietnam: :iran: :brazil: :ukraine: :malaysia: :uzbekistan: - [x] Improved inline queries support for group and private chats - by [@bugfloyd](https://github.com/bugfloyd) - To use this feature, enable inline queries for your bot in BotFather via the `/setinline` [command](https://core.telegram.org/bots/inline) -- [x] (NEW!) Support *new models* [announced on June 13, 2023](https://openai.com/blog/function-calling-and-other-api-updates) -- [x] (NEW!) 
Support *functions* (plugins) to extend the bot's functionality with 3rd party services +- [x] Support *new models* [announced on June 13, 2023](https://openai.com/blog/function-calling-and-other-api-updates) +- [x] Support *functions* (plugins) to extend the bot's functionality with 3rd party services - Weather, Spotify, Web search, text-to-speech and more. See [here](#available-plugins) for a list of available plugins +- [x] Support unofficial OpenAI-compatible APIs - by [@kristaller486](https://github.com/kristaller486) +- [x] (NEW!) Support GPT-4 Turbo and DALL·E 3 [announced on November 6, 2023](https://openai.com/blog/new-models-and-developer-products-announced-at-devday) - by [@AlexHTW](https://github.com/AlexHTW) ## Additional features - help needed! If you'd like to help, check out the [issues](https://github.com/n3d1117/chatgpt-telegram-bot/issues) section and contribute! @@ -84,6 +86,7 @@ Check out the [Budget Manual](https://github.com/n3d1117/chatgpt-telegram-bot/di | `ENABLE_TTS_GENERATION` | Whether to enable text to speech generation via the `/tts` | `true` | | `PROXY` | Proxy to be used for OpenAI and Telegram bot (e.g. `http://localhost:8080`) | - | | `OPENAI_MODEL` | The OpenAI model to use for generating responses. You can find all available models [here](https://platform.openai.com/docs/models/) | `gpt-3.5-turbo` | +| `OPENAI_API_BASE` | Endpoint URL for unofficial OpenAI-compatible APIs (e.g., LocalAI or text-generation-webui) | Default OpenAI API URL | | `ASSISTANT_PROMPT` | A system message that sets the tone and controls the behavior of the assistant | `You are a helpful assistant.` | | `SHOW_USAGE` | Whether to show OpenAI token usage information after each response | `false` | | `STREAM` | Whether to stream responses. **Note**: incompatible, if enabled, with `N_CHOICES` higher than 1 | `true` | @@ -96,10 +99,14 @@ Check out the [Budget Manual](https://github.com/n3d1117/chatgpt-telegram-bot/di | `TEMPERATURE` | Number between 0 and 2. 
Higher values will make the output more random | `1.0` | | `PRESENCE_PENALTY` | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far | `0.0` | | `FREQUENCY_PENALTY` | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far | `0.0` | -| `IMAGE_SIZE` | The DALL·E generated image size. Allowed values: `256x256`, `512x512` or `1024x1024` | `512x512` | +| `IMAGE_FORMAT` | The Telegram image receive mode. Allowed values: `document` or `photo` | `photo` | +| `IMAGE_MODEL` | The DALL·E model to be used. Available models: `dall-e-2` and `dall-e-3`, find current available models [here](https://platform.openai.com/docs/models/dall-e) | `dall-e-2` | +| `IMAGE_QUALITY` | Quality of DALL·E images, only available for `dall-e-3`-model. Possible options: `standard` or `hd`, beware of [pricing differences](https://openai.com/pricing#image-models). | `standard` | +| `IMAGE_STYLE` | Style for DALL·E image generation, only available for `dall-e-3`-model. Possible options: `vivid` or `natural`. Check available styles [here](https://platform.openai.com/docs/api-reference/images/create). | `vivid` | +| `IMAGE_SIZE` | The DALL·E generated image size. Must be `256x256`, `512x512`, or `1024x1024` for dall-e-2. Must be `1024x1024` for dall-e-3 models. | `512x512` | | `GROUP_TRIGGER_KEYWORD` | If set, the bot in group chats will only respond to messages that start with this keyword | - | | `IGNORE_GROUP_TRANSCRIPTIONS` | If set to true, the bot will not process transcriptions in group chats | `true` | -| `BOT_LANGUAGE` | Language of general bot messages. 
Currently available: `en`, `de`, `ru`, `tr`, `it`, `fi`, `es`, `id`, `nl`, `zh-cn`, `zh-tw`, `vi`, `fa`, `pt-br`, `uk`, `ms`, `uz`. [Contribute with additional translations](https://github.com/n3d1117/chatgpt-telegram-bot/discussions/219) | `en` | | `WHISPER_PROMPT` | To improve the accuracy of Whisper's transcription service, especially for specific names or terms, you can set up a custom message. [Speech to text - Prompting](https://platform.openai.com/docs/guides/speech-to-text/prompting) | `-` | | `TTS_VOICE` | The Text to Speech voice to use. Allowed values: `alloy`, `echo`, `fable`, `onyx`, `nova`, or `shimmer` | `alloy` | | `TTS_MODEL` | The Text to Speech model to use. Allowed values: `tts-1` or `tts-1-hd` | `tts-1` | diff --git a/bot/main.py b/bot/main.py index f0e5e89..2511234 100644 --- a/bot/main.py +++ b/bot/main.py @@ -41,6 +41,9 @@ def main(): 'max_tokens': int(os.environ.get('MAX_TOKENS', max_tokens_default)), 'n_choices': int(os.environ.get('N_CHOICES', 1)), 'temperature': float(os.environ.get('TEMPERATURE', 1.0)), + 'image_model': os.environ.get('IMAGE_MODEL', 'dall-e-2'), + 'image_quality': os.environ.get('IMAGE_QUALITY', 'standard'), + 'image_style': os.environ.get('IMAGE_STYLE', 'vivid'), 'image_size': os.environ.get('IMAGE_SIZE', '512x512'), 'model': model, 'enable_functions': os.environ.get('ENABLE_FUNCTIONS', str(functions_available)).lower() == 'true', @@ -84,6 +87,7 @@ def main(): 'group_trigger_keyword': os.environ.get('GROUP_TRIGGER_KEYWORD', ''), 'token_price': float(os.environ.get('TOKEN_PRICE', 0.002)), 'image_prices': [float(i) for i in os.environ.get('IMAGE_PRICES', "0.016,0.018,0.02").split(",")], + 'image_receive_mode': os.environ.get('IMAGE_FORMAT', "photo"), 'tts_model': os.environ.get('TTS_MODEL', 'tts-1'), 'tts_prices': [float(i) for i in os.environ.get('TTS_PRICES', "0.015,0.030").split(",")], 'transcription_price': float(os.environ.get('TRANSCRIPTION_PRICE', 0.006)), diff --git a/bot/openai_helper.py b/bot/openai_helper.py 
index 2c47a70..43e1d5a 100644 --- a/bot/openai_helper.py +++ b/bot/openai_helper.py @@ -21,10 +21,11 @@ from plugin_manager import PluginManager # Models can be found here: https://platform.openai.com/docs/models/overview GPT_3_MODELS = ("gpt-3.5-turbo", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613") -GPT_3_16K_MODELS = ("gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613") +GPT_3_16K_MODELS = ("gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", "gpt-3.5-turbo-1106") GPT_4_MODELS = ("gpt-4", "gpt-4-0314", "gpt-4-0613") GPT_4_32K_MODELS = ("gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613") -GPT_ALL_MODELS = GPT_3_MODELS + GPT_3_16K_MODELS + GPT_4_MODELS + GPT_4_32K_MODELS +GPT_4_128K_MODELS = ("gpt-4-1106-preview",) +GPT_ALL_MODELS = GPT_3_MODELS + GPT_3_16K_MODELS + GPT_4_MODELS + GPT_4_32K_MODELS + GPT_4_128K_MODELS def default_max_tokens(model: str) -> int: @@ -38,10 +39,14 @@ def default_max_tokens(model: str) -> int: return base elif model in GPT_4_MODELS: return base * 2 - elif model in GPT_3_16K_MODELS: + elif model in GPT_3_16K_MODELS: + if model == "gpt-3.5-turbo-1106": + return 4096 return base * 4 elif model in GPT_4_32K_MODELS: return base * 8 + elif model in GPT_4_128K_MODELS: + return 4096 def are_functions_available(model: str) -> bool: @@ -52,7 +57,7 @@ def are_functions_available(model: str) -> bool: if model in ("gpt-3.5-turbo-0301", "gpt-4-0314", "gpt-4-32k-0314"): return False # Stable models will be updated to support functions on June 27, 2023 - if model in ("gpt-3.5-turbo", "gpt-4", "gpt-4-32k"): + if model in ("gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-4", "gpt-4-32k","gpt-4-1106-preview"): return datetime.date.today() > datetime.date(2023, 6, 27) return True @@ -321,6 +326,9 @@ class OpenAIHelper: response = await self.client.images.generate( prompt=prompt, n=1, + model=self.config['image_model'], + quality=self.config['image_quality'], + style=self.config['image_style'], size=self.config['image_size'] ) @@ -421,7 +429,7 @@ class OpenAIHelper: 
messages=messages, temperature=0.4 ) - return response.choices[0]['message']['content'] + return response.choices[0].message.content def __max_model_tokens(self): base = 4096 @@ -433,6 +441,8 @@ class OpenAIHelper: return base * 2 if self.config['model'] in GPT_4_32K_MODELS: return base * 8 + if self.config['model'] in GPT_4_128K_MODELS: + return base * 31 raise NotImplementedError( f"Max tokens for model {self.config['model']} is not implemented yet." ) @@ -453,7 +463,7 @@ class OpenAIHelper: if model in GPT_3_MODELS + GPT_3_16K_MODELS: tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n tokens_per_name = -1 # if there's a name, the role is omitted - elif model in GPT_4_MODELS + GPT_4_32K_MODELS: + elif model in GPT_4_MODELS + GPT_4_32K_MODELS + GPT_4_128K_MODELS: tokens_per_message = 3 tokens_per_name = 1 else: diff --git a/bot/telegram_bot.py b/bot/telegram_bot.py index 3cb6d9b..3727949 100644 --- a/bot/telegram_bot.py +++ b/bot/telegram_bot.py @@ -242,10 +242,18 @@ class ChatGPTTelegramBot: async def _generate(): try: image_url, image_size = await self.openai.generate_image(prompt=image_query) - await update.effective_message.reply_photo( - reply_to_message_id=get_reply_to_message_id(self.config, update), - photo=image_url - ) + if self.config['image_receive_mode'] == 'photo': + await update.effective_message.reply_photo( + reply_to_message_id=get_reply_to_message_id(self.config, update), + photo=image_url + ) + elif self.config['image_receive_mode'] == 'document': + await update.effective_message.reply_document( + reply_to_message_id=get_reply_to_message_id(self.config, update), + document=image_url + ) + else: + raise Exception(f"env variable IMAGE_FORMAT has invalid value {self.config['image_receive_mode']}") # add image request to users usage tracker user_id = update.message.from_user.id self.usage[user_id].add_image_request(image_size, self.config['image_prices']) diff --git a/requirements.txt b/requirements.txt 
index fa0570d..83b4367 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ python-dotenv~=1.0.0 pydub~=0.25.1 tiktoken==0.5.1 -openai==1.1.1 +openai==1.3.3 python-telegram-bot==20.3 requests~=2.31.0 tenacity==8.2.2 @@ -10,4 +10,4 @@ duckduckgo_search~=3.8.3 spotipy~=2.23.0 pytube~=15.0.0 gtts~=2.3.2 -whois~=0.9.27 \ No newline at end of file +whois~=0.9.27 diff --git a/translations.json b/translations.json index 63e1245..144ea0c 100644 --- a/translations.json +++ b/translations.json @@ -690,5 +690,48 @@ "ask_chatgpt":"Tanya ChatGPT", "loading":"Memuatkan...", "function_unavailable_in_inline_mode": "Fungsi ini tidak tersedia dalam mod sebaris" + }, + "uz": { + "help_description": "Yordam xabarini ko'rsatish", + "reset_description": "Suhbatni qayta boshlang. Agar xohlasangiz, umumiy ko'rsatmalar bering (masalan, /reset siz foydali yordamchisiz)", + "image_description": "Tasvirni so'rov bo'yicha yaratish (masalan, /image mushuk)", + "stats_description": "Hozirgi foydalanilgan statistikani olish", + "resend_description": "Oxirgi xabarni qayta yuborish", + "chat_description": "Bot bilan suhbat!", + "disallowed": "Kechirasiz, sizga bu botdan foydalanish taqiqlangan. 
Siz manba kodini tekshirishingiz mumkin https://github.com/n3d1117/chatgpt-telegram-bot", + "budget_limit": "Kechirasiz, siz foydalanish chegarasiga yetdingiz.", + "help_text": ["Men ChatGPT botman, men bilan gaplashing!", "Menga ovozli xabar yoki fayl yuboring, men uni siz uchun transkripsiya qilaman", "Ochiq manba: https://github.com/n3d1117/chatgpt-telegram-bot"], + "stats_conversation": ["Hozirgi suhbat", "tarixdagi chat xabarlari", "tarixdagi suhbat tokenlari"], + "usage_today": "Bugungi foydalanish", + "usage_month": "Bu oydagi foydalanish", + "stats_tokens": "tokenlar", + "stats_images": "yaratilgan tasvirlar", + "stats_transcribe": ["minutlar va", "soniyalar transkripsiya qilingan"], + "stats_total": "💰 Jami miqdor $", + "stats_budget": "Qolgan budjetingiz", + "monthly": " bu oy uchun", + "daily": " bugun uchun", + "all-time": "", + "stats_openai": "Shu oyda OpenAI hisobingizdan to'lov amalga oshirildi $", + "resend_failed": "Sizda qayta yuborish uchun hech narsa yo'q", + "reset_done": "Bajarildi!", + "image_no_prompt": "Iltimos, so'rov yozing! (masalan, /image mushuk)", + "image_fail": "Tasvir yaratish amalga oshmadi", + "media_download_fail": ["Audio faylni yuklab olish amalga oshmadi", "Fayl hajmi katta emasligiga ishonch hosil qiling. (max 20MB)"], + "media_type_fail": "Qo'llab-quvvatlanmaydigan fayl turi", + "transcript": "Transkript", + "answer": "Javob", + "transcribe_fail": "Matnni transkripsiya qilib bo'lmadi", + "chat_fail": "Javob olish amalga oshmadi", + "prompt": "so'rov", + "completion": "yakunlash", + "openai_rate_limit": "OpenAI ta'rif chegarasidan oshib ketdi", + "openai_invalid": "OpenAI So'rov noto'g'ri", + "error": "Xatolik yuz berdi", + "try_again": "Birozdan keyin qayta urinib ko'ring", + "answer_with_chatgpt": "ChatGPT bilan javob berish", + "ask_chatgpt": "ChatGPTdan so'rash", + "loading": "Yuklanmoqda...", + "function_unavailable_in_inline_mode": "Bu funksiya inline rejimida mavjud emas" } }