diff --git a/.env.example b/.env.example index 9b52332..06961bf 100644 --- a/.env.example +++ b/.env.example @@ -22,6 +22,7 @@ ALLOWED_TELEGRAM_USER_IDS=USER_ID_1,USER_ID_2 # ENABLE_TRANSCRIPTION=true # PROXY=http://localhost:8080 # OPENAI_MODEL=gpt-3.5-turbo +# OPENAI_API_BASE=https://example.com/v1/ # ASSISTANT_PROMPT="You are a helpful assistant." # SHOW_USAGE=false # STREAM=true @@ -34,7 +35,10 @@ ALLOWED_TELEGRAM_USER_IDS=USER_ID_1,USER_ID_2 # TEMPERATURE=1.0 # PRESENCE_PENALTY=0.0 # FREQUENCY_PENALTY=0.0 -# IMAGE_SIZE=512x512 +# IMAGE_MODEL=dall-e-3 +# IMAGE_QUALITY=hd +# IMAGE_STYLE=natural +# IMAGE_SIZE=1024x1024 # IMAGE_FORMAT=document # GROUP_TRIGGER_KEYWORD="" # IGNORE_GROUP_TRANSCRIPTIONS=true diff --git a/README.md b/README.md index 96a6569..34e59b6 100644 --- a/README.md +++ b/README.md @@ -30,12 +30,14 @@ A [Telegram bot](https://core.telegram.org/bots/api) that integrates with OpenAI - [x] GPT-4 support - If you have access to the GPT-4 API, simply change the `OPENAI_MODEL` parameter to `gpt-4` - [x] Localized bot language - - Available languages :gb: :de: :ru: :tr: :it: :finland: :es: :indonesia: :netherlands: :cn: :taiwan: :vietnam: :iran: :brazil: :ukraine: :malaysia: + - Available languages :gb: :de: :ru: :tr: :it: :finland: :es: :indonesia: :netherlands: :cn: :taiwan: :vietnam: :iran: :brazil: :ukraine: :malaysia: :uzbekistan: - [x] Improved inline queries support for group and private chats - by [@bugfloyd](https://github.com/bugfloyd) - To use this feature, enable inline queries for your bot in BotFather via the `/setinline` [command](https://core.telegram.org/bots/inline) -- [x] (NEW!) Support *new models* [announced on June 13, 2023](https://openai.com/blog/function-calling-and-other-api-updates) -- [x] (NEW!) 
Support *functions* (plugins) to extend the bot's functionality with 3rd party services +- [x] Support *new models* [announced on June 13, 2023](https://openai.com/blog/function-calling-and-other-api-updates) +- [x] Support *functions* (plugins) to extend the bot's functionality with 3rd party services - Weather, Spotify, Web search, text-to-speech and more. See [here](#available-plugins) for a list of available plugins +- [x] Support unofficial OpenAI-compatible APIs - by [@kristaller486](https://github.com/kristaller486) +- [x] (NEW!) Support GPT-4 Turbo and DALL·E 3 [announced on November 6, 2023](https://openai.com/blog/new-models-and-developer-products-announced-at-devday) - by [@AlexHTW](https://github.com/AlexHTW) ## Additional features - help needed! If you'd like to help, check out the [issues](https://github.com/n3d1117/chatgpt-telegram-bot/issues) section and contribute! @@ -83,6 +85,7 @@ Check out the [Budget Manual](https://github.com/n3d1117/chatgpt-telegram-bot/di | `ENABLE_TRANSCRIPTION` | Whether to enable transcriptions of audio and video messages | `true` | | `PROXY` | Proxy to be used for OpenAI and Telegram bot (e.g. `http://localhost:8080`) | - | | `OPENAI_MODEL` | The OpenAI model to use for generating responses. You can find all available models [here](https://platform.openai.com/docs/models/) | `gpt-3.5-turbo` | +| `OPENAI_API_BASE` | Endpoint URL for unofficial OpenAI-compatible APIs (e.g., LocalAI or text-generation-webui) | Default OpenAI API URL | | `ASSISTANT_PROMPT` | A system message that sets the tone and controls the behavior of the assistant | `You are a helpful assistant.` | | `SHOW_USAGE` | Whether to show OpenAI token usage information after each response | `false` | | `STREAM` | Whether to stream responses. **Note**: incompatible, if enabled, with `N_CHOICES` higher than 1 | `true` | @@ -95,11 +98,14 @@ Check out the [Budget Manual](https://github.com/n3d1117/chatgpt-telegram-bot/di | `TEMPERATURE` | Number between 0 and 2. 
Higher values will make the output more random | `1.0` | | `PRESENCE_PENALTY` | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far | `0.0` | | `FREQUENCY_PENALTY` | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far | `0.0` | -| `IMAGE_SIZE` | The DALL·E generated image size. Allowed values: `256x256`, `512x512` or `1024x1024` | `512x512` | -| `IMAGE_RECEIVE_MODE` | The Telegram image receive mode. Allowed values: `document` or `photo` | `photo` | +| `IMAGE_FORMAT` | The Telegram image receive mode. Allowed values: `document` or `photo` | `photo` | +| `IMAGE_MODEL` | The DALL·E model to be used. Available models: `dall-e-2` and `dall-e-3`, find current available models [here](https://platform.openai.com/docs/models/dall-e) | `dall-e-2` | +| `IMAGE_QUALITY` | Quality of DALL·E images, only available for `dall-e-3`-model. Possible options: `standard` or `hd`, beware of [pricing differences](https://openai.com/pricing#image-models). | `standard` | +| `IMAGE_STYLE` | Style for DALL·E image generation, only available for `dall-e-3`-model. Possible options: `vivid` or `natural`. Check available styles [here](https://platform.openai.com/docs/api-reference/images/create). | `vivid` | +| `IMAGE_SIZE` | The DALL·E generated image size. Must be `256x256`, `512x512`, or `1024x1024` for dall-e-2. Must be `1024x1024` for dall-e-3 models. | `512x512` | | `GROUP_TRIGGER_KEYWORD` | If set, the bot in group chats will only respond to messages that start with this keyword | - | | `IGNORE_GROUP_TRANSCRIPTIONS` | If set to true, the bot will not process transcriptions in group chats | `true` | -| `BOT_LANGUAGE` | Language of general bot messages. Currently available: `en`, `de`, `ru`, `tr`, `it`, `fi`, `es`, `id`, `nl`, `zh-cn`, `zh-tw`, `vi`, `fa`, `pt-br`, `uk`, `ms`. 
[Contribute with additional translations](https://github.com/n3d1117/chatgpt-telegram-bot/discussions/219) | `en` | +| `BOT_LANGUAGE` | Language of general bot messages. Currently available: `en`, `de`, `ru`, `tr`, `it`, `fi`, `es`, `id`, `nl`, `zh-cn`, `zh-tw`, `vi`, `fa`, `pt-br`, `uk`, `ms`, `uz`. [Contribute with additional translations](https://github.com/n3d1117/chatgpt-telegram-bot/discussions/219) | `en` | | `WHISPER_PROMPT` | To improve the accuracy of Whisper's transcription service, especially for specific names or terms, you can set up a custom message. [Speech to text - Prompting](https://platform.openai.com/docs/guides/speech-to-text/prompting) | `-` | Check out the [official API reference](https://platform.openai.com/docs/api-reference/chat) for more details. diff --git a/bot/main.py b/bot/main.py index 5dc8589..a7ea575 100644 --- a/bot/main.py +++ b/bot/main.py @@ -41,6 +41,9 @@ def main(): 'max_tokens': int(os.environ.get('MAX_TOKENS', max_tokens_default)), 'n_choices': int(os.environ.get('N_CHOICES', 1)), 'temperature': float(os.environ.get('TEMPERATURE', 1.0)), + 'image_model': os.environ.get('IMAGE_MODEL', 'dall-e-2'), + 'image_quality': os.environ.get('IMAGE_QUALITY', 'standard'), + 'image_style': os.environ.get('IMAGE_STYLE', 'vivid'), 'image_size': os.environ.get('IMAGE_SIZE', '512x512'), 'model': model, 'enable_functions': os.environ.get('ENABLE_FUNCTIONS', str(functions_available)).lower() == 'true', diff --git a/bot/openai_helper.py b/bot/openai_helper.py index 38b630c..83836a7 100644 --- a/bot/openai_helper.py +++ b/bot/openai_helper.py @@ -19,10 +19,11 @@ from plugin_manager import PluginManager # Models can be found here: https://platform.openai.com/docs/models/overview GPT_3_MODELS = ("gpt-3.5-turbo", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613") -GPT_3_16K_MODELS = ("gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613") +GPT_3_16K_MODELS = ("gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", "gpt-3.5-turbo-1106") GPT_4_MODELS = ("gpt-4", 
"gpt-4-0314", "gpt-4-0613") GPT_4_32K_MODELS = ("gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613") -GPT_ALL_MODELS = GPT_3_MODELS + GPT_3_16K_MODELS + GPT_4_MODELS + GPT_4_32K_MODELS +GPT_4_128K_MODELS = ("gpt-4-1106-preview",) +GPT_ALL_MODELS = GPT_3_MODELS + GPT_3_16K_MODELS + GPT_4_MODELS + GPT_4_32K_MODELS + GPT_4_128K_MODELS def default_max_tokens(model: str) -> int: @@ -36,10 +37,14 @@ def default_max_tokens(model: str) -> int: return base elif model in GPT_4_MODELS: return base * 2 - elif model in GPT_3_16K_MODELS: + elif model in GPT_3_16K_MODELS: + if model == "gpt-3.5-turbo-1106": + return 4096 return base * 4 elif model in GPT_4_32K_MODELS: return base * 8 + elif model in GPT_4_128K_MODELS: + return 4096 def are_functions_available(model: str) -> bool: @@ -50,7 +55,7 @@ def are_functions_available(model: str) -> bool: if model in ("gpt-3.5-turbo-0301", "gpt-4-0314", "gpt-4-32k-0314"): return False # Stable models will be updated to support functions on June 27, 2023 - if model in ("gpt-3.5-turbo", "gpt-4", "gpt-4-32k"): + if model in ("gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-4", "gpt-4-32k","gpt-4-1106-preview"): return datetime.date.today() > datetime.date(2023, 6, 27) return True @@ -321,6 +326,9 @@ class OpenAIHelper: response = await openai.Image.acreate( prompt=prompt, n=1, + model=self.config['image_model'], + quality=self.config['image_quality'], + style=self.config['image_style'], size=self.config['image_size'] ) @@ -411,6 +419,8 @@ class OpenAIHelper: return base * 2 if self.config['model'] in GPT_4_32K_MODELS: return base * 8 + if self.config['model'] in GPT_4_128K_MODELS: + return base * 31 raise NotImplementedError( f"Max tokens for model {self.config['model']} is not implemented yet." 
) @@ -431,7 +441,7 @@ class OpenAIHelper: if model in GPT_3_MODELS + GPT_3_16K_MODELS: tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n tokens_per_name = -1 # if there's a name, the role is omitted - elif model in GPT_4_MODELS + GPT_4_32K_MODELS: + elif model in GPT_4_MODELS + GPT_4_32K_MODELS + GPT_4_128K_MODELS: tokens_per_message = 3 tokens_per_name = 1 else: diff --git a/translations.json b/translations.json index f603efb..fba0371 100644 --- a/translations.json +++ b/translations.json @@ -686,5 +686,48 @@ "ask_chatgpt":"Tanya ChatGPT", "loading":"Memuatkan...", "function_unavailable_in_inline_mode": "Fungsi ini tidak tersedia dalam mod sebaris" + }, + "uz": { + "help_description": "Yordam xabarini ko'rsatish", + "reset_description": "Suhbatni qayta boshlang. Agar xohlasangiz, umumiy ko'rsatmalar bering (masalan, /reset siz foydali yordamchisiz)", + "image_description": "Tasvirni so'rov bo'yicha yaratish (masalan, /image mushuk)", + "stats_description": "Hozirgi foydalanilgan statistikani olish", + "resend_description": "Oxirgi xabarni qayta yuborish", + "chat_description": "Bot bilan suxbat!", + "disallowed": "Kechirasiz, sizga bu botdan foydalanish taqiqlangan. 
Siz manba kodini tekshirishingiz mumkin https://github.com/n3d1117/chatgpt-telegram-bot", + "budget_limit": "Kechirasiz, siz foydalanish chegarasiga yetdingiz.", + "help_text": ["Men ChatGPT botman, men bilan gaplashing!", "Menga ovozli xabar yoki fayl yuboring, men uni siz uchun transkripsiya qilaman", "Ochiq manba: https://github.com/n3d1117/chatgpt-telegram-bot"], + "stats_conversation": ["Hozirgi suhbat", "tarixdagi chat xabarlari", "tarixdagi suhbat tokenlari"], + "usage_today": "Bugungi foydalanish", + "usage_month": "Bu oydagi foydalanish", + "stats_tokens": "tokenlar", + "stats_images": "yaratilgan tasvirlar", + "stats_transcribe": ["minutlar va", "soniyalar transkripsiya qilingan"], + "stats_total": "💰 Jami miqdor $", + "stats_budget": "Qolgan budjetingiz", + "monthly": " bu oy uchun", + "daily": " bugun uchun", + "all-time": "", + "stats_openai": "Shu oyda OpenAI hisobingizdan to'lov amalga oshirildi $", + "resend_failed": "Sizda qayta yuborish uchun hech narsa yo'q", + "reset_done": "Bajarildi!", + "image_no_prompt": "Iltimos, so'rov yozing! (masalan, /image mushuk)", + "image_fail": "Tasvir yaratish amalga oshmadi", + "media_download_fail": ["Audio faylni yuklab olish amalga oshmadi", "Fayl hajmi katta emasligiga ishonch hosil qiling. (max 20MB)"], + "media_type_fail": "Qo'llab-quvvatlanmaydigan fayl turi", + "transcript": "Transkript", + "answer": "Javob", + "transcribe_fail": "Matnni transkripsiya qilib bo'lmadi", + "chat_fail": "Javob olish amalga oshmadi", + "prompt": "so'rov", + "completion": "yakunlash", + "openai_rate_limit": "OpenAI ta'rif chegarasidan oshib ketdi", + "openai_invalid": "OpenAI So'rov noto'g'ri", + "error": "Xatolik yuz berdi", + "try_again": "Birozdan keyin qayta urinib ko'ring", + "answer_with_chatgpt": "ChatGPT bilan javob berish", + "ask_chatgpt": "ChatGPTdan so'rash", + "loading": "Yuklanmoqda...", + "function_unavailable_in_inline_mode": "Bu funksiya inline rejimida mavjud emas" } }