mirror of
https://github.com/aljazceru/chatgpt-telegram-bot.git
synced 2026-01-04 21:45:33 +01:00
added conf option to show token usage
This commit is contained in:
@@ -5,4 +5,7 @@ OPENAI_API_KEY="XXX"
|
||||
TELEGRAM_BOT_TOKEN="XXX"
|
||||
|
||||
# Comma separated list of telegram user IDs, or * to allow all
|
||||
ALLOWED_TELEGRAM_USER_IDS="USER_ID_1,USER_ID_2"
|
||||
ALLOWED_TELEGRAM_USER_IDS="USER_ID_1,USER_ID_2"
|
||||
|
||||
# Whether to show OpenAI token usage information after each response
|
||||
SHOW_USAGE=false
|
||||
@@ -17,9 +17,9 @@ A [Telegram bot](https://core.telegram.org/bots/api) that integrates with OpenAI
|
||||
- [x] Docker support
|
||||
- [x] (NEW!) Support multiple answers!
|
||||
- [x] (NEW!) Customizable model parameters (see [configuration](#configuration) section)
|
||||
- [x] (NEW!) See token usage after each answer
|
||||
|
||||
## Coming soon
|
||||
- [ ] See remaining tokens and current usage
|
||||
- [ ] Multi-chat support
|
||||
- [ ] Image generation using DALL·E APIs
|
||||
|
||||
@@ -41,10 +41,12 @@ Customize the configuration by copying `.env.example` and renaming it to `.env`,
|
||||
OPENAI_API_KEY="<YOUR_OPENAI_API_KEY>"
|
||||
TELEGRAM_BOT_TOKEN="<YOUR_TELEGRAM_BOT_TOKEN>"
|
||||
ALLOWED_TELEGRAM_USER_IDS="<USER_ID_1>,<USER_ID_2>,..." # Defaults to "*" (everyone)
|
||||
SHOW_USAGE=false
|
||||
```
|
||||
* `OPENAI_API_KEY`: Your OpenAI API key, get it from [here](https://platform.openai.com/account/api-keys)
|
||||
* `TELEGRAM_BOT_TOKEN`: Your Telegram bot's token, obtained using [BotFather](http://t.me/botfather) (see [tutorial](https://core.telegram.org/bots/tutorial#obtain-your-bot-token))
|
||||
* `ALLOWED_TELEGRAM_USER_IDS`: A comma-separated list of Telegram user IDs that are allowed to interact with the bot (use [getidsbot](https://t.me/getidsbot) to find your user ID). **Important**: by default, *everyone* is allowed (`*`)
|
||||
* `SHOW_USAGE`: Whether to show OpenAI token usage information after each response. Optional, defaults to `false`
|
||||
|
||||
Additional optional model parameters can be configured from the `main.py` file:
|
||||
```python
|
||||
|
||||
@@ -37,22 +37,29 @@ class GPTHelper:
|
||||
)
|
||||
|
||||
if len(response.choices) > 0:
|
||||
answer = ''
|
||||
|
||||
if len(response.choices) > 1 and self.config['n_choices'] > 1:
|
||||
answer = ''
|
||||
for index, choice in enumerate(response.choices):
|
||||
if index == 0:
|
||||
self.history.append({"role": "assistant", "content": choice['message']['content']})
|
||||
answer += f'{index+1}\u20e3\n'
|
||||
answer += choice['message']['content']
|
||||
answer += '\n\n'
|
||||
return answer
|
||||
else:
|
||||
answer = response.choices[0]['message']['content']
|
||||
self.history.append({"role": "assistant", "content": answer})
|
||||
return answer
|
||||
|
||||
if self.config['show_usage']:
|
||||
answer += "\n\n---\n" \
|
||||
f"💰 Tokens used: {str(response.usage['total_tokens'])}" \
|
||||
f" ({str(response.usage['prompt_tokens'])} prompt," \
|
||||
f" {str(response.usage['completion_tokens'])} completion)"
|
||||
|
||||
return answer
|
||||
else:
|
||||
logging.error('No response from GPT-3')
|
||||
return "No response from GPT-3"
|
||||
return "⚠️ _An error has occurred_ ⚠️\nPlease try again in a while."
|
||||
|
||||
except openai.error.RateLimitError as e:
|
||||
logging.exception(e)
|
||||
|
||||
Reference in New Issue
Block a user