Mirror of https://github.com/aljazceru/chatgpt-telegram-bot.git (synced 2026-01-08 23:46:00 +01:00)
added N_CHOICES, TEMPERATURE and IMAGE_SIZE to the .env file
.env.example (10 lines changed)
@@ -25,6 +25,16 @@ MAX_CONVERSATION_AGE_MINUTES=180
 # Whether to answer voice messages with the transcript or with a ChatGPT response to the transcript
 VOICE_REPLY_WITH_TRANSCRIPT_ONLY=true
 
+# How many chat completion choices to generate for each input message
+N_CHOICES=1
+
+# Number between 0 and 2. Higher values like 0.8 will make the output more random,
+# while lower values like 0.2 will make it more focused and deterministic
+TEMPERATURE=1.0
+
+# The DALL·E generated image size
+IMAGE_SIZE="512x512"
+
 # Group trigger keyword, if set, the bot will only respond to messages that start with this keyword
 # Useful for bots added to groups with privacy mode disabled
 GROUP_TRIGGER_KEYWORD=""
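The comments above only document the constraints (TEMPERATURE between 0 and 2, IMAGE_SIZE one of the three DALL·E resolutions); nothing in the .env file enforces them. A minimal sketch of how the new values could be read and checked in Python, assuming python-dotenv (which a .env-based setup like this typically uses); the helper name is illustrative, not part of the bot:

```python
import os

from dotenv import load_dotenv  # python-dotenv, assumed to be how the .env file is loaded

ALLOWED_IMAGE_SIZES = {"256x256", "512x512", "1024x1024"}


def read_generation_settings() -> dict:
    """Illustrative helper: read the new settings and fail fast on bad values."""
    load_dotenv()  # makes the .env entries visible via os.environ

    n_choices = int(os.environ.get("N_CHOICES", 1))
    temperature = float(os.environ.get("TEMPERATURE", 1.0))
    image_size = os.environ.get("IMAGE_SIZE", "512x512")

    if n_choices < 1:
        raise ValueError("N_CHOICES must be a positive integer")
    if not 0 <= temperature <= 2:
        raise ValueError("TEMPERATURE must be between 0 and 2")
    if image_size not in ALLOWED_IMAGE_SIZES:
        raise ValueError(f"IMAGE_SIZE must be one of {sorted(ALLOWED_IMAGE_SIZES)}")

    return {"n_choices": n_choices, "temperature": temperature, "image_size": image_size}
```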
README.md (18 lines changed)
@@ -57,6 +57,9 @@ MAX_TOKENS=2000 # Defaults to 1200
 MAX_HISTORY_SIZE=15 # Defaults to 10
 MAX_CONVERSATION_AGE_MINUTES=120 # Defaults to 180 (3h)
 VOICE_REPLY_WITH_TRANSCRIPT_ONLY=false # Defaults to true
+N_CHOICES=1 # Defaults to 1
+TEMPERATURE=1.0 # Defaults to 1.0
+IMAGE_SIZE="256x256" # Defaults to 512x512
 GROUP_TRIGGER_KEYWORD="@bot" # Defaults to "" (no keyword required)
 TOKEN_PRICE=0.002 # Defaults to 0.002, current price: https://openai.com/pricing
 IMAGE_PRICES="0.016,0.018,0.02" # Defaults to OpenAI Dall-E pricing for sizes 256x256,512x512,1024x1024
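In this block, `TOKEN_PRICE` is a USD price per 1000 tokens and `IMAGE_PRICES` holds one price per DALL·E size, in the order 256x256, 512x512, 1024x1024. A small sketch of the arithmetic these settings imply; the function names are hypothetical, not the bot's own:

```python
# Defaults taken from the README block above; helper names are hypothetical.
TOKEN_PRICE = 0.002                    # USD per 1000 tokens
IMAGE_SIZES = ["256x256", "512x512", "1024x1024"]
IMAGE_PRICES = [0.016, 0.018, 0.02]    # USD per image, matching IMAGE_SIZES by index


def chat_cost(total_tokens: int) -> float:
    """Chat completions are billed per 1000 tokens."""
    return total_tokens / 1000 * TOKEN_PRICE


def image_cost(size: str) -> float:
    """One generated image, priced by its resolution."""
    return IMAGE_PRICES[IMAGE_SIZES.index(size)]


# Example: a 1500-token exchange plus one 512x512 image
print(round(chat_cost(1500) + image_cost("512x512"), 4))  # 0.003 + 0.018 = 0.021
```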
@@ -72,6 +75,9 @@ TRANSCRIPTION_PRICE=0.006 # Defaults to minute price of OpenAI Whisper of 0.006
 * `MAX_HISTORY_SIZE`: Max number of messages to keep in memory, after which the conversation will be summarised to avoid excessive token usage ([#34](https://github.com/n3d1117/chatgpt-telegram-bot/issues/34))
 * `MAX_CONVERSATION_AGE_MINUTES`: Maximum number of minutes a conversation should live, after which the conversation will be reset to avoid excessive token usage
 * `VOICE_REPLY_WITH_TRANSCRIPT_ONLY`: Whether to answer voice messages with the transcript only or with a ChatGPT response to the transcript ([#38](https://github.com/n3d1117/chatgpt-telegram-bot/issues/38))
+* `N_CHOICES`: Number of answers to generate for each input message
+* `TEMPERATURE`: Number between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic
+* `IMAGE_SIZE`: The DALL·E generated image size. Allowed values: "256x256", "512x512", or "1024x1024"
 * `GROUP_TRIGGER_KEYWORD`: If set, the bot will only respond to messages that start with this keyword. This is useful for bots added to groups with privacy mode disabled. **Note**: by default, *no keyword* is required (`""`)
 * `TOKEN_PRICE`: USD price per 1000 tokens for cost information in usage statistics
 
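`N_CHOICES` and `TEMPERATURE` map directly onto the `n` and `temperature` parameters of OpenAI's Chat Completions API. A hedged sketch of that mapping, assuming the pre-1.0 `openai` Python package that bots of this era typically used; this is not necessarily the bot's own request code:

```python
import os

import openai  # pre-1.0 openai package assumed for this sketch

openai.api_key = os.environ["OPENAI_API_KEY"]

response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello!"}],
    n=int(os.environ.get("N_CHOICES", 1)),                  # N_CHOICES
    temperature=float(os.environ.get("TEMPERATURE", 1.0)),  # TEMPERATURE
)

# The first (and, with N_CHOICES=1, only) generated answer
print(response.choices[0].message.content)
```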
@@ -83,23 +89,13 @@ TRANSCRIPTION_PRICE=0.006 # Defaults to minute price of OpenAI Whisper of 0.006
 # 'gpt-3.5-turbo' or 'gpt-3.5-turbo-0301'
 'model': 'gpt-3.5-turbo',
 
-# Number between 0 and 2. Higher values like 0.8 will make the output more random,
-# while lower values like 0.2 will make it more focused and deterministic. Defaults to 1
-'temperature': 1,
-
-# How many answers to generate for each input message. Defaults to 1
-'n_choices': 1,
-
 # Number between -2.0 and 2.0. Positive values penalize new tokens based on whether
 # they appear in the text so far, increasing the model's likelihood to talk about new topics. Defaults to 0
 'presence_penalty': 0,
 
 # Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing
 # frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. Defaults to 0
-'frequency_penalty': 0,
-
-# The DALL·E generated image size. 256x256, 512x512, or 1024x1024. Defaults to 512x512
-'image_size': '512x512'
+'frequency_penalty': 0
 }
 ```
 </details>
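The `image_size` value removed from this block now comes from `IMAGE_SIZE` in the environment; on the API side it corresponds to the `size` parameter of the image generation endpoint. A sketch under the same pre-1.0 `openai` assumption as above; the prompt and variable names are placeholders:

```python
import os

import openai  # pre-1.0 openai package assumed, as in the previous sketch

openai.api_key = os.environ["OPENAI_API_KEY"]
image_size = os.environ.get("IMAGE_SIZE", "512x512")

response = openai.Image.create(
    prompt="a watercolor of a paper plane",  # placeholder prompt
    n=1,              # one image per request
    size=image_size,  # "256x256", "512x512" or "1024x1024"
)

# The API returns a short-lived URL for each generated image
print(response["data"][0]["url"])
```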
main.py (15 lines changed)
@@ -33,27 +33,20 @@ def main():
 'max_conversation_age_minutes': int(os.environ.get('MAX_CONVERSATION_AGE_MINUTES', 180)),
 'assistant_prompt': os.environ.get('ASSISTANT_PROMPT', 'You are a helpful assistant.'),
 'max_tokens': int(os.environ.get('MAX_TOKENS', 1200)),
+'n_choices': int(os.environ.get('N_CHOICES', 1)),
+'temperature': float(os.environ.get('TEMPERATURE', 1.0)),
+'image_size': os.environ.get('IMAGE_SIZE', '512x512'),
 
 # 'gpt-3.5-turbo' or 'gpt-3.5-turbo-0301'
 'model': 'gpt-3.5-turbo',
 
-# Number between 0 and 2. Higher values like 0.8 will make the output more random,
-# while lower values like 0.2 will make it more focused and deterministic.
-'temperature': 1,
-
-# How many chat completion choices to generate for each input message.
-'n_choices': 1,
-
 # Number between -2.0 and 2.0. Positive values penalize new tokens based on whether
 # they appear in the text so far, increasing the model's likelihood to talk about new topics.
 'presence_penalty': 0,
 
 # Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing
 # frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
-'frequency_penalty': 0,
-
-# The DALL·E generated image size
-'image_size': '512x512'
+'frequency_penalty': 0
 }
 
 telegram_config = {
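With `N_CHOICES` greater than 1, the completion response carries several alternative answers; how the bot presents them is not part of this diff, but one way to fold them into a single Telegram reply might look like the following. A sketch under the same pre-1.0 `openai` assumption; the numbering and joining logic is illustrative only:

```python
import os

import openai  # pre-1.0 openai package assumed

openai.api_key = os.environ["OPENAI_API_KEY"]
n_choices = int(os.environ.get("N_CHOICES", 1))

response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Suggest a name for a Telegram bot"}],
    n=n_choices,
    temperature=float(os.environ.get("TEMPERATURE", 1.0)),
)

# One numbered answer per requested choice, joined into a single reply text
reply = "\n\n".join(
    f"{i + 1}. {choice.message.content.strip()}"
    for i, choice in enumerate(response.choices)
)
print(reply)
```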