Use gpt-4 by default for the main thought process

Allow specifying the llm through dotenv
Move more things into config
This commit is contained in:
Taylor Brown
2023-04-02 21:35:28 -05:00
parent 3e587bc7fb
commit 80ccd10d0b
7 changed files with 56 additions and 28 deletions

View File

@@ -1,2 +1,4 @@
OPENAI_API_KEY=your-openai-api-key OPENAI_API_KEY=your-openai-api-key
ELEVENLABS_API_KEY=your-elevenlabs-api-key ELEVENLABS_API_KEY=your-elevenlabs-api-key
SMART_LLM_MODEL="gpt-4"
FAST_LLM_MODEL="gpt-3.5-turbo"

View File

@@ -1,7 +1,5 @@
from typing import List, Optional from typing import List, Optional
import json
import openai import openai
import dirtyjson
from config import Config from config import Config
cfg = Config() cfg = Config()

View File

@@ -2,12 +2,8 @@ import os
import time import time
import openai import openai
from dotenv import load_dotenv from dotenv import load_dotenv
from config import Config
# Load environment variables from .env file cfg = Config()
load_dotenv()
# Initialize the OpenAI API client
openai.api_key = os.getenv("OPENAI_API_KEY")
def create_chat_message(role, content): def create_chat_message(role, content):
@@ -65,8 +61,9 @@ def chat_with_ai(
f"{message['role'].capitalize()}: {message['content']}") f"{message['role'].capitalize()}: {message['content']}")
print("----------- END OF CONTEXT ----------------") print("----------- END OF CONTEXT ----------------")
# TODO: use a model defined elsewhere, so that model can contain temperature and other settings we care about
response = openai.ChatCompletion.create( response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",#model="gpt-4", model=cfg.smart_llm_model,
messages=current_context, messages=current_context,
) )

View File

@@ -1,3 +1,9 @@
import os
from dotenv import load_dotenv
# Load environment variables from .env file
load_dotenv()
class Singleton(type): class Singleton(type):
""" """
Singleton metaclass for ensuring only one instance of a class. Singleton metaclass for ensuring only one instance of a class.
@@ -21,9 +27,25 @@ class Config(metaclass=Singleton):
def __init__(self): def __init__(self):
self.continuous_mode = False self.continuous_mode = False
self.speak_mode = False self.speak_mode = False
self.fast_llm_model = "gpt-3.5-turbo" # TODO - make these models be self-contained, using langchain, so we can configure them once and call it good
self.smart_llm_model = "gpt-3.5-turbo" self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
self.thinking_token_limit = 4000 self.thinking_token_limit = 4000
# Initialize the OpenAI API client
self.openai_api_key = os.getenv("OPENAI_API_KEY")
self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
# Print values:
print("Config values:")
print(f"continuous_mode: {self.continuous_mode}")
print(f"speak_mode: {self.speak_mode}")
print(f"fast_llm_model: {self.fast_llm_model}")
print(f"smart_llm_model: {self.smart_llm_model}")
print(f"thinking_token_limit: {self.thinking_token_limit}")
print(f"openai_api_key: {self.openai_api_key}")
print(f"elevenlabs_api_key: {self.elevenlabs_api_key}")
def set_continuous_mode(self, value: bool): def set_continuous_mode(self, value: bool):
self.continuous_mode = value self.continuous_mode = value
@@ -39,3 +61,12 @@ class Config(metaclass=Singleton):
def set_thinking_token_limit(self, value: int): def set_thinking_token_limit(self, value: int):
self.thinking_token_limit = value self.thinking_token_limit = value
def set_openai_api_key(self, value: str):
        self.openai_api_key = value
def set_elevenlabs_api_key(self, value: str):
self.elevenlabs_api_key = value

View File

@@ -1,7 +1,15 @@
import os
from pathlib import Path
def load_prompt(): def load_prompt():
try: try:
# get directory of this file:
file_dir = Path(os.path.dirname(os.path.realpath(__file__)))
data_dir = file_dir / "data"
prompt_file = data_dir / "prompt.txt"
# Load the prompt from data/prompt.txt # Load the prompt from data/prompt.txt
with open("data/prompt.txt", "r") as prompt_file: with open(prompt_file, "r") as prompt_file:
prompt = prompt_file.read() prompt = prompt_file.read()
return prompt return prompt

View File

@@ -11,17 +11,11 @@ import speak
from enum import Enum, auto from enum import Enum, auto
import sys import sys
from config import Config from config import Config
from dotenv import load_dotenv
from json_parser import fix_and_parse_json from json_parser import fix_and_parse_json
from ai_config import AIConfig from ai_config import AIConfig
import traceback import traceback
import yaml import yaml
# Load environment variables from .env file
load_dotenv()
class Argument(Enum): class Argument(Enum):
CONTINUOUS_MODE = "continuous-mode" CONTINUOUS_MODE = "continuous-mode"
SPEAK_MODE = "speak-mode" SPEAK_MODE = "speak-mode"
@@ -262,11 +256,12 @@ def parse_arguments():
print_to_console("Speak Mode: ", Fore.GREEN, "ENABLED") print_to_console("Speak Mode: ", Fore.GREEN, "ENABLED")
cfg.set_speak_mode(True) cfg.set_speak_mode(True)
# TODO: Better argument parsing: # TODO: Better argument parsing:
# TODO: fill in llm values here # TODO: fill in llm values here
cfg = Config() cfg = Config()
parse_arguments() parse_arguments()
ai_name = "" ai_name = ""
prompt = construct_prompt() prompt = construct_prompt()

View File

@@ -1,20 +1,17 @@
import os import os
from playsound import playsound from playsound import playsound
import requests import requests
from dotenv import load_dotenv from config import Config
cfg = Config()
# Load environment variables from .env file
load_dotenv()
# TODO: Nicer names for these ids
voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"] voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"]
tts_headers = { tts_headers = {
"Content-Type": "application/json", "Content-Type": "application/json",
"xi-api-key": os.getenv("ELEVENLABS_API_KEY") "xi-api-key": cfg.elevenlabs_api_key
} }
def say_text(text, voice_index=0): def say_text(text, voice_index=0):
tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format( tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format(
voice_id=voices[voice_index]) voice_id=voices[voice_index])