diff --git a/.env.template b/.env.template index a598aa7b..cbf0cd9b 100644 --- a/.env.template +++ b/.env.template @@ -1,2 +1,4 @@ OPENAI_API_KEY=your-openai-api-key ELEVENLABS_API_KEY=your-elevenlabs-api-key +SMART_LLM_MODEL="gpt-4" +FAST_LLM_MODEL="gpt-3.5-turbo" \ No newline at end of file diff --git a/scripts/call_ai_function.py b/scripts/call_ai_function.py index 57d833b9..7afb3b5d 100644 --- a/scripts/call_ai_function.py +++ b/scripts/call_ai_function.py @@ -1,7 +1,5 @@ from typing import List, Optional -import json import openai -import dirtyjson from config import Config cfg = Config() diff --git a/scripts/chat.py b/scripts/chat.py index 69764959..d9b75b20 100644 --- a/scripts/chat.py +++ b/scripts/chat.py @@ -2,12 +2,8 @@ import os import time import openai from dotenv import load_dotenv - -# Load environment variables from .env file -load_dotenv() - -# Initialize the OpenAI API client -openai.api_key = os.getenv("OPENAI_API_KEY") +from config import Config +cfg = Config() def create_chat_message(role, content): @@ -65,8 +61,9 @@ def chat_with_ai( f"{message['role'].capitalize()}: {message['content']}") print("----------- END OF CONTEXT ----------------") + # TODO: use a model defined elsewhere, so that model can contain temperature and other settings we care about response = openai.ChatCompletion.create( - model="gpt-3.5-turbo",#model="gpt-4", + model=cfg.smart_llm_model, messages=current_context, ) diff --git a/scripts/config.py b/scripts/config.py index e67c61f0..4c892b8f 100644 --- a/scripts/config.py +++ b/scripts/config.py @@ -1,3 +1,9 @@ +import os + +from dotenv import load_dotenv +# Load environment variables from .env file +load_dotenv() + class Singleton(type): """ Singleton metaclass for ensuring only one instance of a class. 
@@ -21,9 +27,25 @@ class Config(metaclass=Singleton): def __init__(self): self.continuous_mode = False self.speak_mode = False - self.fast_llm_model = "gpt-3.5-turbo" - self.smart_llm_model = "gpt-3.5-turbo" + # TODO - make these models be self-contained, using langchain, so we can configure them once and call it good + self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo") + self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4") self.thinking_token_limit = 4000 + # Initialize the OpenAI API client + self.openai_api_key = os.getenv("OPENAI_API_KEY") + self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY") + # Print values: + print("Config values:") + print(f"continuous_mode: {self.continuous_mode}") + print(f"speak_mode: {self.speak_mode}") + print(f"fast_llm_model: {self.fast_llm_model}") + print(f"smart_llm_model: {self.smart_llm_model}") + print(f"thinking_token_limit: {self.thinking_token_limit}") + print(f"openai_api_key: {self.openai_api_key}") + print(f"elevenlabs_api_key: {self.elevenlabs_api_key}") + + + def set_continuous_mode(self, value: bool): self.continuous_mode = value @@ -39,3 +61,12 @@ class Config(metaclass=Singleton): def set_thinking_token_limit(self, value: int): self.thinking_token_limit = value + + def set_openai_api_key(self, value: str): + self.openai_api_key = value + + def set_elevenlabs_api_key(self, value: str): + self.elevenlabs_api_key = value + + + diff --git a/scripts/data.py b/scripts/data.py index 19473557..0a72cbbf 100644 --- a/scripts/data.py +++ b/scripts/data.py @@ -1,7 +1,15 @@ +import os +from pathlib import Path + + def load_prompt(): try: + # get directory of this file: + file_dir = Path(os.path.dirname(os.path.realpath(__file__))) + data_dir = file_dir / "data" + prompt_file = data_dir / "prompt.txt" # Load the promt from data/prompt.txt - with open("data/prompt.txt", "r") as prompt_file: + with open(prompt_file, "r") as prompt_file: prompt = prompt_file.read() return prompt diff --git 
a/scripts/main.py b/scripts/main.py index 0d4a5648..40f8f4c4 100644 --- a/scripts/main.py +++ b/scripts/main.py @@ -11,17 +11,11 @@ import speak from enum import Enum, auto import sys from config import Config -from dotenv import load_dotenv from json_parser import fix_and_parse_json from ai_config import AIConfig import traceback import yaml - -# Load environment variables from .env file -load_dotenv() - - class Argument(Enum): CONTINUOUS_MODE = "continuous-mode" SPEAK_MODE = "speak-mode" @@ -262,11 +256,12 @@ def parse_arguments(): print_to_console("Speak Mode: ", Fore.GREEN, "ENABLED") cfg.set_speak_mode(True) - # TODO: Better argument parsing: - # TODO: fill in llm values here + + +# TODO: Better argument parsing: +# TODO: fill in llm values here cfg = Config() - parse_arguments() ai_name = "" prompt = construct_prompt() diff --git a/scripts/speak.py b/scripts/speak.py index a0f29fc5..2fbcbed2 100644 --- a/scripts/speak.py +++ b/scripts/speak.py @@ -1,20 +1,17 @@ import os from playsound import playsound import requests -from dotenv import load_dotenv - - -# Load environment variables from .env file -load_dotenv() +from config import Config +cfg = Config() +# TODO: Nicer names for these ids voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"] tts_headers = { "Content-Type": "application/json", - "xi-api-key": os.getenv("ELEVENLABS_API_KEY") + "xi-api-key": cfg.elevenlabs_api_key } - def say_text(text, voice_index=0): tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format( voice_id=voices[voice_index])