diff --git a/scripts/agent_manager.py b/scripts/agent_manager.py
index 9939332b..9ac801f9 100644
--- a/scripts/agent_manager.py
+++ b/scripts/agent_manager.py
@@ -1,4 +1,5 @@
 import openai
+from llm_utils import create_chat_completion
 
 next_key = 0
 agents = {}  # key, (task, full_message_history, model)
@@ -13,13 +14,11 @@ def create_agent(task, prompt, model):
     messages = [{"role": "user", "content": prompt}, ]
 
     # Start GTP3 instance
-    response = openai.ChatCompletion.create(
+    agent_reply = create_chat_completion(
         model=model,
         messages=messages,
     )
 
-    agent_reply = response.choices[0].message["content"]
-
     # Update full message history
     messages.append({"role": "assistant", "content": agent_reply})
 
@@ -42,14 +41,11 @@ def message_agent(key, message):
     messages.append({"role": "user", "content": message})
 
     # Start GTP3 instance
-    response = openai.ChatCompletion.create(
+    agent_reply = create_chat_completion(
         model=model,
         messages=messages,
     )
 
-    # Get agent response
-    agent_reply = response.choices[0].message["content"]
-
     # Update full message history
     messages.append({"role": "assistant", "content": agent_reply})
 
diff --git a/scripts/browse.py b/scripts/browse.py
index 178bb58a..b965d66a 100644
--- a/scripts/browse.py
+++ b/scripts/browse.py
@@ -4,6 +4,7 @@ from bs4 import BeautifulSoup
 from readability import Document
 import openai
 from config import Config
+from llm_utils import create_chat_completion
 
 cfg = Config()
 
@@ -101,13 +102,11 @@ def summarize_text(text, is_website=True):
                 chunk},
         ]
 
-        response = openai.ChatCompletion.create(
+        summary = create_chat_completion(
             model=cfg.fast_llm_model,
             messages=messages,
             max_tokens=300,
         )
-
-        summary = response.choices[0].message.content
         summaries.append(summary)
         print("Summarized " + str(len(chunks)) + " chunks.")
 
@@ -129,11 +128,10 @@ def summarize_text(text, is_website=True):
             combined_summary},
     ]
 
-    response = openai.ChatCompletion.create(
+    final_summary = create_chat_completion(
         model=cfg.fast_llm_model,
         messages=messages,
         max_tokens=300,
     )
-    final_summary = response.choices[0].message.content
 
     return final_summary
diff --git a/scripts/call_ai_function.py b/scripts/call_ai_function.py
index 7afb3b5d..63ee77b9 100644
--- a/scripts/call_ai_function.py
+++ b/scripts/call_ai_function.py
@@ -3,6 +3,8 @@ import openai
 from config import Config
 cfg = Config()
 
+from llm_utils import create_chat_completion
+
 # This is a magic function that can do anything with no-code. See
 # https://github.com/Torantulino/AI-Functions for more info.
 def call_ai_function(function, args, description, model=cfg.smart_llm_model):
@@ -18,8 +20,8 @@ def call_ai_function(function, args, description, model=cfg.smart_llm_model):
         {"role": "user", "content": args},
     ]
 
-    response = openai.ChatCompletion.create(
+    response = create_chat_completion(
         model=model, messages=messages, temperature=0
     )
 
-    return response.choices[0].message["content"]
+    return response
diff --git a/scripts/chat.py b/scripts/chat.py
index d9b75b20..817a5e9d 100644
--- a/scripts/chat.py
+++ b/scripts/chat.py
@@ -5,6 +5,7 @@ from dotenv import load_dotenv
 from config import Config
 cfg = Config()
+from llm_utils import create_chat_completion
 
 
 def create_chat_message(role, content):
     """
@@ -62,13 +63,11 @@ def chat_with_ai(
             print("----------- END OF CONTEXT ----------------")
 
             # TODO: use a model defined elsewhere, so that model can contain temperature and other settings we care about
-            response = openai.ChatCompletion.create(
+            assistant_reply = create_chat_completion(
                 model=cfg.smart_llm_model,
                 messages=current_context,
             )
 
-            assistant_reply = response.choices[0].message["content"]
-
             # Update full message history
             full_message_history.append(
                 create_chat_message(
@@ -79,5 +78,6 @@ def chat_with_ai(
 
             return assistant_reply
         except openai.error.RateLimitError:
+            # TODO: When we switch to langchain, this is built in
             print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
             time.sleep(10)
diff --git a/scripts/llm_utils.py b/scripts/llm_utils.py
new file mode 100644
index 00000000..41f39625
--- /dev/null
+++ b/scripts/llm_utils.py
@@ -0,0 +1,16 @@
+import openai
+from config import Config
+cfg = Config()
+
+openai.api_key = cfg.openai_api_key
+
+# Overly simple abstraction until we create something better
+def create_chat_completion(messages, model=None, temperature=None, max_tokens=None) -> str:
+    response = openai.ChatCompletion.create(
+        model=model,
+        messages=messages,
+        temperature=temperature,
+        max_tokens=max_tokens
+    )
+
+    return response.choices[0].message["content"]
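Reviewer note: the sketch below is not part of the diff; it only illustrates how a call site reads after this refactor. The prompt and model string are placeholders rather than values from this repo's Config, and the retry on RateLimitError simply mirrors the wait-and-retry that chat.py still does at the call site; per the TODO above, that handling could later move into the helper or be replaced by langchain.

```python
import time

import openai

from llm_utils import create_chat_completion

# Hypothetical call site: create_chat_completion() returns the reply text
# directly, so callers no longer unpack response.choices[0].message["content"].
messages = [{"role": "user", "content": "Say hello in one sentence."}]

try:
    reply = create_chat_completion(
        messages=messages,
        model="gpt-3.5-turbo",  # placeholder; callers pass e.g. cfg.fast_llm_model
        max_tokens=300,
    )
except openai.error.RateLimitError:
    # Mirrors the wait-and-retry in chat.py; per the TODO there, this could
    # later move into the helper (or be handled by langchain).
    time.sleep(10)
    reply = create_chat_completion(
        messages=messages,
        model="gpt-3.5-turbo",
        max_tokens=300,
    )

print(reply)
```

Returning just the content string keeps each call site to a single assignment, at the cost of hiding the rest of the response object (e.g. token usage) from callers; the "overly simple abstraction" comment in llm_utils.py flags that trade-off.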