import time

import openai
import keys


def create_chat_message(role, content):
    """Create a single message dict in the OpenAI chat-completion format.

    Args:
        role (str): The role of the message sender ("system", "user", or "assistant").
        content (str): The text content of the message.

    Returns:
        dict: {"role": role, "content": content}
    """
    return {"role": role, "content": content}


def chat_with_ai(prompt, user_input, full_message_history, permanent_memory, token_limit, debug = False):
    """
    Interact with the OpenAI API, sending the prompt, user input, message history, and permanent memory.

    Retries forever on API rate limiting, sleeping 60 seconds between attempts.
    On success, appends the user input and the assistant reply to
    full_message_history (mutated in place).

    Args:
        prompt (str): The prompt explaining the rules to the AI.
        user_input (str): The input from the user.
        full_message_history (list): The list of all messages sent between the user and the AI.
        permanent_memory (list): The list of items in the AI's permanent memory.
        token_limit (int): The maximum number of tokens allowed in the API call.
        debug (bool): When True, print the full context sent to the API.

    Returns:
        str: The AI's response.
    """
    while True:
        try:
            current_context = [
                create_chat_message("system", prompt),
                create_chat_message("system", f"Permanent memory: {permanent_memory}"),
            ]
            # NOTE(review): this slice counts *messages*, not tokens, so it only
            # roughly approximates the token budget — confirm intended behavior.
            current_context.extend(
                full_message_history[-(token_limit - len(prompt) - len(permanent_memory) - 10):]
            )
            current_context.append(create_chat_message("user", user_input))

            # Debug print the current context
            if debug:
                print("------------ CONTEXT SENT TO AI ---------------")
                for message in current_context:
                    # Skip printing the prompt
                    if message["role"] == "system" and message["content"] == prompt:
                        continue
                    print(f"{message['role'].capitalize()}: {message['content']}")
                print("----------- END OF CONTEXT ----------------")

            response = openai.ChatCompletion.create(
                model="gpt-4",
                messages=current_context,
            )

            assistant_reply = response.choices[0].message["content"]

            # Update full message history
            full_message_history.append(create_chat_message("user", user_input))
            full_message_history.append(create_chat_message("assistant", assistant_reply))

            return assistant_reply
        except openai.error.RateLimitError:
            # BUG FIX: the pre-1.0 openai SDK (the one providing
            # openai.ChatCompletion) exposes its exceptions under openai.error;
            # `openai.RateLimitError` does not exist there, so the original
            # except clause raised AttributeError instead of catching the
            # rate-limit error, defeating the retry loop entirely.
            print("Error: ", "API Rate Limit Reached. Waiting 60 seconds...")
            time.sleep(60)