Mirror of https://github.com/aljazceru/Auto-GPT.git (synced 2025-12-17 14:04:27 +01:00)
Consolidate calls to openai
Starting to abstract away the calls to openai
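The pattern is the same at every call site: instead of invoking openai.ChatCompletion.create and unpacking response.choices[0].message itself, each caller now goes through one shared helper that returns the reply text. A minimal before/after sketch, using the names from the hunks below:

# Before: each caller talks to the OpenAI SDK and unpacks the response itself
response = openai.ChatCompletion.create(model=model, messages=messages)
agent_reply = response.choices[0].message["content"]

# After: one shared helper returns the content string directly
agent_reply = create_chat_completion(messages=messages, model=model)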
@@ -1,4 +1,5 @@
 import openai
+from llm_utils import create_chat_completion
 
 next_key = 0
 agents = {} # key, (task, full_message_history, model)
@@ -13,13 +14,11 @@ def create_agent(task, prompt, model):
     messages = [{"role": "user", "content": prompt}, ]
 
     # Start GTP3 instance
-    response = openai.ChatCompletion.create(
+    agent_reply = create_chat_completion(
         model=model,
         messages=messages,
     )
 
-    agent_reply = response.choices[0].message["content"]
-
     # Update full message history
     messages.append({"role": "assistant", "content": agent_reply})
 
@@ -42,14 +41,11 @@ def message_agent(key, message):
     messages.append({"role": "user", "content": message})
 
     # Start GTP3 instance
-    response = openai.ChatCompletion.create(
+    agent_reply = create_chat_completion(
         model=model,
         messages=messages,
    )
 
-    # Get agent response
-    agent_reply = response.choices[0].message["content"]
-
     # Update full message history
     messages.append({"role": "assistant", "content": agent_reply})
 
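For context, the hunks above sit in the module that keeps a small in-memory agent registry; the comment in the first hunk documents its shape. A hypothetical illustration of that shape (register_agent is an illustrative name, not a function from this diff):

# Hypothetical sketch of the registry documented above: each entry maps an
# integer key to (task, full_message_history, model).
agents = {}
next_key = 0

def register_agent(task, full_message_history, model):
    global next_key
    key = next_key
    next_key += 1
    agents[key] = (task, full_message_history, model)
    return key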
@@ -4,6 +4,7 @@ from bs4 import BeautifulSoup
 from readability import Document
 import openai
 from config import Config
+from llm_utils import create_chat_completion
 
 cfg = Config()
 
@@ -101,13 +102,11 @@ def summarize_text(text, is_website=True):
                 chunk},
         ]
 
-        response = openai.ChatCompletion.create(
+        summary = create_chat_completion(
             model=cfg.fast_llm_model,
             messages=messages,
             max_tokens=300,
         )
-
-        summary = response.choices[0].message.content
         summaries.append(summary)
         print("Summarized " + str(len(chunks)) + " chunks.")
 
@@ -129,11 +128,10 @@ def summarize_text(text, is_website=True):
             combined_summary},
     ]
 
-    response = openai.ChatCompletion.create(
+    final_summary = create_chat_completion(
        model=cfg.fast_llm_model,
        messages=messages,
        max_tokens=300,
    )
 
-    final_summary = response.choices[0].message.content
    return final_summary
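summarize_text is a two-pass summarizer: each chunk is summarized with a 300-token cap, then the concatenated summaries are summarized once more. A condensed sketch of that flow on top of the new helper, assuming a chunking step like the one in this file (the real prompts are longer than shown):

# Condensed sketch of the two-pass summarization the hunks above implement.
def summarize_chunks(chunks):
    summaries = []
    for chunk in chunks:
        summaries.append(create_chat_completion(
            messages=[{"role": "user", "content": "Summarize: " + chunk}],
            model=cfg.fast_llm_model,
            max_tokens=300,
        ))
    combined_summary = "\n".join(summaries)
    return create_chat_completion(
        messages=[{"role": "user", "content": "Summarize: " + combined_summary}],
        model=cfg.fast_llm_model,
        max_tokens=300,
    )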
@@ -3,6 +3,8 @@ import openai
 from config import Config
 cfg = Config()
 
+from llm_utils import create_chat_completion
+
 # This is a magic function that can do anything with no-code. See
 # https://github.com/Torantulino/AI-Functions for more info.
 def call_ai_function(function, args, description, model=cfg.smart_llm_model):
@@ -18,8 +20,8 @@ def call_ai_function(function, args, description, model=cfg.smart_llm_model):
         {"role": "user", "content": args},
     ]
 
-    response = openai.ChatCompletion.create(
+    response = create_chat_completion(
         model=model, messages=messages, temperature=0
     )
 
-    return response.choices[0].message["content"]
+    return response
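Note the simplification in the last line: call_ai_function now returns the helper's string directly instead of unpacking a response object. For reference, a hypothetical call in the AI-Functions style the comment links to (the function argument is only a signature; the model acts as its body):

# Hypothetical usage in the AI-Functions style referenced above.
result = call_ai_function(
    function="def improve_code(suggestions: list[str], code: str) -> str:",
    args='["add type hints"], "def add(a, b): return a + b"',
    description="Improves the provided code based on the suggestions.",
)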
@@ -5,6 +5,7 @@ from dotenv import load_dotenv
 from config import Config
 cfg = Config()
 
+from llm_utils import create_chat_completion
 
 def create_chat_message(role, content):
     """
@@ -62,13 +63,11 @@ def chat_with_ai(
             print("----------- END OF CONTEXT ----------------")
 
             # TODO: use a model defined elsewhere, so that model can contain temperature and other settings we care about
-            response = openai.ChatCompletion.create(
+            assistant_reply = create_chat_completion(
                 model=cfg.smart_llm_model,
                 messages=current_context,
             )
 
-            assistant_reply = response.choices[0].message["content"]
-
             # Update full message history
             full_message_history.append(
                 create_chat_message(
@@ -79,5 +78,6 @@ def chat_with_ai(
 
             return assistant_reply
         except openai.error.RateLimitError:
+            # TODO: WHen we switch to langchain, this is built in
             print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
             time.sleep(10)
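The added TODO hints at the natural next step: once one helper owns every OpenAI call, the rate-limit handling above can live inside it rather than being repeated at call sites. A sketch of that follow-up, assuming the same openai.error.RateLimitError and 10-second wait used here (an assumption, not part of this commit):

# Possible follow-up (not in this commit): retry inside the abstraction.
import time
import openai

def create_chat_completion_with_retry(messages, model=None, attempts=3):
    for _ in range(attempts):
        try:
            return create_chat_completion(messages, model=model)
        except openai.error.RateLimitError:
            print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
            time.sleep(10)
    # Final attempt without catching, so the caller sees the error
    return create_chat_completion(messages, model=model)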
scripts/llm_utils.py (new file, 16 lines)
@@ -0,0 +1,16 @@
+import openai
+from config import Config
+cfg = Config()
+
+openai.api_key = cfg.openai_api_key
+
+# Overly simple abstraction until we create something better
+def create_chat_completion(messages, model=None, temperature=None, max_tokens=None)->str:
+    response = openai.ChatCompletion.create(
+        model=model,
+        messages=messages,
+        temperature=temperature,
+        max_tokens=max_tokens
+    )
+
+    return response.choices[0].message["content"]
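With the helper in place, a call site needs a single call and gets back the content string rather than a raw ChatCompletion response. For example (assuming config.py exposes fast_llm_model, as the hunks above use):

from llm_utils import create_chat_completion
from config import Config

cfg = Config()

# Returns the assistant's reply text, not a raw API response object.
reply = create_chat_completion(
    messages=[{"role": "user", "content": "Say hello."}],
    model=cfg.fast_llm_model,
)
print(reply)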