Mirror of https://github.com/aljazceru/Auto-GPT.git, synced 2025-12-17 22:14:28 +01:00
Apply autopep8 formatting to entire codebase
@@ -5,6 +5,7 @@ import keys
 # Initialize the OpenAI API client
 openai.api_key = keys.OPENAI_API_KEY
 
+
 def create_chat_message(role, content):
     """
     Create a chat message with the given role and content.
@@ -18,7 +19,14 @@ def create_chat_message(role, content):
     """
     return {"role": role, "content": content}
 
-def chat_with_ai(prompt, user_input, full_message_history, permanent_memory, token_limit, debug = False):
+
+def chat_with_ai(
+        prompt,
+        user_input,
+        full_message_history,
+        permanent_memory,
+        token_limit,
+        debug=False):
     while True:
         try:
             """
@@ -34,8 +42,12 @@ def chat_with_ai(prompt, user_input, full_message_history, permanent_memory, tok
             Returns:
             str: The AI's response.
             """
-            current_context = [create_chat_message("system", prompt), create_chat_message("system", f"Permanent memory: {permanent_memory}")]
-            current_context.extend(full_message_history[-(token_limit - len(prompt) - len(permanent_memory) - 10):])
+            current_context = [
+                create_chat_message(
+                    "system", prompt), create_chat_message(
+                    "system", f"Permanent memory: {permanent_memory}")]
+            current_context.extend(
+                full_message_history[-(token_limit - len(prompt) - len(permanent_memory) - 10):])
             current_context.extend([create_chat_message("user", user_input)])
 
             # Debug print the current context
@@ -45,7 +57,8 @@ def chat_with_ai(prompt, user_input, full_message_history, permanent_memory, tok
                 # Skip printing the prompt
                 if message["role"] == "system" and message["content"] == prompt:
                     continue
-                print(f"{message['role'].capitalize()}: {message['content']}")
+                print(
+                    f"{message['role'].capitalize()}: {message['content']}")
             print("----------- END OF CONTEXT ----------------")
 
             response = openai.ChatCompletion.create(
@@ -56,8 +69,12 @@ def chat_with_ai(prompt, user_input, full_message_history, permanent_memory, tok
             assistant_reply = response.choices[0].message["content"]
 
             # Update full message history
-            full_message_history.append(create_chat_message("user", user_input))
-            full_message_history.append(create_chat_message("assistant", assistant_reply))
+            full_message_history.append(
+                create_chat_message(
+                    "user", user_input))
+            full_message_history.append(
+                create_chat_message(
+                    "assistant", assistant_reply))
 
             return assistant_reply
         except openai.RateLimitError:
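The commit message records only that autopep8 was applied, not the exact invocation. Below is a minimal sketch, assuming autopep8 is installed (pip install autopep8), of how a reformatting pass like the one above can be reproduced and inspected through autopep8's fix_code API. The target path and the aggressiveness level are illustrative assumptions, not values taken from this commit.

import difflib

import autopep8  # assumption: autopep8 is available in the environment

# Hypothetical target file; the commit touched the whole codebase, so any
# Python source in the repository would do.
path = "scripts/chat.py"

with open(path) as f:
    original = f.read()

# fix_code() returns the reformatted source as a string. The aggressiveness
# level is a guess; higher levels allow autopep8 to rewrite more than pure
# whitespace, including re-wrapping long lines like the calls split above.
formatted = autopep8.fix_code(original, options={"aggressive": 2})

# Render the changes as a unified diff, similar to the hunks shown above.
for line in difflib.unified_diff(
        original.splitlines(keepends=True),
        formatted.splitlines(keepends=True),
        fromfile=f"a/{path}",
        tofile=f"b/{path}"):
    print(line, end="")

An equivalent command-line form would be along the lines of "autopep8 --in-place --recursive ." run from the repository root (--in-place, --recursive, and --aggressive are standard autopep8 flags); whether aggressiveness options were used for this commit is not recorded.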