Mirror of https://github.com/aljazceru/Auto-GPT.git, synced 2025-12-18 14:34:23 +01:00.
Apply autopep8 formatting to entire codebase
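A repository-wide formatting commit like this is normally produced by a single autopep8 run over the source tree. The exact invocation is not recorded in the commit, so the command below is only an assumed sketch: the wrapped call arguments and added blank lines in the diff match autopep8's default E302/E501 fixes, and the `!= None` / `== False` rewrites further down usually require the --aggressive flag.

    # Assumed invocation, not taken from the commit; adjust flags and paths as needed.
    autopep8 --in-place --aggressive --aggressive --recursive scripts/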
@@ -4,11 +4,13 @@ next_key = 0
 agents = {} # key, (task, full_message_history, model)

 # Create new GPT agent
+
+
 def create_agent(task, prompt, model):
     global next_key
     global agents

-    messages = [{"role": "user", "content": prompt},]
+    messages = [{"role": "user", "content": prompt}, ]

     # Start GTP3 instance
     response = openai.ChatCompletion.create(
@@ -22,12 +24,15 @@ def create_agent(task, prompt, model):
     messages.append({"role": "assistant", "content": agent_reply})

     key = next_key
-    next_key += 1 # This is done instead of len(agents) to make keys unique even if agents are deleted
+    # This is done instead of len(agents) to make keys unique even if agents
+    # are deleted
+    next_key += 1

     agents[key] = (task, messages, model)

     return key, agent_reply

+
 def message_agent(key, message):
     global agents

@@ -50,12 +55,14 @@ def message_agent(key, message):

     return agent_reply

+
 def list_agents():
     global agents

     # Return a list of agent keys and their tasks
     return [(key, task) for key, (task, _, _) in agents.items()]

+
 def delete_agent(key):
     global agents

@@ -64,5 +71,3 @@ def delete_agent(key):
         return True
     except KeyError:
         return False
-
-
@@ -2,10 +2,14 @@ from typing import List, Optional
 import json
 import openai

-def call_ai_function(function, args, description, model = "gpt-4"):
+
+def call_ai_function(function, args, description, model="gpt-4"):
     # parse args to comma seperated string
     args = ", ".join(args)
-    messages = [{"role": "system", "content": f"You are now the following python function: ```# {description}\n{function}```\n\nOnly respond with your `return` value."},{"role": "user", "content": args}]
+    messages = [{"role": "system",
+                 "content": f"You are now the following python function: ```# {description}\n{function}```\n\nOnly respond with your `return` value."},
+                {"role": "user",
+                 "content": args}]

     response = openai.ChatCompletion.create(
         model=model,
@@ -15,7 +19,8 @@ def call_ai_function(function, args, description, model = "gpt-4"):

     return response.choices[0].message["content"]

-### Evaluating code
+# Evaluating code
+

 def evaluate_code(code: str) -> List[str]:
     function_string = "def analyze_code(code: str) -> List[str]:"
@@ -26,7 +31,7 @@ def evaluate_code(code: str) -> List[str]:
     return json.loads(result_string)


-### Improving code
+# Improving code

 def improve_code(suggestions: List[str], code: str) -> str:
     function_string = "def generate_improved_code(suggestions: List[str], code: str) -> str:"
@@ -37,7 +42,7 @@ def improve_code(suggestions: List[str], code: str) -> str:
     return result_string


-### Writing tests
+# Writing tests

 def write_tests(code: str, focus: List[str]) -> str:
     function_string = "def create_test_cases(code: str, focus: Optional[str] = None) -> str:"
@@ -1,7 +1,7 @@
 from googlesearch import search
 import requests
 from bs4 import BeautifulSoup
-from readability import Document#
+from readability import Document
 import openai


@@ -24,18 +24,21 @@ def scrape_text(url):

     return text

+
 def extract_hyperlinks(soup):
     hyperlinks = []
     for link in soup.find_all('a', href=True):
         hyperlinks.append((link.text, link['href']))
     return hyperlinks

+
 def format_hyperlinks(hyperlinks):
     formatted_links = []
     for link_text, link_url in hyperlinks:
         formatted_links.append(f"{link_text} ({link_url})")
     return formatted_links

+
 def scrape_links(url):
     response = requests.get(url)

@@ -52,6 +55,7 @@ def scrape_links(url):

     return format_hyperlinks(hyperlinks)

+
 def split_text(text, max_length=8192):
     paragraphs = text.split("\n")
     current_length = 0
@@ -69,7 +73,8 @@ def split_text(text, max_length=8192):
     if current_chunk:
         yield "\n".join(current_chunk)

-def summarize_text(text, is_website = True):
+
+def summarize_text(text, is_website=True):
     if text == "":
         return "Error: No text to summarize"

@@ -78,13 +83,23 @@ def summarize_text(text, is_website = True):
     chunks = list(split_text(text))

     for i, chunk in enumerate(chunks):
-        print("Summarizing chunk " + str(i+1) + " / " + str(len(chunks)))
+        print("Summarizing chunk " + str(i + 1) + " / " + str(len(chunks)))
         if is_website:
-            messages = [{"role": "user", "content": "Please summarize the following website text, do not describe the general website, but instead concisely extract the specifc information this subpage contains.: " + chunk},]
+            messages = [
+                {
+                    "role": "user",
+                    "content": "Please summarize the following website text, do not describe the general website, but instead concisely extract the specifc information this subpage contains.: " +
+                    chunk},
+            ]
         else:
-            messages = [{"role": "user", "content": "Please summarize the following text, focusing on extracting concise and specific information: " + chunk},]
+            messages = [
+                {
+                    "role": "user",
+                    "content": "Please summarize the following text, focusing on extracting concise and specific information: " +
+                    chunk},
+            ]

-        response= openai.ChatCompletion.create(
+        response = openai.ChatCompletion.create(
             model="gpt-3.5-turbo",
             messages=messages,
             max_tokens=300,
@@ -98,9 +113,19 @@ def summarize_text(text, is_website = True):

     # Summarize the combined summary
     if is_website:
-        messages = [{"role": "user", "content": "Please summarize the following website text, do not describe the general website, but instead concisely extract the specifc information this subpage contains.: " + combined_summary},]
+        messages = [
+            {
+                "role": "user",
+                "content": "Please summarize the following website text, do not describe the general website, but instead concisely extract the specifc information this subpage contains.: " +
+                combined_summary},
+        ]
     else:
-        messages = [{"role": "user", "content": "Please summarize the following text, focusing on extracting concise and specific infomation: " + combined_summary},]
+        messages = [
+            {
+                "role": "user",
+                "content": "Please summarize the following text, focusing on extracting concise and specific infomation: " +
+                combined_summary},
+        ]

     response = openai.ChatCompletion.create(
         model="gpt-3.5-turbo",
@@ -5,6 +5,7 @@ import keys
 # Initialize the OpenAI API client
 openai.api_key = keys.OPENAI_API_KEY

+
 def create_chat_message(role, content):
     """
     Create a chat message with the given role and content.
@@ -18,7 +19,14 @@ def create_chat_message(role, content):
     """
     return {"role": role, "content": content}

-def chat_with_ai(prompt, user_input, full_message_history, permanent_memory, token_limit, debug = False):
+
+def chat_with_ai(
+        prompt,
+        user_input,
+        full_message_history,
+        permanent_memory,
+        token_limit,
+        debug=False):
     while True:
         try:
             """
@@ -34,8 +42,12 @@ def chat_with_ai(prompt, user_input, full_message_history, permanent_memory, tok
             Returns:
             str: The AI's response.
             """
-            current_context = [create_chat_message("system", prompt), create_chat_message("system", f"Permanent memory: {permanent_memory}")]
-            current_context.extend(full_message_history[-(token_limit - len(prompt) - len(permanent_memory) - 10):])
+            current_context = [
+                create_chat_message(
+                    "system", prompt), create_chat_message(
+                    "system", f"Permanent memory: {permanent_memory}")]
+            current_context.extend(
+                full_message_history[-(token_limit - len(prompt) - len(permanent_memory) - 10):])
             current_context.extend([create_chat_message("user", user_input)])

             # Debug print the current context
@@ -45,7 +57,8 @@ def chat_with_ai(prompt, user_input, full_message_history, permanent_memory, tok
                     # Skip printing the prompt
                     if message["role"] == "system" and message["content"] == prompt:
                         continue
-                    print(f"{message['role'].capitalize()}: {message['content']}")
+                    print(
+                        f"{message['role'].capitalize()}: {message['content']}")
                 print("----------- END OF CONTEXT ----------------")

             response = openai.ChatCompletion.create(
@@ -56,8 +69,12 @@ def chat_with_ai(prompt, user_input, full_message_history, permanent_memory, tok
             assistant_reply = response.choices[0].message["content"]

             # Update full message history
-            full_message_history.append(create_chat_message("user", user_input))
-            full_message_history.append(create_chat_message("assistant", assistant_reply))
+            full_message_history.append(
+                create_chat_message(
+                    "user", user_input))
+            full_message_history.append(
+                create_chat_message(
+                    "assistant", assistant_reply))

             return assistant_reply
         except openai.RateLimitError:
@@ -28,6 +28,7 @@ def get_command(response):
     except Exception as e:
         return "Error:", str(e)

+
 def execute_command(command_name, arguments):
     try:
         if command_name == "google":
@@ -41,7 +42,10 @@ def execute_command(command_name, arguments):
         elif command_name == "memory_ovr":
             return overwrite_memory(arguments["key"], arguments["string"])
         elif command_name == "start_agent":
-            return start_agent(arguments["name"], arguments["task"], arguments["prompt"])
+            return start_agent(
+                arguments["name"],
+                arguments["task"],
+                arguments["prompt"])
         elif command_name == "message_agent":
             return message_agent(arguments["key"], arguments["message"])
         elif command_name == "list_agents":
@@ -51,7 +55,9 @@ def execute_command(command_name, arguments):
         elif command_name == "navigate_website":
             return navigate_website(arguments["action"], arguments["username"])
         elif command_name == "register_account":
-            return register_account(arguments["username"], arguments["website"])
+            return register_account(
+                arguments["username"],
+                arguments["website"])
         elif command_name == "get_text_summary":
             return get_text_summary(arguments["url"])
         elif command_name == "get_hyperlinks":
@@ -66,6 +72,9 @@ def execute_command(command_name, arguments):
             return delete_file(arguments["file"])
         elif command_name == "browse_website":
             return browse_website(arguments["url"])
+        # TODO: Change these to take in a file rather than pasted code, if
+        # non-file is given, return instructions "Input should be a python
+        # filepath, write your code to file and try again"
        elif command_name == "evaluate_code":
             return ai.evaluate_code(arguments["code"])
         elif command_name == "improve_code":
@@ -82,8 +91,11 @@ def execute_command(command_name, arguments):
     except Exception as e:
         return "Error: " + str(e)

+
 def get_datetime():
-    return "Current date and time: " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+    return "Current date and time: " + \
+        datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

+
 def google_search(query, num_results=8):
     search_results = []
@@ -92,6 +104,7 @@ def google_search(query, num_results=8):

     return json.dumps(search_results, ensure_ascii=False, indent=4)

+
 def browse_website(url):
     summary = get_text_summary(url)
     links = get_hyperlinks(url)
@@ -104,20 +117,24 @@ def browse_website(url):

     return result

+
 def get_text_summary(url):
     text = browse.scrape_text(url)
     summary = browse.summarize_text(text)
     return """ "Result" : """ + summary

+
 def get_hyperlinks(url):
     link_list = browse.scrape_links(url)
     return link_list

+
 def commit_memory(string):
     _text = f"""Committing memory with string "{string}" """
     mem.permanent_memory.append(string)
     return _text

+
 def delete_memory(key):
     if key >= 0 and key < len(mem.permanent_memory):
         _text = "Deleting memory with key " + str(key)
@@ -127,6 +144,8 @@ def delete_memory(key):
     else:
         print("Invalid key, cannot delete memory.")
         return None
+
+
 def overwrite_memory(key, string):
     if key >= 0 and key < len(mem.permanent_memory):
         _text = "Overwriting memory with key " + \
@@ -138,10 +157,12 @@ def overwrite_memory(key, string):
         print("Invalid key, cannot overwrite memory.")
         return None

+
 def shutdown():
     print("Shutting down...")
     quit()

+
 def start_agent(name, task, prompt, model="gpt-3.5-turbo"):
     global cfg

@@ -164,6 +185,7 @@ def start_agent(name, task, prompt, model="gpt-3.5-turbo"):

     return f"Agent {name} created with key {key}. First response: {agent_response}"

+
 def message_agent(key, message):
     global cfg
     agent_response = agents.message_agent(key, message)
@@ -174,12 +196,14 @@ def message_agent(key, message):

     return f"Agent {key} responded: {agent_response}"

+
 def list_agents():
     return agents.list_agents()

+
 def delete_agent(key):
     result = agents.delete_agent(key)
-    if result == False:
+    if not result:
         return f"Agent {key} does not exist."
     return f"Agent {key} deleted."

@@ -189,11 +213,14 @@ def navigate_website(action, username):
     print(_text)
     return "Command not implemented yet."

+
 def register_account(username, website):
-    _text = "Registering account with username " + username + " and website " + website
+    _text = "Registering account with username " + \
+        username + " and website " + website
     print(_text)
     return "Command not implemented yet."

+
 def check_notifications(website):
     _text = "Checking notifications from " + website
     print(_text)
@@ -7,7 +7,9 @@ class Singleton(type):

     def __call__(cls, *args, **kwargs):
         if cls not in cls._instances:
-            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
+            cls._instances[cls] = super(
+                Singleton, cls).__call__(
+                *args, **kwargs)
         return cls._instances[cls]


@@ -1,6 +1,7 @@
 import docker
 import os

+
 def execute_python_file(file):
     workspace_folder = "auto_gpt_workspace"

@@ -16,11 +17,15 @@ def execute_python_file(file):
     client = docker.from_env()

     # You can replace 'python:3.8' with the desired Python image/version
-    # You can find available Python images on Docker Hub: https://hub.docker.com/_/python
+    # You can find available Python images on Docker Hub:
+    # https://hub.docker.com/_/python
     container = client.containers.run(
         'python:3.8',
         f'python {file}',
-        volumes={os.path.abspath(workspace_folder): {'bind': '/workspace', 'mode': 'ro'}},
+        volumes={
+            os.path.abspath(workspace_folder): {
+                'bind': '/workspace',
+                'mode': 'ro'}},
         working_dir='/workspace',
         stderr=True,
         stdout=True,
@@ -7,6 +7,7 @@ working_directory = "auto_gpt_workspace"
 if not os.path.exists(working_directory):
     os.makedirs(working_directory)

+
 def safe_join(base, *paths):
     new_path = os.path.join(base, *paths)
     norm_new_path = os.path.normpath(new_path)
@@ -16,6 +17,7 @@ def safe_join(base, *paths):

     return norm_new_path

+
 def read_file(filename):
     try:
         filepath = safe_join(working_directory, filename)
@@ -25,6 +27,7 @@ def read_file(filename):
     except Exception as e:
         return "Error: " + str(e)

+
 def write_to_file(filename, text):
     try:
         filepath = safe_join(working_directory, filename)
@@ -34,6 +37,7 @@ def write_to_file(filename, text):
     except Exception as e:
         return "Error: " + str(e)

+
 def append_to_file(filename, text):
     try:
         filepath = safe_join(working_directory, filename)
@@ -43,6 +47,7 @@ def append_to_file(filename, text):
     except Exception as e:
         return "Error: " + str(e)

+
 def delete_file(filename):
     try:
         filepath = safe_join(working_directory, filename)
scripts/main.py: 109 changed lines
@@ -12,11 +12,19 @@ from enum import Enum, auto
 import sys
 from config import Config

+
 class Argument(Enum):
     CONTINUOUS_MODE = "continuous-mode"
     SPEAK_MODE = "speak-mode"

-def print_to_console(title, title_color, content, speak_text = False, min_typing_speed=0.05, max_typing_speed=0.01):
+
+def print_to_console(
+        title,
+        title_color,
+        content,
+        speak_text=False,
+        min_typing_speed=0.05,
+        max_typing_speed=0.01):
     global cfg
     if speak_text and cfg.speak_mode:
         speak.say_text(f"{title}. {content}")
@@ -34,6 +42,7 @@ def print_to_console(title, title_color, content, speak_text = False, min_typing
         max_typing_speed = max_typing_speed * 0.95
     print()

+
 def print_assistant_thoughts(assistant_reply):
     global ai_name
     global cfg
@@ -55,8 +64,14 @@ def print_assistant_thoughts(assistant_reply):
         assistant_thoughts_criticism = None
         assistant_thoughts_speak = None

-        print_to_console(f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text)
-        print_to_console("REASONING:", Fore.YELLOW, assistant_thoughts_reasoning)
+        print_to_console(
+            f"{ai_name.upper()} THOUGHTS:",
+            Fore.YELLOW,
+            assistant_thoughts_text)
+        print_to_console(
+            "REASONING:",
+            Fore.YELLOW,
+            assistant_thoughts_reasoning)
         if assistant_thoughts_plan:
             print_to_console("PLAN:", Fore.YELLOW, "")
             if assistant_thoughts_plan:
@@ -64,12 +79,16 @@ def print_assistant_thoughts(assistant_reply):
                 # Split the input_string using the newline character and dash
                 lines = assistant_thoughts_plan.split('\n')

-                # Iterate through the lines and print each one with a bullet point
+                # Iterate through the lines and print each one with a bullet
+                # point
                 for line in lines:
                     # Remove any "-" characters from the start of the line
                     line = line.lstrip("- ")
                     print_to_console("- ", Fore.GREEN, line.strip())
-        print_to_console("CRITICISM:", Fore.YELLOW, assistant_thoughts_criticism)
+        print_to_console(
+            "CRITICISM:",
+            Fore.YELLOW,
+            assistant_thoughts_criticism)

         # Speak the assistant's thoughts
         if cfg.speak_mode and assistant_thoughts_speak:
@@ -81,27 +100,45 @@ def print_assistant_thoughts(assistant_reply):
     except Exception as e:
         print_to_console("Error: \n", Fore.RED, str(e))

+
 def construct_prompt():
     global ai_name
     # Construct the prompt
-    print_to_console("Welcome to Auto-GPT! ", Fore.GREEN, "Enter the name of your AI and its role below. Entering nothing will load defaults.", speak_text=True)
+    print_to_console(
+        "Welcome to Auto-GPT! ",
+        Fore.GREEN,
+        "Enter the name of your AI and its role below. Entering nothing will load defaults.",
+        speak_text=True)

     # Get AI Name from User
-    print_to_console("Name your AI: ", Fore.GREEN, "For example, 'Entrepreneur-GPT'")
+    print_to_console(
+        "Name your AI: ",
+        Fore.GREEN,
+        "For example, 'Entrepreneur-GPT'")
     ai_name = input("AI Name: ")
     if ai_name == "":
         ai_name = "Entrepreneur-GPT"

-    print_to_console(f"{ai_name} here!", Fore.LIGHTBLUE_EX, "I am at your service.", speak_text=True)
+    print_to_console(
+        f"{ai_name} here!",
+        Fore.LIGHTBLUE_EX,
+        "I am at your service.",
+        speak_text=True)

     # Get AI Role from User
-    print_to_console("Describe your AI's role: ", Fore.GREEN, "For example, 'an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.'")
+    print_to_console(
+        "Describe your AI's role: ",
+        Fore.GREEN,
+        "For example, 'an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.'")
     ai_role = input(f"{ai_name} is: ")
     if ai_role == "":
         ai_role = "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth."

     # Enter up to 5 goals for the AI
-    print_to_console("Enter up to 5 goals for your AI: ", Fore.GREEN, "For example: \nIncrease net worth \nGrow Twitter Account \nDevelop and manage multiple businesses autonomously'")
+    print_to_console(
+        "Enter up to 5 goals for your AI: ",
+        Fore.GREEN,
+        "For example: \nIncrease net worth \nGrow Twitter Account \nDevelop and manage multiple businesses autonomously'")
     print("Enter nothing to load defaults, enter nothing when finished.", flush=True)
     ai_goals = []
     for i in range(5):
@@ -110,7 +147,8 @@ def construct_prompt():
             break
         ai_goals.append(ai_goal)
     if len(ai_goals) == 0:
-        ai_goals = ["Increase net worth", "Grow Twitter Account", "Develop and manage multiple businesses autonomously"]
+        ai_goals = ["Increase net worth", "Grow Twitter Account",
+                    "Develop and manage multiple businesses autonomously"]

     prompt = data.load_prompt()
     prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""
@@ -124,6 +162,8 @@ def construct_prompt():
     return full_prompt

 # Check if the python script was executed with arguments, get those arguments
+
+
 def parse_arguments():
     global cfg
     cfg.set_continuous_mode(False)
@@ -131,12 +171,16 @@ def parse_arguments():
     for arg in sys.argv[1:]:
         if arg == Argument.CONTINUOUS_MODE.value:
             print_to_console("Continuous Mode: ", Fore.RED, "ENABLED")
-            print_to_console("WARNING: ", Fore.RED, "Continuous mode is not recommended. It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorise. Use at your own risk.")
+            print_to_console(
+                "WARNING: ",
+                Fore.RED,
+                "Continuous mode is not recommended. It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorise. Use at your own risk.")
             cfg.set_continuous_mode(True)
         elif arg == Argument.SPEAK_MODE.value:
             print_to_console("Speak Mode: ", Fore.GREEN, "ENABLED")
             cfg.set_speak_mode(True)

+
 cfg = Config()

 parse_arguments()
@@ -152,7 +196,12 @@ user_input = "NEXT COMMAND"
 while True:
     # Send message to AI, get response
     with Spinner("Thinking... "):
-        assistant_reply = chat.chat_with_ai(prompt, user_input, full_message_history, mem.permanent_memory, token_limit)
+        assistant_reply = chat.chat_with_ai(
+            prompt,
+            user_input,
+            full_message_history,
+            mem.permanent_memory,
+            token_limit)

     # Print Assistant thoughts
     print_assistant_thoughts(assistant_reply)
@@ -163,13 +212,18 @@ while True:
     except Exception as e:
         print_to_console("Error: \n", Fore.RED, str(e))

-
     if not cfg.continuous_mode:
         ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
-        # Get key press: Prompt the user to press enter to continue or escape to exit
+        # Get key press: Prompt the user to press enter to continue or escape
+        # to exit
         user_input = ""
-        print_to_console("NEXT ACTION: ", Fore.CYAN, f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
-        print("Enter 'y' to authorise command or 'n' to exit program...", flush=True)
+        print_to_console(
+            "NEXT ACTION: ",
+            Fore.CYAN,
+            f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
+        print(
+            "Enter 'y' to authorise command or 'n' to exit program...",
+            flush=True)
         while True:
             console_input = input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)
             if console_input.lower() == "y":
@@ -185,21 +239,30 @@ while True:
                 print("Exiting...", flush=True)
                 break

-        print_to_console("-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=", Fore.MAGENTA, "")
+        print_to_console(
+            "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
+            Fore.MAGENTA,
+            "")
     else:
         # Print command
-        print_to_console("NEXT ACTION: ", Fore.CYAN, f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
+        print_to_console(
+            "NEXT ACTION: ",
+            Fore.CYAN,
+            f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")

     # Exectute command
     if command_name.lower() != "error":
         result = f"Command {command_name} returned: {cmd.execute_command(command_name, arguments)}"
     else:
-        result =f"Command {command_name} threw the following error: " + arguments
+        result = f"Command {command_name} threw the following error: " + arguments

-    # Check if there's a result from the command append it to the message history
-    if result != None:
+    # Check if there's a result from the command append it to the message
+    # history
+    if result is not None:
         full_message_history.append(chat.create_chat_message("system", result))
         print_to_console("SYSTEM: ", Fore.YELLOW, result)
     else:
-        full_message_history.append(chat.create_chat_message("system", "Unable to execute command"))
+        full_message_history.append(
+            chat.create_chat_message(
+                "system", "Unable to execute command"))
         print_to_console("SYSTEM: ", Fore.YELLOW, "Unable to execute command")
@@ -11,8 +11,9 @@ tts_headers = {
 }


-def say_text(text, voice_index = 0):
-    tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format(voice_id=voices[voice_index])
+def say_text(text, voice_index=0):
+    tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format(
+        voice_id=voices[voice_index])

     formatted_message = {"text": text}
     response = requests.post(
@@ -27,4 +28,3 @@ def say_text(text, voice_index = 0):
     else:
         print("Request failed with status code:", response.status_code)
         print("Response content:", response.content)
-
@@ -3,6 +3,7 @@ import threading
 import itertools
 import time

+
 class Spinner:
     def __init__(self, message="Loading...", delay=0.1):
         self.spinner = itertools.cycle(['-', '/', '|', '\\'])