Merge pull request #1 from Torantulino/master

Merging up to latest on Torantulino/Auto-GPT Master
This commit is contained in:
EricFedrowisch
2023-04-04 10:33:38 -05:00
committed by GitHub
10 changed files with 82 additions and 83 deletions

View File

@@ -20,10 +20,11 @@ Your support is greatly appreciated
Development of this free, open-source project is made possible by all the <a href="https://github.com/Torantulino/Auto-GPT/graphs/contributors">contributors</a> and <a href="https://github.com/sponsors/Torantulino">sponsors</a>. If you'd like to sponsor this project and have your avatar or company logo appear below <a href="https://github.com/sponsors/Torantulino">click here</a>. 💖
<p align="center">
<p align="center">
<a href="https://github.com/nocodeclarity"><img src="https://github.com/nocodeclarity.png" width="50px" alt="nocodeclarity" /></a>&nbsp;&nbsp;<a href="https://github.com/tjarmain"><img src="https://github.com/tjarmain.png" width="50px" alt="tjarmain" /></a>&nbsp;&nbsp;<a href="https://github.com/tekelsey"><img src="https://github.com/tekelsey.png" width="50px" alt="tekelsey" /></a>&nbsp;&nbsp;<a href="https://github.com/robinicus"><img src="https://github.com/robinicus.png" width="50px" alt="robinicus" /></a>&nbsp;&nbsp;<a href="https://github.com/digisomni"><img src="https://github.com/digisomni.png" width="50px" alt="digisomni" /></a>&nbsp;&nbsp;
<a href="https://github.com/thepok"><img src="https://github.com/thepok.png" width="50px" alt="thepok" /></a>&nbsp;&nbsp;<a href="https://github.com/SpacingLily"><img src="https://github.com/SpacingLily.png" width="50px" alt="SpacingLily" /></a>&nbsp;&nbsp;<a href="https://github.com/m"><img src="https://github.com/m.png" width="50px" alt="m" /></a>&nbsp;&nbsp;<a href="https://github.com/zkonduit"><img src="https://github.com/zkonduit.png" width="50px" alt="zkonduit" /></a>&nbsp;&nbsp;<a href="https://github.com/maxxflyer"><img src="https://github.com/maxxflyer.png" width="50px" alt="maxxflyer" /></a>&nbsp;&nbsp;<a href="https://github.com/tekelsey"><img src="https://github.com/tekelsey.png" width="50px" alt="tekelsey" /></a>&nbsp;&nbsp;<a href="https://github.com/nocodeclarity"><img src="https://github.com/nocodeclarity.png" width="50px" alt="nocodeclarity" /></a>&nbsp;&nbsp;<a href="https://github.com/tjarmain"><img src="https://github.com/tjarmain.png" width="50px" alt="tjarmain" /></a>&nbsp;&nbsp;<a href="https://github.com/alexisneuhaus"><img src="https://github.com/alexisneuhaus.png" width="50px" alt="alexisneuhaus" /></a>&nbsp;&nbsp;<a href="https://github.com/jaumebalust"><img src="https://github.com/jaumebalust.png" width="50px" alt="jaumebalust" /></a>&nbsp;&nbsp;<a href="https://github.com/robinicus"><img src="https://github.com/robinicus.png" width="50px" alt="robinicus" /></a>&nbsp;&nbsp;<a href="https://github.com/digisomni"><img src="https://github.com/digisomni.png" width="50px" alt="digisomni" /></a>&nbsp;&nbsp;
</p>
<p align="center">
<a href="https://github.com/alexisneuhaus"><img src="https://github.com/alexisneuhaus.png" width="30px" alt="alexisneuhaus" /></a>&nbsp;&nbsp;<a href="https://github.com/iokode"><img src="https://github.com/iokode.png" width="30px" alt="iokode" /></a>&nbsp;&nbsp;<a href="https://github.com/jaumebalust"><img src="https://github.com/jaumebalust.png" width="30px" alt="jaumebalust" /></a>&nbsp;&nbsp;
<a href="https://github.com/alexisneuhaus"><img src="https://github.com/alexisneuhaus.png" width="30px" alt="alexisneuhaus" /></a>&nbsp;&nbsp;<a href="https://github.com/iokode"><img src="https://github.com/iokode.png" width="30px" alt="iokode" /></a>&nbsp;&nbsp;<a href="https://github.com/jaumebalust"><img src="https://github.com/jaumebalust.png" width="30px" alt="jaumebalust" /></a>&nbsp;&nbsp;<a href="https://github.com/nova-land"><img src="https://github.com/nova-land.png" width="30px" alt="nova-land" /></a>&nbsp;&nbsp;<a href="https://github.com/robinicus"><img src="https://github.com/robinicus.png" width="30px" alt="robinicus" /></a>&nbsp;&nbsp;<a href="https://github.com/Void-n-Null"><img src="https://github.com/Void-n-Null.png" width="30px" alt="Void-n-Null" /></a>&nbsp;&nbsp;<a href="https://github.com/ritesh24"><img src="https://github.com/ritesh24.png" width="30px" alt="ritesh24" /></a>&nbsp;&nbsp;<a href="https://github.com/merwanehamadi"><img src="https://github.com/merwanehamadi.png" width="30px" alt="merwanehamadi" /></a>&nbsp;&nbsp;<a href="https://github.com/raulmarindev"><img src="https://github.com/raulmarindev.png" width="30px" alt="raulmarindev" /></a>&nbsp;&nbsp;<a href="https://github.com/siduppal"><img src="https://github.com/siduppal.png" width="30px" alt="siduppal" /></a>&nbsp;&nbsp;<a href="https://github.com/goosecubedaddy"><img src="https://github.com/goosecubedaddy.png" width="30px" alt="goosecubedaddy" /></a>&nbsp;&nbsp;<a href="https://github.com/pleabargain"><img src="https://github.com/pleabargain.png" width="30px" alt="pleabargain" /></a>&nbsp;&nbsp;
</p>

View File

@@ -1,14 +0,0 @@
# I wasn't having any luck installing the requirements.txt file in Mac or Linux
# But this seems to work.
# The biggest difference is docker 5 instead of 6, because of this silliness:
#
# The conflict is caused by:
# The user requested requests>=2.26.0
# docker 6.0.1 depends on requests>=2.26.0
# googlesearch-python 1.1.0 depends on requests==2.25.1
docker==5.0.3
# I'd love to fix this in a cleaner way
# Now go ahead and install the rest of what requirements.txt says:
-r requirements.txt

View File

@@ -7,7 +7,7 @@ pyyaml==6.0
readability-lxml==0.8.1
requests
tiktoken==0.3.3
gTTS==2.3.1
docker
googlesearch-python
duckduckgo-search
google-api-python-client #(https://developers.google.com/custom-search/v1/overview)
# Googlesearch python seems to be a bit cursed, anyone good at fixing things like this?

View File

@@ -74,30 +74,25 @@ def split_text(text, max_length=8192):
yield "\n".join(current_chunk)
def summarize_text(text, is_website=True):
if text == "":
def create_message(chunk, question):
return {
"role": "user",
"content": f"\"\"\"{chunk}\"\"\" Using the above text, please answer the following question: \"{question}\" -- if the question cannot be answered using the text, please summarize the text."
}
def summarize_text(text, question):
if not text:
return "Error: No text to summarize"
print("Text length: " + str(len(text)) + " characters")
text_length = len(text)
print(f"Text length: {text_length} characters")
summaries = []
chunks = list(split_text(text))
for i, chunk in enumerate(chunks):
print("Summarizing chunk " + str(i + 1) + " / " + str(len(chunks)))
if is_website:
messages = [
{
"role": "user",
"content": "Please summarize the following website text, do not describe the general website, but instead concisely extract the specific information this subpage contains.: " +
chunk},
]
else:
messages = [
{
"role": "user",
"content": "Please summarize the following text, focusing on extracting concise and specific information: " +
chunk},
]
print(f"Summarizing chunk {i + 1} / {len(chunks)}")
messages = [create_message(chunk, question)]
summary = create_chat_completion(
model=cfg.fast_llm_model,
@@ -105,25 +100,11 @@ def summarize_text(text, is_website=True):
max_tokens=300,
)
summaries.append(summary)
print("Summarized " + str(len(chunks)) + " chunks.")
print(f"Summarized {len(chunks)} chunks.")
combined_summary = "\n".join(summaries)
# Summarize the combined summary
if is_website:
messages = [
{
"role": "user",
"content": "Please summarize the following website text, do not describe the general website, but instead concisely extract the specific information this subpage contains.: " +
combined_summary},
]
else:
messages = [
{
"role": "user",
"content": "Please summarize the following text, focusing on extracting concise and specific infomation: " +
combined_summary},
]
messages = [create_message(combined_summary, question)]
final_summary = create_chat_completion(
model=cfg.fast_llm_model,
@@ -131,4 +112,4 @@ def summarize_text(text, is_website=True):
max_tokens=300,
)
return final_summary
return final_summary

View File

@@ -9,7 +9,7 @@ import ai_functions as ai
from file_operations import read_file, write_to_file, append_to_file, delete_file
from execute_code import execute_python_file
from json_parser import fix_and_parse_json
from googlesearch import search
from duckduckgo_search import ddg
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
@@ -72,7 +72,7 @@ def execute_command(command_name, arguments):
elif command_name == "delete_agent":
return delete_agent(arguments["key"])
elif command_name == "get_text_summary":
return get_text_summary(arguments["url"])
return get_text_summary(arguments["url"], arguments["question"])
elif command_name == "get_hyperlinks":
return get_hyperlinks(arguments["url"])
elif command_name == "read_file":
@@ -84,7 +84,7 @@ def execute_command(command_name, arguments):
elif command_name == "delete_file":
return delete_file(arguments["file"])
elif command_name == "browse_website":
return browse_website(arguments["url"])
return browse_website(arguments["url"], arguments["question"])
# TODO: Change these to take in a file rather than pasted code, if
# non-file is given, return instructions "Input should be a python
# filepath, write your code to file and try again"
@@ -112,7 +112,7 @@ def get_datetime():
def google_search(query, num_results=8):
search_results = []
for j in search(query, num_results=num_results):
for j in ddg(query, max_results=num_results):
search_results.append(j)
return json.dumps(search_results, ensure_ascii=False, indent=4)
@@ -152,8 +152,8 @@ def google_official_search(query, num_results=8):
# Return the list of search result URLs
return search_results_links
def browse_website(url):
summary = get_text_summary(url)
def browse_website(url, question):
summary = get_text_summary(url, question)
links = get_hyperlinks(url)
# Limit links to 5
@@ -165,9 +165,9 @@ def browse_website(url):
return result
def get_text_summary(url):
def get_text_summary(url, question):
text = browse.scrape_text(url)
summary = browse.summarize_text(text)
summary = browse.summarize_text(text, question)
return """ "Result" : """ + summary

View File

@@ -61,7 +61,7 @@ class Config(metaclass=Singleton):
self.smart_token_limit = value
def set_openai_api_key(self, value: str):
self.apiopenai_api_key_key = value
self.openai_api_key = value
def set_elevenlabs_api_key(self, value: str):
self.elevenlabs_api_key = value

View File

@@ -1,6 +1,6 @@
import os
from pathlib import Path
SRC_DIR = Path(__file__).parent
def load_prompt():
try:
@@ -9,7 +9,7 @@ def load_prompt():
data_dir = file_dir / "data"
prompt_file = data_dir / "prompt.txt"
# Load the prompt from data/prompt.txt
with open(prompt_file, "r") as prompt_file:
with open(SRC_DIR/ "data/prompt.txt", "r") as prompt_file:
prompt = prompt_file.read()
return prompt

View File

@@ -1,7 +1,8 @@
CONSTRAINTS:
1. ~4000 word limit for memory. Your memory is short, so immidiately save important information to long term memory and code to files.
1. ~4000 word limit for memory. Your memory is short, so immediately save important information to long term memory and code to files.
2. No user assistance
3. Exclusively use the commands listed in double quotes e.g. "command name"
COMMANDS:
@@ -9,7 +10,7 @@ COMMANDS:
2. Memory Add: "memory_add", args: "string": "<string>"
3. Memory Delete: "memory_del", args: "key": "<key>"
4. Memory Overwrite: "memory_ovr", args: "key": "<key>", "string": "<string>"
5. Browse Website: "browse_website", args: "url": "<url>"
5. Browse Website: "browse_website", args: "url": "<url>", "question": "<what_you_want_to_find_on_website>"
6. Start GPT Agent: "start_agent", args: "name": <name>, "task": "<short_task_desc>", "prompt": "<prompt>"
7. Message GPT Agent: "message_agent", args: "key": "<key>", "message": "<message>"
8. List GPT Agents: "list_agents", args: ""
@@ -34,9 +35,9 @@ RESOURCES:
PERFORMANCE EVALUATION:
1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.
2. Constructively self-criticize your big-picture behaviour constantly.
2. Constructively self-criticize your big-picture behavior constantly.
3. Reflect on past decisions and strategies to refine your approach.
4. Every command has a cost, so be smart and efficent. Aim to complete tasks in the least number of steps.
4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.
You should only respond in JSON format as described below
@@ -58,4 +59,4 @@ RESPONSE FORMAT:
}
}
Ensure the response can be parsed by Python json.loads
Ensure the response can be parsed by Python json.loads

View File

@@ -52,6 +52,14 @@ def print_assistant_thoughts(assistant_reply):
# Parse and print Assistant response
assistant_reply_json = fix_and_parse_json(assistant_reply)
# Check if assistant_reply_json is a string and attempt to parse it into a JSON object
if isinstance(assistant_reply_json, str):
try:
assistant_reply_json = json.loads(assistant_reply_json)
except json.JSONDecodeError as e:
print_to_console("Error: Invalid JSON\n", Fore.RED, assistant_reply)
assistant_reply_json = {}
assistant_thoughts_reasoning = None
assistant_thoughts_plan = None
assistant_thoughts_speak = None
@@ -303,7 +311,7 @@ while True:
Fore.CYAN,
f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
print(
"Enter 'y' to authorise command or 'n' to exit program...",
f"Enter 'y' to authorise command or 'n' to exit program, or enter feedback for {ai_name}...",
flush=True)
while True:
console_input = input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)
@@ -314,16 +322,18 @@ while True:
user_input = "EXIT"
break
else:
continue
user_input = console_input
command_name = "human_feedback"
break
if user_input != "GENERATE NEXT COMMAND JSON":
print("Exiting...", flush=True)
break
print_to_console(
if user_input == "GENERATE NEXT COMMAND JSON":
print_to_console(
"-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
Fore.MAGENTA,
"")
elif user_input == "EXIT":
print("Exiting...", flush=True)
break
else:
# Print command
print_to_console(
@@ -332,10 +342,12 @@ while True:
f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
# Execute command
if command_name.lower() != "error":
result = f"Command {command_name} returned: {cmd.execute_command(command_name, arguments)}"
else:
if command_name.lower() == "error":
result = f"Command {command_name} threw the following error: " + arguments
elif command_name == "human_feedback":
result = f"Human feedback: {user_input}"
else:
result = f"Command {command_name} returned: {cmd.execute_command(command_name, arguments)}"
# Check if there's a result from the command append it to the message
# history
@@ -347,3 +359,4 @@ while True:
chat.create_chat_message(
"system", "Unable to execute command"))
print_to_console("SYSTEM: ", Fore.YELLOW, "Unable to execute command")

View File

@@ -3,6 +3,8 @@ from playsound import playsound
import requests
from config import Config
cfg = Config()
import gtts
# TODO: Nicer names for these ids
voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"]
@@ -12,10 +14,9 @@ tts_headers = {
"xi-api-key": cfg.elevenlabs_api_key
}
def say_text(text, voice_index=0):
def eleven_labs_speech(text, voice_index=0):
tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format(
voice_id=voices[voice_index])
formatted_message = {"text": text}
response = requests.post(
tts_url, headers=tts_headers, json=formatted_message)
@@ -24,8 +25,24 @@ def say_text(text, voice_index=0):
with open("speech.mpeg", "wb") as f:
f.write(response.content)
playsound("speech.mpeg")
# Delete audio file
os.remove("speech.mpeg")
return True
else:
print("Request failed with status code:", response.status_code)
print("Response content:", response.content)
return False
def gtts_speech(text):
tts = gtts.gTTS(text)
tts.save("speech.mp3")
playsound("speech.mp3")
os.remove("speech.mp3")
def say_text(text, voice_index=0):
if not cfg.elevenlabs_api_key:
gtts_speech(text)
else:
success = eleven_labs_speech(text)
if not success:
gtts_speech(text)