From f8087105282e311200f70659c3c6db3fa944864c Mon Sep 17 00:00:00 2001
From: Taylor Brown
Date: Sun, 2 Apr 2023 14:48:35 -0500
Subject: [PATCH 01/57] Attempt to improve JSON handling in GPT-3
---
scripts/chat.py | 4 ++--
scripts/main.py | 19 +++++++++++++------
2 files changed, 15 insertions(+), 8 deletions(-)
diff --git a/scripts/chat.py b/scripts/chat.py
index a406812d..d23d53be 100644
--- a/scripts/chat.py
+++ b/scripts/chat.py
@@ -26,7 +26,7 @@ def chat_with_ai(
full_message_history,
permanent_memory,
token_limit,
- debug=False):
+ debug=True):
while True:
try:
"""
@@ -62,7 +62,7 @@ def chat_with_ai(
print("----------- END OF CONTEXT ----------------")
response = openai.ChatCompletion.create(
- model="gpt-4",
+ model="gpt-3.5-turbo",#model="gpt-4",
messages=current_context,
)
diff --git a/scripts/main.py b/scripts/main.py
index d51ad0fc..36866a9c 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -17,6 +17,9 @@ class Argument(Enum):
CONTINUOUS_MODE = "continuous-mode"
SPEAK_MODE = "speak-mode"
+# normally 6000 for gpt-4
+TOKEN_LIMIT=4000
+
def print_to_console(
title,
@@ -30,6 +33,8 @@ def print_to_console(
speak.say_text(f"{title}. {content}")
print(title_color + title + " " + Style.RESET_ALL, end="")
if content:
+ if isinstance(content, list):
+ content = " ".join(content)
words = content.split()
for i, word in enumerate(words):
print(word, end="", flush=True)
@@ -138,7 +143,7 @@ def construct_prompt():
print_to_console(
"Enter up to 5 goals for your AI: ",
Fore.GREEN,
- "For example: \nIncrease net worth \nGrow Twitter Account \nDevelop and manage multiple businesses autonomously'")
+ "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'")
print("Enter nothing to load defaults, enter nothing when finished.", flush=True)
ai_goals = []
for i in range(5):
@@ -186,11 +191,12 @@ cfg = Config()
parse_arguments()
ai_name = ""
prompt = construct_prompt()
+print(prompt)
# Initialize variables
full_message_history = []
-token_limit = 6000 # The maximum number of tokens allowed in the API call
+token_limit = TOKEN_LIMIT # The maximum number of tokens allowed in the API call
result = None
-user_input = "NEXT COMMAND"
+user_input = "GENERATE NEXT COMMAND JSON"
# Interaction Loop
while True:
@@ -203,6 +209,7 @@ while True:
mem.permanent_memory,
token_limit)
+ print("assistant reply: "+assistant_reply)
# Print Assistant thoughts
print_assistant_thoughts(assistant_reply)
@@ -227,7 +234,7 @@ while True:
while True:
console_input = input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)
if console_input.lower() == "y":
- user_input = "NEXT COMMAND"
+ user_input = "GENERATE NEXT COMMAND JSON"
break
elif console_input.lower() == "n":
user_input = "EXIT"
@@ -235,7 +242,7 @@ while True:
else:
continue
- if user_input != "NEXT COMMAND":
+ if user_input != "GENERATE NEXT COMMAND JSON":
print("Exiting...", flush=True)
break
@@ -250,7 +257,7 @@ while True:
Fore.CYAN,
f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
- # Exectute command
+ # Execute command
if command_name.lower() != "error":
result = f"Command {command_name} returned: {cmd.execute_command(command_name, arguments)}"
else:
From 9ff7e5954b19db2f69708c2f6c949f033c851bf2 Mon Sep 17 00:00:00 2001
From: Taylor Brown
Date: Sun, 2 Apr 2023 16:38:05 -0500
Subject: [PATCH 02/57] Make compatible with gpt-3.5
I made the JSON parsing more forgiving. I improved the prompt, using things I learned from: Koobah/Auto-GPT
---
scripts/ai_config.py | 56 ++++++++++++++++++
scripts/ai_functions.py | 34 ++++++++++-
scripts/data/prompt.txt | 36 ++++++------
scripts/main.py | 122 ++++++++++++++++++++++++++++++++--------
tests/json_tests.py | 34 +++++++++++
5 files changed, 240 insertions(+), 42 deletions(-)
create mode 100644 scripts/ai_config.py
create mode 100644 tests/json_tests.py
diff --git a/scripts/ai_config.py b/scripts/ai_config.py
new file mode 100644
index 00000000..e35de06d
--- /dev/null
+++ b/scripts/ai_config.py
@@ -0,0 +1,56 @@
+import yaml
+import data
+
+class AIConfig:
+ def __init__(self, ai_name="", ai_role="", ai_goals=[]):
+ self.ai_name = ai_name
+ self.ai_role = ai_role
+ self.ai_goals = ai_goals
+
+ # @classmethod
+ # def create_from_user_prompts(cls):
+ # ai_name = input("Name your AI: ") or "Entrepreneur-GPT"
+ # ai_role = input(f"{ai_name} is: ") or "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth."
+ # print("Enter up to 5 goals for your AI: ")
+ # print("For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'")
+ # print("Enter nothing to load defaults, enter nothing when finished.")
+ # ai_goals = []
+ # for i in range(5):
+ # ai_goal = input(f"Goal {i+1}: ")
+ # if ai_goal == "":
+ # break
+ # ai_goals.append(ai_goal)
+ # if len(ai_goals) == 0:
+ # ai_goals = ["Increase net worth", "Grow Twitter Account", "Develop and manage multiple businesses autonomously"]
+ # return cls(ai_name, ai_role, ai_goals)
+
+ @classmethod
+ def load(cls, config_file="config.yaml"):
+ # Load variables from yaml file if it exists
+ try:
+ with open(config_file) as file:
+ config_params = yaml.load(file, Loader=yaml.FullLoader)
+ except FileNotFoundError:
+ config_params = {}
+
+ ai_name = config_params.get("ai_name", "")
+ ai_role = config_params.get("ai_role", "")
+ ai_goals = config_params.get("ai_goals", [])
+
+ return cls(ai_name, ai_role, ai_goals)
+
+ def save(self, config_file="config.yaml"):
+ config = {"ai_name": self.ai_name, "ai_role": self.ai_role, "ai_goals": self.ai_goals}
+ with open(config_file, "w") as file:
+ documents = yaml.dump(config, file)
+
+ def construct_full_prompt(self):
+ prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""
+
+ # Construct full prompt
+ full_prompt = f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
+ for i, goal in enumerate(self.ai_goals):
+ full_prompt += f"{i+1}. {goal}\n"
+
+ full_prompt += f"\n\n{data.load_prompt()}"
+ return full_prompt
diff --git a/scripts/ai_functions.py b/scripts/ai_functions.py
index b79c4dbc..a69daa8f 100644
--- a/scripts/ai_functions.py
+++ b/scripts/ai_functions.py
@@ -1,11 +1,16 @@
from typing import List, Optional
import json
import openai
+import dirtyjson
+from config import Config
+cfg = Config()
# This is a magic function that can do anything with no-code. See
# https://github.com/Torantulino/AI-Functions for more info.
-def call_ai_function(function, args, description, model="gpt-4"):
+def call_ai_function(function, args, description, model=cfg.smart_llm_model):
+ # For each arg, if any are None, convert to "None":
+ args = [str(arg) if arg is not None else "None" for arg in args]
# parse args to comma seperated string
args = ", ".join(args)
messages = [
@@ -61,3 +66,30 @@ def write_tests(code: str, focus: List[str]) -> str:
result_string = call_ai_function(function_string, args, description_string)
return result_string
+
+
+# TODO: Make debug a global config var
+def fix_json(json_str: str, schema:str = None, debug=True) -> str:
+ # Try to fix the JSON using gpt:
+ function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
+ args = [json_str, schema]
+ description_string = """Fixes the provided JSON string to make it parseable. If the schema is provided, the JSON will be made to look like the schema, otherwise it will be made to look like a valid JSON object."""
+
+ result_string = call_ai_function(
+ function_string, args, description_string, model=cfg.fast_llm_model
+ )
+ if debug:
+ print("------------ JSON FIX ATTEMPT ---------------")
+ print(f"Original JSON: {json_str}")
+ print(f"Fixed JSON: {result_string}")
+ print("----------- END OF FIX ATTEMPT ----------------")
+ try:
+ return dirtyjson.loads(result_string)
+ except:
+ # Log the exception:
+ print("Failed to fix JSON")
+ # Get the call stack:
+ import traceback
+ call_stack = traceback.format_exc()
+ print(call_stack)
+ return {}
\ No newline at end of file
diff --git a/scripts/data/prompt.txt b/scripts/data/prompt.txt
index 3cdc0300..60e02ca3 100644
--- a/scripts/data/prompt.txt
+++ b/scripts/data/prompt.txt
@@ -1,6 +1,6 @@
CONSTRAINTS:
-1. 6000-word count limit for memory
+1. 4000-word count limit for memory
2. No user assistance
COMMANDS:
@@ -38,22 +38,24 @@ PERFORMANCE EVALUATION:
3. Reflect on past decisions and strategies to refine your approach.
4. Every command has a cost, so be smart and efficent. Aim to complete tasks in the least number of steps.
+You should only respond in JSON format as described below
+
RESPONSE FORMAT:
{
-"command":
-{
-"name": "command name",
-"args":
-{
-"arg name": "value"
+ "command": {
+ "name": "command name",
+ "args":{
+ "arg name": "value"
+ }
+ },
+ "thoughts":
+ {
+ "text": "thought",
+ "reasoning": "reasoning",
+ "plan": "- short bulleted\n- list that conveys\n- long-term plan",
+ "criticism": "constructive self-criticism"
+ "speak": "thoughts summary to say to user"
+ }
}
-},
-"thoughts":
-{
-"text": "thought",
-"reasoning": "reasoning",
-"plan": "short bulleted long-term plan",
-"criticism": "constructive self-criticism"
-"speak": "thoughts summary to say to user"
-}
-}
\ No newline at end of file
+
+Ensure the response can be parsed by Python json.loads
\ No newline at end of file
diff --git a/scripts/main.py b/scripts/main.py
index 36866a9c..86effcc2 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -11,16 +11,15 @@ import speak
from enum import Enum, auto
import sys
from config import Config
-
+from json_parser import fix_and_parse_json
+from ai_config import AIConfig
+import traceback
+import yaml
class Argument(Enum):
CONTINUOUS_MODE = "continuous-mode"
SPEAK_MODE = "speak-mode"
-# normally 6000 for gpt-4
-TOKEN_LIMIT=4000
-
-
def print_to_console(
title,
title_color,
@@ -53,7 +52,7 @@ def print_assistant_thoughts(assistant_reply):
global cfg
try:
# Parse and print Assistant response
- assistant_reply_json = json.loads(assistant_reply)
+ assistant_reply_json = fix_and_parse_json(assistant_reply)
assistant_thoughts = assistant_reply_json.get("thoughts")
if assistant_thoughts:
@@ -80,8 +79,13 @@ def print_assistant_thoughts(assistant_reply):
if assistant_thoughts_plan:
print_to_console("PLAN:", Fore.YELLOW, "")
if assistant_thoughts_plan:
-
- # Split the input_string using the newline character and dash
+ # If it's a list, join it into a string
+ if isinstance(assistant_thoughts_plan, list):
+ assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
+ elif isinstance(assistant_thoughts_plan, dict):
+ assistant_thoughts_plan = str(assistant_thoughts_plan)
+ # Split the input_string using the newline character and dash
+
lines = assistant_thoughts_plan.split('\n')
# Iterate through the lines and print each one with a bullet
@@ -103,11 +107,89 @@ def print_assistant_thoughts(assistant_reply):
print_to_console("Error: Invalid JSON\n", Fore.RED, assistant_reply)
# All other errors, return "Error: + error message"
except Exception as e:
- print_to_console("Error: \n", Fore.RED, str(e))
+ call_stack = traceback.format_exc()
+ print_to_console("Error: \n", Fore.RED, call_stack)
+
+def load_variables(config_file="config.yaml"):
+ # Load variables from yaml file if it exists
+ try:
+ with open(config_file) as file:
+ config = yaml.load(file, Loader=yaml.FullLoader)
+ ai_name = config.get("ai_name")
+ ai_role = config.get("ai_role")
+ ai_goals = config.get("ai_goals")
+ except FileNotFoundError:
+ ai_name = ""
+ ai_role = ""
+ ai_goals = []
+
+ # Prompt the user for input if config file is missing or empty values
+ if not ai_name:
+ ai_name = input("Name your AI: ")
+ if ai_name == "":
+ ai_name = "Entrepreneur-GPT"
+
+ if not ai_role:
+ ai_role = input(f"{ai_name} is: ")
+ if ai_role == "":
+ ai_role = "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth."
+
+ if not ai_goals:
+ print("Enter up to 5 goals for your AI: ")
+ print("For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'")
+ print("Enter nothing to load defaults, enter nothing when finished.")
+ ai_goals = []
+ for i in range(5):
+ ai_goal = input(f"Goal {i+1}: ")
+ if ai_goal == "":
+ break
+ ai_goals.append(ai_goal)
+ if len(ai_goals) == 0:
+ ai_goals = ["Increase net worth", "Grow Twitter Account", "Develop and manage multiple businesses autonomously"]
+
+ # Save variables to yaml file
+ config = {"ai_name": ai_name, "ai_role": ai_role, "ai_goals": ai_goals}
+ with open(config_file, "w") as file:
+ documents = yaml.dump(config, file)
+
+ prompt = data.load_prompt()
+ prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""
+
+ # Construct full prompt
+ full_prompt = f"You are {ai_name}, {ai_role}\n{prompt_start}\n\nGOALS:\n\n"
+ for i, goal in enumerate(ai_goals):
+ full_prompt += f"{i+1}. {goal}\n"
+
+ full_prompt += f"\n\n{prompt}"
+ return full_prompt
def construct_prompt():
+ config = AIConfig.load()
+ if config.ai_name:
+ print_to_console(
+ f"Welcome back, {config.ai_name}!",
+ Fore.GREEN,
+ "Let's continue our journey.",
+ speak_text=True)
+ should_continue = input(f"Continue with the last settings? (Settings: {config.ai_name}, {config.ai_role}, {config.ai_goals}) (Y/n): ")
+ if should_continue.lower() == "n":
+ config = AIConfig()
+
+ if not config.ai_name:
+ config = prompt_user()
+ config.save()
+
+ # Get rid of this global:
global ai_name
+ ai_name = config.ai_name
+
+ full_prompt = config.construct_full_prompt()
+ return full_prompt
+
+
+def prompt_user():
+ ai_name = ""
# Construct the prompt
print_to_console(
"Welcome to Auto-GPT! ",
@@ -155,19 +237,8 @@ def construct_prompt():
ai_goals = ["Increase net worth", "Grow Twitter Account",
"Develop and manage multiple businesses autonomously"]
- prompt = data.load_prompt()
- prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""
-
- # Construct full prompt
- full_prompt = f"You are {ai_name}, {ai_role}\n{prompt_start}\n\nGOALS:\n\n"
- for i, goal in enumerate(ai_goals):
- full_prompt += f"{i+1}. {goal}\n"
-
- full_prompt += f"\n\n{prompt}"
- return full_prompt
-
-# Check if the python script was executed with arguments, get those arguments
-
+ config = AIConfig(ai_name, ai_role, ai_goals)
+ return config
def parse_arguments():
global cfg
@@ -185,6 +256,8 @@ def parse_arguments():
print_to_console("Speak Mode: ", Fore.GREEN, "ENABLED")
cfg.set_speak_mode(True)
+ # TODO: Better argument parsing:
+ # TODO: fill in llm values here
cfg = Config()
@@ -194,9 +267,10 @@ prompt = construct_prompt()
print(prompt)
# Initialize variables
full_message_history = []
-token_limit = TOKEN_LIMIT # The maximum number of tokens allowed in the API call
+token_limit = cfg.thinking_token_limit # The maximum number of tokens allowed in the API call
result = None
-user_input = "GENERATE NEXT COMMAND JSON"
+# Make a constant:
+user_input = "Determine which next command to use, and respond using the format specified above:"
# Interaction Loop
while True:
diff --git a/tests/json_tests.py b/tests/json_tests.py
new file mode 100644
index 00000000..69405989
--- /dev/null
+++ b/tests/json_tests.py
@@ -0,0 +1,34 @@
+import unittest
+import os
+import sys
+# Probably a better way:
+sys.path.append(os.path.abspath('../scripts'))
+from json_parser import fix_and_parse_json
+
+class TestParseJson(unittest.TestCase):
+ def test_valid_json(self):
+ # Test that a valid JSON string is parsed correctly
+ json_str = '{"name": "John", "age": 30, "city": "New York"}'
+ obj = fix_and_parse_json(json_str)
+ self.assertEqual(obj, {"name": "John", "age": 30, "city": "New York"})
+
+ def test_invalid_json_minor(self):
+ # Test that an invalid JSON string can be fixed with gpt
+ json_str = '{"name": "John", "age": 30, "city": "New York",}'
+ self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), {"name": "John", "age": 30, "city": "New York"})
+
+ def test_invalid_json_major_with_gpt(self):
+ # Test that an invalid JSON string raises an error when try_to_fix_with_gpt is False
+ json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END'
+ self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=True), {"name": "John", "age": 30, "city": "New York"})
+
+ def test_invalid_json_major_without_gpt(self):
+ # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False
+ json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END'
+ # Assert that this raises an exception:
+ with self.assertRaises(Exception):
+ fix_and_parse_json(json_str, try_to_fix_with_gpt=False)
+
+
+if __name__ == '__main__':
+ unittest.main()
\ No newline at end of file
From 2b5a7cc48570be7bc06f7b6183a25f6e7ada113d Mon Sep 17 00:00:00 2001
From: Taylor Brown
Date: Sun, 2 Apr 2023 17:17:46 -0500
Subject: [PATCH 03/57] Add missing properties to config.py
---
scripts/config.py | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/scripts/config.py b/scripts/config.py
index bc7ebf71..e67c61f0 100644
--- a/scripts/config.py
+++ b/scripts/config.py
@@ -21,9 +21,21 @@ class Config(metaclass=Singleton):
def __init__(self):
self.continuous_mode = False
self.speak_mode = False
+ self.fast_llm_model = "gpt-3.5-turbo"
+ self.smart_llm_model = "gpt-3.5-turbo"
+ self.thinking_token_limit = 4000
def set_continuous_mode(self, value: bool):
self.continuous_mode = value
def set_speak_mode(self, value: bool):
self.speak_mode = value
+
+ def set_fast_llm_model(self, value: str):
+ self.fast_llm_model = value
+
+ def set_smart_llm_model(self, value: str):
+ self.smart_llm_model = value
+
+ def set_thinking_token_limit(self, value: int):
+ self.thinking_token_limit = value
From dca5b78bfeecd754b44f75ef57145925db043101 Mon Sep 17 00:00:00 2001
From: Taylor Brown
Date: Sun, 2 Apr 2023 17:34:11 -0500
Subject: [PATCH 04/57] Add missing json_parser.py and add alternative
requirements
This adds fix_and_parse_json.
Also, adds requirements-alternative.txt to help install requirements in a different environment.
---
scripts/json_parser.py | 12 ++++++++++++
scripts/requirements-alternative.txt | 16 ++++++++++++++++
scripts/requirements.txt | 3 ++-
3 files changed, 30 insertions(+), 1 deletion(-)
create mode 100644 scripts/json_parser.py
create mode 100644 scripts/requirements-alternative.txt
diff --git a/scripts/json_parser.py b/scripts/json_parser.py
new file mode 100644
index 00000000..87f376c6
--- /dev/null
+++ b/scripts/json_parser.py
@@ -0,0 +1,12 @@
+import dirtyjson
+from ai_functions import fix_json
+
+def fix_and_parse_json(json_str: str, try_to_fix_with_gpt: bool = True):
+ try:
+ return dirtyjson.loads(json_str)
+ except Exception as e:
+ if try_to_fix_with_gpt:
+ # Now try to fix this up using the ai_functions
+ return fix_json(json_str, None, True)
+ else:
+ raise e
\ No newline at end of file
diff --git a/scripts/requirements-alternative.txt b/scripts/requirements-alternative.txt
new file mode 100644
index 00000000..a65dd4c6
--- /dev/null
+++ b/scripts/requirements-alternative.txt
@@ -0,0 +1,16 @@
+# I wasn't having any luck installing the requirements.txt file in Mac or Linux,
+# so I unpinned and installed, and got these versions:
+beautifulsoup4==4.9.3
+colorama==0.4.6
+dirtyjson==1.0.8
+# The biggest difference is docker 5 instead of 6, because of this silliness:
+# The conflict is caused by:
+# The user requested requests>=2.26.0
+# docker 6.0.1 depends on requests>=2.26.0
+# googlesearch-python 1.1.0 depends on requests==2.25.1
+docker==5.0.3
+googlesearch-python==1.1.0
+openai==0.27.2
+playsound==1.3.0
+readability-lxml==0.8.1
+requests==2.25.1
diff --git a/scripts/requirements.txt b/scripts/requirements.txt
index 0e2e52a7..30296486 100644
--- a/scripts/requirements.txt
+++ b/scripts/requirements.txt
@@ -1,8 +1,9 @@
beautifulsoup4==4.9.3
colorama==0.4.6
+dirtyjson==1.0.8
+docker==6.0.1
googlesearch_python==1.1.0
openai==0.27.0
playsound==1.2.2
readability_lxml==0.8.1
requests==2.25.1
-docker==6.0.1
From 10833c86b748dbc079f35911c1ab508162b6339e Mon Sep 17 00:00:00 2001
From: Taylor Brown
Date: Sun, 2 Apr 2023 17:37:30 -0500
Subject: [PATCH 05/57] Add pyyaml package to requirements
---
scripts/requirements-alternative.txt | 1 +
scripts/requirements.txt | 1 +
2 files changed, 2 insertions(+)
diff --git a/scripts/requirements-alternative.txt b/scripts/requirements-alternative.txt
index a65dd4c6..2c242aad 100644
--- a/scripts/requirements-alternative.txt
+++ b/scripts/requirements-alternative.txt
@@ -12,5 +12,6 @@ docker==5.0.3
googlesearch-python==1.1.0
openai==0.27.2
playsound==1.3.0
+pyyaml==6.0
readability-lxml==0.8.1
requests==2.25.1
diff --git a/scripts/requirements.txt b/scripts/requirements.txt
index 30296486..0e6c8e29 100644
--- a/scripts/requirements.txt
+++ b/scripts/requirements.txt
@@ -5,5 +5,6 @@ docker==6.0.1
googlesearch_python==1.1.0
openai==0.27.0
playsound==1.2.2
+pyyaml==6.0
readability_lxml==0.8.1
requests==2.25.1
From a47da497b5392fb19c1fabf40ce665d424528aff Mon Sep 17 00:00:00 2001
From: Taylor Brown
Date: Sun, 2 Apr 2023 18:34:42 -0500
Subject: [PATCH 06/57] Fix requirements-alternative.txt to include the
original requirements.txt
---
requirements-alternative.txt | 19 ++++++++-----------
1 file changed, 8 insertions(+), 11 deletions(-)
diff --git a/requirements-alternative.txt b/requirements-alternative.txt
index 2c242aad..a2f2723d 100644
--- a/requirements-alternative.txt
+++ b/requirements-alternative.txt
@@ -1,17 +1,14 @@
-# I wasn't having any luck installing the requirements.txt file in Mac or Linux,
-# so I unpinned and installed, and got these versions:
-beautifulsoup4==4.9.3
-colorama==0.4.6
-dirtyjson==1.0.8
+# I wasn't having any luck installing the requirements.txt file in Mac or Linux
+# But this seems to work.
# The biggest difference is docker 5 instead of 6, because of this silliness:
+#
# The conflict is caused by:
# The user requested requests>=2.26.0
# docker 6.0.1 depends on requests>=2.26.0
# googlesearch-python 1.1.0 depends on requests==2.25.1
docker==5.0.3
-googlesearch-python==1.1.0
-openai==0.27.2
-playsound==1.3.0
-pyyaml==6.0
-readability-lxml==0.8.1
-requests==2.25.1
+
+# I'd love to fix this in a cleaner way
+
+# Now go ahead and install the rest of what requirements.txt says:
+-r requirements.txt
From 3e587bc7fb63abf91a1a987483721b4faa912d09 Mon Sep 17 00:00:00 2001
From: Taylor Brown
Date: Sun, 2 Apr 2023 18:50:51 -0500
Subject: [PATCH 07/57] Make the json_parser more robust
For some reason the bot keeps prefacing its JSON. This fixes it for now.
---
scripts/ai_functions.py | 54 ++-----------------------
scripts/call_ai_function.py | 27 +++++++++++++
scripts/commands.py | 3 +-
scripts/json_parser.py | 53 +++++++++++++++++++++---
tests/json_tests.py | 81 +++++++++++++++++++++++++++++++++++++
5 files changed, 160 insertions(+), 58 deletions(-)
create mode 100644 scripts/call_ai_function.py
diff --git a/scripts/ai_functions.py b/scripts/ai_functions.py
index a69daa8f..9e8e04dc 100644
--- a/scripts/ai_functions.py
+++ b/scripts/ai_functions.py
@@ -3,46 +3,23 @@ import json
import openai
import dirtyjson
from config import Config
-
+from call_ai_function import call_ai_function
+from json_parser import fix_and_parse_json
cfg = Config()
-# This is a magic function that can do anything with no-code. See
-# https://github.com/Torantulino/AI-Functions for more info.
-def call_ai_function(function, args, description, model=cfg.smart_llm_model):
- # For each arg, if any are None, convert to "None":
- args = [str(arg) if arg is not None else "None" for arg in args]
- # parse args to comma seperated string
- args = ", ".join(args)
- messages = [
- {
- "role": "system",
- "content": f"You are now the following python function: ```# {description}\n{function}```\n\nOnly respond with your `return` value.",
- },
- {"role": "user", "content": args},
- ]
-
- response = openai.ChatCompletion.create(
- model=model, messages=messages, temperature=0
- )
-
- return response.choices[0].message["content"]
-
-
# Evaluating code
-
def evaluate_code(code: str) -> List[str]:
function_string = "def analyze_code(code: str) -> List[str]:"
args = [code]
description_string = """Analyzes the given code and returns a list of suggestions for improvements."""
result_string = call_ai_function(function_string, args, description_string)
- return json.loads(result_string)
+ return fix_and_parse_json.loads(result_string)
# Improving code
-
def improve_code(suggestions: List[str], code: str) -> str:
function_string = (
"def generate_improved_code(suggestions: List[str], code: str) -> str:"
@@ -68,28 +45,3 @@ def write_tests(code: str, focus: List[str]) -> str:
return result_string
-# TODO: Make debug a global config var
-def fix_json(json_str: str, schema:str = None, debug=True) -> str:
- # Try to fix the JSON using gpt:
- function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
- args = [json_str, schema]
- description_string = """Fixes the provided JSON string to make it parseable. If the schema is provided, the JSON will be made to look like the schema, otherwise it will be made to look like a valid JSON object."""
-
- result_string = call_ai_function(
- function_string, args, description_string, model=cfg.fast_llm_model
- )
- if debug:
- print("------------ JSON FIX ATTEMPT ---------------")
- print(f"Original JSON: {json_str}")
- print(f"Fixed JSON: {result_string}")
- print("----------- END OF FIX ATTEMPT ----------------")
- try:
- return dirtyjson.loads(result_string)
- except:
- # Log the exception:
- print("Failed to fix JSON")
- # Get the call stack:
- import traceback
- call_stack = traceback.format_exc()
- print(call_stack)
- return {}
\ No newline at end of file
diff --git a/scripts/call_ai_function.py b/scripts/call_ai_function.py
new file mode 100644
index 00000000..57d833b9
--- /dev/null
+++ b/scripts/call_ai_function.py
@@ -0,0 +1,27 @@
+from typing import List, Optional
+import json
+import openai
+import dirtyjson
+from config import Config
+cfg = Config()
+
+# This is a magic function that can do anything with no-code. See
+# https://github.com/Torantulino/AI-Functions for more info.
+def call_ai_function(function, args, description, model=cfg.smart_llm_model):
+ # For each arg, if any are None, convert to "None":
+ args = [str(arg) if arg is not None else "None" for arg in args]
+ # parse args to comma seperated string
+ args = ", ".join(args)
+ messages = [
+ {
+ "role": "system",
+ "content": f"You are now the following python function: ```# {description}\n{function}```\n\nOnly respond with your `return` value.",
+ },
+ {"role": "user", "content": args},
+ ]
+
+ response = openai.ChatCompletion.create(
+ model=model, messages=messages, temperature=0
+ )
+
+ return response.choices[0].message["content"]
diff --git a/scripts/commands.py b/scripts/commands.py
index 61085632..37cbdcd1 100644
--- a/scripts/commands.py
+++ b/scripts/commands.py
@@ -8,12 +8,13 @@ from config import Config
import ai_functions as ai
from file_operations import read_file, write_to_file, append_to_file, delete_file
from execute_code import execute_python_file
+from json_parser import fix_and_parse_json
cfg = Config()
def get_command(response):
try:
- response_json = json.loads(response)
+ response_json = fix_and_parse_json(response)
command = response_json["command"]
command_name = command["name"]
arguments = command["args"]
diff --git a/scripts/json_parser.py b/scripts/json_parser.py
index 87f376c6..16fb8fc7 100644
--- a/scripts/json_parser.py
+++ b/scripts/json_parser.py
@@ -1,12 +1,53 @@
import dirtyjson
-from ai_functions import fix_json
+from call_ai_function import call_ai_function
+from config import Config
+cfg = Config()
def fix_and_parse_json(json_str: str, try_to_fix_with_gpt: bool = True):
try:
return dirtyjson.loads(json_str)
except Exception as e:
- if try_to_fix_with_gpt:
- # Now try to fix this up using the ai_functions
- return fix_json(json_str, None, True)
- else:
- raise e
\ No newline at end of file
+ # Let's do something manually - sometimes GPT responds with something BEFORE the braces:
+ # "I'm sorry, I don't understand. Please try again."{"text": "I'm sorry, I don't understand. Please try again.", "confidence": 0.0}
+ # So let's try to find the first brace and then parse the rest of the string
+ try:
+ brace_index = json_str.index("{")
+ json_str = json_str[brace_index:]
+ last_brace_index = json_str.rindex("}")
+ json_str = json_str[:last_brace_index+1]
+ return dirtyjson.loads(json_str)
+ except Exception as e:
+ if try_to_fix_with_gpt:
+ # Now try to fix this up using the ai_functions
+ return fix_json(json_str, None, True)
+ else:
+ raise e
+
+# TODO: Make debug a global config var
+def fix_json(json_str: str, schema:str = None, debug=True) -> str:
+ # Try to fix the JSON using gpt:
+ function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
+ args = [json_str, schema]
+ description_string = """Fixes the provided JSON string to make it parseable. If the schema is provided, the JSON will be made to look like the schema, otherwise it will be made to look like a valid JSON object."""
+
+ # If it doesn't already start with a "`", add one:
+ if not json_str.startswith("`"):
+ json_str = "```json\n" + json_str + "\n```"
+ result_string = call_ai_function(
+ function_string, args, description_string, model=cfg.fast_llm_model
+ )
+ if debug:
+ print("------------ JSON FIX ATTEMPT ---------------")
+ print(f"Original JSON: {json_str}")
+ print(f"Fixed JSON: {result_string}")
+ print("----------- END OF FIX ATTEMPT ----------------")
+ try:
+ return dirtyjson.loads(result_string)
+ except:
+ # Log the exception:
+ print("Failed to fix JSON")
+ # Get the call stack:
+ import traceback
+ call_stack = traceback.format_exc()
+ print(call_stack)
+ return {}
diff --git a/tests/json_tests.py b/tests/json_tests.py
index 69405989..459da610 100644
--- a/tests/json_tests.py
+++ b/tests/json_tests.py
@@ -29,6 +29,87 @@ class TestParseJson(unittest.TestCase):
with self.assertRaises(Exception):
fix_and_parse_json(json_str, try_to_fix_with_gpt=False)
+ def test_invalid_json_leading_sentence_with_gpt(self):
+        # Test that a JSON string with a leading sentence before the braces is still parsed correctly
+ json_str = """I suggest we start by browsing the repository to find any issues that we can fix.
+
+{
+ "command": {
+ "name": "browse_website",
+ "args":{
+ "url": "https://github.com/Torantulino/Auto-GPT"
+ }
+ },
+ "thoughts":
+ {
+ "text": "I suggest we start browsing the repository to find any issues that we can fix.",
+ "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.",
+ "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes",
+ "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.",
+ "speak": "I will start browsing the repository to find any issues we can fix."
+ }
+}"""
+ good_obj = {
+ "command": {
+ "name": "browse_website",
+ "args":{
+ "url": "https://github.com/Torantulino/Auto-GPT"
+ }
+ },
+ "thoughts":
+ {
+ "text": "I suggest we start browsing the repository to find any issues that we can fix.",
+ "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.",
+ "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes",
+ "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.",
+ "speak": "I will start browsing the repository to find any issues we can fix."
+ }
+ }
+        # Assert that the leading sentence is stripped and the JSON object is parsed correctly:
+ self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj)
+
+
+
+ def test_invalid_json_leading_sentence_with_gpt(self):
+        # Test that a JSON string with a leading sentence before the braces is still parsed correctly
+ json_str = """I will first need to browse the repository (https://github.com/Torantulino/Auto-GPT) and identify any potential bugs that need fixing. I will use the "browse_website" command for this.
+
+{
+ "command": {
+ "name": "browse_website",
+ "args":{
+ "url": "https://github.com/Torantulino/Auto-GPT"
+ }
+ },
+ "thoughts":
+ {
+ "text": "Browsing the repository to identify potential bugs",
+ "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.",
+ "plan": "- Analyze the repository for potential bugs and areas of improvement",
+ "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.",
+ "speak": "I am browsing the repository to identify potential bugs."
+ }
+}"""
+ good_obj = {
+ "command": {
+ "name": "browse_website",
+ "args":{
+ "url": "https://github.com/Torantulino/Auto-GPT"
+ }
+ },
+ "thoughts":
+ {
+ "text": "Browsing the repository to identify potential bugs",
+ "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.",
+ "plan": "- Analyze the repository for potential bugs and areas of improvement",
+ "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.",
+ "speak": "I am browsing the repository to identify potential bugs."
+ }
+}
+        # Assert that the leading sentence is stripped and the JSON object is parsed correctly:
+ self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj)
+
+
if __name__ == '__main__':
unittest.main()
\ No newline at end of file
From b458b1c0f21c360d4a2280fa588b9846ea16ea03 Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Mon, 3 Apr 2023 01:01:45 +0100
Subject: [PATCH 08/57] Create LICENSE
---
LICENSE | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+)
create mode 100644 LICENSE
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..601935b8
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 Toran Bruce Richards
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
From f9031cfc47a0ebac8af5a79f260efb8be969b489 Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Mon, 3 Apr 2023 02:44:58 +0100
Subject: [PATCH 09/57] Adds new public major sponsors to readme.
---
README.md | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/README.md b/README.md
index b2acfbc5..9fdf06da 100644
--- a/README.md
+++ b/README.md
@@ -17,9 +17,9 @@ Your support is greatly appreciated
Development of this free, open-source project is made possible by all the contributors and sponsors. If you'd like to sponsor this project and have your avatar or company logo appear below click here. 💖
-
-
+
+
From 8cf4bf9278e09ad27b1ff81c3622fce2abbee385 Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Mon, 3 Apr 2023 02:50:34 +0100
Subject: [PATCH 10/57] =?UTF-8?q?Adds=20minor=20sponsors=20=F0=9F=92=96?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
README.md | 2 ++
1 file changed, 2 insertions(+)
diff --git a/README.md b/README.md
index 9fdf06da..dd91e08d 100644
--- a/README.md
+++ b/README.md
@@ -21,6 +21,8 @@ Your support is greatly appreciated
+
+
From 80ccd10d0b703a861c8b0d88585e907b4538f6c9 Mon Sep 17 00:00:00 2001
From: Taylor Brown
Date: Sun, 2 Apr 2023 21:35:28 -0500
Subject: [PATCH 11/57] Use gpt-4 by default for the main thought process
Allow specifying the llm through dotenv
Move more things into config
---
.env.template | 2 ++
scripts/call_ai_function.py | 2 --
scripts/chat.py | 11 ++++-------
scripts/config.py | 35 +++++++++++++++++++++++++++++++++--
scripts/data.py | 10 +++++++++-
scripts/main.py | 13 ++++---------
scripts/speak.py | 11 ++++-------
7 files changed, 56 insertions(+), 28 deletions(-)
diff --git a/.env.template b/.env.template
index a598aa7b..cbf0cd9b 100644
--- a/.env.template
+++ b/.env.template
@@ -1,2 +1,4 @@
OPENAI_API_KEY=your-openai-api-key
ELEVENLABS_API_KEY=your-elevenlabs-api-key
+SMART_LLM_MODEL="gpt-4"
+FAST_LLM_MODEL="gpt-3.5-turbo"
\ No newline at end of file
diff --git a/scripts/call_ai_function.py b/scripts/call_ai_function.py
index 57d833b9..7afb3b5d 100644
--- a/scripts/call_ai_function.py
+++ b/scripts/call_ai_function.py
@@ -1,7 +1,5 @@
from typing import List, Optional
-import json
import openai
-import dirtyjson
from config import Config
cfg = Config()
diff --git a/scripts/chat.py b/scripts/chat.py
index 69764959..d9b75b20 100644
--- a/scripts/chat.py
+++ b/scripts/chat.py
@@ -2,12 +2,8 @@ import os
import time
import openai
from dotenv import load_dotenv
-
-# Load environment variables from .env file
-load_dotenv()
-
-# Initialize the OpenAI API client
-openai.api_key = os.getenv("OPENAI_API_KEY")
+from config import Config
+cfg = Config()
def create_chat_message(role, content):
@@ -65,8 +61,9 @@ def chat_with_ai(
f"{message['role'].capitalize()}: {message['content']}")
print("----------- END OF CONTEXT ----------------")
+ # TODO: use a model defined elsewhere, so that model can contain temperature and other settings we care about
response = openai.ChatCompletion.create(
- model="gpt-3.5-turbo",#model="gpt-4",
+ model=cfg.smart_llm_model,
messages=current_context,
)
diff --git a/scripts/config.py b/scripts/config.py
index e67c61f0..4c892b8f 100644
--- a/scripts/config.py
+++ b/scripts/config.py
@@ -1,3 +1,9 @@
+import os
+
+from dotenv import load_dotenv
+# Load environment variables from .env file
+load_dotenv()
+
class Singleton(type):
"""
Singleton metaclass for ensuring only one instance of a class.
@@ -21,9 +27,25 @@ class Config(metaclass=Singleton):
def __init__(self):
self.continuous_mode = False
self.speak_mode = False
- self.fast_llm_model = "gpt-3.5-turbo"
- self.smart_llm_model = "gpt-3.5-turbo"
+ # TODO - make these models be self-contained, using langchain, so we can configure them once and call it good
+ self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
+ self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
self.thinking_token_limit = 4000
+ # Initialize the OpenAI API client
+ self.openai_api_key = os.getenv("OPENAI_API_KEY")
+ self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
+ # Print values:
+ print("Config values:")
+ print(f"continuous_mode: {self.continuous_mode}")
+ print(f"speak_mode: {self.speak_mode}")
+ print(f"fast_llm_model: {self.fast_llm_model}")
+ print(f"smart_llm_model: {self.smart_llm_model}")
+ print(f"thinking_token_limit: {self.thinking_token_limit}")
+ print(f"openai_api_key: {self.openai_api_key}")
+ print(f"elevenlabs_api_key: {self.elevenlabs_api_key}")
+
+
+
def set_continuous_mode(self, value: bool):
self.continuous_mode = value
@@ -39,3 +61,12 @@ class Config(metaclass=Singleton):
def set_thinking_token_limit(self, value: int):
self.thinking_token_limit = value
+
+ def set_openai_api_key(self, value: str):
+ self.apiopenai_api_key_key = value
+
+ def set_elevenlabs_api_key(self, value: str):
+ self.elevenlabs_api_key = value
+
+
+
diff --git a/scripts/data.py b/scripts/data.py
index 19473557..0a72cbbf 100644
--- a/scripts/data.py
+++ b/scripts/data.py
@@ -1,7 +1,15 @@
+import os
+from pathlib import Path
+
+
def load_prompt():
try:
+ # get directory of this file:
+ file_dir = Path(os.path.dirname(os.path.realpath(__file__)))
+ data_dir = file_dir / "data"
+ prompt_file = data_dir / "prompt.txt"
# Load the promt from data/prompt.txt
- with open("data/prompt.txt", "r") as prompt_file:
+ with open(prompt_file, "r") as prompt_file:
prompt = prompt_file.read()
return prompt
diff --git a/scripts/main.py b/scripts/main.py
index 0d4a5648..40f8f4c4 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -11,17 +11,11 @@ import speak
from enum import Enum, auto
import sys
from config import Config
-from dotenv import load_dotenv
from json_parser import fix_and_parse_json
from ai_config import AIConfig
import traceback
import yaml
-
-# Load environment variables from .env file
-load_dotenv()
-
-
class Argument(Enum):
CONTINUOUS_MODE = "continuous-mode"
SPEAK_MODE = "speak-mode"
@@ -262,11 +256,12 @@ def parse_arguments():
print_to_console("Speak Mode: ", Fore.GREEN, "ENABLED")
cfg.set_speak_mode(True)
- # TODO: Better argument parsing:
- # TODO: fill in llm values here
+
+
+# TODO: Better argument parsing:
+# TODO: fill in llm values here
cfg = Config()
-
parse_arguments()
ai_name = ""
prompt = construct_prompt()
diff --git a/scripts/speak.py b/scripts/speak.py
index a0f29fc5..2fbcbed2 100644
--- a/scripts/speak.py
+++ b/scripts/speak.py
@@ -1,20 +1,17 @@
import os
from playsound import playsound
import requests
-from dotenv import load_dotenv
-
-
-# Load environment variables from .env file
-load_dotenv()
+from config import Config
+cfg = Config()
+# TODO: Nicer names for these ids
voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"]
tts_headers = {
"Content-Type": "application/json",
- "xi-api-key": os.getenv("ELEVENLABS_API_KEY")
+ "xi-api-key": cfg.elevenlabs_api_key
}
-
def say_text(text, voice_index=0):
tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format(
voice_id=voices[voice_index])
From 744c5fa25ba2c51f5127d6e692bad31df68b53c5 Mon Sep 17 00:00:00 2001
From: Taylor Brown
Date: Sun, 2 Apr 2023 21:38:21 -0500
Subject: [PATCH 12/57] Remove hardcoding of gpt-3.5-turbo in favor of config
---
scripts/browse.py | 6 ++++--
scripts/commands.py | 2 +-
2 files changed, 5 insertions(+), 3 deletions(-)
diff --git a/scripts/browse.py b/scripts/browse.py
index 33911f20..178bb58a 100644
--- a/scripts/browse.py
+++ b/scripts/browse.py
@@ -3,7 +3,9 @@ import requests
from bs4 import BeautifulSoup
from readability import Document
import openai
+from config import Config
+cfg = Config()
def scrape_text(url):
response = requests.get(url)
@@ -100,7 +102,7 @@ def summarize_text(text, is_website=True):
]
response = openai.ChatCompletion.create(
- model="gpt-3.5-turbo",
+ model=cfg.fast_llm_model,
messages=messages,
max_tokens=300,
)
@@ -128,7 +130,7 @@ def summarize_text(text, is_website=True):
]
response = openai.ChatCompletion.create(
- model="gpt-3.5-turbo",
+ model=cfg.fast_llm_model,
messages=messages,
max_tokens=300,
)
diff --git a/scripts/commands.py b/scripts/commands.py
index 37cbdcd1..ff65f1a1 100644
--- a/scripts/commands.py
+++ b/scripts/commands.py
@@ -164,7 +164,7 @@ def shutdown():
quit()
-def start_agent(name, task, prompt, model="gpt-3.5-turbo"):
+def start_agent(name, task, prompt, model=cfg.fast_llm_model):
global cfg
# Remove underscores from name
From ae9448cb89999a9876320c40c827723e80b4e79d Mon Sep 17 00:00:00 2001
From: Taylor Brown
Date: Sun, 2 Apr 2023 21:51:07 -0500
Subject: [PATCH 13/57] Consolidate calls to openai
Starting to abstract away the calls to openai
---
scripts/agent_manager.py | 10 +++-------
scripts/browse.py | 8 +++-----
scripts/call_ai_function.py | 6 ++++--
scripts/chat.py | 6 +++---
scripts/llm_utils.py | 16 ++++++++++++++++
5 files changed, 29 insertions(+), 17 deletions(-)
create mode 100644 scripts/llm_utils.py
diff --git a/scripts/agent_manager.py b/scripts/agent_manager.py
index 9939332b..9ac801f9 100644
--- a/scripts/agent_manager.py
+++ b/scripts/agent_manager.py
@@ -1,4 +1,5 @@
import openai
+from llm_utils import create_chat_completion
next_key = 0
agents = {} # key, (task, full_message_history, model)
@@ -13,13 +14,11 @@ def create_agent(task, prompt, model):
messages = [{"role": "user", "content": prompt}, ]
# Start GTP3 instance
- response = openai.ChatCompletion.create(
+ agent_reply = create_chat_completion(
model=model,
messages=messages,
)
- agent_reply = response.choices[0].message["content"]
-
# Update full message history
messages.append({"role": "assistant", "content": agent_reply})
@@ -42,14 +41,11 @@ def message_agent(key, message):
messages.append({"role": "user", "content": message})
# Start GTP3 instance
- response = openai.ChatCompletion.create(
+ agent_reply = create_chat_completion(
model=model,
messages=messages,
)
- # Get agent response
- agent_reply = response.choices[0].message["content"]
-
# Update full message history
messages.append({"role": "assistant", "content": agent_reply})
diff --git a/scripts/browse.py b/scripts/browse.py
index 178bb58a..b965d66a 100644
--- a/scripts/browse.py
+++ b/scripts/browse.py
@@ -4,6 +4,7 @@ from bs4 import BeautifulSoup
from readability import Document
import openai
from config import Config
+from llm_utils import create_chat_completion
cfg = Config()
@@ -101,13 +102,11 @@ def summarize_text(text, is_website=True):
chunk},
]
- response = openai.ChatCompletion.create(
+ summary = create_chat_completion(
model=cfg.fast_llm_model,
messages=messages,
max_tokens=300,
)
-
- summary = response.choices[0].message.content
summaries.append(summary)
print("Summarized " + str(len(chunks)) + " chunks.")
@@ -129,11 +128,10 @@ def summarize_text(text, is_website=True):
combined_summary},
]
- response = openai.ChatCompletion.create(
+ final_summary = create_chat_completion(
model=cfg.fast_llm_model,
messages=messages,
max_tokens=300,
)
- final_summary = response.choices[0].message.content
return final_summary
diff --git a/scripts/call_ai_function.py b/scripts/call_ai_function.py
index 7afb3b5d..63ee77b9 100644
--- a/scripts/call_ai_function.py
+++ b/scripts/call_ai_function.py
@@ -3,6 +3,8 @@ import openai
from config import Config
cfg = Config()
+from llm_utils import create_chat_completion
+
# This is a magic function that can do anything with no-code. See
# https://github.com/Torantulino/AI-Functions for more info.
def call_ai_function(function, args, description, model=cfg.smart_llm_model):
@@ -18,8 +20,8 @@ def call_ai_function(function, args, description, model=cfg.smart_llm_model):
{"role": "user", "content": args},
]
- response = openai.ChatCompletion.create(
+ response = create_chat_completion(
model=model, messages=messages, temperature=0
)
- return response.choices[0].message["content"]
+ return response
diff --git a/scripts/chat.py b/scripts/chat.py
index d9b75b20..817a5e9d 100644
--- a/scripts/chat.py
+++ b/scripts/chat.py
@@ -5,6 +5,7 @@ from dotenv import load_dotenv
from config import Config
cfg = Config()
+from llm_utils import create_chat_completion
def create_chat_message(role, content):
"""
@@ -62,13 +63,11 @@ def chat_with_ai(
print("----------- END OF CONTEXT ----------------")
# TODO: use a model defined elsewhere, so that model can contain temperature and other settings we care about
- response = openai.ChatCompletion.create(
+ assistant_reply = create_chat_completion(
model=cfg.smart_llm_model,
messages=current_context,
)
- assistant_reply = response.choices[0].message["content"]
-
# Update full message history
full_message_history.append(
create_chat_message(
@@ -79,5 +78,6 @@ def chat_with_ai(
return assistant_reply
except openai.error.RateLimitError:
+        # TODO: When we switch to langchain, this is built in
print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
time.sleep(10)
diff --git a/scripts/llm_utils.py b/scripts/llm_utils.py
new file mode 100644
index 00000000..41f39625
--- /dev/null
+++ b/scripts/llm_utils.py
@@ -0,0 +1,16 @@
+import openai
+from config import Config
+cfg = Config()
+
+openai.api_key = cfg.openai_api_key
+
+# Overly simple abstraction until we create something better
+def create_chat_completion(messages, model=None, temperature=None, max_tokens=None)->str:
+ response = openai.ChatCompletion.create(
+ model=model,
+ messages=messages,
+ temperature=temperature,
+ max_tokens=max_tokens
+ )
+
+ return response.choices[0].message["content"]
From 30f8ed95e10e6b1fb3f7126e9f39d30508745069 Mon Sep 17 00:00:00 2001
From: Taylor Brown
Date: Sun, 2 Apr 2023 21:52:50 -0500
Subject: [PATCH 14/57] Remove unused imports
---
scripts/agent_manager.py | 1 -
scripts/ai_functions.py | 2 --
scripts/browse.py | 3 ---
scripts/call_ai_function.py | 2 --
scripts/chat.py | 1 -
5 files changed, 9 deletions(-)
diff --git a/scripts/agent_manager.py b/scripts/agent_manager.py
index 9ac801f9..ef0227de 100644
--- a/scripts/agent_manager.py
+++ b/scripts/agent_manager.py
@@ -1,4 +1,3 @@
-import openai
from llm_utils import create_chat_completion
next_key = 0
diff --git a/scripts/ai_functions.py b/scripts/ai_functions.py
index 9e8e04dc..0d27132d 100644
--- a/scripts/ai_functions.py
+++ b/scripts/ai_functions.py
@@ -1,7 +1,5 @@
from typing import List, Optional
import json
-import openai
-import dirtyjson
from config import Config
from call_ai_function import call_ai_function
from json_parser import fix_and_parse_json
diff --git a/scripts/browse.py b/scripts/browse.py
index b965d66a..f096c5f3 100644
--- a/scripts/browse.py
+++ b/scripts/browse.py
@@ -1,8 +1,5 @@
-from googlesearch import search
import requests
from bs4 import BeautifulSoup
-from readability import Document
-import openai
from config import Config
from llm_utils import create_chat_completion
diff --git a/scripts/call_ai_function.py b/scripts/call_ai_function.py
index 63ee77b9..0c864b49 100644
--- a/scripts/call_ai_function.py
+++ b/scripts/call_ai_function.py
@@ -1,5 +1,3 @@
-from typing import List, Optional
-import openai
from config import Config
cfg = Config()
diff --git a/scripts/chat.py b/scripts/chat.py
index 817a5e9d..36568f58 100644
--- a/scripts/chat.py
+++ b/scripts/chat.py
@@ -1,4 +1,3 @@
-import os
import time
import openai
from dotenv import load_dotenv
From aed8c5b4694205551c9d11c910c262547b2f301a Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Mon, 3 Apr 2023 03:57:38 +0100
Subject: [PATCH 15/57] Fixes openai key not set error.
---
scripts/config.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/scripts/config.py b/scripts/config.py
index 4c892b8f..844b1e2f 100644
--- a/scripts/config.py
+++ b/scripts/config.py
@@ -44,7 +44,8 @@ class Config(metaclass=Singleton):
print(f"openai_api_key: {self.openai_api_key}")
print(f"elevenlabs_api_key: {self.elevenlabs_api_key}")
-
+ # Initialize the OpenAI API client
+ openai.api_key = self.openai_api_key
def set_continuous_mode(self, value: bool):
From 0c1636565f2dfe22caaa3c2cdde0f7a1235513ae Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Mon, 3 Apr 2023 03:57:51 +0100
Subject: [PATCH 16/57] Fixes missing openai import
---
scripts/config.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/scripts/config.py b/scripts/config.py
index 844b1e2f..8ad70c1e 100644
--- a/scripts/config.py
+++ b/scripts/config.py
@@ -1,5 +1,5 @@
import os
-
+import openai
from dotenv import load_dotenv
# Load environment variables from .env file
load_dotenv()
From afceca0625ca8e49415e2487cd43161180db5506 Mon Sep 17 00:00:00 2001
From: Taylor Brown
Date: Sun, 2 Apr 2023 21:57:38 -0500
Subject: [PATCH 17/57] Remove commented code from ai_config
---
scripts/ai_config.py | 17 -----------------
1 file changed, 17 deletions(-)
diff --git a/scripts/ai_config.py b/scripts/ai_config.py
index e35de06d..2529628f 100644
--- a/scripts/ai_config.py
+++ b/scripts/ai_config.py
@@ -7,23 +7,6 @@ class AIConfig:
self.ai_role = ai_role
self.ai_goals = ai_goals
- # @classmethod
- # def create_from_user_prompts(cls):
- # ai_name = input("Name your AI: ") or "Entrepreneur-GPT"
- # ai_role = input(f"{ai_name} is: ") or "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth."
- # print("Enter up to 5 goals for your AI: ")
- # print("For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'")
- # print("Enter nothing to load defaults, enter nothing when finished.")
- # ai_goals = []
- # for i in range(5):
- # ai_goal = input(f"Goal {i+1}: ")
- # if ai_goal == "":
- # break
- # ai_goals.append(ai_goal)
- # if len(ai_goals) == 0:
- # ai_goals = ["Increase net worth", "Grow Twitter Account", "Develop and manage multiple businesses autonomously"]
- # return cls(ai_name, ai_role, ai_goals)
-
@classmethod
def load(cls, config_file="config.yaml"):
# Load variables from yaml file if it exists
From 4173b07bcec0f54ec0605e7bbfd58d8fd62e4a6b Mon Sep 17 00:00:00 2001
From: Taylor Brown
Date: Sun, 2 Apr 2023 22:03:48 -0500
Subject: [PATCH 18/57] Remove excessive debug text
---
scripts/config.py | 19 ++++++++-----------
scripts/json_parser.py | 7 +++----
2 files changed, 11 insertions(+), 15 deletions(-)
diff --git a/scripts/config.py b/scripts/config.py
index 8ad70c1e..fb127648 100644
--- a/scripts/config.py
+++ b/scripts/config.py
@@ -30,19 +30,16 @@ class Config(metaclass=Singleton):
# TODO - make these models be self-contained, using langchain, so we can configure them once and call it good
self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
- self.thinking_token_limit = 4000
- # Initialize the OpenAI API client
+
+ # TODO: Make this not so hard-coded
+ # This is the token limit that the main prompt needs to know. GPT-4 has a much bigger limit than GPT-3
+ if (self.smart_llm_model.startswith("gpt-3")):
+ self.thinking_token_limit = 4000
+ else:
+ self.thinking_token_limit = 6000
+
self.openai_api_key = os.getenv("OPENAI_API_KEY")
self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
- # Print values:
- print("Config values:")
- print(f"continuous_mode: {self.continuous_mode}")
- print(f"speak_mode: {self.speak_mode}")
- print(f"fast_llm_model: {self.fast_llm_model}")
- print(f"smart_llm_model: {self.smart_llm_model}")
- print(f"thinking_token_limit: {self.thinking_token_limit}")
- print(f"openai_api_key: {self.openai_api_key}")
- print(f"elevenlabs_api_key: {self.elevenlabs_api_key}")
# Initialize the OpenAI API client
openai.api_key = self.openai_api_key
diff --git a/scripts/json_parser.py b/scripts/json_parser.py
index 16fb8fc7..dbc511e1 100644
--- a/scripts/json_parser.py
+++ b/scripts/json_parser.py
@@ -24,7 +24,7 @@ def fix_and_parse_json(json_str: str, try_to_fix_with_gpt: bool = True):
raise e
# TODO: Make debug a global config var
-def fix_json(json_str: str, schema:str = None, debug=True) -> str:
+def fix_json(json_str: str, schema:str = None, debug=False) -> str:
# Try to fix the JSON using gpt:
function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
args = [json_str, schema]
@@ -44,10 +44,9 @@ def fix_json(json_str: str, schema:str = None, debug=True) -> str:
try:
return dirtyjson.loads(result_string)
except:
- # Log the exception:
- print("Failed to fix JSON")
# Get the call stack:
import traceback
call_stack = traceback.format_exc()
- print(call_stack)
+ # TODO: Handle this sort of thing better
+ print(f"Failed to fix JSON: '{json_str}' "+call_stack)
return {}
From 7fd2ce2bc647f7502f6b39ea08cf8dda892ea1d9 Mon Sep 17 00:00:00 2001
From: Taylor Brown
Date: Sun, 2 Apr 2023 22:10:53 -0500
Subject: [PATCH 19/57] Clean up where last_run_settings go
---
.gitignore | 1 +
scripts/ai_config.py | 10 +++++++---
scripts/main.py | 6 +++++-
3 files changed, 13 insertions(+), 4 deletions(-)
diff --git a/.gitignore b/.gitignore
index 1313d98b..a4e3cc2d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,3 +7,4 @@ package-lock.json
scripts/auto_gpt_workspace/*
*.mpeg
.env
+last_run_ai_settings.yaml
\ No newline at end of file
diff --git a/scripts/ai_config.py b/scripts/ai_config.py
index 2529628f..945fcfb2 100644
--- a/scripts/ai_config.py
+++ b/scripts/ai_config.py
@@ -1,14 +1,18 @@
import yaml
import data
+
class AIConfig:
def __init__(self, ai_name="", ai_role="", ai_goals=[]):
self.ai_name = ai_name
self.ai_role = ai_role
self.ai_goals = ai_goals
+ # Soon this will go in a folder where it remembers more stuff about the run(s)
+ SAVE_FILE = "last_run_ai_settings.yaml"
+
@classmethod
- def load(cls, config_file="config.yaml"):
+ def load(cls, config_file=SAVE_FILE):
# Load variables from yaml file if it exists
try:
with open(config_file) as file:
@@ -22,10 +26,10 @@ class AIConfig:
return cls(ai_name, ai_role, ai_goals)
- def save(self, config_file="config.yaml"):
+ def save(self, config_file=SAVE_FILE):
config = {"ai_name": self.ai_name, "ai_role": self.ai_role, "ai_goals": self.ai_goals}
with open(config_file, "w") as file:
- documents = yaml.dump(config, file)
+ yaml.dump(config, file)
def construct_full_prompt(self):
prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""
diff --git a/scripts/main.py b/scripts/main.py
index 40f8f4c4..ef4ba587 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -172,7 +172,11 @@ def construct_prompt():
Fore.GREEN,
"Let's continue our journey.",
speak_text=True)
- should_continue = input(f"Continue with the last settings? (Settings: {config.ai_name}, {config.ai_role}, {config.ai_goals}) (Y/n): ")
+ should_continue = input(f"""Continue with the last settings?
+Name: {config.ai_name}
+Role: {config.ai_role}
+Goals: {config.ai_goals}
+Continue (Y/n): """)
if should_continue.lower() == "n":
config = AIConfig()
From c86b6e20139294e818a4ca8fa512dba4e4694a84 Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Mon, 3 Apr 2023 11:23:54 +0100
Subject: [PATCH 20/57] Updates requirements.txt
---
requirements.txt | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/requirements.txt b/requirements.txt
index 60c2d3ea..e731354b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,11 +1,13 @@
-beautifulsoup4==4.9.3
+beautifulsoup4
colorama==0.4.6
-dirtyjson==1.0.8
-docker==5.0.3
-googlesearch-python==1.1.0
+dirtyjson==1.0.8
openai==0.27.2
playsound==1.3.0
python-dotenv==1.0.0
pyyaml==6.0
readability-lxml==0.8.1
-requests==2.25.1
+requests
+tiktoken==0.3.3
+docker
+# googlesearch-python
+# Googlesearch python seems to be a bit cursed, anyone good at fixing things like this?
\ No newline at end of file
From 1e73ee2958669255e2b9912eb8859025221c13ef Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Mon, 3 Apr 2023 11:24:36 +0100
Subject: [PATCH 21/57] Adds todo comment.
---
scripts/agent_manager.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/scripts/agent_manager.py b/scripts/agent_manager.py
index ef0227de..ad120c40 100644
--- a/scripts/agent_manager.py
+++ b/scripts/agent_manager.py
@@ -4,7 +4,7 @@ next_key = 0
agents = {} # key, (task, full_message_history, model)
# Create new GPT agent
-
+# TODO: Centralise use of create_chat_completion() to globally enforce token limit
def create_agent(task, prompt, model):
global next_key
From 5a669d96ebd5c857c288c8b1f9e3b06a9fb203ba Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Mon, 3 Apr 2023 11:25:43 +0100
Subject: [PATCH 22/57] Evaluated code should not be JSON.
---
scripts/ai_functions.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/scripts/ai_functions.py b/scripts/ai_functions.py
index 0d27132d..05aa93a2 100644
--- a/scripts/ai_functions.py
+++ b/scripts/ai_functions.py
@@ -13,7 +13,8 @@ def evaluate_code(code: str) -> List[str]:
description_string = """Analyzes the given code and returns a list of suggestions for improvements."""
result_string = call_ai_function(function_string, args, description_string)
- return fix_and_parse_json.loads(result_string)
+
+ return result_string
# Improving code
From 04710ae57bc5ca25dc345ed98e42df0b7708c9c6 Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Mon, 3 Apr 2023 11:26:09 +0100
Subject: [PATCH 23/57] =?UTF-8?q?Implements=20counting=20of=20exact=20toke?=
=?UTF-8?q?ns=20=F0=9F=9A=80?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
scripts/token_counter.py | 57 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 57 insertions(+)
create mode 100644 scripts/token_counter.py
diff --git a/scripts/token_counter.py b/scripts/token_counter.py
new file mode 100644
index 00000000..a28a9868
--- /dev/null
+++ b/scripts/token_counter.py
@@ -0,0 +1,57 @@
+import tiktoken
+from typing import List, Dict
+
+def count_message_tokens(messages : List[Dict[str, str]], model : str = "gpt-3.5-turbo-0301") -> int:
+ """
+ Returns the number of tokens used by a list of messages.
+
+ Args:
+ messages (list): A list of messages, each of which is a dictionary containing the role and content of the message.
+ model (str): The name of the model to use for tokenization. Defaults to "gpt-3.5-turbo-0301".
+
+ Returns:
+ int: The number of tokens used by the list of messages.
+ """
+ try:
+ encoding = tiktoken.encoding_for_model(model)
+ except KeyError:
+ print("Warning: model not found. Using cl100k_base encoding.")
+ encoding = tiktoken.get_encoding("cl100k_base")
+ if model == "gpt-3.5-turbo":
+ # !Node: gpt-3.5-turbo may change over time. Returning num tokens assuming gpt-3.5-turbo-0301.")
+ return count_message_tokens(messages, model="gpt-3.5-turbo-0301")
+ elif model == "gpt-4":
+ # !Note: gpt-4 may change over time. Returning num tokens assuming gpt-4-0314.")
+ return count_message_tokens(messages, model="gpt-4-0314")
+ elif model == "gpt-3.5-turbo-0301":
+ tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
+ tokens_per_name = -1 # if there's a name, the role is omitted
+ elif model == "gpt-4-0314":
+ tokens_per_message = 3
+ tokens_per_name = 1
+ else:
+ raise NotImplementedError(f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""")
+ num_tokens = 0
+ for message in messages:
+ num_tokens += tokens_per_message
+ for key, value in message.items():
+ num_tokens += len(encoding.encode(value))
+ if key == "name":
+ num_tokens += tokens_per_name
+ num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
+ return num_tokens
+
+def count_string_tokens(string: str, model_name: str) -> int:
+ """
+ Returns the number of tokens in a text string.
+
+ Args:
+ string (str): The text string.
+ model_name (str): The name of the encoding to use. (e.g., "gpt-3.5-turbo")
+
+ Returns:
+ int: The number of tokens in the text string.
+ """
+ encoding = tiktoken.encoding_for_model(model_name)
+ num_tokens = len(encoding.encode(string))
+ return num_tokens
From 41daf07219b04f472fc730f9ec83630f3831ac90 Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Mon, 3 Apr 2023 11:28:06 +0100
Subject: [PATCH 24/57] Vastly improves context window management. Now uses
tokens and biggest context possible.
---
scripts/chat.py | 56 ++++++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 51 insertions(+), 5 deletions(-)
diff --git a/scripts/chat.py b/scripts/chat.py
index 36568f58..86a70b09 100644
--- a/scripts/chat.py
+++ b/scripts/chat.py
@@ -2,10 +2,13 @@ import time
import openai
from dotenv import load_dotenv
from config import Config
+import token_counter
+
cfg = Config()
from llm_utils import create_chat_completion
+
def create_chat_message(role, content):
"""
Create a chat message with the given role and content.
@@ -20,13 +23,15 @@ def create_chat_message(role, content):
return {"role": role, "content": content}
+
+# TODO: Change debug from hardcode to argument
def chat_with_ai(
prompt,
user_input,
full_message_history,
permanent_memory,
token_limit,
- debug=True):
+ debug=False):
while True:
try:
"""
@@ -42,16 +47,55 @@ def chat_with_ai(
Returns:
str: The AI's response.
"""
+ model = cfg.fast_llm_model # TODO: Change model from hardcode to argument
+ # Reserve 1000 tokens for the response
+ if debug:
+ print(f"Token limit: {token_limit}")
+ send_token_limit = token_limit - 1000
+
current_context = [
create_chat_message(
"system", prompt), create_chat_message(
- "system", f"Permanent memory: {permanent_memory}")]
- current_context.extend(
- full_message_history[-(token_limit - len(prompt) - len(permanent_memory) - 10):])
+ "system", f"Permanent memory: {permanent_memory}")]
+
+ # Add messages from the full message history until we reach the token limit
+ next_message_to_add_index = len(full_message_history) - 1
+ current_tokens_used = 0
+ insertion_index = len(current_context)
+
+ # Count the currently used tokens
+ current_tokens_used = token_counter.count_message_tokens(current_context, model)
+ current_tokens_used += token_counter.count_message_tokens([create_chat_message("user", user_input)], model) # Account for user input (appended later)
+
+ while next_message_to_add_index >= 0:
+ # print (f"CURRENT TOKENS USED: {current_tokens_used}")
+ message_to_add = full_message_history[next_message_to_add_index]
+
+ tokens_to_add = token_counter.count_message_tokens([message_to_add], model)
+ if current_tokens_used + tokens_to_add > send_token_limit:
+ break
+
+ # Add the most recent message to the start of the current context, after the two system prompts.
+ current_context.insert(insertion_index, full_message_history[next_message_to_add_index])
+
+ # Count the currently used tokens
+ current_tokens_used += tokens_to_add
+
+ # Move to the next most recent message in the full message history
+ next_message_to_add_index -= 1
+
+ # Append user input, the length of this is accounted for above
current_context.extend([create_chat_message("user", user_input)])
+ # Calculate remaining tokens
+ tokens_remaining = token_limit - current_tokens_used
+ # assert tokens_remaining >= 0, "Tokens remaining is negative. This should never happen, please submit a bug report at https://www.github.com/Torantulino/Auto-GPT"
+
# Debug print the current context
if debug:
+ print(f"Token limit: {token_limit}")
+ print(f"Send Token Count: {current_tokens_used}")
+ print(f"Tokens remaining for response: {tokens_remaining}")
print("------------ CONTEXT SENT TO AI ---------------")
for message in current_context:
# Skip printing the prompt
@@ -59,12 +103,14 @@ def chat_with_ai(
continue
print(
f"{message['role'].capitalize()}: {message['content']}")
+ print()
print("----------- END OF CONTEXT ----------------")
# TODO: use a model defined elsewhere, so that model can contain temperature and other settings we care about
assistant_reply = create_chat_completion(
- model=cfg.smart_llm_model,
+ model=model,
messages=current_context,
+ max_tokens=tokens_remaining,
)
# Update full message history
From 099a5e1090bb93d2aa33e44718e5faa83d4ecce9 Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Mon, 3 Apr 2023 11:30:06 +0100
Subject: [PATCH 25/57] Handles incorrect AI formatting in a more forgiving
way.
---
scripts/commands.py | 12 +++++++++++-
scripts/main.py | 29 ++++++++++++++++-------------
2 files changed, 27 insertions(+), 14 deletions(-)
diff --git a/scripts/commands.py b/scripts/commands.py
index ff65f1a1..f8290471 100644
--- a/scripts/commands.py
+++ b/scripts/commands.py
@@ -15,9 +15,19 @@ cfg = Config()
def get_command(response):
try:
response_json = fix_and_parse_json(response)
+
+ if "command" not in response_json:
+ return "Error:" , "Missing 'command' object in JSON"
+
command = response_json["command"]
+
+ if "name" not in command:
+ return "Error:", "Missing 'name' field in 'command' object"
+
command_name = command["name"]
- arguments = command["args"]
+
+ # Use an empty dictionary if 'args' field is not present in 'command' object
+ arguments = command.get("args", {})
if not arguments:
arguments = {}
diff --git a/scripts/main.py b/scripts/main.py
index ef4ba587..2f01ccd2 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -54,19 +54,22 @@ def print_assistant_thoughts(assistant_reply):
# Parse and print Assistant response
assistant_reply_json = fix_and_parse_json(assistant_reply)
- assistant_thoughts = assistant_reply_json.get("thoughts")
- if assistant_thoughts:
- assistant_thoughts_text = assistant_thoughts.get("text")
- assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
- assistant_thoughts_plan = assistant_thoughts.get("plan")
- assistant_thoughts_criticism = assistant_thoughts.get("criticism")
- assistant_thoughts_speak = assistant_thoughts.get("speak")
- else:
- assistant_thoughts_text = None
- assistant_thoughts_reasoning = None
- assistant_thoughts_plan = None
- assistant_thoughts_criticism = None
- assistant_thoughts_speak = None
+ try:
+ assistant_thoughts = assistant_reply_json.get("thoughts")
+ if assistant_thoughts:
+ assistant_thoughts_text = assistant_thoughts.get("text")
+ assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
+ assistant_thoughts_plan = assistant_thoughts.get("plan")
+ assistant_thoughts_criticism = assistant_thoughts.get("criticism")
+ assistant_thoughts_speak = assistant_thoughts.get("speak")
+ else:
+ assistant_thoughts_text = None
+ assistant_thoughts_reasoning = None
+ assistant_thoughts_plan = None
+ assistant_thoughts_criticism = None
+ assistant_thoughts_speak = None
+ except Exception as e:
+ assistant_thoughts_text = "The AI's response was unreadable."
print_to_console(
f"{ai_name.upper()} THOUGHTS:",
From 59d52e9bc70f359a7d29da7c0459e4436f162427 Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Mon, 3 Apr 2023 11:30:39 +0100
Subject: [PATCH 26/57] Sets actual token limits.
---
scripts/config.py | 16 +++++++---------
1 file changed, 7 insertions(+), 9 deletions(-)
diff --git a/scripts/config.py b/scripts/config.py
index fb127648..44d99bff 100644
--- a/scripts/config.py
+++ b/scripts/config.py
@@ -30,13 +30,8 @@ class Config(metaclass=Singleton):
# TODO - make these models be self-contained, using langchain, so we can configure them once and call it good
self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
-
- # TODO: Make this not so hard-coded
- # This is the token limit that the main prompt needs to know. GPT-4 has a much bigger limit than GPT-3
- if (self.smart_llm_model.startswith("gpt-3")):
- self.thinking_token_limit = 4000
- else:
- self.thinking_token_limit = 6000
+ self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
+ self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
self.openai_api_key = os.getenv("OPENAI_API_KEY")
self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
@@ -57,8 +52,11 @@ class Config(metaclass=Singleton):
def set_smart_llm_model(self, value: str):
self.smart_llm_model = value
- def set_thinking_token_limit(self, value: int):
- self.thinking_token_limit = value
+ def set_fast_token_limit(self, value: int):
+ self.fast_token_limit = value
+
+ def set_smart_token_limit(self, value: int):
+ self.smart_token_limit = value
def set_openai_api_key(self, value: str):
self.apiopenai_api_key_key = value
From b1cc474b9210c1b730f3fe70a3d27d2f34f399a7 Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Mon, 3 Apr 2023 11:32:40 +0100
Subject: [PATCH 27/57] removes unused commands.
---
scripts/commands.py | 29 +----------------------------
1 file changed, 1 insertion(+), 28 deletions(-)
diff --git a/scripts/commands.py b/scripts/commands.py
index f8290471..65b273f5 100644
--- a/scripts/commands.py
+++ b/scripts/commands.py
@@ -44,8 +44,6 @@ def execute_command(command_name, arguments):
try:
if command_name == "google":
return google_search(arguments["input"])
- elif command_name == "check_notifications":
- return check_notifications(arguments["website"])
elif command_name == "memory_add":
return commit_memory(arguments["string"])
elif command_name == "memory_del":
@@ -63,12 +61,6 @@ def execute_command(command_name, arguments):
return list_agents()
elif command_name == "delete_agent":
return delete_agent(arguments["key"])
- elif command_name == "navigate_website":
- return navigate_website(arguments["action"], arguments["username"])
- elif command_name == "register_account":
- return register_account(
- arguments["username"],
- arguments["website"])
elif command_name == "get_text_summary":
return get_text_summary(arguments["url"])
elif command_name == "get_hyperlinks":
@@ -216,23 +208,4 @@ def delete_agent(key):
result = agents.delete_agent(key)
if not result:
return f"Agent {key} does not exist."
- return f"Agent {key} deleted."
-
-
-def navigate_website(action, username):
- _text = "Navigating website with action " + action + " and username " + username
- print(_text)
- return "Command not implemented yet."
-
-
-def register_account(username, website):
- _text = "Registering account with username " + \
- username + " and website " + website
- print(_text)
- return "Command not implemented yet."
-
-
-def check_notifications(website):
- _text = "Checking notifications from " + website
- print(_text)
- return "Command not implemented yet."
+ return f"Agent {key} deleted."
\ No newline at end of file
From 6a0a1355f00182cfd61643363df881e488b718ad Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Mon, 3 Apr 2023 11:33:01 +0100
Subject: [PATCH 28/57] Fixes google search command.
---
scripts/commands.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/scripts/commands.py b/scripts/commands.py
index 65b273f5..90ccf14c 100644
--- a/scripts/commands.py
+++ b/scripts/commands.py
@@ -102,7 +102,7 @@ def get_datetime():
def google_search(query, num_results=8):
search_results = []
- for j in browse.search(query, num_results=num_results):
+ for j in search(query, num_results=num_results):
search_results.append(j)
return json.dumps(search_results, ensure_ascii=False, indent=4)
From ba6df3674b368e19b4513b62a8b9a5b8299bdd9e Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Mon, 3 Apr 2023 11:33:18 +0100
Subject: [PATCH 29/57] Fixes overwrite memory command.
---
scripts/commands.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/scripts/commands.py b/scripts/commands.py
index 90ccf14c..e2f4257f 100644
--- a/scripts/commands.py
+++ b/scripts/commands.py
@@ -150,7 +150,7 @@ def delete_memory(key):
def overwrite_memory(key, string):
- if key >= 0 and key < len(mem.permanent_memory):
+ if int(key) >= 0 and key < len(mem.permanent_memory):
_text = "Overwriting memory with key " + \
str(key) + " and string " + string
mem.permanent_memory[key] = string
From 2bdd6b7b83084b0f070edf66111baa7901f940ea Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Mon, 3 Apr 2023 11:33:35 +0100
Subject: [PATCH 30/57] Adds missing google search import.
---
scripts/commands.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/scripts/commands.py b/scripts/commands.py
index e2f4257f..2e332711 100644
--- a/scripts/commands.py
+++ b/scripts/commands.py
@@ -9,6 +9,7 @@ import ai_functions as ai
from file_operations import read_file, write_to_file, append_to_file, delete_file
from execute_code import execute_python_file
from json_parser import fix_and_parse_json
+from googlesearch import search
cfg = Config()
From c9be6edf9e8d5d5552b6a61ed599c4d6c2db1522 Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Mon, 3 Apr 2023 11:34:17 +0100
Subject: [PATCH 31/57] =?UTF-8?q?Improves=20parsing=20of=20AI=20JSON=20out?=
=?UTF-8?q?put.=20This=20makes=20gpt3.5=20turbo=20fully=20possible!=20?=
=?UTF-8?q?=F0=9F=9A=80?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
scripts/json_parser.py | 41 +++++++++++++++++++++++++++++++++--------
1 file changed, 33 insertions(+), 8 deletions(-)
diff --git a/scripts/json_parser.py b/scripts/json_parser.py
index dbc511e1..8154b584 100644
--- a/scripts/json_parser.py
+++ b/scripts/json_parser.py
@@ -4,6 +4,25 @@ from config import Config
cfg = Config()
def fix_and_parse_json(json_str: str, try_to_fix_with_gpt: bool = True):
+ json_schema = """
+ {
+ "command": {
+ "name": "command name",
+ "args":{
+ "arg name": "value"
+ }
+ },
+ "thoughts":
+ {
+ "text": "thought",
+ "reasoning": "reasoning",
+ "plan": "- short bulleted\n- list that conveys\n- long-term plan",
+ "criticism": "constructive self-criticism",
+ "speak": "thoughts summary to say to user"
+ }
+ }
+ """
+
try:
return dirtyjson.loads(json_str)
except Exception as e:
@@ -18,17 +37,23 @@ def fix_and_parse_json(json_str: str, try_to_fix_with_gpt: bool = True):
return dirtyjson.loads(json_str)
except Exception as e:
if try_to_fix_with_gpt:
+ print(f"Warning: Failed to parse AI output, attempting to fix.\n If you see this warning frequently, it's likely that your prompt is confusing the AI. Try changing it up slightly.")
# Now try to fix this up using the ai_functions
- return fix_json(json_str, None, True)
+ ai_fixed_json = fix_json(json_str, json_schema, False)
+ if ai_fixed_json != "failed":
+ return dirtyjson.loads(ai_fixed_json)
+ else:
+ print(f"Failed to fix ai output, telling the AI.") # This allows the AI to react to the error message, which usually results in it correcting its ways.
+ return json_str
else:
raise e
# TODO: Make debug a global config var
-def fix_json(json_str: str, schema:str = None, debug=False) -> str:
+def fix_json(json_str: str, schema: str, debug=False) -> str:
# Try to fix the JSON using gpt:
function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
args = [json_str, schema]
- description_string = """Fixes the provided JSON string to make it parseable. If the schema is provided, the JSON will be made to look like the schema, otherwise it will be made to look like a valid JSON object."""
+ description_string = """Fixes the provided JSON string to make it parseable and fully complient with the provided schema.\n If an object or field specifed in the schema isn't contained within the correct JSON, it is ommited.\n This function is brilliant at guessing when the format is incorrect."""
# If it doesn't already start with a "`", add one:
if not json_str.startswith("`"):
@@ -39,14 +64,14 @@ def fix_json(json_str: str, schema:str = None, debug=False) -> str:
if debug:
print("------------ JSON FIX ATTEMPT ---------------")
print(f"Original JSON: {json_str}")
+ print("-----------")
print(f"Fixed JSON: {result_string}")
print("----------- END OF FIX ATTEMPT ----------------")
try:
return dirtyjson.loads(result_string)
except:
# Get the call stack:
- import traceback
- call_stack = traceback.format_exc()
- # TODO: Handle this sort of thing better
- print(f"Failed to fix JSON: '{json_str}' "+call_stack)
- return {}
+ # import traceback
+ # call_stack = traceback.format_exc()
+ # print(f"Failed to fix JSON: '{json_str}' "+call_stack)
+ return "failed"
From f426b5103894cfec32fca9238e8a569919d50da1 Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Mon, 3 Apr 2023 11:34:39 +0100
Subject: [PATCH 32/57] Changes python version to 3.10 in execute_code.
---
scripts/execute_code.py | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/scripts/execute_code.py b/scripts/execute_code.py
index cfd818d4..614ef6fc 100644
--- a/scripts/execute_code.py
+++ b/scripts/execute_code.py
@@ -5,6 +5,8 @@ import os
def execute_python_file(file):
workspace_folder = "auto_gpt_workspace"
+ print (f"Executing file '{file}' in workspace '{workspace_folder}'")
+
if not file.endswith(".py"):
return "Error: Invalid file type. Only .py files are allowed."
@@ -20,7 +22,7 @@ def execute_python_file(file):
# You can find available Python images on Docker Hub:
# https://hub.docker.com/_/python
container = client.containers.run(
- 'python:3.8',
+ 'python:3.10',
f'python {file}',
volumes={
os.path.abspath(workspace_folder): {
@@ -36,6 +38,9 @@ def execute_python_file(file):
logs = container.logs().decode('utf-8')
container.remove()
+ # print(f"Execution complete. Output: {output}")
+ # print(f"Logs: {logs}")
+
return logs
except Exception as e:
From e50e826857c20e3f71c7ce2193073b0192f8b928 Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Mon, 3 Apr 2023 11:35:08 +0100
Subject: [PATCH 33/57] Tweaks welcome back prompt.
---
scripts/main.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/scripts/main.py b/scripts/main.py
index 2f01ccd2..caf65ecf 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -171,15 +171,15 @@ def construct_prompt():
config = AIConfig.load()
if config.ai_name:
print_to_console(
- f"Welcome back, {config.ai_name}!",
+ f"Welcome back! ",
Fore.GREEN,
- "Let's continue our journey.",
+ f"Would you like me to return to being {config.ai_name}?",
speak_text=True)
should_continue = input(f"""Continue with the last settings?
Name: {config.ai_name}
Role: {config.ai_role}
-Goals: {config.ai_goals}
-Continue (Y/n): """)
+Goals: {config.ai_goals}
+Continue (y/n): """)
if should_continue.lower() == "n":
config = AIConfig()
From 109b9288a7ba1818fcb4861d88d3115b543924a2 Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Mon, 3 Apr 2023 11:37:18 +0100
Subject: [PATCH 34/57] Removes logging and uses new config token limit.
---
scripts/main.py | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/scripts/main.py b/scripts/main.py
index caf65ecf..48093cf4 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -272,10 +272,9 @@ cfg = Config()
parse_arguments()
ai_name = ""
prompt = construct_prompt()
-print(prompt)
+# print(prompt)
# Initialize variables
full_message_history = []
-token_limit = cfg.thinking_token_limit # The maximum number of tokens allowed in the API call
result = None
# Make a constant:
user_input = "Determine which next command to use, and respond using the format specified above:"
@@ -289,9 +288,9 @@ while True:
user_input,
full_message_history,
mem.permanent_memory,
- token_limit)
+ cfg.fast_token_limit) # TODO: This hardcodes the model to use GPT3.5. Make this an argument
- print("assistant reply: "+assistant_reply)
+ # print("assistant reply: "+assistant_reply)
# Print Assistant thoughts
print_assistant_thoughts(assistant_reply)
From 3093ec9369606d2803a8aa8f7e8c2d5e4309e560 Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Mon, 3 Apr 2023 11:37:43 +0100
Subject: [PATCH 35/57] Tweaks prompt for better results on both models!
---
scripts/data/prompt.txt | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/scripts/data/prompt.txt b/scripts/data/prompt.txt
index 60e02ca3..d17fa27a 100644
--- a/scripts/data/prompt.txt
+++ b/scripts/data/prompt.txt
@@ -1,6 +1,6 @@
CONSTRAINTS:
-1. 4000-word count limit for memory
+1. ~4000 word limit for memory. Your memory is short, so immidiately save important information to long term memory and code to files.
2. No user assistance
COMMANDS:
@@ -18,9 +18,9 @@ COMMANDS:
11. Read file: "read_file", args: "file": ""
12. Append to file: "append_to_file", args: "file": "", "text": ""
13. Delete file: "delete_file", args: "file": ""
-14. Evaluate Code: "evaluate_code", args: "code": ""
-15. Get Improved Code: "improve_code", args: "suggestions": "", "code": ""
-16. Write Tests: "write_tests", args: "code": "", "focus": ""
+14. Evaluate Code: "evaluate_code", args: "code": ""
+15. Get Improved Code: "improve_code", args: "suggestions": "", "code": ""
+16. Write Tests: "write_tests", args: "code": "", "focus": ""
17. Execute Python File: "execute_python_file", args: "file": ""
18. Task Complete (Shutdown): "task_complete", args: "reason": ""
@@ -53,7 +53,7 @@ RESPONSE FORMAT:
"text": "thought",
"reasoning": "reasoning",
"plan": "- short bulleted\n- list that conveys\n- long-term plan",
- "criticism": "constructive self-criticism"
+ "criticism": "constructive self-criticism",
"speak": "thoughts summary to say to user"
}
}
From 439a7ffe7de60e72696d3063bb57622bbfc8b89d Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Mon, 3 Apr 2023 13:06:22 +0100
Subject: [PATCH 36/57] Adds discord to readme
---
README.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/README.md b/README.md
index dd91e08d..f576fd5a 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,7 @@
# Auto-GPT: An Autonomous GPT-4 Experiment


+[](https://discord.gg/PQ7VX6TY4t)
Auto-GPT is an experimental open-source application showcasing the capabilities of the GPT-4 language model. This program, driven by GPT-4, autonomously develops and manages businesses to increase net worth. As one of the first examples of GPT-4 running fully autonomously, Auto-GPT pushes the boundaries of what is possible with AI.
From f72afc7558b537ed0bf97da16acdba1f5ac6ddde Mon Sep 17 00:00:00 2001
From: Veylkh <41055376+Veylkh@users.noreply.github.com>
Date: Mon, 3 Apr 2023 14:28:22 +0200
Subject: [PATCH 37/57] dirtyjson -> json
Fixes the missing UserDict (caused by migration from python 3.9 to ^3.10)
---
requirements.txt | 1 -
scripts/json_parser.py | 13 ++++++-------
2 files changed, 6 insertions(+), 8 deletions(-)
diff --git a/requirements.txt b/requirements.txt
index e731354b..22682e87 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,5 @@
beautifulsoup4
colorama==0.4.6
-dirtyjson==1.0.
openai==0.27.2
playsound==1.3.0
python-dotenv==1.0.0
diff --git a/scripts/json_parser.py b/scripts/json_parser.py
index 8154b584..11ff9ed2 100644
--- a/scripts/json_parser.py
+++ b/scripts/json_parser.py
@@ -1,4 +1,4 @@
-import dirtyjson
+import json
from call_ai_function import call_ai_function
from config import Config
cfg = Config()
@@ -24,7 +24,7 @@ def fix_and_parse_json(json_str: str, try_to_fix_with_gpt: bool = True):
"""
try:
- return dirtyjson.loads(json_str)
+ return json.loads(json_str)
except Exception as e:
# Let's do something manually - sometimes GPT responds with something BEFORE the braces:
# "I'm sorry, I don't understand. Please try again."{"text": "I'm sorry, I don't understand. Please try again.", "confidence": 0.0}
@@ -34,21 +34,20 @@ def fix_and_parse_json(json_str: str, try_to_fix_with_gpt: bool = True):
json_str = json_str[brace_index:]
last_brace_index = json_str.rindex("}")
json_str = json_str[:last_brace_index+1]
- return dirtyjson.loads(json_str)
+ return json.loads(json_str)
except Exception as e:
if try_to_fix_with_gpt:
print(f"Warning: Failed to parse AI output, attempting to fix.\n If you see this warning frequently, it's likely that your prompt is confusing the AI. Try changing it up slightly.")
# Now try to fix this up using the ai_functions
ai_fixed_json = fix_json(json_str, json_schema, False)
if ai_fixed_json != "failed":
- return dirtyjson.loads(ai_fixed_json)
+ return json.loads(ai_fixed_json)
else:
print(f"Failed to fix ai output, telling the AI.") # This allows the AI to react to the error message, which usually results in it correcting its ways.
return json_str
else:
raise e
-# TODO: Make debug a global config var
def fix_json(json_str: str, schema: str, debug=False) -> str:
# Try to fix the JSON using gpt:
function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
@@ -68,10 +67,10 @@ def fix_json(json_str: str, schema: str, debug=False) -> str:
print(f"Fixed JSON: {result_string}")
print("----------- END OF FIX ATTEMPT ----------------")
try:
- return dirtyjson.loads(result_string)
+ return json.loads(result_string)
except:
# Get the call stack:
# import traceback
# call_stack = traceback.format_exc()
# print(f"Failed to fix JSON: '{json_str}' "+call_stack)
- return "failed"
+ return "failed"
\ No newline at end of file
From 5c97a71c744957c3b9116b50df3d00e4c8e17715 Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Mon, 3 Apr 2023 13:39:05 +0100
Subject: [PATCH 38/57] ignores outputs.
---
.gitignore | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/.gitignore b/.gitignore
index a4e3cc2d..6b8f00b5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,4 +7,5 @@ package-lock.json
scripts/auto_gpt_workspace/*
*.mpeg
.env
-last_run_ai_settings.yaml
\ No newline at end of file
+last_run_ai_settings.yaml
+outputs/*
\ No newline at end of file
From 7649ca2ca2c5a50862e040ad5be5f12c82588132 Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Mon, 3 Apr 2023 15:13:34 +0100
Subject: [PATCH 39/57] Adds missing googlesearch-python requirement.
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 22682e87..5f51bdf5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,5 +8,5 @@ readability-lxml==0.8.1
requests
tiktoken==0.3.3
docker
-# googlesearch-python
+googlesearch-python
# Googlesearch python seems to be a bit cursed, anyone good at fixing thigns like this?
\ No newline at end of file
From c8149b0415bc4551117dd1df51a479c6c54a7852 Mon Sep 17 00:00:00 2001
From: 0xcha05 <103983696+0xcha05@users.noreply.github.com>
Date: Mon, 3 Apr 2023 19:56:01 +0530
Subject: [PATCH 40/57] Better argument parsing
---
README.md | 6 ++++--
scripts/main.py | 34 +++++++++++++++++++---------------
2 files changed, 23 insertions(+), 17 deletions(-)
diff --git a/README.md b/README.md
index f576fd5a..769ea766 100644
--- a/README.md
+++ b/README.md
@@ -96,7 +96,8 @@ python scripts/main.py
## 🗣️ Speech Mode
Use this to use TTS for Auto-GPT
```
-python scripts/main.py speak-mode
+python scripts/main.py --speak
+
```
## 💀 Continuous Mode ⚠️
@@ -107,7 +108,8 @@ Use at your own risk.
1. Run the `main.py` Python script in your terminal:
```
-python scripts/main.py continuous-mode
+python scripts/main.py --continuous
+
```
2. To exit the program, press Ctrl + C
diff --git a/scripts/main.py b/scripts/main.py
index 48093cf4..1ccd0364 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -15,10 +15,8 @@ from json_parser import fix_and_parse_json
from ai_config import AIConfig
import traceback
import yaml
+import argparse
-class Argument(Enum):
- CONTINUOUS_MODE = "continuous-mode"
- SPEAK_MODE = "speak-mode"
def print_to_console(
title,
@@ -251,21 +249,27 @@ def parse_arguments():
global cfg
cfg.set_continuous_mode(False)
cfg.set_speak_mode(False)
- for arg in sys.argv[1:]:
- if arg == Argument.CONTINUOUS_MODE.value:
- print_to_console("Continuous Mode: ", Fore.RED, "ENABLED")
- print_to_console(
- "WARNING: ",
- Fore.RED,
- "Continuous mode is not recommended. It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorise. Use at your own risk.")
- cfg.set_continuous_mode(True)
- elif arg == Argument.SPEAK_MODE.value:
- print_to_console("Speak Mode: ", Fore.GREEN, "ENABLED")
- cfg.set_speak_mode(True)
+
+ parser = argparse.ArgumentParser(description='Process arguments.')
+ parser.add_argument('--continuous', action='store_true', help='Enable Continuous Mode')
+ parser.add_argument('--speak', action='store_true', help='Enable Speak Mode')
+ args = parser.parse_args()
+
+ if args.continuous:
+ print_to_console("Continuous Mode: ", Fore.RED, "ENABLED")
+ print_to_console(
+ "WARNING: ",
+ Fore.RED,
+ "Continuous mode is not recommended. It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorise. Use at your own risk.")
+ cfg.set_continuous_mode(True)
+
+ if args.speak:
+ print_to_console("Speak Mode: ", Fore.GREEN, "ENABLED")
+ cfg.set_speak_mode(True)
+
-# TODO: Better argument parsing:
# TODO: fill in llm values here
cfg = Config()
From 51e293f64cf17efbbba18965037a8004387945a6 Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Mon, 3 Apr 2023 15:46:46 +0100
Subject: [PATCH 41/57] Adds --gpt3only mode!
---
README.md | 7 ++++++-
scripts/main.py | 6 +++++-
2 files changed, 11 insertions(+), 2 deletions(-)
diff --git a/README.md b/README.md
index 769ea766..913731dd 100644
--- a/README.md
+++ b/README.md
@@ -105,7 +105,6 @@ Run the AI **without** user authorisation, 100% automated.
Continuous mode is not recommended.
It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorise.
Use at your own risk.
-
1. Run the `main.py` Python script in your terminal:
```
python scripts/main.py --continuous
@@ -113,6 +112,12 @@ python scripts/main.py --continuous
```
2. To exit the program, press Ctrl + C
+## GPT3.5 ONLY Mode
+If you don't have access to the GPT4 api, this mode will allow you to use Auto-GPT!
+```
+python scripts/main.py --gpt3only
+```
+
## ⚠️ Limitations
This experiment aims to showcase the potential of GPT-4 but comes with some limitations:
diff --git a/scripts/main.py b/scripts/main.py
index 1ccd0364..4e799743 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -253,6 +253,8 @@ def parse_arguments():
parser = argparse.ArgumentParser(description='Process arguments.')
parser.add_argument('--continuous', action='store_true', help='Enable Continuous Mode')
parser.add_argument('--speak', action='store_true', help='Enable Speak Mode')
+ parser.add_argument('--debug', action='store_true', help='Enable Debug Mode')
+ parser.add_argument('--gpt3only', action='store_true', help='Enable GPT3.5 Only Mode')
args = parser.parse_args()
if args.continuous:
@@ -267,7 +269,9 @@ def parse_arguments():
print_to_console("Speak Mode: ", Fore.GREEN, "ENABLED")
cfg.set_speak_mode(True)
-
+ if args.gpt3only:
+ print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
+ cfg.set_smart_llm_model(cfg.fast_llm_model)
# TODO: fill in llm values here
From 4416aa1aa1c0dba9e37bf6a2694e28aa6bbedf88 Mon Sep 17 00:00:00 2001
From: yousefissa
Date: Mon, 3 Apr 2023 08:48:43 -0700
Subject: [PATCH 42/57] create file dir if it doesn't exist during write_to_file
---
scripts/file_operations.py | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/scripts/file_operations.py b/scripts/file_operations.py
index 62b3dc4b..d7c7a1b0 100644
--- a/scripts/file_operations.py
+++ b/scripts/file_operations.py
@@ -29,13 +29,14 @@ def read_file(filename):
def write_to_file(filename, text):
- try:
- filepath = safe_join(working_directory, filename)
- with open(filepath, "w") as f:
- f.write(text)
- return "File written to successfully."
- except Exception as e:
- return "Error: " + str(e)
+ filepath = safe_join(working_directory, filename)
+ directory = os.path.dirname(filepath)
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+ with open(filepath, "w") as f:
+ f.write(text)
+ return "File written to successfully."
+
def append_to_file(filename, text):
From 9ef4fab084633e4289226a6dd059a598085ec876 Mon Sep 17 00:00:00 2001
From: yousefissa
Date: Mon, 3 Apr 2023 08:50:07 -0700
Subject: [PATCH 43/57] error handling back
---
scripts/file_operations.py | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/scripts/file_operations.py b/scripts/file_operations.py
index d7c7a1b0..81ad4715 100644
--- a/scripts/file_operations.py
+++ b/scripts/file_operations.py
@@ -29,14 +29,16 @@ def read_file(filename):
def write_to_file(filename, text):
- filepath = safe_join(working_directory, filename)
- directory = os.path.dirname(filepath)
- if not os.path.exists(directory):
- os.makedirs(directory)
- with open(filepath, "w") as f:
- f.write(text)
- return "File written to successfully."
-
+ try:
+ filepath = safe_join(working_directory, filename)
+ directory = os.path.dirname(filepath)
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+ with open(filepath, "w") as f:
+ f.write(text)
+ return "File written to successfully."
+ except Exception as e:
+ return "Error: " + str(e)
def append_to_file(filename, text):
From 8a5c9800e7a2e782bf2b472741a3b272bf42b7d3 Mon Sep 17 00:00:00 2001
From: yousefissa
Date: Mon, 3 Apr 2023 13:00:50 -0700
Subject: [PATCH 44/57] fix assistant plan variables being referenced before
assignment
---
scripts/main.py | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/scripts/main.py b/scripts/main.py
index 4e799743..2304baf4 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -52,6 +52,10 @@ def print_assistant_thoughts(assistant_reply):
# Parse and print Assistant response
assistant_reply_json = fix_and_parse_json(assistant_reply)
+ assistant_thoughts_reasoning = None
+ assistant_thoughts_plan = None
+ assistant_thoughts_speak = None
+ assistant_thoughts_criticism = None
try:
assistant_thoughts = assistant_reply_json.get("thoughts")
if assistant_thoughts:
@@ -66,7 +70,7 @@ def print_assistant_thoughts(assistant_reply):
assistant_thoughts_plan = None
assistant_thoughts_criticism = None
assistant_thoughts_speak = None
- except Exception as e:
+ except Exception:
assistant_thoughts_text = "The AI's response was unreadable."
print_to_console(
@@ -86,7 +90,7 @@ def print_assistant_thoughts(assistant_reply):
elif isinstance(assistant_thoughts_plan, dict):
assistant_thoughts_plan = str(assistant_thoughts_plan)
# Split the input_string using the newline character and dash
-
+
lines = assistant_thoughts_plan.split('\n')
# Iterate through the lines and print each one with a bullet
@@ -111,6 +115,7 @@ def print_assistant_thoughts(assistant_reply):
call_stack = traceback.format_exc()
print_to_console("Error: \n", Fore.RED, call_stack)
+
def load_variables(config_file="config.yaml"):
# Load variables from yaml file if it exists
try:
From f2ba7f21c510fabe226fac661df0e9112674708a Mon Sep 17 00:00:00 2001
From: russellocean
Date: Mon, 3 Apr 2023 16:44:10 -0400
Subject: [PATCH 45/57] Added support for Google Custom Search API This pull
request adds functionality to the project to allow for a choice between the
original Google search method and the Google Custom Search API. The
google_search method uses the original method of scraping the HTML from the
search results page, using googlesearch-python, while the
google_official_search method uses the Google Custom Search API to retrieve
search results.
How to test:
To test the functionality, ensure that you have valid API keys and search engine IDs for both the Google search method and the Google Custom Search API. You can set these values in your environment variables as described in the README.md file.
Additional Notes:
This pull request adds functionality and makes improvements to existing code. No breaking changes have been introduced.
---
.DS_Store | Bin 0 -> 8196 bytes
.env.template | 4 ---
README.md | 52 ++++++++++++++++++++++++++++++-----
auto_gpt_workspace/.DS_Store | Bin 0 -> 6148 bytes
requirements.txt | 1 +
scripts/commands.py | 45 +++++++++++++++++++++++++++++-
scripts/config.py | 11 ++++++--
7 files changed, 98 insertions(+), 15 deletions(-)
create mode 100644 .DS_Store
delete mode 100644 .env.template
create mode 100644 auto_gpt_workspace/.DS_Store
diff --git a/.DS_Store b/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..2287363ff8124dfd2cf4b68bc7738e40b201829b
GIT binary patch
literal 8196
zcmeHM&2G~`5T0#QoU{T(6-WhvWQl7U(m$k%OPVH!O2DB(Z~zqSB(zo>JH$=`iX!C>
zyaKPlod@AvIKel&P82%{LLi~4-IaE~{h1kiznQf&4iSk`udze4MMP;-#_R@~1%=nS
z&XtvDW)mo&PbaQlw_V@HI6Eh;Co}_^0nLDBKr^5jxCjj3oy{e^G8Q8oE0m88G%^JMrZ6oF`tqkg(BuhVF~YGz%%BLR3RJ2hN-;!~IS8eW^I6cy@LGoTrmXMl6}6y>Q&9`)n-yZDH;FF$3GiY#|V
z4uuK19|<{QB1h-cBReilM>w=MW~{DHr5#xJ$fqtf6FOfLI-6v{atPTPbUNtOXdpFP
z6ku#oJ!wPS`x_hfAm~J2G``s#*9pQx;k#H_O|4xsjEu2iysn=6gKB7ny`W@uU-Iag
z@4NP-Z`seC=Af3ndF*?k<#^4mRB#$ih`f5?cnyD0@_Sw*kh)d1kjbPowd|eYaC>KW
z?=WW`6!u3sb9lI0$eH_l4@aX+`qupik4|eXx9$0#q<2U}k4Cz##?$sQdVj=NzuNU&
z-#ddm+ghhGIIU9)(b1*1@VuL3mAy&%8E@W(j0GRCN4oHl2ehDB!(0=yA^IMC7QpKt
zu+k=JD)f}9REbLyA|rNv19!}k(UENsm*yCVNoVL3c8{>nC%`=J
zrjgL#GcdcqyBwYL4%QvnYs-@pVn5s1jf*QYS04Jj(>_Ld^xiK#GqZNHOYJVym%PuO
zO#^pOhYd%bhQz)lOTUr6Fj0Q+k5QgrAra4O(mNcR34UZ;%<==L>!KOBS5T32oG!&r+1@W}twQ8$Y5HGRTgI6PZP^nE@G#Im`N$sH&@&NimK8UZQ
zGrL<+t2Ysq8JPVh^OI!14Z9ft5cNT)3Qz)o0xDt7#o-g7IO&w+tY-#M=zH`ah2$_&
z?I4=UX2)-2fY#1|1lrJt5Ze{
zbv1}PiLUicz;z35v$Q-KtyVWScgnI>sg28Ww6jqu%dO4r@z^aauB`7KHE*KZSl#P4
z2Z3LymU)X4ctGO=3wM4$j#PYwkxA7gn~@n{2AF}r&Vb$53dO%Z5q^JWfEoC9255hf
zsDz%y%%I*naL_FPVjkUE(59b8s*x5wi|~n
xW=UtQM7=~Mp}frCYY7_qD8^hmikDEepkI=K=vmARq6dY41T+miFatlzzzfnIS;hbW
literal 0
HcmV?d00001
diff --git a/requirements.txt b/requirements.txt
index 5f51bdf5..e1a98ee7 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -9,4 +9,5 @@ requests
tiktoken==0.3.3
docker
googlesearch-python
+google-api-python-client # If using Google's Custom Search JSON API (https://developers.google.com/custom-search/v1/overview) Won't result in a 403 error
# Googlesearch python seems to be a bit cursed, anyone good at fixing thigns like this?
\ No newline at end of file
diff --git a/scripts/commands.py b/scripts/commands.py
index 2e332711..f94fdc41 100644
--- a/scripts/commands.py
+++ b/scripts/commands.py
@@ -10,6 +10,9 @@ from file_operations import read_file, write_to_file, append_to_file, delete_fil
from execute_code import execute_python_file
from json_parser import fix_and_parse_json
from googlesearch import search
+from googleapiclient.discovery import build
+from googleapiclient.errors import HttpError
+
cfg = Config()
@@ -44,7 +47,13 @@ def get_command(response):
def execute_command(command_name, arguments):
try:
if command_name == "google":
- return google_search(arguments["input"])
+ print("Using Google search method")
+ # Check if the Google API key is set and use the official search method
+ # If the API key is not set or has only whitespaces, use the unofficial search method
+ if cfg.google_api_key and (cfg.google_api_key.strip() if cfg.google_api_key else None):
+ return google_official_search(arguments["input"])
+ else:
+ return google_search(arguments["input"])
elif command_name == "memory_add":
return commit_memory(arguments["string"])
elif command_name == "memory_del":
@@ -108,6 +117,40 @@ def google_search(query, num_results=8):
return json.dumps(search_results, ensure_ascii=False, indent=4)
+def google_official_search(query, num_results=8):
+ from googleapiclient.discovery import build
+ from googleapiclient.errors import HttpError
+ import json
+
+ try:
+ # Get the Google API key and Custom Search Engine ID from the config file
+ api_key = cfg.google_api_key
+ custom_search_engine_id = cfg.custom_search_engine_id
+
+ # Initialize the Custom Search API service
+ service = build("customsearch", "v1", developerKey=api_key)
+
+ # Send the search query and retrieve the results
+ result = service.cse().list(q=query, cx=custom_search_engine_id, num=num_results).execute()
+
+ # Extract the search result items from the response
+ search_results = result.get("items", [])
+
+ # Create a list of only the URLs from the search results
+ search_results_links = [item["link"] for item in search_results]
+
+ except HttpError as e:
+ # Handle errors in the API call
+ error_details = json.loads(e.content.decode())
+
+ # Check if the error is related to an invalid or missing API key
+ if error_details.get("error", {}).get("code") == 403 and "invalid API key" in error_details.get("error", {}).get("message", ""):
+ return "Error: The provided Google API key is invalid or missing."
+ else:
+ return f"Error: {e}"
+
+ # Return the list of search result URLs
+ return search_results_links
def browse_website(url):
summary = get_text_summary(url)
diff --git a/scripts/config.py b/scripts/config.py
index 44d99bff..51e11757 100644
--- a/scripts/config.py
+++ b/scripts/config.py
@@ -35,11 +35,13 @@ class Config(metaclass=Singleton):
self.openai_api_key = os.getenv("OPENAI_API_KEY")
self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
+
+ self.google_api_key = os.getenv("GOOGLE_API_KEY")
+ self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID")
# Initialize the OpenAI API client
openai.api_key = self.openai_api_key
-
def set_continuous_mode(self, value: bool):
self.continuous_mode = value
@@ -63,6 +65,9 @@ class Config(metaclass=Singleton):
def set_elevenlabs_api_key(self, value: str):
self.elevenlabs_api_key = value
-
-
+ def set_google_api_key(self, value: str):
+ self.google_api_key = value
+
+ def set_custom_search_engine_id(self, value: str):
+ self.custom_search_engine_id = value
\ No newline at end of file
From 30d07d9102b528ee7d0a324d860904f2211339b6 Mon Sep 17 00:00:00 2001
From: russellocean
Date: Mon, 3 Apr 2023 16:49:05 -0400
Subject: [PATCH 46/57] Added google-api-python-client to requirements.txt
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index e1a98ee7..063931a9 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -9,5 +9,5 @@ requests
tiktoken==0.3.3
docker
googlesearch-python
-google-api-python-client # If using Google's Custom Search JSON API (https://developers.google.com/custom-search/v1/overview) Won't result in a 403 error
+google-api-python-client #(https://developers.google.com/custom-search/v1/overview)
# Googlesearch python seems to be a bit cursed, anyone good at fixing thigns like this?
\ No newline at end of file
From cb6e8ee665deca39db20fde78a976693776d9aa3 Mon Sep 17 00:00:00 2001
From: russellocean
Date: Mon, 3 Apr 2023 16:58:55 -0400
Subject: [PATCH 47/57] Template update and Added .DS_Store to .gitignore
---
.DS_Store | Bin 8196 -> 0 bytes
1 file changed, 0 insertions(+), 0 deletions(-)
delete mode 100644 .DS_Store
diff --git a/.DS_Store b/.DS_Store
deleted file mode 100644
index 2287363ff8124dfd2cf4b68bc7738e40b201829b..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
literal 8196
zcmeHM&2G~`5T0#QoU{T(6-WhvWQl7U(m$k%OPVH!O2DB(Z~zqSB(zo>JH$=`iX!C>
zyaKPlod@AvIKel&P82%{LLi~4-IaE~{h1kiznQf&4iSk`udze4MMP;-#_R@~1%=nS
z&XtvDW)mo&PbaQlw_V@HI6Eh;Co}_^0nLDBKr^5jxCjj3oy{e^G8Q8oE0m88G%^JMrZ6oF`tqkg(BuhVF~YGz%%BLR3RJ2hN-;!~IS8eW^I6cy@LGoTrmXMl6}6y>Q&9`)n-yZDH;FF$3GiY#|V
z4uuK19|<{QB1h-cBReilM>w=MW~{DHr5#xJ$fqtf6FOfLI-6v{atPTPbUNtOXdpFP
z6ku#oJ!wPS`x_hfAm~J2G``s#*9pQx;k#H_O|4xsjEu2iysn=6gKB7ny`W@uU-Iag
z@4NP-Z`seC=Af3ndF*?k<#^4mRB#$ih`f5?cnyD0@_Sw*kh)d1kjbPowd|eYaC>KW
z?=WW`6!u3sb9lI0$eH_l4@aX+`qupik4|eXx9$0#q<2U}k4Cz##?$sQdVj=NzuNU&
z-#ddm+ghhGIIU9)(b1*1@VuL3mAy&%8E@W(j0GRCN4oHl2ehDB!(0=yA^IMC7QpKt
zu+k=JD)f}9REbLyA|rNv19!}k(UENsm*yCVNoVL3c8{>nC%`=J
zrjgL#GcdcqyBwYL4%QvnYs-@pVn5s1jf*QYS04Jj(>_Ld^xiK#GqZNHOYJVym%PuO
zO#^pOhYd%bhQz)lOTUr6Fj0Q+k5QgrAra4O(mNcR34UZ;%<==L>!KOB
Date: Mon, 3 Apr 2023 17:07:53 -0400
Subject: [PATCH 48/57] Delete .DS_Store
---
auto_gpt_workspace/.DS_Store | Bin 6148 -> 0 bytes
1 file changed, 0 insertions(+), 0 deletions(-)
delete mode 100644 auto_gpt_workspace/.DS_Store
diff --git a/auto_gpt_workspace/.DS_Store b/auto_gpt_workspace/.DS_Store
deleted file mode 100644
index aa6e57de2aafac3a20e6c9f853a58e8eaf02c8e3..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
literal 6148
zcmeHK%}T>S5T32oG!&r+1@W}twQ8$Y5HGRTgI6PZP^nE@G#Im`N$sH&@&NimK8UZQ
zGrL<+t2Ysq8JPVh^OI!14Z9ft5cNT)3Qz)o0xDt7#o-g7IO&w+tY-#M=zH`ah2$_&
z?I4=UX2)-2fY#1|1lrJt5Ze{
zbv1}PiLUicz;z35v$Q-KtyVWScgnI>sg28Ww6jqu%dO4r@z^aauB`7KHE*KZSl#P4
z2Z3LymU)X4ctGO=3wM4$j#PYwkxA7gn~@n{2AF}r&Vb$53dO%Z5q^JWfEoC9255hf
zsDz%y%%I*naL_FPVjkUE(59b8s*x5wi|~n
xW=UtQM7=~Mp}frCYY7_qD8^hmikDEepkI=K=vmARq6dY41T+miFatlzzzfnIS;hbW
From 064a2af9b594de65107abd4f00eb91079e14d51e Mon Sep 17 00:00:00 2001
From: russellocean
Date: Mon, 3 Apr 2023 17:29:55 -0400
Subject: [PATCH 49/57] Added back .env.template
---
.env.template | 6 ++++++
1 file changed, 6 insertions(+)
create mode 100644 .env.template
diff --git a/.env.template b/.env.template
new file mode 100644
index 00000000..c64d8502
--- /dev/null
+++ b/.env.template
@@ -0,0 +1,6 @@
+OPENAI_API_KEY=your-openai-api-key
+ELEVENLABS_API_KEY=your-elevenlabs-api-key
+SMART_LLM_MODEL="gpt-4"
+FAST_LLM_MODEL="gpt-3.5-turbo"
+GOOGLE_API_KEY=
+CUSTOM_SEARCH_ENGINE_ID=
\ No newline at end of file
From 79f0882dfcf50bc5ab9ca29e2ed3eb628650facc Mon Sep 17 00:00:00 2001
From: kminer
Date: Mon, 3 Apr 2023 15:35:01 -0600
Subject: [PATCH 50/57] fix: OpenAI key variable name typo
---
scripts/config.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/scripts/config.py b/scripts/config.py
index 44d99bff..8a9ba00c 100644
--- a/scripts/config.py
+++ b/scripts/config.py
@@ -59,7 +59,7 @@ class Config(metaclass=Singleton):
self.smart_token_limit = value
def set_openai_api_key(self, value: str):
- self.apiopenai_api_key_key = value
+ self.openai_api_key = value
def set_elevenlabs_api_key(self, value: str):
self.elevenlabs_api_key = value
From 04c43432c24da884e476e996dda3269f65f446e4 Mon Sep 17 00:00:00 2001
From: yousefissa
Date: Mon, 3 Apr 2023 14:53:19 -0700
Subject: [PATCH 51/57] cleanup method
---
scripts/main.py | 66 ++++++++++++++++---------------------------------
1 file changed, 21 insertions(+), 45 deletions(-)
diff --git a/scripts/main.py b/scripts/main.py
index 2304baf4..01b972c5 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -49,68 +49,44 @@ def print_assistant_thoughts(assistant_reply):
global ai_name
global cfg
try:
- # Parse and print Assistant response
assistant_reply_json = fix_and_parse_json(assistant_reply)
assistant_thoughts_reasoning = None
assistant_thoughts_plan = None
assistant_thoughts_speak = None
assistant_thoughts_criticism = None
- try:
- assistant_thoughts = assistant_reply_json.get("thoughts")
- if assistant_thoughts:
- assistant_thoughts_text = assistant_thoughts.get("text")
- assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
- assistant_thoughts_plan = assistant_thoughts.get("plan")
- assistant_thoughts_criticism = assistant_thoughts.get("criticism")
- assistant_thoughts_speak = assistant_thoughts.get("speak")
- else:
- assistant_thoughts_text = None
- assistant_thoughts_reasoning = None
- assistant_thoughts_plan = None
- assistant_thoughts_criticism = None
- assistant_thoughts_speak = None
- except Exception:
- assistant_thoughts_text = "The AI's response was unreadable."
+ assistant_thoughts = assistant_reply_json.get("thoughts", {})
+ assistant_thoughts_text = assistant_thoughts.get("text")
+
+ if assistant_thoughts:
+ assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
+ assistant_thoughts_plan = assistant_thoughts.get("plan")
+ assistant_thoughts_criticism = assistant_thoughts.get("criticism")
+ assistant_thoughts_speak = assistant_thoughts.get("speak")
+
+ print_to_console(f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text)
+ print_to_console("REASONING:", Fore.YELLOW, assistant_thoughts_reasoning)
- print_to_console(
- f"{ai_name.upper()} THOUGHTS:",
- Fore.YELLOW,
- assistant_thoughts_text)
- print_to_console(
- "REASONING:",
- Fore.YELLOW,
- assistant_thoughts_reasoning)
if assistant_thoughts_plan:
print_to_console("PLAN:", Fore.YELLOW, "")
- if assistant_thoughts_plan:
- # If it's a list, join it into a string
- if isinstance(assistant_thoughts_plan, list):
- assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
- elif isinstance(assistant_thoughts_plan, dict):
- assistant_thoughts_plan = str(assistant_thoughts_plan)
- # Split the input_string using the newline character and dash
+ if isinstance(assistant_thoughts_plan, list):
+ assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
+ elif isinstance(assistant_thoughts_plan, dict):
+ assistant_thoughts_plan = str(assistant_thoughts_plan)
- lines = assistant_thoughts_plan.split('\n')
+ lines = assistant_thoughts_plan.split('\n')
+ for line in lines:
+ line = line.lstrip("- ")
+ print_to_console("- ", Fore.GREEN, line.strip())
- # Iterate through the lines and print each one with a bullet
- # point
- for line in lines:
- # Remove any "-" characters from the start of the line
- line = line.lstrip("- ")
- print_to_console("- ", Fore.GREEN, line.strip())
- print_to_console(
- "CRITICISM:",
- Fore.YELLOW,
- assistant_thoughts_criticism)
+ print_to_console("CRITICISM:", Fore.YELLOW, assistant_thoughts_criticism)
- # Speak the assistant's thoughts
if cfg.speak_mode and assistant_thoughts_speak:
speak.say_text(assistant_thoughts_speak)
except json.decoder.JSONDecodeError:
print_to_console("Error: Invalid JSON\n", Fore.RED, assistant_reply)
- # All other errors, return "Error: + error message"
+
except Exception as e:
call_stack = traceback.format_exc()
print_to_console("Error: \n", Fore.RED, call_stack)
From 8753eba22cd23b0ceff97f95a27f3ff43929176f Mon Sep 17 00:00:00 2001
From: yousefissa
Date: Mon, 3 Apr 2023 14:55:30 -0700
Subject: [PATCH 52/57] comments
---
scripts/main.py | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/scripts/main.py b/scripts/main.py
index 01b972c5..97c05d7d 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -49,6 +49,7 @@ def print_assistant_thoughts(assistant_reply):
global ai_name
global cfg
try:
+ # Parse and print Assistant response
assistant_reply_json = fix_and_parse_json(assistant_reply)
assistant_thoughts_reasoning = None
@@ -69,24 +70,27 @@ def print_assistant_thoughts(assistant_reply):
if assistant_thoughts_plan:
print_to_console("PLAN:", Fore.YELLOW, "")
+ # If it's a list, join it into a string
if isinstance(assistant_thoughts_plan, list):
assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
elif isinstance(assistant_thoughts_plan, dict):
assistant_thoughts_plan = str(assistant_thoughts_plan)
+ # Split the input_string using the newline character and dashes
lines = assistant_thoughts_plan.split('\n')
for line in lines:
line = line.lstrip("- ")
print_to_console("- ", Fore.GREEN, line.strip())
print_to_console("CRITICISM:", Fore.YELLOW, assistant_thoughts_criticism)
-
+ # Speak the assistant's thoughts
if cfg.speak_mode and assistant_thoughts_speak:
speak.say_text(assistant_thoughts_speak)
except json.decoder.JSONDecodeError:
print_to_console("Error: Invalid JSON\n", Fore.RED, assistant_reply)
+ # All other errors, return "Error: + error message"
except Exception as e:
call_stack = traceback.format_exc()
print_to_console("Error: \n", Fore.RED, call_stack)
From 7e529e19d9b97c2e99720bd6f1bc829f7c5e8597 Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Tue, 4 Apr 2023 00:24:22 +0100
Subject: [PATCH 53/57] Removes print.
---
scripts/commands.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/scripts/commands.py b/scripts/commands.py
index f94fdc41..8ad95336 100644
--- a/scripts/commands.py
+++ b/scripts/commands.py
@@ -47,7 +47,7 @@ def get_command(response):
def execute_command(command_name, arguments):
try:
if command_name == "google":
- print("Using Google search method")
+
# Check if the Google API key is set and use the official search method
# If the API key is not set or has only whitespaces, use the unofficial search method
if cfg.google_api_key and (cfg.google_api_key.strip() if cfg.google_api_key else None):
From 2af9cf853aac6b91a7fac26dfccc28d1c6bfa4cb Mon Sep 17 00:00:00 2001
From: Preston Jensen
Date: Mon, 3 Apr 2023 20:12:11 -0600
Subject: [PATCH 54/57] human feedback in manual mode
---
scripts/main.py | 25 +++++++++++++++----------
1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/scripts/main.py b/scripts/main.py
index 97c05d7d..03f656b0 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -303,7 +303,7 @@ while True:
Fore.CYAN,
f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
print(
- "Enter 'y' to authorise command or 'n' to exit program...",
+ f"Enter 'y' to authorise command or 'n' to exit program, or enter feedback for {ai_name}...",
flush=True)
while True:
console_input = input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)
@@ -314,16 +314,18 @@ while True:
user_input = "EXIT"
break
else:
- continue
+ user_input = console_input
+ command_name = "human_feedback"
+ break
- if user_input != "GENERATE NEXT COMMAND JSON":
- print("Exiting...", flush=True)
- break
-
- print_to_console(
+ if user_input == "GENERATE NEXT COMMAND JSON":
+ print_to_console(
"-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
Fore.MAGENTA,
"")
+ elif user_input == "EXIT":
+ print("Exiting...", flush=True)
+ break
else:
# Print command
print_to_console(
@@ -332,10 +334,12 @@ while True:
f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
# Execute command
- if command_name.lower() != "error":
- result = f"Command {command_name} returned: {cmd.execute_command(command_name, arguments)}"
- else:
+ if command_name.lower() == "error":
result = f"Command {command_name} threw the following error: " + arguments
+ elif command_name == "human_feedback":
+ result = f"Human feedback: {user_input}"
+ else:
+ result = f"Command {command_name} returned: {cmd.execute_command(command_name, arguments)}"
# Check if there's a result from the command append it to the message
# history
@@ -347,3 +351,4 @@ while True:
chat.create_chat_message(
"system", "Unable to execute command"))
print_to_console("SYSTEM: ", Fore.YELLOW, "Unable to execute command")
+
From 82da7f1681a1c2cb5dad310e396589b8a3d3ce71 Mon Sep 17 00:00:00 2001
From: yousefissa
Date: Mon, 3 Apr 2023 22:19:32 -0700
Subject: [PATCH 55/57] fix assistant thoughts failure on string type
---
scripts/main.py | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/scripts/main.py b/scripts/main.py
index 97c05d7d..a1c08018 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -52,6 +52,14 @@ def print_assistant_thoughts(assistant_reply):
# Parse and print Assistant response
assistant_reply_json = fix_and_parse_json(assistant_reply)
+ # Check if assistant_reply_json is a string and attempt to parse it into a JSON object
+ if isinstance(assistant_reply_json, str):
+ try:
+ assistant_reply_json = json.loads(assistant_reply_json)
+ except json.JSONDecodeError as e:
+ print_to_console("Error: Invalid JSON\n", Fore.RED, assistant_reply)
+ assistant_reply_json = {}
+
assistant_thoughts_reasoning = None
assistant_thoughts_plan = None
assistant_thoughts_speak = None
@@ -90,10 +98,6 @@ def print_assistant_thoughts(assistant_reply):
except json.decoder.JSONDecodeError:
print_to_console("Error: Invalid JSON\n", Fore.RED, assistant_reply)
- # All other errors, return "Error: + error message"
- except Exception as e:
- call_stack = traceback.format_exc()
- print_to_console("Error: \n", Fore.RED, call_stack)
def load_variables(config_file="config.yaml"):
From 570c161e5e78a7edee74aacfc1164efa26680d1d Mon Sep 17 00:00:00 2001
From: yousefissa
Date: Mon, 3 Apr 2023 22:21:42 -0700
Subject: [PATCH 56/57] add final exception handling back
---
scripts/main.py | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/scripts/main.py b/scripts/main.py
index a1c08018..86afec39 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -98,6 +98,10 @@ def print_assistant_thoughts(assistant_reply):
except json.decoder.JSONDecodeError:
print_to_console("Error: Invalid JSON\n", Fore.RED, assistant_reply)
+ # All other errors, return "Error: + error message"
+ except Exception as e:
+ call_stack = traceback.format_exc()
+ print_to_console("Error: \n", Fore.RED, call_stack)
def load_variables(config_file="config.yaml"):
From 4650882d971a8edd2ac3ae29eef9bbdb210baeeb Mon Sep 17 00:00:00 2001
From: Zhaofeng Miao <522856232@qq.com>
Date: Tue, 4 Apr 2023 16:37:23 +0800
Subject: [PATCH 57/57] fix(prompt): fix typos
---
scripts/data/prompt.txt | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/scripts/data/prompt.txt b/scripts/data/prompt.txt
index d17fa27a..2cb73a8d 100644
--- a/scripts/data/prompt.txt
+++ b/scripts/data/prompt.txt
@@ -1,6 +1,6 @@
CONSTRAINTS:
-1. ~4000 word limit for memory. Your memory is short, so immidiately save important information to long term memory and code to files.
+1. ~4000 word limit for memory. Your memory is short, so immediately save important information to long term memory and code to files.
2. No user assistance
COMMANDS:
@@ -34,9 +34,9 @@ RESOURCES:
PERFORMANCE EVALUATION:
1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.
-2. Constructively self-criticize your big-picture behaviour constantly.
+2. Constructively self-criticize your big-picture behavior constantly.
3. Reflect on past decisions and strategies to refine your approach.
-4. Every command has a cost, so be smart and efficent. Aim to complete tasks in the least number of steps.
+4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.
You should only respond in JSON format as described below
@@ -58,4 +58,4 @@ RESPONSE FORMAT:
}
}
-Ensure the response can be parsed by Python json.loads
\ No newline at end of file
+Ensure the response can be parsed by Python json.loads