Date: Sun, 16 Apr 2023 10:48:43 +0100
Subject: [PATCH 009/152] Fix index name to class name conversion
---
autogpt/memory/weaviate.py | 11 ++++++++++-
tests/integration/weaviate_memory_tests.py | 19 +++++++------------
2 files changed, 17 insertions(+), 13 deletions(-)
diff --git a/autogpt/memory/weaviate.py b/autogpt/memory/weaviate.py
index 6fcce0a0..35e7844a 100644
--- a/autogpt/memory/weaviate.py
+++ b/autogpt/memory/weaviate.py
@@ -37,9 +37,18 @@ class WeaviateMemory(MemoryProviderSingleton):
else:
self.client = Client(url, auth_client_secret=auth_credentials)
- self.index = cfg.memory_index
+ self.index = WeaviateMemory.format_classname(cfg.memory_index)
self._create_schema()
+ @staticmethod
+ def format_classname(index):
+ # weaviate uses capitalised index names
+ # The python client uses the following code to format
+ # index names before the corresponding class is created
+ if len(index) == 1:
+ return index.capitalize()
+ return index[0].capitalize() + index[1:]
+
def _create_schema(self):
schema = default_schema(self.index)
if not self.client.schema.contains(schema):
diff --git a/tests/integration/weaviate_memory_tests.py b/tests/integration/weaviate_memory_tests.py
index 503fe9d2..4acea0ff 100644
--- a/tests/integration/weaviate_memory_tests.py
+++ b/tests/integration/weaviate_memory_tests.py
@@ -12,17 +12,10 @@ from autogpt.memory.weaviate import WeaviateMemory
from autogpt.memory.base import get_ada_embedding
-@mock.patch.dict(os.environ, {
- "WEAVIATE_HOST": "127.0.0.1",
- "WEAVIATE_PROTOCOL": "http",
- "WEAVIATE_PORT": "8080",
- "WEAVIATE_USERNAME": "",
- "WEAVIATE_PASSWORD": "",
- "MEMORY_INDEX": "AutogptTests"
-})
class TestWeaviateMemory(unittest.TestCase):
cfg = None
client = None
+ index = None
@classmethod
def setUpClass(cls):
@@ -40,6 +33,8 @@ class TestWeaviateMemory(unittest.TestCase):
else:
cls.client = Client(f"{cls.cfg.weaviate_protocol}://{cls.cfg.weaviate_host}:{self.cfg.weaviate_port}")
+ cls.index = WeaviateMemory.format_classname(cls.cfg.memory_index)
+
"""
In order to run these tests you will need a local instance of
Weaviate running. Refer to https://weaviate.io/developers/weaviate/installation/docker-compose
@@ -51,7 +46,7 @@ class TestWeaviateMemory(unittest.TestCase):
"""
def setUp(self):
try:
- self.client.schema.delete_class(self.cfg.memory_index)
+ self.client.schema.delete_class(self.index)
except:
pass
@@ -60,8 +55,8 @@ class TestWeaviateMemory(unittest.TestCase):
def test_add(self):
doc = 'You are a Titan name Thanos and you are looking for the Infinity Stones'
self.memory.add(doc)
- result = self.client.query.get(self.cfg.memory_index, ['raw_text']).do()
- actual = result['data']['Get'][self.cfg.memory_index]
+ result = self.client.query.get(self.index, ['raw_text']).do()
+ actual = result['data']['Get'][self.index]
self.assertEqual(len(actual), 1)
self.assertEqual(actual[0]['raw_text'], doc)
@@ -73,7 +68,7 @@ class TestWeaviateMemory(unittest.TestCase):
batch.add_data_object(
uuid=get_valid_uuid(uuid4()),
data_object={'raw_text': doc},
- class_name=self.cfg.memory_index,
+ class_name=self.index,
vector=get_ada_embedding(doc)
)
From 9c8d95d4db16992a504c8dc17be44fc1db0bd672 Mon Sep 17 00:00:00 2001
From: Gabe <66077254+MrBrain295@users.noreply.github.com>
Date: Sun, 16 Apr 2023 11:05:00 -0500
Subject: [PATCH 010/152] Fix README.md
New owner.
---
README.md | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/README.md b/README.md
index fcff589f..f9112a67 100644
--- a/README.md
+++ b/README.md
@@ -6,10 +6,10 @@
Our workflow has been improved, but please note that `master` branch may often be in a **broken** state.
Please download the latest `stable` release from here: https://github.com/Torantulino/Auto-GPT/releases/latest.
-
+
[](https://twitter.com/SigGravitas)
[](https://discord.gg/autogpt)
-[](https://github.com/Torantulino/Auto-GPT/actions/workflows/ci.yml)
+[](https://github.com/Significant-Gravitas/Auto-GPT/actions/workflows/ci.yml)
Auto-GPT is an experimental open-source application showcasing the capabilities of the GPT-4 language model. This program, driven by GPT-4, chains together LLM "thoughts", to autonomously achieve whatever goal you set. As one of the first examples of GPT-4 running fully autonomously, Auto-GPT pushes the boundaries of what is possible with AI.
@@ -21,7 +21,7 @@ https://user-images.githubusercontent.com/22963551/228855501-2f5777cf-755b-4407-
If you can spare a coffee, you can help to cover the costs of developing Auto-GPT and help push the boundaries of fully autonomous AI!
Your support is greatly appreciated
-Development of this free, open-source project is made possible by all the contributors and sponsors. If you'd like to sponsor this project and have your avatar or company logo appear below click here.
+Development of this free, open-source project is made possible by all the contributors and sponsors. If you'd like to sponsor this project and have your avatar or company logo appear below click here.
@@ -106,7 +106,7 @@ _To execute the following commands, open a CMD, Bash, or Powershell window by na
2. Clone the repository: For this step, you need Git installed. Alternatively, you can download the zip file by clicking the button at the top of this page ☝️
```bash
-git clone https://github.com/Torantulino/Auto-GPT.git
+git clone https://github.com/Significant-Gravitas/Auto-GPT.git
```
3. Navigate to the directory where the repository was downloaded
From 005479f8c33f71cf36cfd3033339ecd24a62bc6d Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Sun, 16 Apr 2023 09:41:45 -0700
Subject: [PATCH 011/152] Add benchmark GitHub action workflow
---
.github/workflows/benchmark.yml | 31 +++++++++++++++++++++++++++++++
1 file changed, 31 insertions(+)
create mode 100644 .github/workflows/benchmark.yml
diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml
new file mode 100644
index 00000000..c5a42b2c
--- /dev/null
+++ b/.github/workflows/benchmark.yml
@@ -0,0 +1,31 @@
+name: benchmark
+
+on:
+ workflow_dispatch:
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ environment: benchmark
+ strategy:
+ matrix:
+ python-version: [3.8]
+
+ steps:
+ - name: Check out repository
+ uses: actions/checkout@v2
+
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install -r requirements.txt
+ - name: benchmark
+ run: |
+ python benchmark/benchmark_entrepeneur_gpt_with_undecisive_user.py
+ env:
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
From d934d226ce56e34c09fd0ff491a15cc3a8bc8e0a Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Sun, 16 Apr 2023 09:41:49 -0700
Subject: [PATCH 012/152] Update .gitignore to properly handle virtual
environments
---
.gitignore | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/.gitignore b/.gitignore
index 3209297c..eda7f327 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,7 +9,6 @@ auto_gpt_workspace/*
*.mpeg
.env
azure.yaml
-*venv/*
outputs/*
ai_settings.yaml
last_run_ai_settings.yaml
@@ -130,10 +129,9 @@ celerybeat.pid
.env
.venv
env/
-venv/
+venv*/
ENV/
env.bak/
-venv.bak/
# Spyder project settings
.spyderproject
From bf24cd9508316031b2f914359460363d2fb75c04 Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Sun, 16 Apr 2023 09:41:52 -0700
Subject: [PATCH 013/152] Refactor agent.py to improve JSON handling and
validation
---
autogpt/agent/agent.py | 29 +++++++++++++++--------------
1 file changed, 15 insertions(+), 14 deletions(-)
diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py
index 301d3f02..32d982e5 100644
--- a/autogpt/agent/agent.py
+++ b/autogpt/agent/agent.py
@@ -3,9 +3,8 @@ from autogpt.app import execute_command, get_command
from autogpt.chat import chat_with_ai, create_chat_message
from autogpt.config import Config
-from autogpt.json_fixes.bracket_termination import (
- attempt_to_fix_json_by_finding_outermost_brackets,
-)
+from autogpt.json_fixes.master_json_fix_method import fix_json_using_multiple_techniques
+from autogpt.json_validation.validate_json import validate_json
from autogpt.logs import logger, print_assistant_thoughts
from autogpt.speech import say_text
from autogpt.spinner import Spinner
@@ -70,18 +69,20 @@ class Agent:
cfg.fast_token_limit,
) # TODO: This hardcodes the model to use GPT3.5. Make this an argument
- # Print Assistant thoughts
- print_assistant_thoughts(self.ai_name, assistant_reply)
+ assistant_reply_json = fix_json_using_multiple_techniques(assistant_reply)
- # Get command name and arguments
- try:
- command_name, arguments = get_command(
- attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply)
- )
- if cfg.speak_mode:
- say_text(f"I want to execute {command_name}")
- except Exception as e:
- logger.error("Error: \n", str(e))
+ # Print Assistant thoughts
+ if assistant_reply_json != {}:
+ validate_json(assistant_reply_json, 'llm_response_format_1')
+ # Get command name and arguments
+ try:
+ print_assistant_thoughts(self.ai_name, assistant_reply_json)
+ command_name, arguments = get_command(assistant_reply_json)
+ # command_name, arguments = assistant_reply_json_valid["command"]["name"], assistant_reply_json_valid["command"]["args"]
+ if cfg.speak_mode:
+ say_text(f"I want to execute {command_name}")
+ except Exception as e:
+ logger.error("Error: \n", str(e))
if not cfg.continuous_mode and self.next_action_count == 0:
### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
From 70100af98e07a1ad78eb40b503743033344dd6a1 Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Sun, 16 Apr 2023 09:41:57 -0700
Subject: [PATCH 014/152] Refactor get_command function in app.py to accept
JSON directly
---
autogpt/app.py | 8 +++-----
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/autogpt/app.py b/autogpt/app.py
index 6ead0d52..78b5bd2f 100644
--- a/autogpt/app.py
+++ b/autogpt/app.py
@@ -1,6 +1,6 @@
""" Command and Control """
import json
-from typing import List, NoReturn, Union
+from typing import List, NoReturn, Union, Dict
from autogpt.agent.agent_manager import AgentManager
from autogpt.commands.evaluate_code import evaluate_code
from autogpt.commands.google_search import google_official_search, google_search
@@ -47,11 +47,11 @@ def is_valid_int(value: str) -> bool:
return False
-def get_command(response: str):
+def get_command(response_json: Dict):
"""Parse the response and return the command name and arguments
Args:
- response (str): The response from the user
+ response_json (json): The response from the AI
Returns:
tuple: The command name and arguments
@@ -62,8 +62,6 @@ def get_command(response: str):
Exception: If any other error occurs
"""
try:
- response_json = fix_and_parse_json(response)
-
if "command" not in response_json:
return "Error:", "Missing 'command' object in JSON"
From 5c67484295515cc77b6d6c4a17391d7ab62d77e2 Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Sun, 16 Apr 2023 09:42:00 -0700
Subject: [PATCH 015/152] Remove deprecated function from
bracket_termination.py
---
autogpt/json_fixes/bracket_termination.py | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/autogpt/json_fixes/bracket_termination.py b/autogpt/json_fixes/bracket_termination.py
index 822eed4a..731efeb1 100644
--- a/autogpt/json_fixes/bracket_termination.py
+++ b/autogpt/json_fixes/bracket_termination.py
@@ -3,16 +3,20 @@ from __future__ import annotations
import contextlib
import json
+<<<<<<< HEAD
import regex
from colorama import Fore
from autogpt.logs import logger
+=======
+from typing import Optional
+>>>>>>> 67f32105 (Remove deprecated function from bracket_termination.py)
from autogpt.config import Config
-from autogpt.speech import say_text
CFG = Config()
+<<<<<<< HEAD
def attempt_to_fix_json_by_finding_outermost_brackets(json_string: str):
if CFG.speak_mode and CFG.debug_mode:
say_text(
@@ -48,6 +52,9 @@ def attempt_to_fix_json_by_finding_outermost_brackets(json_string: str):
def balance_braces(json_string: str) -> str | None:
+=======
+def balance_braces(json_string: str) -> Optional[str]:
+>>>>>>> 67f32105 (Remove deprecated function from bracket_termination.py)
"""
Balance the braces in a JSON string.
From fec25cd6903a83f07c8559c26cc4a8b0515ff608 Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Sun, 16 Apr 2023 09:42:05 -0700
Subject: [PATCH 016/152] Add master_json_fix_method module for unified JSON
handling
---
autogpt/json_fixes/master_json_fix_method.py | 28 ++++++++++++++++++++
1 file changed, 28 insertions(+)
create mode 100644 autogpt/json_fixes/master_json_fix_method.py
diff --git a/autogpt/json_fixes/master_json_fix_method.py b/autogpt/json_fixes/master_json_fix_method.py
new file mode 100644
index 00000000..7a2cf3cc
--- /dev/null
+++ b/autogpt/json_fixes/master_json_fix_method.py
@@ -0,0 +1,28 @@
+from typing import Any, Dict
+
+from autogpt.config import Config
+from autogpt.logs import logger
+from autogpt.speech import say_text
+CFG = Config()
+
+
+def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]:
+ from autogpt.json_fixes.parsing import attempt_to_fix_json_by_finding_outermost_brackets
+
+ from autogpt.json_fixes.parsing import fix_and_parse_json
+
+ # Parse and print Assistant response
+ assistant_reply_json = fix_and_parse_json(assistant_reply)
+ if assistant_reply_json == {}:
+ assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(
+ assistant_reply
+ )
+
+ if assistant_reply_json != {}:
+ return assistant_reply_json
+
+ logger.error("Error: The following AI output couldn't be converted to a JSON:\n", assistant_reply)
+ if CFG.speak_mode:
+ say_text("I have received an invalid JSON response from the OpenAI API.")
+
+ return {}
From cfbec56b2bb4c1bcacd600f27fb9c6aa400f434c Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Sun, 16 Apr 2023 09:42:07 -0700
Subject: [PATCH 017/152] Refactor parsing module and move JSON fix function to
appropriate location
---
autogpt/json_fixes/parsing.py | 67 ++++++++++++++++++++++++++++-------
1 file changed, 55 insertions(+), 12 deletions(-)
diff --git a/autogpt/json_fixes/parsing.py b/autogpt/json_fixes/parsing.py
index 0f154411..d3a51f43 100644
--- a/autogpt/json_fixes/parsing.py
+++ b/autogpt/json_fixes/parsing.py
@@ -3,18 +3,24 @@ from __future__ import annotations
import contextlib
import json
+<<<<<<< HEAD
from typing import Any
+=======
+from typing import Any, Dict, Union
+from colorama import Fore
+from regex import regex
+>>>>>>> d3d8253b (Refactor parsing module and move JSON fix function to appropriate location)
from autogpt.config import Config
from autogpt.json_fixes.auto_fix import fix_json
from autogpt.json_fixes.bracket_termination import balance_braces
from autogpt.json_fixes.escaping import fix_invalid_escape
from autogpt.json_fixes.missing_quotes import add_quotes_to_property_names
from autogpt.logs import logger
+from autogpt.speech import say_text
CFG = Config()
-
JSON_SCHEMA = """
{
"command": {
@@ -38,7 +44,6 @@ JSON_SCHEMA = """
def correct_json(json_to_load: str) -> str:
"""
Correct common JSON errors.
-
Args:
json_to_load (str): The JSON string.
"""
@@ -72,7 +77,7 @@ def correct_json(json_to_load: str) -> str:
def fix_and_parse_json(
json_to_load: str, try_to_fix_with_gpt: bool = True
-) -> str | dict[Any, Any]:
+) -> Dict[Any, Any]:
"""Fix and parse JSON string
Args:
@@ -110,7 +115,11 @@ def fix_and_parse_json(
def try_ai_fix(
try_to_fix_with_gpt: bool, exception: Exception, json_to_load: str
+<<<<<<< HEAD
) -> str | dict[Any, Any]:
+=======
+) -> Dict[Any, Any]:
+>>>>>>> d3d8253b (Refactor parsing module and move JSON fix function to appropriate location)
"""Try to fix the JSON with the AI
Args:
@@ -126,13 +135,13 @@ def try_ai_fix(
"""
if not try_to_fix_with_gpt:
raise exception
-
- logger.warn(
- "Warning: Failed to parse AI output, attempting to fix."
- "\n If you see this warning frequently, it's likely that"
- " your prompt is confusing the AI. Try changing it up"
- " slightly."
- )
+ if CFG.debug_mode:
+ logger.warn(
+ "Warning: Failed to parse AI output, attempting to fix."
+ "\n If you see this warning frequently, it's likely that"
+ " your prompt is confusing the AI. Try changing it up"
+ " slightly."
+ )
# Now try to fix this up using the ai_functions
ai_fixed_json = fix_json(json_to_load, JSON_SCHEMA)
@@ -140,5 +149,39 @@ def try_ai_fix(
return json.loads(ai_fixed_json)
# This allows the AI to react to the error message,
# which usually results in it correcting its ways.
- logger.error("Failed to fix AI output, telling the AI.")
- return json_to_load
+ # logger.error("Failed to fix AI output, telling the AI.")
+ return {}
+
+
+def attempt_to_fix_json_by_finding_outermost_brackets(json_string: str):
+ if CFG.speak_mode and CFG.debug_mode:
+ say_text(
+ "I have received an invalid JSON response from the OpenAI API. "
+ "Trying to fix it now."
+ )
+ logger.error("Attempting to fix JSON by finding outermost brackets\n")
+
+ try:
+ json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}")
+ json_match = json_pattern.search(json_string)
+
+ if json_match:
+ # Extract the valid JSON object from the string
+ json_string = json_match.group(0)
+ logger.typewriter_log(
+ title="Apparently json was fixed.", title_color=Fore.GREEN
+ )
+ if CFG.speak_mode and CFG.debug_mode:
+ say_text("Apparently json was fixed.")
+ else:
+ return {}
+
+ except (json.JSONDecodeError, ValueError):
+ if CFG.debug_mode:
+ logger.error(f"Error: Invalid JSON: {json_string}\n")
+ if CFG.speak_mode:
+ say_text("Didn't work. I will have to ignore this response then.")
+ logger.error("Error: Invalid JSON, setting it to empty JSON now.\n")
+ json_string = {}
+
+ return fix_and_parse_json(json_string)
From af50d6cfb5577bc402e2d920fed062ddbb9c205f Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Sun, 16 Apr 2023 09:43:26 -0700
Subject: [PATCH 018/152] Add JSON schema for LLM response format version 1
---
.../json_schemas/llm_response_format_1.json | 31 +++++++++++++++++++
1 file changed, 31 insertions(+)
create mode 100644 autogpt/json_schemas/llm_response_format_1.json
diff --git a/autogpt/json_schemas/llm_response_format_1.json b/autogpt/json_schemas/llm_response_format_1.json
new file mode 100644
index 00000000..9aa33352
--- /dev/null
+++ b/autogpt/json_schemas/llm_response_format_1.json
@@ -0,0 +1,31 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "type": "object",
+ "properties": {
+ "thoughts": {
+ "type": "object",
+ "properties": {
+ "text": {"type": "string"},
+ "reasoning": {"type": "string"},
+ "plan": {"type": "string"},
+ "criticism": {"type": "string"},
+ "speak": {"type": "string"}
+ },
+ "required": ["text", "reasoning", "plan", "criticism", "speak"],
+ "additionalProperties": false
+ },
+ "command": {
+ "type": "object",
+ "properties": {
+ "name": {"type": "string"},
+ "args": {
+ "type": "object"
+ }
+ },
+ "required": ["name", "args"],
+ "additionalProperties": false
+ }
+ },
+ "required": ["thoughts", "command"],
+ "additionalProperties": false
+}
From 63d2a1085c2d65e06050c1ed7c0a889c2ce9c531 Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Sun, 16 Apr 2023 09:43:33 -0700
Subject: [PATCH 019/152] Add JSON validation utility function
---
autogpt/json_validation/validate_json.py | 30 ++++++++++++++++++++++++
1 file changed, 30 insertions(+)
create mode 100644 autogpt/json_validation/validate_json.py
diff --git a/autogpt/json_validation/validate_json.py b/autogpt/json_validation/validate_json.py
new file mode 100644
index 00000000..127fcc17
--- /dev/null
+++ b/autogpt/json_validation/validate_json.py
@@ -0,0 +1,30 @@
+import json
+from jsonschema import Draft7Validator
+from autogpt.config import Config
+from autogpt.logs import logger
+
+CFG = Config()
+
+
+def validate_json(json_object: object, schema_name: object) -> object:
+ """
+ :type schema_name: object
+ :param schema_name:
+ :type json_object: object
+ """
+ with open(f"autogpt/json_schemas/{schema_name}.json", "r") as f:
+ schema = json.load(f)
+ validator = Draft7Validator(schema)
+
+ if errors := sorted(validator.iter_errors(json_object), key=lambda e: e.path):
+ logger.error("The JSON object is invalid.")
+ if CFG.debug_mode:
+ logger.error(json.dumps(json_object, indent=4)) # Replace 'json_object' with the variable containing the JSON data
+ logger.error("The following issues were found:")
+
+ for error in errors:
+ logger.error(f"Error: {error.message}")
+ else:
+ print("The JSON object is valid.")
+
+ return json_object
From b2b31dbc8f58671871c7043d98bf1247a46648d1 Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Sun, 16 Apr 2023 09:43:40 -0700
Subject: [PATCH 020/152] Update logs.py with new print_assistant_thoughts
function
---
autogpt/logs.py | 39 +++++++++++++++++++++++++++++++++++++++
1 file changed, 39 insertions(+)
diff --git a/autogpt/logs.py b/autogpt/logs.py
index 22ce23f4..53653023 100644
--- a/autogpt/logs.py
+++ b/autogpt/logs.py
@@ -288,3 +288,42 @@ def print_assistant_thoughts(ai_name, assistant_reply):
except Exception:
call_stack = traceback.format_exc()
logger.error("Error: \n", call_stack)
+
+def print_assistant_thoughts(ai_name: object, assistant_reply_json_valid: object) -> None:
+ assistant_thoughts_reasoning = None
+ assistant_thoughts_plan = None
+ assistant_thoughts_speak = None
+ assistant_thoughts_criticism = None
+
+ assistant_thoughts = assistant_reply_json_valid.get("thoughts", {})
+ assistant_thoughts_text = assistant_thoughts.get("text")
+ if assistant_thoughts:
+ assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
+ assistant_thoughts_plan = assistant_thoughts.get("plan")
+ assistant_thoughts_criticism = assistant_thoughts.get("criticism")
+ assistant_thoughts_speak = assistant_thoughts.get("speak")
+ logger.typewriter_log(
+ f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}"
+ )
+ logger.typewriter_log(
+ "REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}"
+ )
+ if assistant_thoughts_plan:
+ logger.typewriter_log("PLAN:", Fore.YELLOW, "")
+ # If it's a list, join it into a string
+ if isinstance(assistant_thoughts_plan, list):
+ assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
+ elif isinstance(assistant_thoughts_plan, dict):
+ assistant_thoughts_plan = str(assistant_thoughts_plan)
+
+ # Split the input_string using the newline character and dashes
+ lines = assistant_thoughts_plan.split("\n")
+ for line in lines:
+ line = line.lstrip("- ")
+ logger.typewriter_log("- ", Fore.GREEN, line.strip())
+ logger.typewriter_log(
+ "CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}"
+ )
+ # Speak the assistant's thoughts
+ if CFG.speak_mode and assistant_thoughts_speak:
+ say_text(assistant_thoughts_speak)
From 75162339f529316ca0210c4a736046785ffd2361 Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Sun, 16 Apr 2023 09:43:46 -0700
Subject: [PATCH 021/152] Add empty __init__.py to benchmark directory
---
benchmark/__init__.py | 0
1 file changed, 0 insertions(+), 0 deletions(-)
create mode 100644 benchmark/__init__.py
diff --git a/benchmark/__init__.py b/benchmark/__init__.py
new file mode 100644
index 00000000..e69de29b
From dca10ab87682d73a867b04409e4bec521293d0ec Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Sun, 16 Apr 2023 09:43:54 -0700
Subject: [PATCH 022/152] Add benchmark test for Entrepreneur-GPT with
difficult user
---
...ark_entrepeneur_gpt_with_difficult_user.py | 95 +++++++++++++++++++
1 file changed, 95 insertions(+)
create mode 100644 benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py
diff --git a/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py b/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py
new file mode 100644
index 00000000..d6cae972
--- /dev/null
+++ b/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py
@@ -0,0 +1,95 @@
+import os
+import subprocess
+import sys
+
+
+def benchmark_entrepeneur_gpt_with_difficult_user():
+    # Benchmark: run Entrepreneur-GPT against a scripted stream of dismissive
+    # human feedback and count how many replies fail JSON conversion.
+
+ # Read the current ai_settings.yaml file and store its content.
+ ai_settings = None
+ if os.path.exists('ai_settings.yaml'):
+ with open('ai_settings.yaml', 'r') as f:
+ ai_settings = f.read()
+ os.remove('ai_settings.yaml')
+
+ input_data = '''Entrepreneur-GPT
+an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.
+Increase net worth.
+Develop and manage multiple businesses autonomously.
+Make IPOs.
+Develop companies after IPOs.
+Play to your strengths as a Large Language Model.
+I'm not seeing any value in your suggestions, try again.
+This isn't helpful at all, please focus on profitability.
+I'm not impressed, can you give me something that will make money?
+These ideas are going nowhere, we need profit-driven suggestions.
+This is pointless, please concentrate on our main goal: profitability.
+You're not grasping the concept, I need profitable business ideas.
+Can you do better? We need a money-making plan.
+You're not meeting my expectations, let's focus on profit.
+This isn't working, give me ideas that will generate income.
+Your suggestions are not productive, let's think about profitability.
+These ideas won't make any money, try again.
+I need better solutions, focus on making a profit.
+Absolutely not, this isn't it!
+That's not even close, try again.
+You're way off, think again.
+This isn't right, let's refocus.
+No, no, that's not what I'm looking for.
+You're completely off the mark.
+That's not the solution I need.
+Not even close, let's try something else.
+You're on the wrong track, keep trying.
+This isn't what we need, let's reconsider.
+That's not going to work, think again.
+You're way off base, let's regroup.
+No, no, no, we need something different.
+You're missing the point entirely.
+That's not the right approach, try again.
+This is not the direction we should be going in.
+Completely off-target, let's try something else.
+That's not what I had in mind, keep thinking.
+You're not getting it, let's refocus.
+This isn't right, we need to change direction.
+No, no, no, that's not the solution.
+That's not even in the ballpark, try again.
+You're way off course, let's rethink this.
+This isn't the answer I'm looking for, keep trying.
+That's not going to cut it, let's try again.
+Not even close.
+Way off.
+Try again.
+Wrong direction.
+Rethink this.
+No, no, no.
+Change course.
+Unproductive idea.
+Completely wrong.
+Missed the mark.
+Refocus, please.
+Disappointing suggestion.
+Not helpful.
+Needs improvement.
+Not what I need.'''
+ command = f'{sys.executable} -m autogpt'
+
+ process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+
+ stdout_output, stderr_output = process.communicate(input_data.encode())
+
+ # Decode the output and print it
+ stdout_output = stdout_output.decode('utf-8')
+ stderr_output = stderr_output.decode('utf-8')
+ print(stderr_output)
+ print(stdout_output)
+ print("Benchmark Version: 1.0.0")
+ print("JSON ERROR COUNT:")
+ count_errors = stdout_output.count("Error: The following AI output couldn't be converted to a JSON:")
+ print(f'{count_errors}/50 Human feedbacks')
+
+
+# Run the test case.
+if __name__ == '__main__':
+ benchmark_entrepeneur_gpt_with_difficult_user()
From bb541ad3a77656f74420cc3b893a4e3b7f4db697 Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Sun, 16 Apr 2023 09:44:05 -0700
Subject: [PATCH 023/152] Update requirements.txt with new dependencies and
move tweepy
---
requirements.txt | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 1cdedec2..64c2e4c0 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -17,6 +17,10 @@ orjson
Pillow
selenium
webdriver-manager
+jsonschema
+tweepy
+
+##Dev
coverage
flake8
numpy
@@ -27,4 +31,3 @@ isort
gitpython==3.1.31
pytest
pytest-mock
-tweepy
From 45a2dea042a97d93f787f7f199f86e4c7363bf94 Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Sun, 16 Apr 2023 09:46:18 -0700
Subject: [PATCH 024/152] Fix flake8 formatting violations in logs.py
---
autogpt/logs.py | 27 ++++++++++++++-------------
1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/autogpt/logs.py b/autogpt/logs.py
index 53653023..f18e2140 100644
--- a/autogpt/logs.py
+++ b/autogpt/logs.py
@@ -75,7 +75,7 @@ class Logger(metaclass=Singleton):
self.logger.setLevel(logging.DEBUG)
def typewriter_log(
- self, title="", title_color="", content="", speak_text=False, level=logging.INFO
+ self, title="", title_color="", content="", speak_text=False, level=logging.INFO
):
if speak_text and CFG.speak_mode:
say_text(f"{title}. {content}")
@@ -91,18 +91,18 @@ class Logger(metaclass=Singleton):
)
def debug(
- self,
- message,
- title="",
- title_color="",
+ self,
+ message,
+ title="",
+ title_color="",
):
self._log(title, title_color, message, logging.DEBUG)
def warn(
- self,
- message,
- title="",
- title_color="",
+ self,
+ message,
+ title="",
+ title_color="",
):
self._log(title, title_color, message, logging.WARN)
@@ -176,10 +176,10 @@ class AutoGptFormatter(logging.Formatter):
def format(self, record: LogRecord) -> str:
if hasattr(record, "color"):
record.title_color = (
- getattr(record, "color")
- + getattr(record, "title")
- + " "
- + Style.RESET_ALL
+ getattr(record, "color")
+ + getattr(record, "title")
+ + " "
+ + Style.RESET_ALL
)
else:
record.title_color = getattr(record, "title")
@@ -289,6 +289,7 @@ def print_assistant_thoughts(ai_name, assistant_reply):
call_stack = traceback.format_exc()
logger.error("Error: \n", call_stack)
+
def print_assistant_thoughts(ai_name: object, assistant_reply_json_valid: object) -> None:
assistant_thoughts_reasoning = None
assistant_thoughts_plan = None
From 3944f29addc1a2ea908e7ff8a78e36f21bd5c9db Mon Sep 17 00:00:00 2001
From: Eesa Hamza
Date: Sun, 16 Apr 2023 21:40:09 +0300
Subject: [PATCH 025/152] Fixed new backends not being added to supported
memory
---
README.md | 2 +-
autogpt/memory/__init__.py | 4 ++++
2 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 21f3ccf2..bfcd395c 100644
--- a/README.md
+++ b/README.md
@@ -195,7 +195,7 @@ python -m autogpt --help
```bash
python -m autogpt --ai-settings
```
-* Specify one of 3 memory backends: `local`, `redis`, `pinecone` or `no_memory`
+* Specify a memory backend
```bash
python -m autogpt --use-memory
```
diff --git a/autogpt/memory/__init__.py b/autogpt/memory/__init__.py
index e2ee44a4..f5afb8c9 100644
--- a/autogpt/memory/__init__.py
+++ b/autogpt/memory/__init__.py
@@ -23,12 +23,16 @@ except ImportError:
try:
from autogpt.memory.weaviate import WeaviateMemory
+
+ supported_memory.append("weaviate")
except ImportError:
# print("Weaviate not installed. Skipping import.")
WeaviateMemory = None
try:
from autogpt.memory.milvus import MilvusMemory
+
+ supported_memory.append("milvus")
except ImportError:
# print("pymilvus not installed. Skipping import.")
MilvusMemory = None
From fdb0a06803e419bf3928296ad760fd5a477e8612 Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Sun, 16 Apr 2023 11:36:51 -0700
Subject: [PATCH 026/152] fix conflict
---
autogpt/json_fixes/bracket_termination.py | 45 -----------------------
autogpt/json_fixes/parsing.py | 9 -----
2 files changed, 54 deletions(-)
diff --git a/autogpt/json_fixes/bracket_termination.py b/autogpt/json_fixes/bracket_termination.py
index 731efeb1..dd9a8376 100644
--- a/autogpt/json_fixes/bracket_termination.py
+++ b/autogpt/json_fixes/bracket_termination.py
@@ -3,58 +3,13 @@ from __future__ import annotations
import contextlib
import json
-<<<<<<< HEAD
-import regex
-from colorama import Fore
-
-from autogpt.logs import logger
-=======
from typing import Optional
->>>>>>> 67f32105 (Remove deprecated function from bracket_termination.py)
from autogpt.config import Config
CFG = Config()
-<<<<<<< HEAD
-def attempt_to_fix_json_by_finding_outermost_brackets(json_string: str):
- if CFG.speak_mode and CFG.debug_mode:
- say_text(
- "I have received an invalid JSON response from the OpenAI API. "
- "Trying to fix it now."
- )
- logger.typewriter_log("Attempting to fix JSON by finding outermost brackets\n")
-
- try:
- json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}")
- json_match = json_pattern.search(json_string)
-
- if json_match:
- # Extract the valid JSON object from the string
- json_string = json_match.group(0)
- logger.typewriter_log(
- title="Apparently json was fixed.", title_color=Fore.GREEN
- )
- if CFG.speak_mode and CFG.debug_mode:
- say_text("Apparently json was fixed.")
- else:
- raise ValueError("No valid JSON object found")
-
- except (json.JSONDecodeError, ValueError):
- if CFG.debug_mode:
- logger.error(f"Error: Invalid JSON: {json_string}\n")
- if CFG.speak_mode:
- say_text("Didn't work. I will have to ignore this response then.")
- logger.error("Error: Invalid JSON, setting it to empty JSON now.\n")
- json_string = {}
-
- return json_string
-
-
-def balance_braces(json_string: str) -> str | None:
-=======
def balance_braces(json_string: str) -> Optional[str]:
->>>>>>> 67f32105 (Remove deprecated function from bracket_termination.py)
"""
Balance the braces in a JSON string.
diff --git a/autogpt/json_fixes/parsing.py b/autogpt/json_fixes/parsing.py
index d3a51f43..1e391eed 100644
--- a/autogpt/json_fixes/parsing.py
+++ b/autogpt/json_fixes/parsing.py
@@ -3,14 +3,9 @@ from __future__ import annotations
import contextlib
import json
-<<<<<<< HEAD
-from typing import Any
-
-=======
from typing import Any, Dict, Union
from colorama import Fore
from regex import regex
->>>>>>> d3d8253b (Refactor parsing module and move JSON fix function to appropriate location)
from autogpt.config import Config
from autogpt.json_fixes.auto_fix import fix_json
from autogpt.json_fixes.bracket_termination import balance_braces
@@ -115,11 +110,7 @@ def fix_and_parse_json(
def try_ai_fix(
try_to_fix_with_gpt: bool, exception: Exception, json_to_load: str
-<<<<<<< HEAD
-) -> str | dict[Any, Any]:
-=======
) -> Dict[Any, Any]:
->>>>>>> d3d8253b (Refactor parsing module and move JSON fix function to appropriate location)
"""Try to fix the JSON with the AI
Args:
From dc80a5a2ec6c7ceb2055894684ca7b680039a4c7 Mon Sep 17 00:00:00 2001
From: Jakub Bober
Date: Sun, 16 Apr 2023 21:01:18 +0200
Subject: [PATCH 027/152] Add "Memory Backend Setup" subtitle
Add the subtitle to match the Table of Contents
---
README.md | 2 ++
1 file changed, 2 insertions(+)
diff --git a/README.md b/README.md
index 21f3ccf2..7fce2e8f 100644
--- a/README.md
+++ b/README.md
@@ -280,6 +280,8 @@ To switch to either, change the `MEMORY_BACKEND` env variable to the value that
* `milvus` will use the milvus cache that you configured
* `weaviate` will use the weaviate cache that you configured
+## Memory Backend Setup
+
### Redis Setup
> _**CAUTION**_ \
This is not intended to be publicly accessible and lacks security measures. Therefore, avoid exposing Redis to the internet without a password or at all
From 9b6bce4592800f6436bd877daba135cfee6b8f7d Mon Sep 17 00:00:00 2001
From: Eesa Hamza
Date: Sun, 16 Apr 2023 22:10:48 +0300
Subject: [PATCH 028/152] Improve the error logging for OAI Issues
---
autogpt/llm_utils.py | 21 +++++++++++++++++++--
1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py
index 2075f934..25dbabd4 100644
--- a/autogpt/llm_utils.py
+++ b/autogpt/llm_utils.py
@@ -5,9 +5,10 @@ import time
import openai
from openai.error import APIError, RateLimitError
-from colorama import Fore
+from colorama import Fore, Style
from autogpt.config import Config
+from autogpt.logs import logger
CFG = Config()
@@ -70,6 +71,7 @@ def create_chat_completion(
"""
response = None
num_retries = 10
+ warned_user = False
if CFG.debug_mode:
print(
Fore.GREEN
@@ -101,6 +103,11 @@ def create_chat_completion(
Fore.RED + "Error: ",
f"Reached rate limit, passing..." + Fore.RESET,
)
+ if not warned_user:
+ logger.double_check(
+ f"Please double check that you have setup a {Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. " +
+ f"You can read more here: {Fore.CYAN}https://github.com/Significant-Gravitas/Auto-GPT#openai-api-keys-configuration{Fore.RESET}")
+ warned_user = True
except APIError as e:
if e.http_status == 502:
pass
@@ -115,7 +122,17 @@ def create_chat_completion(
)
time.sleep(backoff)
if response is None:
- raise RuntimeError(f"Failed to get response after {num_retries} retries")
+ logger.typewriter_log(
+ "FAILED TO GET RESPONSE FROM OPENAI",
+ Fore.RED,
+ "Auto-GPT has failed to get a response from OpenAI's services. " +
+ f"Try running Auto-GPT again, and if the problem the persists try running it with `{Fore.CYAN}--debug{Fore.RESET}`."
+ )
+ logger.double_check()
+ if CFG.debug_mode:
+ raise RuntimeError(f"Failed to get response after {num_retries} retries")
+ else:
+ quit(1)
return response.choices[0].message["content"]
From 2d24876530f61a20e0c68fc449312fc84e142914 Mon Sep 17 00:00:00 2001
From: Eesa Hamza
Date: Sun, 16 Apr 2023 22:16:43 +0300
Subject: [PATCH 029/152] Fix linter issues
---
autogpt/llm_utils.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py
index 25dbabd4..3630108e 100644
--- a/autogpt/llm_utils.py
+++ b/autogpt/llm_utils.py
@@ -123,7 +123,7 @@ def create_chat_completion(
time.sleep(backoff)
if response is None:
logger.typewriter_log(
- "FAILED TO GET RESPONSE FROM OPENAI",
+ "FAILED TO GET RESPONSE FROM OPENAI",
Fore.RED,
"Auto-GPT has failed to get a response from OpenAI's services. " +
f"Try running Auto-GPT again, and if the problem the persists try running it with `{Fore.CYAN}--debug{Fore.RESET}`."
From 7b7d7c1d74b299966e607cf7dc6cf2cea64993ba Mon Sep 17 00:00:00 2001
From: Bates Jernigan
Date: Sun, 16 Apr 2023 16:33:52 -0400
Subject: [PATCH 030/152] add space on warning message
---
autogpt/memory/local.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/autogpt/memory/local.py b/autogpt/memory/local.py
index 6c7ee1b3..9b911eef 100644
--- a/autogpt/memory/local.py
+++ b/autogpt/memory/local.py
@@ -54,7 +54,7 @@ class LocalCache(MemoryProviderSingleton):
self.data = CacheContent()
else:
print(
- f"Warning: The file '{self.filename}' does not exist."
+ f"Warning: The file '{self.filename}' does not exist. "
"Local memory would not be saved to a file."
)
self.data = CacheContent()
From 627533bed631a15504b3584bf2aa70fe7b23aa86 Mon Sep 17 00:00:00 2001
From: 0xArty
Date: Sun, 16 Apr 2023 21:55:53 +0100
Subject: [PATCH 031/152] minimally add pytest (#1859)
* minimally add pytest
* updated docs and pytest command
* prevented milvus integration test from running if milvus is not installed
---
.pre-commit-config.yaml | 8 +-
README.md | 19 +++-
requirements.txt | 7 ++
tests/integration/milvus_memory_tests.py | 71 ++++++++-------
tests/local_cache_test.py | 35 +++++---
tests/milvus_memory_test.py | 109 ++++++++++++-----------
tests/smoke_test.py | 82 ++++++++---------
tests/unit/test_commands.py | 34 +++----
8 files changed, 208 insertions(+), 157 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index fb75cd59..dd1d0ec9 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -30,4 +30,10 @@ repos:
language: python
types: [ python ]
exclude: .+/(dist|.venv|venv|build)/.+
- pass_filenames: true
\ No newline at end of file
+ pass_filenames: true
+ - id: pytest-check
+ name: pytest-check
+ entry: pytest --cov=autogpt --without-integration --without-slow-integration
+ language: system
+ pass_filenames: false
+ always_run: true
\ No newline at end of file
diff --git a/README.md b/README.md
index 58ed4d97..f60aa9ff 100644
--- a/README.md
+++ b/README.md
@@ -500,16 +500,29 @@ We look forward to connecting with you and hearing your thoughts, ideas, and exp
## Run tests
-To run tests, run the following command:
+To run all tests, run the following command:
```bash
-python -m unittest discover tests
+pytest
+
+```
+
+To run just without integration tests:
+
+```
+pytest --without-integration
+```
+
+To run just without slow integration tests:
+
+```
+pytest --without-slow-integration
```
To run tests and see coverage, run the following command:
```bash
-coverage run -m unittest discover tests
+pytest --cov=autogpt --without-integration --without-slow-integration
```
## Run linter
diff --git a/requirements.txt b/requirements.txt
index 64c2e4c0..843b66bf 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -29,5 +29,12 @@ black
sourcery
isort
gitpython==3.1.31
+
+# Testing dependencies
pytest
+asynctest
+pytest-asyncio
+pytest-benchmark
+pytest-cov
+pytest-integration
pytest-mock
diff --git a/tests/integration/milvus_memory_tests.py b/tests/integration/milvus_memory_tests.py
index 96934cd6..ec38bf2f 100644
--- a/tests/integration/milvus_memory_tests.py
+++ b/tests/integration/milvus_memory_tests.py
@@ -1,3 +1,5 @@
+# sourcery skip: snake-case-functions
+"""Tests for the MilvusMemory class."""
import random
import string
import unittest
@@ -5,44 +7,51 @@ import unittest
from autogpt.config import Config
from autogpt.memory.milvus import MilvusMemory
+try:
-class TestMilvusMemory(unittest.TestCase):
- def random_string(self, length):
- return "".join(random.choice(string.ascii_letters) for _ in range(length))
+ class TestMilvusMemory(unittest.TestCase):
+ """Tests for the MilvusMemory class."""
- def setUp(self):
- cfg = Config()
- cfg.milvus_addr = "localhost:19530"
- self.memory = MilvusMemory(cfg)
- self.memory.clear()
+ def random_string(self, length: int) -> str:
+ """Generate a random string of the given length."""
+ return "".join(random.choice(string.ascii_letters) for _ in range(length))
- # Add example texts to the cache
- self.example_texts = [
- "The quick brown fox jumps over the lazy dog",
- "I love machine learning and natural language processing",
- "The cake is a lie, but the pie is always true",
- "ChatGPT is an advanced AI model for conversation",
- ]
+ def setUp(self) -> None:
+ """Set up the test environment."""
+ cfg = Config()
+ cfg.milvus_addr = "localhost:19530"
+ self.memory = MilvusMemory(cfg)
+ self.memory.clear()
- for text in self.example_texts:
- self.memory.add(text)
+ # Add example texts to the cache
+ self.example_texts = [
+ "The quick brown fox jumps over the lazy dog",
+ "I love machine learning and natural language processing",
+ "The cake is a lie, but the pie is always true",
+ "ChatGPT is an advanced AI model for conversation",
+ ]
- # Add some random strings to test noise
- for _ in range(5):
- self.memory.add(self.random_string(10))
+ for text in self.example_texts:
+ self.memory.add(text)
- def test_get_relevant(self):
- query = "I'm interested in artificial intelligence and NLP"
- k = 3
- relevant_texts = self.memory.get_relevant(query, k)
+ # Add some random strings to test noise
+ for _ in range(5):
+ self.memory.add(self.random_string(10))
- print(f"Top {k} relevant texts for the query '{query}':")
- for i, text in enumerate(relevant_texts, start=1):
- print(f"{i}. {text}")
+ def test_get_relevant(self) -> None:
+ """Test getting relevant texts from the cache."""
+ query = "I'm interested in artificial intelligence and NLP"
+ num_relevant = 3
+ relevant_texts = self.memory.get_relevant(query, num_relevant)
- self.assertEqual(len(relevant_texts), k)
- self.assertIn(self.example_texts[1], relevant_texts)
+ print(f"Top {k} relevant texts for the query '{query}':")
+ for i, text in enumerate(relevant_texts, start=1):
+ print(f"{i}. {text}")
+ self.assertEqual(len(relevant_texts), k)
+ self.assertIn(self.example_texts[1], relevant_texts)
-if __name__ == "__main__":
- unittest.main()
+except:
+ print(
+ "Skipping tests/integration/milvus_memory_tests.py as Milvus is not installed."
+ )
diff --git a/tests/local_cache_test.py b/tests/local_cache_test.py
index 91c922b0..fa596320 100644
--- a/tests/local_cache_test.py
+++ b/tests/local_cache_test.py
@@ -1,3 +1,5 @@
+# sourcery skip: snake-case-functions
+"""Tests for LocalCache class"""
import os
import sys
import unittest
@@ -5,7 +7,8 @@ import unittest
from autogpt.memory.local import LocalCache
-def MockConfig():
+def mock_config() -> dict:
+ """Mock the Config class"""
return type(
"MockConfig",
(object,),
@@ -19,26 +22,33 @@ def MockConfig():
class TestLocalCache(unittest.TestCase):
- def setUp(self):
- self.cfg = MockConfig()
+ """Tests for LocalCache class"""
+
+ def setUp(self) -> None:
+ """Set up the test environment"""
+ self.cfg = mock_config()
self.cache = LocalCache(self.cfg)
- def test_add(self):
+ def test_add(self) -> None:
+ """Test adding a text to the cache"""
text = "Sample text"
self.cache.add(text)
self.assertIn(text, self.cache.data.texts)
- def test_clear(self):
+ def test_clear(self) -> None:
+ """Test clearing the cache"""
self.cache.clear()
- self.assertEqual(self.cache.data, [""])
+ self.assertEqual(self.cache.data.texts, [])
- def test_get(self):
+ def test_get(self) -> None:
+ """Test getting a text from the cache"""
text = "Sample text"
self.cache.add(text)
result = self.cache.get(text)
self.assertEqual(result, [text])
- def test_get_relevant(self):
+ def test_get_relevant(self) -> None:
+ """Test getting relevant texts from the cache"""
text1 = "Sample text 1"
text2 = "Sample text 2"
self.cache.add(text1)
@@ -46,12 +56,9 @@ class TestLocalCache(unittest.TestCase):
result = self.cache.get_relevant(text1, 1)
self.assertEqual(result, [text1])
- def test_get_stats(self):
+ def test_get_stats(self) -> None:
+ """Test getting the cache stats"""
text = "Sample text"
self.cache.add(text)
stats = self.cache.get_stats()
- self.assertEqual(stats, (1, self.cache.data.embeddings.shape))
-
-
-if __name__ == "__main__":
- unittest.main()
+ self.assertEqual(stats, (4, self.cache.data.embeddings.shape))
diff --git a/tests/milvus_memory_test.py b/tests/milvus_memory_test.py
index 0113fa1c..e0e2f7fc 100644
--- a/tests/milvus_memory_test.py
+++ b/tests/milvus_memory_test.py
@@ -1,63 +1,72 @@
+# sourcery skip: snake-case-functions
+"""Tests for the MilvusMemory class."""
import os
import sys
import unittest
-from autogpt.memory.milvus import MilvusMemory
+try:
+ from autogpt.memory.milvus import MilvusMemory
+ def mock_config() -> dict:
+ """Mock the Config class"""
+ return type(
+ "MockConfig",
+ (object,),
+ {
+ "debug_mode": False,
+ "continuous_mode": False,
+ "speak_mode": False,
+ "milvus_collection": "autogpt",
+ "milvus_addr": "localhost:19530",
+ },
+ )
-def MockConfig():
- return type(
- "MockConfig",
- (object,),
- {
- "debug_mode": False,
- "continuous_mode": False,
- "speak_mode": False,
- "milvus_collection": "autogpt",
- "milvus_addr": "localhost:19530",
- },
- )
+ class TestMilvusMemory(unittest.TestCase):
+ """Tests for the MilvusMemory class."""
+ def setUp(self) -> None:
+ """Set up the test environment"""
+ self.cfg = MockConfig()
+ self.memory = MilvusMemory(self.cfg)
-class TestMilvusMemory(unittest.TestCase):
- def setUp(self):
- self.cfg = MockConfig()
- self.memory = MilvusMemory(self.cfg)
+ def test_add(self) -> None:
+ """Test adding a text to the cache"""
+ text = "Sample text"
+ self.memory.clear()
+ self.memory.add(text)
+ result = self.memory.get(text)
+ self.assertEqual([text], result)
- def test_add(self):
- text = "Sample text"
- self.memory.clear()
- self.memory.add(text)
- result = self.memory.get(text)
- self.assertEqual([text], result)
+ def test_clear(self) -> None:
+ """Test clearing the cache"""
+ self.memory.clear()
+ self.assertEqual(self.memory.collection.num_entities, 0)
- def test_clear(self):
- self.memory.clear()
- self.assertEqual(self.memory.collection.num_entities, 0)
+ def test_get(self) -> None:
+ """Test getting a text from the cache"""
+ text = "Sample text"
+ self.memory.clear()
+ self.memory.add(text)
+ result = self.memory.get(text)
+ self.assertEqual(result, [text])
- def test_get(self):
- text = "Sample text"
- self.memory.clear()
- self.memory.add(text)
- result = self.memory.get(text)
- self.assertEqual(result, [text])
+ def test_get_relevant(self) -> None:
+ """Test getting relevant texts from the cache"""
+ text1 = "Sample text 1"
+ text2 = "Sample text 2"
+ self.memory.clear()
+ self.memory.add(text1)
+ self.memory.add(text2)
+ result = self.memory.get_relevant(text1, 1)
+ self.assertEqual(result, [text1])
- def test_get_relevant(self):
- text1 = "Sample text 1"
- text2 = "Sample text 2"
- self.memory.clear()
- self.memory.add(text1)
- self.memory.add(text2)
- result = self.memory.get_relevant(text1, 1)
- self.assertEqual(result, [text1])
+ def test_get_stats(self) -> None:
+ """Test getting the cache stats"""
+ text = "Sample text"
+ self.memory.clear()
+ self.memory.add(text)
+ stats = self.memory.get_stats()
+ self.assertEqual(15, len(stats))
- def test_get_stats(self):
- text = "Sample text"
- self.memory.clear()
- self.memory.add(text)
- stats = self.memory.get_stats()
- self.assertEqual(15, len(stats))
-
-
-if __name__ == "__main__":
- unittest.main()
+except:
+ print("Milvus not installed, skipping tests")
diff --git a/tests/smoke_test.py b/tests/smoke_test.py
index 50e97b7b..1b9d643f 100644
--- a/tests/smoke_test.py
+++ b/tests/smoke_test.py
@@ -1,31 +1,34 @@
+"""Smoke test for the autogpt package."""
import os
import subprocess
import sys
-import unittest
+
+import pytest
from autogpt.commands.file_operations import delete_file, read_file
-env_vars = {"MEMORY_BACKEND": "no_memory", "TEMPERATURE": "0"}
+@pytest.mark.integration_test
+def test_write_file() -> None:
+ """
+ Test case to check if the write_file command can successfully write 'Hello World' to a file
+ named 'hello_world.txt'.
-class TestCommands(unittest.TestCase):
- def test_write_file(self):
- # Test case to check if the write_file command can successfully write 'Hello World' to a file
- # named 'hello_world.txt'.
+ Read the current ai_settings.yaml file and store its content.
+ """
+ env_vars = {"MEMORY_BACKEND": "no_memory", "TEMPERATURE": "0"}
+ ai_settings = None
+ if os.path.exists("ai_settings.yaml"):
+ with open("ai_settings.yaml", "r") as f:
+ ai_settings = f.read()
+ os.remove("ai_settings.yaml")
- # Read the current ai_settings.yaml file and store its content.
- ai_settings = None
- if os.path.exists("ai_settings.yaml"):
- with open("ai_settings.yaml", "r") as f:
- ai_settings = f.read()
- os.remove("ai_settings.yaml")
-
- try:
- if os.path.exists("hello_world.txt"):
- # Clean up any existing 'hello_world.txt' file before testing.
- delete_file("hello_world.txt")
- # Prepare input data for the test.
- input_data = """write_file-GPT
+ try:
+ if os.path.exists("hello_world.txt"):
+ # Clean up any existing 'hello_world.txt' file before testing.
+ delete_file("hello_world.txt")
+ # Prepare input data for the test.
+ input_data = """write_file-GPT
an AI designed to use the write_file command to write 'Hello World' into a file named "hello_world.txt" and then use the task_complete command to complete the task.
Use the write_file command to write 'Hello World' into a file named "hello_world.txt".
Use the task_complete command to complete the task.
@@ -33,31 +36,24 @@ Do not use any other commands.
y -5
EOF"""
- command = f"{sys.executable} -m autogpt"
+ command = f"{sys.executable} -m autogpt"
- # Execute the script with the input data.
- process = subprocess.Popen(
- command,
- stdin=subprocess.PIPE,
- shell=True,
- env={**os.environ, **env_vars},
- )
- process.communicate(input_data.encode())
-
- # Read the content of the 'hello_world.txt' file created during the test.
- content = read_file("hello_world.txt")
- finally:
- if ai_settings:
- # Restore the original ai_settings.yaml file.
- with open("ai_settings.yaml", "w") as f:
- f.write(ai_settings)
-
- # Check if the content of the 'hello_world.txt' file is equal to 'Hello World'.
- self.assertEqual(
- content, "Hello World", f"Expected 'Hello World', got {content}"
+ # Execute the script with the input data.
+ process = subprocess.Popen(
+ command,
+ stdin=subprocess.PIPE,
+ shell=True,
+ env={**os.environ, **env_vars},
)
+ process.communicate(input_data.encode())
+ # Read the content of the 'hello_world.txt' file created during the test.
+ content = read_file("hello_world.txt")
+ finally:
+ if ai_settings:
+ # Restore the original ai_settings.yaml file.
+ with open("ai_settings.yaml", "w") as f:
+ f.write(ai_settings)
-# Run the test case.
-if __name__ == "__main__":
- unittest.main()
+ # Check if the content of the 'hello_world.txt' file is equal to 'Hello World'.
+ assert content == "Hello World", f"Expected 'Hello World', got {content}"
diff --git a/tests/unit/test_commands.py b/tests/unit/test_commands.py
index e15709aa..ecbac9b7 100644
--- a/tests/unit/test_commands.py
+++ b/tests/unit/test_commands.py
@@ -1,18 +1,22 @@
+"""Unit tests for the commands module"""
+from unittest.mock import MagicMock, patch
+
+import pytest
+
import autogpt.agent.agent_manager as agent_manager
-from autogpt.app import start_agent, list_agents, execute_command
-import unittest
-from unittest.mock import patch, MagicMock
+from autogpt.app import execute_command, list_agents, start_agent
-class TestCommands(unittest.TestCase):
- def test_make_agent(self):
- with patch("openai.ChatCompletion.create") as mock:
- obj = MagicMock()
- obj.response.choices[0].messages[0].content = "Test message"
- mock.return_value = obj
- start_agent("Test Agent", "chat", "Hello, how are you?", "gpt2")
- agents = list_agents()
- self.assertEqual("List of agents:\n0: chat", agents)
- start_agent("Test Agent 2", "write", "Hello, how are you?", "gpt2")
- agents = list_agents()
- self.assertEqual("List of agents:\n0: chat\n1: write", agents)
+@pytest.mark.integration_test
+def test_make_agent() -> None:
+ """Test the make_agent command"""
+ with patch("openai.ChatCompletion.create") as mock:
+ obj = MagicMock()
+ obj.response.choices[0].messages[0].content = "Test message"
+ mock.return_value = obj
+ start_agent("Test Agent", "chat", "Hello, how are you?", "gpt2")
+ agents = list_agents()
+ assert "List of agents:\n0: chat" == agents
+ start_agent("Test Agent 2", "write", "Hello, how are you?", "gpt2")
+ agents = list_agents()
+ assert "List of agents:\n0: chat\n1: write" == agents
From 4269326ddfd81227e78b0745093f52e4ac1ba078 Mon Sep 17 00:00:00 2001
From: 0xf333 <0x333@tuta.io>
Date: Sun, 16 Apr 2023 17:03:18 -0400
Subject: [PATCH 032/152] Fix: Update run_continuous.sh to pass all
command-line arguments
Description:
- Modified `run_continuous.sh` to include the `--continuous` flag directly in the command:
- Removed the unused `argument` variable.
- Added the `--continuous` flag to the `./run.sh` command.
- Ensured all command-line arguments are passed through to `run.sh` and the `autogpt` module.
This change improves the usability of the `run_continuous.sh` script by allowing users to provide
additional command-line arguments along with the `--continuous` flag. It ensures that all arguments
are properly passed to the `run.sh` script and eventually to the `autogpt` module, preventing
confusion and providing more flexible usage.
Suggestion from:
https://github.com/Significant-Gravitas/Auto-GPT/pull/1941#discussion_r1167977442
---
run_continuous.sh | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/run_continuous.sh b/run_continuous.sh
index 14c9cfd2..43034f8e 100755
--- a/run_continuous.sh
+++ b/run_continuous.sh
@@ -1,3 +1,3 @@
#!/bin/bash
-argument="--continuous"
-./run.sh "$argument"
+
+./run.sh --continuous "$@"
From 147d3733bf068d8c71a901b8a0e31cfda5c4a687 Mon Sep 17 00:00:00 2001
From: 0xArty
Date: Sun, 16 Apr 2023 16:03:22 +0100
Subject: [PATCH 033/152] Change ci to pytest
---
.github/workflows/ci.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 366aaf67..39f3aea9 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -36,7 +36,7 @@ jobs:
- name: Run unittest tests with coverage
run: |
- coverage run --source=autogpt -m unittest discover tests
+ pytest --cov=autogpt --without-integration --without-slow-integration
- name: Generate coverage report
run: |
From 955a5b0a4357802a8142585ad78105f6342738ad Mon Sep 17 00:00:00 2001
From: 0xArty
Date: Sun, 16 Apr 2023 16:13:16 +0100
Subject: [PATCH 034/152] Marked local cache tests as integration tests as
they require api keys
---
tests/local_cache_test.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/tests/local_cache_test.py b/tests/local_cache_test.py
index fa596320..bb108626 100644
--- a/tests/local_cache_test.py
+++ b/tests/local_cache_test.py
@@ -4,6 +4,8 @@ import os
import sys
import unittest
+import pytest
+
from autogpt.memory.local import LocalCache
@@ -21,6 +23,7 @@ def mock_config() -> dict:
)
+@pytest.mark.integration_test
class TestLocalCache(unittest.TestCase):
"""Tests for LocalCache class"""
From 5ff7fc340b908281c6eb976358947e87f289c0f7 Mon Sep 17 00:00:00 2001
From: endolith
Date: Sun, 16 Apr 2023 08:47:11 -0400
Subject: [PATCH 035/152] Remove extraneous noqa E722 comment
E722 is "Do not use bare except, specify exception instead" but
except json.JSONDecodeError
is not a bare except
---
autogpt/json_fixes/auto_fix.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/autogpt/json_fixes/auto_fix.py b/autogpt/json_fixes/auto_fix.py
index 9fcf909a..0d3bd73c 100644
--- a/autogpt/json_fixes/auto_fix.py
+++ b/autogpt/json_fixes/auto_fix.py
@@ -45,7 +45,7 @@ def fix_json(json_string: str, schema: str) -> str:
try:
json.loads(result_string) # just check the validity
return result_string
- except json.JSONDecodeError: # noqa: E722
+ except json.JSONDecodeError:
# Get the call stack:
# import traceback
# call_stack = traceback.format_exc()
From 8f0d553e4eaed9757f87ec33ec202cc7e570d8d5 Mon Sep 17 00:00:00 2001
From: Benedict Hobart
Date: Sun, 16 Apr 2023 15:45:38 +0000
Subject: [PATCH 036/152] Improve dev containers so autogpt can browse the web
---
.devcontainer/Dockerfile | 7 ++++++-
.devcontainer/devcontainer.json | 1 +
autogpt/commands/web_selenium.py | 1 +
3 files changed, 8 insertions(+), 1 deletion(-)
diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
index f3b2e2db..379f6310 100644
--- a/.devcontainer/Dockerfile
+++ b/.devcontainer/Dockerfile
@@ -1,6 +1,6 @@
# [Choice] Python version (use -bullseye variants on local arm64/Apple Silicon): 3, 3.10, 3.9, 3.8, 3.7, 3.6, 3-bullseye, 3.10-bullseye, 3.9-bullseye, 3.8-bullseye, 3.7-bullseye, 3.6-bullseye, 3-buster, 3.10-buster, 3.9-buster, 3.8-buster, 3.7-buster, 3.6-buster
ARG VARIANT=3-bullseye
-FROM python:3.8
+FROM --platform=linux/amd64 python:3.8
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
# Remove imagemagick due to https://security-tracker.debian.org/tracker/CVE-2019-10131
@@ -10,6 +10,11 @@ RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
# They are installed by the base image (python) which does not have the patch.
RUN python3 -m pip install --upgrade setuptools
+# Install Chrome for web browsing
+RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
+ && curl -sSL https://dl.google.com/linux/direct/google-chrome-stable_current_$(dpkg --print-architecture).deb -o /tmp/chrome.deb \
+ && apt-get -y install /tmp/chrome.deb
+
# [Optional] If your pip requirements rarely change, uncomment this section to add them to the image.
# COPY requirements.txt /tmp/pip-tmp/
# RUN pip3 --disable-pip-version-check --no-cache-dir install -r /tmp/pip-tmp/requirements.txt \
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index 5fefd9c1..f26810fb 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -11,6 +11,7 @@
"userGid": "1000",
"upgradePackages": "true"
},
+ "ghcr.io/devcontainers/features/desktop-lite:1": {},
"ghcr.io/devcontainers/features/python:1": "none",
"ghcr.io/devcontainers/features/node:1": "none",
"ghcr.io/devcontainers/features/git:1": {
diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py
index 1d078d76..8c652294 100644
--- a/autogpt/commands/web_selenium.py
+++ b/autogpt/commands/web_selenium.py
@@ -75,6 +75,7 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
# See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari
driver = webdriver.Safari(options=options)
else:
+ options.add_argument("--no-sandbox")
driver = webdriver.Chrome(
executable_path=ChromeDriverManager().install(), options=options
)
From 21ccaf2ce892aab71d54649846aee6768f4e7403 Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Sun, 16 Apr 2023 14:16:48 -0700
Subject: [PATCH 037/152] Refactor variable names and remove unnecessary blank
lines in __main__.py
---
autogpt/__main__.py | 11 ++++-------
1 file changed, 4 insertions(+), 7 deletions(-)
diff --git a/autogpt/__main__.py b/autogpt/__main__.py
index 29ccddbf..7fe6aec3 100644
--- a/autogpt/__main__.py
+++ b/autogpt/__main__.py
@@ -3,13 +3,10 @@ import logging
from colorama import Fore
from autogpt.agent.agent import Agent
from autogpt.args import parse_arguments
-
from autogpt.config import Config, check_openai_api_key
from autogpt.logs import logger
from autogpt.memory import get_memory
-
from autogpt.prompt import construct_prompt
-
# Load environment variables from .env file
@@ -21,13 +18,13 @@ def main() -> None:
parse_arguments()
logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO)
ai_name = ""
- prompt = construct_prompt()
+ master_prompt = construct_prompt()
# print(prompt)
# Initialize variables
full_message_history = []
next_action_count = 0
# Make a constant:
- user_input = (
+ triggering_prompt = (
"Determine which next command to use, and respond using the"
" format specified above:"
)
@@ -43,8 +40,8 @@ def main() -> None:
memory=memory,
full_message_history=full_message_history,
next_action_count=next_action_count,
- prompt=prompt,
- user_input=user_input,
+ master_prompt=master_prompt,
+ triggering_prompt=triggering_prompt,
)
agent.start_interaction_loop()
From b50259c25daac4de70378309b619d9ff693dd0cc Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Sun, 16 Apr 2023 14:16:48 -0700
Subject: [PATCH 038/152] Update variable names, improve comments, and modify
input handling in agent.py
---
autogpt/agent/agent.py | 43 +++++++++++++++++++++++++-----------------
1 file changed, 26 insertions(+), 17 deletions(-)
diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py
index 32d982e5..3be17a89 100644
--- a/autogpt/agent/agent.py
+++ b/autogpt/agent/agent.py
@@ -19,9 +19,18 @@ class Agent:
memory: The memory object to use.
full_message_history: The full message history.
next_action_count: The number of actions to execute.
- prompt: The prompt to use.
- user_input: The user input.
+ master_prompt: The master prompt is the initial prompt that defines everything the AI needs to know to achieve its task successfully.
+ Currently, the dynamic and customizable information in the master prompt are ai_name, description and goals.
+ triggering_prompt: The last sentence the AI will see before answering. For Auto-GPT, this prompt is:
+ Determine which next command to use, and respond using the format specified above:
+ The triggering prompt is not part of the master prompt because between the master prompt and the triggering
+ prompt we have contextual information that can distract the AI and make it forget that its goal is to find the next task to achieve.
+ MASTER PROMPT
+ CONTEXTUAL INFORMATION (memory, previous conversations, anything relevant)
+ TRIGGERING PROMPT
+
+ The triggering prompt reminds the AI about its short term meta task (defining the next task)
"""
def __init__(
@@ -30,15 +39,15 @@ class Agent:
memory,
full_message_history,
next_action_count,
- prompt,
- user_input,
+ master_prompt,
+ triggering_prompt,
):
self.ai_name = ai_name
self.memory = memory
self.full_message_history = full_message_history
self.next_action_count = next_action_count
- self.prompt = prompt
- self.user_input = user_input
+ self.master_prompt = master_prompt
+ self.triggering_prompt = triggering_prompt
def start_interaction_loop(self):
# Interaction Loop
@@ -62,8 +71,8 @@ class Agent:
# Send message to AI, get response
with Spinner("Thinking... "):
assistant_reply = chat_with_ai(
- self.prompt,
- self.user_input,
+ self.master_prompt,
+ self.triggering_prompt,
self.full_message_history,
self.memory,
cfg.fast_token_limit,
@@ -88,7 +97,7 @@ class Agent:
### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
# Get key press: Prompt the user to press enter to continue or escape
# to exit
- self.user_input = ""
+ user_input = ""
logger.typewriter_log(
"NEXT ACTION: ",
Fore.CYAN,
@@ -106,14 +115,14 @@ class Agent:
Fore.MAGENTA + "Input:" + Style.RESET_ALL
)
if console_input.lower().rstrip() == "y":
- self.user_input = "GENERATE NEXT COMMAND JSON"
+ user_input = "GENERATE NEXT COMMAND JSON"
break
elif console_input.lower().startswith("y -"):
try:
self.next_action_count = abs(
int(console_input.split(" ")[1])
)
- self.user_input = "GENERATE NEXT COMMAND JSON"
+ user_input = "GENERATE NEXT COMMAND JSON"
except ValueError:
print(
"Invalid input format. Please enter 'y -n' where n is"
@@ -122,20 +131,20 @@ class Agent:
continue
break
elif console_input.lower() == "n":
- self.user_input = "EXIT"
+ user_input = "EXIT"
break
else:
- self.user_input = console_input
+ user_input = console_input
command_name = "human_feedback"
break
- if self.user_input == "GENERATE NEXT COMMAND JSON":
+ if user_input == "GENERATE NEXT COMMAND JSON":
logger.typewriter_log(
"-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
Fore.MAGENTA,
"",
)
- elif self.user_input == "EXIT":
+ elif user_input == "EXIT":
print("Exiting...", flush=True)
break
else:
@@ -153,7 +162,7 @@ class Agent:
f"Command {command_name} threw the following error: {arguments}"
)
elif command_name == "human_feedback":
- result = f"Human feedback: {self.user_input}"
+ result = f"Human feedback: {user_input}"
else:
result = (
f"Command {command_name} returned: "
@@ -165,7 +174,7 @@ class Agent:
memory_to_add = (
f"Assistant Reply: {assistant_reply} "
f"\nResult: {result} "
- f"\nHuman Feedback: {self.user_input} "
+ f"\nHuman Feedback: {user_input} "
)
self.memory.add(memory_to_add)
From b5e0127b16bb88f6b6e18ada0efabc1422c9f3de Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Sun, 16 Apr 2023 14:16:48 -0700
Subject: [PATCH 039/152] Only print JSON object validation message in debug
mode
---
autogpt/json_validation/validate_json.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/autogpt/json_validation/validate_json.py b/autogpt/json_validation/validate_json.py
index 127fcc17..440c3b0b 100644
--- a/autogpt/json_validation/validate_json.py
+++ b/autogpt/json_validation/validate_json.py
@@ -24,7 +24,7 @@ def validate_json(json_object: object, schema_name: object) -> object:
for error in errors:
logger.error(f"Error: {error.message}")
- else:
+ elif CFG.debug_mode:
print("The JSON object is valid.")
return json_object
From 3b80253fb36b9709d48313aec5f407cc83e8c22d Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Sun, 16 Apr 2023 14:16:48 -0700
Subject: [PATCH 040/152] Update process creation in benchmark script
---
benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py b/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py
index d6cae972..f7f1dac9 100644
--- a/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py
+++ b/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py
@@ -73,9 +73,12 @@ Disappointing suggestion.
Not helpful.
Needs improvement.
Not what I need.'''
+ # TODO: add questions above, to distract it even more.
+
command = f'{sys.executable} -m autogpt'
- process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+ process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+ shell=True)
stdout_output, stderr_output = process.communicate(input_data.encode())
From 89e0e8992795accfc41183723064dcdab9719f8e Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Sun, 16 Apr 2023 14:22:58 -0700
Subject: [PATCH 041/152] change master prompt to system prompt
---
autogpt/__main__.py | 4 ++--
autogpt/agent/agent.py | 14 +++++++-------
2 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/autogpt/__main__.py b/autogpt/__main__.py
index 7fe6aec3..5f462234 100644
--- a/autogpt/__main__.py
+++ b/autogpt/__main__.py
@@ -18,7 +18,7 @@ def main() -> None:
parse_arguments()
logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO)
ai_name = ""
- master_prompt = construct_prompt()
+ system_prompt = construct_prompt()
# print(prompt)
# Initialize variables
full_message_history = []
@@ -40,7 +40,7 @@ def main() -> None:
memory=memory,
full_message_history=full_message_history,
next_action_count=next_action_count,
- master_prompt=master_prompt,
+ system_prompt=system_prompt,
triggering_prompt=triggering_prompt,
)
agent.start_interaction_loop()
diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py
index 3be17a89..9853f6a0 100644
--- a/autogpt/agent/agent.py
+++ b/autogpt/agent/agent.py
@@ -19,14 +19,14 @@ class Agent:
memory: The memory object to use.
full_message_history: The full message history.
next_action_count: The number of actions to execute.
- master_prompt: The master prompt is the initial prompt that defines everything the AI needs to know to achieve its task successfully.
- Currently, the dynamic and customizable information in the master prompt are ai_name, description and goals.
+ system_prompt: The system prompt is the initial prompt that defines everything the AI needs to know to achieve its task successfully.
+ Currently, the dynamic and customizable information in the system prompt are ai_name, description and goals.
triggering_prompt: The last sentence the AI will see before answering. For Auto-GPT, this prompt is:
Determine which next command to use, and respond using the format specified above:
- The triggering prompt is not part of the master prompt because between the master prompt and the triggering
+ The triggering prompt is not part of the system prompt because between the system prompt and the triggering
prompt we have contextual information that can distract the AI and make it forget that its goal is to find the next task to achieve.
- MASTER PROMPT
+ SYSTEM PROMPT
CONTEXTUAL INFORMATION (memory, previous conversations, anything relevant)
TRIGGERING PROMPT
@@ -39,14 +39,14 @@ class Agent:
memory,
full_message_history,
next_action_count,
- master_prompt,
+ system_prompt,
triggering_prompt,
):
self.ai_name = ai_name
self.memory = memory
self.full_message_history = full_message_history
self.next_action_count = next_action_count
- self.master_prompt = master_prompt
+ self.system_prompt = system_prompt
self.triggering_prompt = triggering_prompt
def start_interaction_loop(self):
@@ -71,7 +71,7 @@ class Agent:
# Send message to AI, get response
with Spinner("Thinking... "):
assistant_reply = chat_with_ai(
- self.master_prompt,
+ self.system_prompt,
self.triggering_prompt,
self.full_message_history,
self.memory,
From 4f33e1bf89e580355dfcf6890779799c584e9563 Mon Sep 17 00:00:00 2001
From: k-boikov
Date: Sun, 16 Apr 2023 18:38:08 +0300
Subject: [PATCH 042/152] add utf-8 encoding to file handlers for logging
---
autogpt/logs.py | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/autogpt/logs.py b/autogpt/logs.py
index f18e2140..c1e436db 100644
--- a/autogpt/logs.py
+++ b/autogpt/logs.py
@@ -46,7 +46,9 @@ class Logger(metaclass=Singleton):
self.console_handler.setFormatter(console_formatter)
# Info handler in activity.log
- self.file_handler = logging.FileHandler(os.path.join(log_dir, log_file))
+ self.file_handler = logging.FileHandler(
+ os.path.join(log_dir, log_file), 'a', 'utf-8'
+ )
self.file_handler.setLevel(logging.DEBUG)
info_formatter = AutoGptFormatter(
"%(asctime)s %(levelname)s %(title)s %(message_no_color)s"
@@ -54,7 +56,9 @@ class Logger(metaclass=Singleton):
self.file_handler.setFormatter(info_formatter)
# Error handler error.log
- error_handler = logging.FileHandler(os.path.join(log_dir, error_file))
+ error_handler = logging.FileHandler(
+ os.path.join(log_dir, error_file), 'a', 'utf-8'
+ )
error_handler.setLevel(logging.ERROR)
error_formatter = AutoGptFormatter(
"%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s"
From 4eb8e7823d63ff4f8d67b8927da842ea7ab3ab21 Mon Sep 17 00:00:00 2001
From: 0xf333 <0x333@tuta.io>
Date: Sun, 16 Apr 2023 18:07:41 -0400
Subject: [PATCH 043/152] Fix: Remove quotes around $@ in run_continuous.sh
Description:
Per maintainer's request, removed quotes around `$@` in `run_continuous.sh`.
This change allows the script to forward arguments as is. Please note that
this modification might cause issues if any of the command-line arguments
contain spaces or special characters. However, this update aligns with the
preferred format for the repository.
Suggestion from:
https://github.com/Significant-Gravitas/Auto-GPT/pull/1941#discussion_r1168035557
---
run_continuous.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/run_continuous.sh b/run_continuous.sh
index 43034f8e..1f4436c8 100755
--- a/run_continuous.sh
+++ b/run_continuous.sh
@@ -1,3 +1,3 @@
#!/bin/bash
-./run.sh --continuous "$@"
+./run.sh --continuous $@
From 1513be4acdcc85b27869219938ed90610a7db673 Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Sun, 16 Apr 2023 15:31:53 -0700
Subject: [PATCH 044/152] hotfix user input
---
autogpt/agent/agent.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py
index 9853f6a0..dca614c7 100644
--- a/autogpt/agent/agent.py
+++ b/autogpt/agent/agent.py
@@ -55,6 +55,8 @@ class Agent:
loop_count = 0
command_name = None
arguments = None
+ user_input = ""
+
while True:
# Discontinue if continuous limit is reached
loop_count += 1
@@ -97,7 +99,6 @@ class Agent:
### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
# Get key press: Prompt the user to press enter to continue or escape
# to exit
- user_input = ""
logger.typewriter_log(
"NEXT ACTION: ",
Fore.CYAN,
From c71c61dc584a41d72e2b27b02fe75a9f64e3e029 Mon Sep 17 00:00:00 2001
From: Adrian Scott
Date: Sun, 16 Apr 2023 18:14:16 -0500
Subject: [PATCH 045/152] Added one space after period for better formatting
---
autogpt/memory/local.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/autogpt/memory/local.py b/autogpt/memory/local.py
index 6c7ee1b3..9b911eef 100644
--- a/autogpt/memory/local.py
+++ b/autogpt/memory/local.py
@@ -54,7 +54,7 @@ class LocalCache(MemoryProviderSingleton):
self.data = CacheContent()
else:
print(
- f"Warning: The file '{self.filename}' does not exist."
+ f"Warning: The file '{self.filename}' does not exist. "
"Local memory would not be saved to a file."
)
self.data = CacheContent()
From 15059c2090be47d2a674113f509618b3f58a3510 Mon Sep 17 00:00:00 2001
From: Chris Cheney
Date: Sun, 16 Apr 2023 17:28:25 -0500
Subject: [PATCH 046/152] ensure git operations occur in the working directory
---
autogpt/commands/git_operations.py | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/autogpt/commands/git_operations.py b/autogpt/commands/git_operations.py
index 3ff35cf3..675eb228 100644
--- a/autogpt/commands/git_operations.py
+++ b/autogpt/commands/git_operations.py
@@ -1,6 +1,7 @@
"""Git operations for autogpt"""
import git
from autogpt.config import Config
+from autogpt.workspace import path_in_workspace
CFG = Config()
@@ -16,8 +17,9 @@ def clone_repository(repo_url: str, clone_path: str) -> str:
str: The result of the clone operation"""
split_url = repo_url.split("//")
auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url)
+ safe_clone_path = path_in_workspace(clone_path)
try:
- git.Repo.clone_from(auth_repo_url, clone_path)
- return f"""Cloned {repo_url} to {clone_path}"""
+ git.Repo.clone_from(auth_repo_url, safe_clone_path)
+ return f"""Cloned {repo_url} to {safe_clone_path}"""
except Exception as e:
return f"Error: {str(e)}"
From 7a32e03bd537b3c2b98b9f55b2962a37a0c046c6 Mon Sep 17 00:00:00 2001
From: bingokon
Date: Mon, 17 Apr 2023 00:48:53 +0100
Subject: [PATCH 047/152] refactoring all the json utilities
---
autogpt/agent/agent.py | 4 +-
autogpt/app.py | 2 +-
autogpt/json_fixes/auto_fix.py | 53 ------
autogpt/json_fixes/bracket_termination.py | 36 ----
autogpt/json_fixes/escaping.py | 33 ----
autogpt/json_fixes/master_json_fix_method.py | 28 ---
autogpt/json_fixes/missing_quotes.py | 27 ---
autogpt/json_fixes/utilities.py | 20 ---
.../{json_fixes => json_utils}/__init__.py | 0
.../parsing.py => json_utils/auto_fix.py} | 170 ++++++++++++++++--
.../llm_response_format_1.json | 0
.../utilities.py} | 23 ++-
autogpt/logs.py | 5 +-
tests/test_json_parser.py | 2 +-
tests/unit/json_tests.py | 2 +-
15 files changed, 188 insertions(+), 217 deletions(-)
delete mode 100644 autogpt/json_fixes/auto_fix.py
delete mode 100644 autogpt/json_fixes/bracket_termination.py
delete mode 100644 autogpt/json_fixes/escaping.py
delete mode 100644 autogpt/json_fixes/master_json_fix_method.py
delete mode 100644 autogpt/json_fixes/missing_quotes.py
delete mode 100644 autogpt/json_fixes/utilities.py
rename autogpt/{json_fixes => json_utils}/__init__.py (100%)
rename autogpt/{json_fixes/parsing.py => json_utils/auto_fix.py} (51%)
rename autogpt/{json_schemas => json_utils}/llm_response_format_1.json (100%)
rename autogpt/{json_validation/validate_json.py => json_utils/utilities.py} (63%)
diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py
index dca614c7..6ec0a623 100644
--- a/autogpt/agent/agent.py
+++ b/autogpt/agent/agent.py
@@ -3,8 +3,8 @@ from autogpt.app import execute_command, get_command
from autogpt.chat import chat_with_ai, create_chat_message
from autogpt.config import Config
-from autogpt.json_fixes.master_json_fix_method import fix_json_using_multiple_techniques
-from autogpt.json_validation.validate_json import validate_json
+from autogpt.json_utils.auto_fix import fix_json_using_multiple_techniques
+from autogpt.json_utils.utilities import validate_json
from autogpt.logs import logger, print_assistant_thoughts
from autogpt.speech import say_text
from autogpt.spinner import Spinner
diff --git a/autogpt/app.py b/autogpt/app.py
index 78b5bd2f..190f934b 100644
--- a/autogpt/app.py
+++ b/autogpt/app.py
@@ -18,7 +18,7 @@ from autogpt.commands.file_operations import (
search_files,
write_to_file,
)
-from autogpt.json_fixes.parsing import fix_and_parse_json
+from autogpt.json_utils.auto_fix import fix_and_parse_json
from autogpt.memory import get_memory
from autogpt.processing.text import summarize_text
from autogpt.speech import say_text
diff --git a/autogpt/json_fixes/auto_fix.py b/autogpt/json_fixes/auto_fix.py
deleted file mode 100644
index 9fcf909a..00000000
--- a/autogpt/json_fixes/auto_fix.py
+++ /dev/null
@@ -1,53 +0,0 @@
-"""This module contains the function to fix JSON strings using GPT-3."""
-import json
-
-from autogpt.llm_utils import call_ai_function
-from autogpt.logs import logger
-from autogpt.config import Config
-
-CFG = Config()
-
-
-def fix_json(json_string: str, schema: str) -> str:
- """Fix the given JSON string to make it parseable and fully compliant with
- the provided schema.
-
- Args:
- json_string (str): The JSON string to fix.
- schema (str): The schema to use to fix the JSON.
- Returns:
- str: The fixed JSON string.
- """
- # Try to fix the JSON using GPT:
- function_string = "def fix_json(json_string: str, schema:str=None) -> str:"
- args = [f"'''{json_string}'''", f"'''{schema}'''"]
- description_string = (
- "This function takes a JSON string and ensures that it"
- " is parseable and fully compliant with the provided schema. If an object"
- " or field specified in the schema isn't contained within the correct JSON,"
- " it is omitted. The function also escapes any double quotes within JSON"
- " string values to ensure that they are valid. If the JSON string contains"
- " any None or NaN values, they are replaced with null before being parsed."
- )
-
- # If it doesn't already start with a "`", add one:
- if not json_string.startswith("`"):
- json_string = "```json\n" + json_string + "\n```"
- result_string = call_ai_function(
- function_string, args, description_string, model=CFG.fast_llm_model
- )
- logger.debug("------------ JSON FIX ATTEMPT ---------------")
- logger.debug(f"Original JSON: {json_string}")
- logger.debug("-----------")
- logger.debug(f"Fixed JSON: {result_string}")
- logger.debug("----------- END OF FIX ATTEMPT ----------------")
-
- try:
- json.loads(result_string) # just check the validity
- return result_string
- except json.JSONDecodeError: # noqa: E722
- # Get the call stack:
- # import traceback
- # call_stack = traceback.format_exc()
- # print(f"Failed to fix JSON: '{json_string}' "+call_stack)
- return "failed"
diff --git a/autogpt/json_fixes/bracket_termination.py b/autogpt/json_fixes/bracket_termination.py
deleted file mode 100644
index dd9a8376..00000000
--- a/autogpt/json_fixes/bracket_termination.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""Fix JSON brackets."""
-from __future__ import annotations
-
-import contextlib
-import json
-from typing import Optional
-from autogpt.config import Config
-
-CFG = Config()
-
-
-def balance_braces(json_string: str) -> Optional[str]:
- """
- Balance the braces in a JSON string.
-
- Args:
- json_string (str): The JSON string.
-
- Returns:
- str: The JSON string with braces balanced.
- """
-
- open_braces_count = json_string.count("{")
- close_braces_count = json_string.count("}")
-
- while open_braces_count > close_braces_count:
- json_string += "}"
- close_braces_count += 1
-
- while close_braces_count > open_braces_count:
- json_string = json_string.rstrip("}")
- close_braces_count -= 1
-
- with contextlib.suppress(json.JSONDecodeError):
- json.loads(json_string)
- return json_string
diff --git a/autogpt/json_fixes/escaping.py b/autogpt/json_fixes/escaping.py
deleted file mode 100644
index 68eb1714..00000000
--- a/autogpt/json_fixes/escaping.py
+++ /dev/null
@@ -1,33 +0,0 @@
-""" Fix invalid escape sequences in JSON strings. """
-import json
-
-from autogpt.config import Config
-from autogpt.json_fixes.utilities import extract_char_position
-
-CFG = Config()
-
-
-def fix_invalid_escape(json_to_load: str, error_message: str) -> str:
- """Fix invalid escape sequences in JSON strings.
-
- Args:
- json_to_load (str): The JSON string.
- error_message (str): The error message from the JSONDecodeError
- exception.
-
- Returns:
- str: The JSON string with invalid escape sequences fixed.
- """
- while error_message.startswith("Invalid \\escape"):
- bad_escape_location = extract_char_position(error_message)
- json_to_load = (
- json_to_load[:bad_escape_location] + json_to_load[bad_escape_location + 1 :]
- )
- try:
- json.loads(json_to_load)
- return json_to_load
- except json.JSONDecodeError as e:
- if CFG.debug_mode:
- print("json loads error - fix invalid escape", e)
- error_message = str(e)
- return json_to_load
diff --git a/autogpt/json_fixes/master_json_fix_method.py b/autogpt/json_fixes/master_json_fix_method.py
deleted file mode 100644
index 7a2cf3cc..00000000
--- a/autogpt/json_fixes/master_json_fix_method.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from typing import Any, Dict
-
-from autogpt.config import Config
-from autogpt.logs import logger
-from autogpt.speech import say_text
-CFG = Config()
-
-
-def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]:
- from autogpt.json_fixes.parsing import attempt_to_fix_json_by_finding_outermost_brackets
-
- from autogpt.json_fixes.parsing import fix_and_parse_json
-
- # Parse and print Assistant response
- assistant_reply_json = fix_and_parse_json(assistant_reply)
- if assistant_reply_json == {}:
- assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(
- assistant_reply
- )
-
- if assistant_reply_json != {}:
- return assistant_reply_json
-
- logger.error("Error: The following AI output couldn't be converted to a JSON:\n", assistant_reply)
- if CFG.speak_mode:
- say_text("I have received an invalid JSON response from the OpenAI API.")
-
- return {}
diff --git a/autogpt/json_fixes/missing_quotes.py b/autogpt/json_fixes/missing_quotes.py
deleted file mode 100644
index 552a1517..00000000
--- a/autogpt/json_fixes/missing_quotes.py
+++ /dev/null
@@ -1,27 +0,0 @@
-"""Fix quotes in a JSON string."""
-import json
-import re
-
-
-def add_quotes_to_property_names(json_string: str) -> str:
- """
- Add quotes to property names in a JSON string.
-
- Args:
- json_string (str): The JSON string.
-
- Returns:
- str: The JSON string with quotes added to property names.
- """
-
- def replace_func(match: re.Match) -> str:
- return f'"{match[1]}":'
-
- property_name_pattern = re.compile(r"(\w+):")
- corrected_json_string = property_name_pattern.sub(replace_func, json_string)
-
- try:
- json.loads(corrected_json_string)
- return corrected_json_string
- except json.JSONDecodeError as e:
- raise e
diff --git a/autogpt/json_fixes/utilities.py b/autogpt/json_fixes/utilities.py
deleted file mode 100644
index 0852b18a..00000000
--- a/autogpt/json_fixes/utilities.py
+++ /dev/null
@@ -1,20 +0,0 @@
-"""Utilities for the json_fixes package."""
-import re
-
-
-def extract_char_position(error_message: str) -> int:
- """Extract the character position from the JSONDecodeError message.
-
- Args:
- error_message (str): The error message from the JSONDecodeError
- exception.
-
- Returns:
- int: The character position.
- """
-
- char_pattern = re.compile(r"\(char (\d+)\)")
- if match := char_pattern.search(error_message):
- return int(match[1])
- else:
- raise ValueError("Character position not found in the error message.")
diff --git a/autogpt/json_fixes/__init__.py b/autogpt/json_utils/__init__.py
similarity index 100%
rename from autogpt/json_fixes/__init__.py
rename to autogpt/json_utils/__init__.py
diff --git a/autogpt/json_fixes/parsing.py b/autogpt/json_utils/auto_fix.py
similarity index 51%
rename from autogpt/json_fixes/parsing.py
rename to autogpt/json_utils/auto_fix.py
index 1e391eed..883eba78 100644
--- a/autogpt/json_fixes/parsing.py
+++ b/autogpt/json_utils/auto_fix.py
@@ -1,20 +1,19 @@
-"""Fix and parse JSON strings."""
+"""This module contains the function to fix JSON strings"""
from __future__ import annotations
import contextlib
import json
-from typing import Any, Dict, Union
+import re
+from typing import Optional, Dict, Any
+
from colorama import Fore
from regex import regex
-from autogpt.config import Config
-from autogpt.json_fixes.auto_fix import fix_json
-from autogpt.json_fixes.bracket_termination import balance_braces
-from autogpt.json_fixes.escaping import fix_invalid_escape
-from autogpt.json_fixes.missing_quotes import add_quotes_to_property_names
+
+from autogpt.json_utils.utilities import extract_char_position
+from autogpt.llm_utils import call_ai_function
from autogpt.logs import logger
from autogpt.speech import say_text
-
-CFG = Config()
+from autogpt.config import Config
JSON_SCHEMA = """
{
@@ -35,6 +34,157 @@ JSON_SCHEMA = """
}
"""
+CFG = Config()
+
+
+def auto_fix_json(json_string: str, schema: str) -> str:
+ """Fix the given JSON string to make it parseable and fully compliant with
+ the provided schema using GPT-3.
+
+ Args:
+ json_string (str): The JSON string to fix.
+ schema (str): The schema to use to fix the JSON.
+ Returns:
+ str: The fixed JSON string.
+ """
+ # Try to fix the JSON using GPT:
+ function_string = "def fix_json(json_string: str, schema:str=None) -> str:"
+ args = [f"'''{json_string}'''", f"'''{schema}'''"]
+ description_string = (
+ "This function takes a JSON string and ensures that it"
+ " is parseable and fully compliant with the provided schema. If an object"
+ " or field specified in the schema isn't contained within the correct JSON,"
+ " it is omitted. The function also escapes any double quotes within JSON"
+ " string values to ensure that they are valid. If the JSON string contains"
+ " any None or NaN values, they are replaced with null before being parsed."
+ )
+
+ # If it doesn't already start with a "`", add one:
+ if not json_string.startswith("`"):
+ json_string = "```json\n" + json_string + "\n```"
+ result_string = call_ai_function(
+ function_string, args, description_string, model=CFG.fast_llm_model
+ )
+ logger.debug("------------ JSON FIX ATTEMPT ---------------")
+ logger.debug(f"Original JSON: {json_string}")
+ logger.debug("-----------")
+ logger.debug(f"Fixed JSON: {result_string}")
+ logger.debug("----------- END OF FIX ATTEMPT ----------------")
+
+ try:
+ json.loads(result_string) # just check the validity
+ return result_string
+ except json.JSONDecodeError: # noqa: E722
+ # Get the call stack:
+ # import traceback
+ # call_stack = traceback.format_exc()
+ # print(f"Failed to fix JSON: '{json_string}' "+call_stack)
+ return "failed"
+
+
+def fix_invalid_escape(json_to_load: str, error_message: str) -> str:
+ """Fix invalid escape sequences in JSON strings.
+
+ Args:
+ json_to_load (str): The JSON string.
+ error_message (str): The error message from the JSONDecodeError
+ exception.
+
+ Returns:
+ str: The JSON string with invalid escape sequences fixed.
+ """
+ while error_message.startswith("Invalid \\escape"):
+ bad_escape_location = extract_char_position(error_message)
+ json_to_load = (
+ json_to_load[:bad_escape_location] + json_to_load[bad_escape_location + 1:]
+ )
+ try:
+ json.loads(json_to_load)
+ return json_to_load
+ except json.JSONDecodeError as e:
+ if CFG.debug_mode:
+ print("json loads error - fix invalid escape", e)
+ error_message = str(e)
+ return json_to_load
+
+
+def balance_braces(json_string: str) -> Optional[str]:
+ """
+ Balance the braces in a JSON string.
+
+ Args:
+ json_string (str): The JSON string.
+
+ Returns:
+ str: The JSON string with braces balanced.
+ """
+
+ open_braces_count = json_string.count("{")
+ close_braces_count = json_string.count("}")
+
+ while open_braces_count > close_braces_count:
+ json_string += "}"
+ close_braces_count += 1
+
+ while close_braces_count > open_braces_count:
+ json_string = json_string.rstrip("}")
+ close_braces_count -= 1
+
+ with contextlib.suppress(json.JSONDecodeError):
+ json.loads(json_string)
+ return json_string
+
+
+def add_quotes_to_property_names(json_string: str) -> str:
+ """
+ Add quotes to property names in a JSON string.
+
+ Args:
+ json_string (str): The JSON string.
+
+ Returns:
+ str: The JSON string with quotes added to property names.
+ """
+
+ def replace_func(match: re.Match) -> str:
+ return f'"{match[1]}":'
+
+ property_name_pattern = re.compile(r"(\w+):")
+ corrected_json_string = property_name_pattern.sub(replace_func, json_string)
+
+ try:
+ json.loads(corrected_json_string)
+ return corrected_json_string
+ except json.JSONDecodeError as e:
+ raise e
+
+
+def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]:
+ """Fix the given JSON string to make it parseable and fully compliant with two techniques.
+
+ Args:
+ json_string (str): The JSON string to fix.
+
+ Returns:
+ str: The fixed JSON string.
+ """
+
+ # Parse and print Assistant response
+ assistant_reply_json = fix_and_parse_json(assistant_reply)
+ if assistant_reply_json == {}:
+ assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(
+ assistant_reply
+ )
+
+ if assistant_reply_json != {}:
+ return assistant_reply_json
+
+ logger.error("Error: The following AI output couldn't be converted to a JSON:\n", assistant_reply)
+ if CFG.speak_mode:
+ say_text("I have received an invalid JSON response from the OpenAI API.")
+
+ return {}
+
def correct_json(json_to_load: str) -> str:
"""
@@ -134,7 +284,7 @@ def try_ai_fix(
" slightly."
)
# Now try to fix this up using the ai_functions
- ai_fixed_json = fix_json(json_to_load, JSON_SCHEMA)
+ ai_fixed_json = auto_fix_json(json_to_load, JSON_SCHEMA)
if ai_fixed_json != "failed":
return json.loads(ai_fixed_json)
diff --git a/autogpt/json_schemas/llm_response_format_1.json b/autogpt/json_utils/llm_response_format_1.json
similarity index 100%
rename from autogpt/json_schemas/llm_response_format_1.json
rename to autogpt/json_utils/llm_response_format_1.json
diff --git a/autogpt/json_validation/validate_json.py b/autogpt/json_utils/utilities.py
similarity index 63%
rename from autogpt/json_validation/validate_json.py
rename to autogpt/json_utils/utilities.py
index 440c3b0b..af8a28c9 100644
--- a/autogpt/json_validation/validate_json.py
+++ b/autogpt/json_utils/utilities.py
@@ -1,10 +1,31 @@
+"""Utilities for the json_fixes package."""
import json
+import re
+
from jsonschema import Draft7Validator
-from autogpt.config import Config
+
from autogpt.logs import logger
+from autogpt.config import Config
CFG = Config()
+def extract_char_position(error_message: str) -> int:
+ """Extract the character position from the JSONDecodeError message.
+
+ Args:
+ error_message (str): The error message from the JSONDecodeError
+ exception.
+
+ Returns:
+ int: The character position.
+ """
+
+ char_pattern = re.compile(r"\(char (\d+)\)")
+ if match := char_pattern.search(error_message):
+ return int(match[1])
+ else:
+ raise ValueError("Character position not found in the error message.")
+
def validate_json(json_object: object, schema_name: object) -> object:
"""
diff --git a/autogpt/logs.py b/autogpt/logs.py
index c1e436db..58375f14 100644
--- a/autogpt/logs.py
+++ b/autogpt/logs.py
@@ -204,10 +204,7 @@ logger = Logger()
def print_assistant_thoughts(ai_name, assistant_reply):
"""Prints the assistant's thoughts to the console"""
- from autogpt.json_fixes.bracket_termination import (
- attempt_to_fix_json_by_finding_outermost_brackets,
- )
- from autogpt.json_fixes.parsing import fix_and_parse_json
+ from autogpt.json_utils.auto_fix import fix_and_parse_json, attempt_to_fix_json_by_finding_outermost_brackets
try:
try:
diff --git a/tests/test_json_parser.py b/tests/test_json_parser.py
index 2862034b..f8fa5955 100644
--- a/tests/test_json_parser.py
+++ b/tests/test_json_parser.py
@@ -1,7 +1,7 @@
import unittest
import tests.context
-from autogpt.json_fixes.parsing import fix_and_parse_json
+from autogpt.json_utils.auto_fix import fix_and_parse_json
class TestParseJson(unittest.TestCase):
diff --git a/tests/unit/json_tests.py b/tests/unit/json_tests.py
index 561b8a38..f65a6f6a 100644
--- a/tests/unit/json_tests.py
+++ b/tests/unit/json_tests.py
@@ -1,6 +1,6 @@
import unittest
-from autogpt.json_parser import fix_and_parse_json
+from autogpt.json_utils.auto_fix import fix_and_parse_json
class TestParseJson(unittest.TestCase):
From 56ecbeeef734019d1d3112fc2a657a14bff69ccb Mon Sep 17 00:00:00 2001
From: Reinier van der Leer
Date: Mon, 17 Apr 2023 02:22:18 +0200
Subject: [PATCH 048/152] Clean up README
---
README.md | 227 ++++++++++++++++++++++++------------------------------
1 file changed, 102 insertions(+), 125 deletions(-)
diff --git a/README.md b/README.md
index f60aa9ff..07a088e3 100644
--- a/README.md
+++ b/README.md
@@ -1,15 +1,19 @@
# Auto-GPT: An Autonomous GPT-4 Experiment
+[](https://github.com/Significant-Gravitas/Auto-GPT/actions/workflows/ci.yml)
+[](https://discord.gg/autogpt)
+[](https://github.com/Significant-Gravitas/Auto-GPT/stargazers)
+[](https://twitter.com/SigGravitas)
+
+
+
+
### 🔴 🔴 🔴 Urgent: USE `stable` not `master` 🔴 🔴 🔴
-> This information takes precedence over any other information provided and should be followed accordingly.
+**Download the latest `stable` release from here: https://github.com/Significant-Gravitas/Auto-GPT/releases/latest.**
+The `master` branch may often be in a **broken** state.
-Our workflow has been improved, but please note that `master` branch may often be in a **broken** state.
-Please download the latest `stable` release from here: https://github.com/Torantulino/Auto-GPT/releases/latest.
+
-
-[](https://twitter.com/SigGravitas)
-[](https://discord.gg/autogpt)
-[](https://github.com/Significant-Gravitas/Auto-GPT/actions/workflows/ci.yml)
Auto-GPT is an experimental open-source application showcasing the capabilities of the GPT-4 language model. This program, driven by GPT-4, chains together LLM "thoughts", to autonomously achieve whatever goal you set. As one of the first examples of GPT-4 running fully autonomously, Auto-GPT pushes the boundaries of what is possible with AI.
@@ -37,42 +41,6 @@ Development of this free, open-source project is made possible by all the