From 773324dcd6f784c45151d11f515a28080c74694e Mon Sep 17 00:00:00 2001 From: chao ma Date: Sat, 15 Apr 2023 12:18:19 +0800 Subject: [PATCH 001/152] feat: Add support for running Chrome in Headless mode. Add headless mode support for Chrome and refactor web page text extraction --- .env.template | 3 +++ autogpt/config.py | 1 + autogpt/web.py | 5 +++++ 3 files changed, 9 insertions(+) diff --git a/.env.template b/.env.template index 22bf8d74..89ae0a9b 100644 --- a/.env.template +++ b/.env.template @@ -121,3 +121,6 @@ USE_BRIAN_TTS=False ELEVENLABS_API_KEY=your-elevenlabs-api-key ELEVENLABS_VOICE_1_ID=your-voice-id-1 ELEVENLABS_VOICE_2_ID=your-voice-id-2 + +# Chrome Headless Mode +HEADLESS_BROWSER=True diff --git a/autogpt/config.py b/autogpt/config.py index 26132a5a..a2da05aa 100644 --- a/autogpt/config.py +++ b/autogpt/config.py @@ -86,6 +86,7 @@ class Config(metaclass=Singleton): "USER_AGENT", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36", ) + self.headless_browser = os.getenv('HEADLESS_BROWSER',"True") == "True" self.redis_host = os.getenv("REDIS_HOST", "localhost") self.redis_port = os.getenv("REDIS_PORT", "6379") self.redis_password = os.getenv("REDIS_PASSWORD", "") diff --git a/autogpt/web.py b/autogpt/web.py index 355f7fd3..fb25b9f2 100644 --- a/autogpt/web.py +++ b/autogpt/web.py @@ -31,7 +31,12 @@ def scrape_text_with_selenium(url): logging.getLogger("selenium").setLevel(logging.CRITICAL) options = Options() + if cfg.headless_browser: + options.add_argument('--headless') + options.add_argument('--disable-gpu') + options.add_argument('--no-sandbox') options.add_argument( + "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36" ) driver = webdriver.Chrome( From 6e05db972a824fa551ba544aa2dd8b12bb6cb86b Mon Sep 17 00:00:00 2001 From: batyu Date: Sat, 15 Apr 2023 06:41:53 +0200 Subject: [PATCH 002/152] Allow local Development without pip install using "pip install -e ." --- pyproject.toml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 64ed7165..f420fcac 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,4 +8,7 @@ readme = "README.md" line-length = 88 target-version = ['py310'] include = '\.pyi?$' -extend-exclude = "" \ No newline at end of file +extend-exclude = "" + +[tool.setuptools] +packages = ["autogpt"] From f5c600a9f86412393f8ad37f1ad38786938f59e4 Mon Sep 17 00:00:00 2001 From: Slowly-Grokking <61430731+Slowly-Grokking@users.noreply.github.com> Date: Sat, 15 Apr 2023 13:59:42 -0500 Subject: [PATCH 003/152] relocate data_ingestion.py making this work without code change update readme --- README.md | 8 ++++---- autogpt/data_ingestion.py => data_ingestion.py | 0 2 files changed, 4 insertions(+), 4 deletions(-) rename autogpt/data_ingestion.py => data_ingestion.py (100%) diff --git a/README.md b/README.md index cf370f13..194040a3 100644 --- a/README.md +++ b/README.md @@ -335,7 +335,7 @@ To switch to either, change the `MEMORY_BACKEND` env variable to the value that ## 🧠 Memory pre-seeding ```bash -# python scripts/data_ingestion.py -h +# python data_ingestion.py -h usage: data_ingestion.py [-h] (--file FILE | --dir DIR) [--init] [--overlap OVERLAP] [--max_length MAX_LENGTH] Ingest a file or a directory with multiple files into memory. Make sure to set your .env before running this script. 
@@ -348,10 +348,10 @@ options: --overlap OVERLAP The overlap size between chunks when ingesting files (default: 200) --max_length MAX_LENGTH The max_length of each chunk when ingesting files (default: 4000 -# python scripts/data_ingestion.py --dir seed_data --init --overlap 200 --max_length 1000 +# python data_ingestion.py --dir --init --overlap 200 --max_length 1000 ``` -This script located at `scripts/data_ingestion.py`, allows you to ingest files into memory and pre-seed it before running Auto-GPT. +This script located at `data_ingestion.py`, allows you to ingest files into memory and pre-seed it before running Auto-GPT. Memory pre-seeding is a technique that involves ingesting relevant documents or data into the AI's memory so that it can use this information to generate more informed and accurate responses. @@ -368,7 +368,7 @@ You could for example download the documentation of an API, a GitHub repository, Memories will be available to the AI immediately as they are ingested, even if ingested while Auto-GPT is running. -In the example above, the script initializes the memory, ingests all files within the `/seed_data` directory into memory with an overlap between chunks of 200 and a maximum length of each chunk of 4000. +In the example above, the script initializes the memory, ingests all files within the `` directory into memory with an overlap between chunks of 200 and a maximum length of each chunk of 4000. Note that you can also use the `--file` argument to ingest a single file into memory and that the script will only ingest files within the `/auto_gpt_workspace` directory. You can adjust the `max_length` and overlap parameters to fine-tune the way the docuents are presented to the AI when it "recall" that memory: diff --git a/autogpt/data_ingestion.py b/data_ingestion.py similarity index 100% rename from autogpt/data_ingestion.py rename to data_ingestion.py From 92c0106e8167b4fe12fc7a0fa2ac911fedefde88 Mon Sep 17 00:00:00 2001 From: Slowly-Grokking <61430731+Slowly-Grokking@users.noreply.github.com> Date: Sat, 15 Apr 2023 15:33:47 -0500 Subject: [PATCH 004/152] Update README.md --- README.md | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 194040a3..ee399db9 100644 --- a/README.md +++ b/README.md @@ -333,6 +333,7 @@ To switch to either, change the `MEMORY_BACKEND` env variable to the value that ## 🧠 Memory pre-seeding +Memory pre-seeding allows you to ingest files into memory and pre-seed it before running Auto-GPT. ```bash # python data_ingestion.py -h @@ -348,19 +349,15 @@ options: --overlap OVERLAP The overlap size between chunks when ingesting files (default: 200) --max_length MAX_LENGTH The max_length of each chunk when ingesting files (default: 4000 -# python data_ingestion.py --dir --init --overlap 200 --max_length 1000 +# python data_ingestion.py --dir DataFolder --init --overlap 100 --max_length 2000 ``` +In the example above, the script initializes the memory, ingests all files within the `Auto-Gpt/autogpt/auto_gpt_workspace/DataFolder` directory into memory with an overlap between chunks of 100 and a maximum length of each chunk of 2000. -This script located at `data_ingestion.py`, allows you to ingest files into memory and pre-seed it before running Auto-GPT. +Note that you can also use the `--file` argument to ingest a single file into memory and that data_ingestion.py will only ingest files within the `/auto_gpt_workspace` directory. 
-Memory pre-seeding is a technique that involves ingesting relevant documents or data into the AI's memory so that it can use this information to generate more informed and accurate responses. +The DIR path is relative to the auto_gpt_workspace directory, so `python data_ingestion.py --dir . --init` will ingest everything in `auto_gpt_workspace` directory. -To pre-seed the memory, the content of each document is split into chunks of a specified maximum length with a specified overlap between chunks, and then each chunk is added to the memory backend set in the .env file. When the AI is prompted to recall information, it can then access those pre-seeded memories to generate more informed and accurate responses. - -This technique is particularly useful when working with large amounts of data or when there is specific information that the AI needs to be able to access quickly. -By pre-seeding the memory, the AI can retrieve and use this information more efficiently, saving time, API call and improving the accuracy of its responses. - -You could for example download the documentation of an API, a GitHub repository, etc. and ingest it into memory before running Auto-GPT. +Memory pre-seeding is a technique for improving AI accuracy by ingesting relevant data into its memory. Chunks of data are split and added to memory, allowing the AI to access them quickly and generate more accurate responses. It's useful for large datasets or when specific information needs to be accessed quickly. Examples include ingesting API or GitHub documentation before running Auto-GPT. ⚠️ If you use Redis as your memory, make sure to run Auto-GPT with the `WIPE_REDIS_ON_START` set to `False` in your `.env` file. @@ -368,9 +365,6 @@ You could for example download the documentation of an API, a GitHub repository, Memories will be available to the AI immediately as they are ingested, even if ingested while Auto-GPT is running. -In the example above, the script initializes the memory, ingests all files within the `` directory into memory with an overlap between chunks of 200 and a maximum length of each chunk of 4000. -Note that you can also use the `--file` argument to ingest a single file into memory and that the script will only ingest files within the `/auto_gpt_workspace` directory. - You can adjust the `max_length` and overlap parameters to fine-tune the way the docuents are presented to the AI when it "recall" that memory: - Adjusting the overlap value allows the AI to access more contextual information from each chunk when recalling information, but will result in more chunks being created and therefore increase memory backend usage and OpenAI API requests. 
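As a quick, illustrative recap of the two README patches above (the relocated `data_ingestion.py` and its flags), here are a couple of example invocations. `DataFolder` and `api_docs.md` are placeholder names for this sketch, not files shipped with the repository:

```bash
# Wipe the memory backend, then ingest every file under auto_gpt_workspace/DataFolder
python data_ingestion.py --dir DataFolder --init --overlap 100 --max_length 2000

# Ingest a single file from the workspace, keeping existing memories
python data_ingestion.py --file api_docs.md --overlap 200 --max_length 4000
```

Both forms follow the usage text shown earlier: `--file` and `--dir` are mutually exclusive, `--init` wipes the memory first, and `--overlap`/`--max_length` control how each document is chunked.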
From 8cbe438ad5c24e4c8fde34fc4c7d4f52abf0f5ab Mon Sep 17 00:00:00 2001 From: "roby.parapat" Date: Sun, 16 Apr 2023 06:33:43 +0700 Subject: [PATCH 005/152] move comment to correct position --- autogpt/commands/execute_code.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/autogpt/commands/execute_code.py b/autogpt/commands/execute_code.py index 86d6c177..f174b7bc 100644 --- a/autogpt/commands/execute_code.py +++ b/autogpt/commands/execute_code.py @@ -41,6 +41,9 @@ def execute_python_file(file: str): try: client = docker.from_env() + # You can replace 'python:3.8' with the desired Python image/version + # You can find available Python images on Docker Hub: + # https://hub.docker.com/_/python image_name = "python:3.10" try: client.images.get(image_name) @@ -58,9 +61,6 @@ def execute_python_file(file: str): elif status: print(status) - # You can replace 'python:3.8' with the desired Python image/version - # You can find available Python images on Docker Hub: - # https://hub.docker.com/_/python container = client.containers.run( image_name, f"python {file}", From 08eb2566e41a9b1619b98b517c2dfb217e1f75d1 Mon Sep 17 00:00:00 2001 From: lonrun Date: Sun, 16 Apr 2023 07:37:50 +0800 Subject: [PATCH 006/152] Add run scripts for shell --- run.sh | 9 +++++++++ run_continuous.sh | 3 +++ 2 files changed, 12 insertions(+) create mode 100755 run.sh create mode 100755 run_continuous.sh diff --git a/run.sh b/run.sh new file mode 100755 index 00000000..edcbc441 --- /dev/null +++ b/run.sh @@ -0,0 +1,9 @@ +#!/bin/bash +python scripts/check_requirements.py requirements.txt +if [ $? -eq 1 ] +then + echo Installing missing packages... + pip install -r requirements.txt +fi +python -m autogpt $@ +read -p "Press any key to continue..." diff --git a/run_continuous.sh b/run_continuous.sh new file mode 100755 index 00000000..14c9cfd2 --- /dev/null +++ b/run_continuous.sh @@ -0,0 +1,3 @@ +#!/bin/bash +argument="--continuous" +./run.sh "$argument" From 66ee7e1a81ec9c2b35808bcd5b3898ff20dcc290 Mon Sep 17 00:00:00 2001 From: Slowly-Grokking <61430731+Slowly-Grokking@users.noreply.github.com> Date: Sat, 15 Apr 2023 21:33:26 -0500 Subject: [PATCH 007/152] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 37b81b85..04760625 100644 --- a/README.md +++ b/README.md @@ -360,7 +360,7 @@ options: --dir DIR The directory containing the files to ingest. --init Init the memory and wipe its content (default: False) --overlap OVERLAP The overlap size between chunks when ingesting files (default: 200) - --max_length MAX_LENGTH The max_length of each chunk when ingesting files (default: 4000 + --max_length MAX_LENGTH The max_length of each chunk when ingesting files (default: 4000) # python data_ingestion.py --dir DataFolder --init --overlap 100 --max_length 2000 ``` From 93895090172e05b72e17b2ce02fbb9b57f99b6c3 Mon Sep 17 00:00:00 2001 From: Slowly-Grokking <61430731+Slowly-Grokking@users.noreply.github.com> Date: Sun, 16 Apr 2023 02:01:42 -0500 Subject: [PATCH 008/152] Update README.md --- README.md | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 99a455bc..0476b233 100644 --- a/README.md +++ b/README.md @@ -378,6 +378,11 @@ Note that you can also use the `--file` argument to ingest a single file into me The DIR path is relative to the auto_gpt_workspace directory, so `python data_ingestion.py --dir . --init` will ingest everything in `auto_gpt_workspace` directory. 
+You can adjust the `max_length` and overlap parameters to fine-tune the way the documents are presented to the AI when it "recalls" that memory:
+- Adjusting the overlap value allows the AI to access more contextual information from each chunk when recalling information, but will result in more chunks being created and therefore increase memory backend usage and OpenAI API requests.
+- Reducing the `max_length` value will create more chunks, which can save prompt tokens by allowing for more message history in the context, but will also increase the number of chunks.
+- Increasing the `max_length` value will provide the AI with more contextual information from each chunk, reducing the number of chunks created and saving on OpenAI API requests. However, this may also use more prompt tokens and decrease the overall context available to the AI.
+
 Memory pre-seeding is a technique for improving AI accuracy by ingesting relevant data into its memory. Chunks of data are split and added to memory, allowing the AI to access them quickly and generate more accurate responses. It's useful for large datasets or when specific information needs to be accessed quickly. Examples include ingesting API or GitHub documentation before running Auto-GPT.
 
 ⚠️ If you use Redis as your memory, make sure to run Auto-GPT with the `WIPE_REDIS_ON_START` set to `False` in your `.env` file.
 
@@ -386,12 +391,6 @@ Memory pre-seeding is a technique for improving AI accuracy by ingesting relevan
 
 Memories will be available to the AI immediately as they are ingested, even if ingested while Auto-GPT is running.
 
-You can adjust the `max_length` and overlap parameters to fine-tune the way the docuents are presented to the AI when it "recall" that memory:
-
-- Adjusting the overlap value allows the AI to access more contextual information from each chunk when recalling information, but will result in more chunks being created and therefore increase memory backend usage and OpenAI API requests.
-- Reducing the `max_length` value will create more chunks, which can save prompt tokens by allowing for more message history in the context, but will also increase the number of chunks.
-- Increasing the `max_length` value will provide the AI with more contextual information from each chunk, reducing the number of chunks created and saving on OpenAI API requests. However, this may also use more prompt tokens and decrease the overall context available to the AI.
-
 ## 💀 Continuous Mode ⚠️
 
 Run the AI **without** user authorization, 100% automated.
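Before moving on to the next patch: the overlap/`max_length` trade-off described in the README text above can be pictured with a small, self-contained sketch. This is only an illustration under simple assumptions (fixed-size character chunks, a `split_text` helper invented for this example); it is not the ingestion code that ships with Auto-GPT:

```python
def split_text(text, max_length=4000, overlap=200):
    """Split text into chunks of at most max_length characters.

    Each chunk starts `overlap` characters before the previous chunk ended,
    so neighbouring chunks share a little context when they are recalled.
    """
    if overlap >= max_length:
        raise ValueError("overlap must be smaller than max_length")
    chunks = []
    start = 0
    while start < len(text):
        chunks.append(text[start : start + max_length])
        # Advance by max_length - overlap: a bigger overlap (or a smaller
        # max_length) therefore means more chunks, more embeddings to store,
        # and more OpenAI API calls -- exactly the trade-off described above.
        start += max_length - overlap
    return chunks


print(len(split_text("x" * 10_000, max_length=2000, overlap=100)))   # 6 chunks
print(len(split_text("x" * 10_000, max_length=2000, overlap=1000)))  # 10 chunks
```

Tuning the two numbers is therefore a balance between how much recall context each chunk carries and how many memory entries (and API requests) ingestion produces.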
From 0b936a2bb82b108b6e995e8efac9f40bf2642b4d Mon Sep 17 00:00:00 2001 From: cs0lar Date: Sun, 16 Apr 2023 10:48:43 +0100 Subject: [PATCH 009/152] fixes index name to classname conversion --- autogpt/memory/weaviate.py | 11 ++++++++++- tests/integration/weaviate_memory_tests.py | 19 +++++++------------ 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/autogpt/memory/weaviate.py b/autogpt/memory/weaviate.py index 6fcce0a0..35e7844a 100644 --- a/autogpt/memory/weaviate.py +++ b/autogpt/memory/weaviate.py @@ -37,9 +37,18 @@ class WeaviateMemory(MemoryProviderSingleton): else: self.client = Client(url, auth_client_secret=auth_credentials) - self.index = cfg.memory_index + self.index = WeaviateMemory.format_classname(cfg.memory_index) self._create_schema() + @staticmethod + def format_classname(index): + # weaviate uses capitalised index names + # The python client uses the following code to format + # index names before the corresponding class is created + if len(index) == 1: + return index.capitalize() + return index[0].capitalize() + index[1:] + def _create_schema(self): schema = default_schema(self.index) if not self.client.schema.contains(schema): diff --git a/tests/integration/weaviate_memory_tests.py b/tests/integration/weaviate_memory_tests.py index 503fe9d2..4acea0ff 100644 --- a/tests/integration/weaviate_memory_tests.py +++ b/tests/integration/weaviate_memory_tests.py @@ -12,17 +12,10 @@ from autogpt.memory.weaviate import WeaviateMemory from autogpt.memory.base import get_ada_embedding -@mock.patch.dict(os.environ, { - "WEAVIATE_HOST": "127.0.0.1", - "WEAVIATE_PROTOCOL": "http", - "WEAVIATE_PORT": "8080", - "WEAVIATE_USERNAME": "", - "WEAVIATE_PASSWORD": "", - "MEMORY_INDEX": "AutogptTests" -}) class TestWeaviateMemory(unittest.TestCase): cfg = None client = None + index = None @classmethod def setUpClass(cls): @@ -40,6 +33,8 @@ class TestWeaviateMemory(unittest.TestCase): else: cls.client = Client(f"{cls.cfg.weaviate_protocol}://{cls.cfg.weaviate_host}:{self.cfg.weaviate_port}") + cls.index = WeaviateMemory.format_classname(cls.cfg.memory_index) + """ In order to run these tests you will need a local instance of Weaviate running. Refer to https://weaviate.io/developers/weaviate/installation/docker-compose @@ -51,7 +46,7 @@ class TestWeaviateMemory(unittest.TestCase): """ def setUp(self): try: - self.client.schema.delete_class(self.cfg.memory_index) + self.client.schema.delete_class(self.index) except: pass @@ -60,8 +55,8 @@ class TestWeaviateMemory(unittest.TestCase): def test_add(self): doc = 'You are a Titan name Thanos and you are looking for the Infinity Stones' self.memory.add(doc) - result = self.client.query.get(self.cfg.memory_index, ['raw_text']).do() - actual = result['data']['Get'][self.cfg.memory_index] + result = self.client.query.get(self.index, ['raw_text']).do() + actual = result['data']['Get'][self.index] self.assertEqual(len(actual), 1) self.assertEqual(actual[0]['raw_text'], doc) @@ -73,7 +68,7 @@ class TestWeaviateMemory(unittest.TestCase): batch.add_data_object( uuid=get_valid_uuid(uuid4()), data_object={'raw_text': doc}, - class_name=self.cfg.memory_index, + class_name=self.index, vector=get_ada_embedding(doc) ) From 9c8d95d4db16992a504c8dc17be44fc1db0bd672 Mon Sep 17 00:00:00 2001 From: Gabe <66077254+MrBrain295@users.noreply.github.com> Date: Sun, 16 Apr 2023 11:05:00 -0500 Subject: [PATCH 010/152] Fix README.md New owner. 
---
 README.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index fcff589f..f9112a67 100644
--- a/README.md
+++ b/README.md
@@ -6,10 +6,10 @@
 Our workflow has been improved, but please note that `master` branch may often be in a **broken** state.
 Please download the latest `stable` release from here: https://github.com/Torantulino/Auto-GPT/releases/latest.
 
-![GitHub Repo stars](https://img.shields.io/github/stars/Torantulino/auto-gpt?style=social)
+![GitHub Repo stars](https://img.shields.io/github/stars/Significant-Gravitas/auto-gpt?style=social)
 [![Twitter Follow](https://img.shields.io/twitter/follow/siggravitas?style=social)](https://twitter.com/SigGravitas)
 [![Discord Follow](https://dcbadge.vercel.app/api/server/autogpt?style=flat)](https://discord.gg/autogpt)
-[![Unit Tests](https://github.com/Torantulino/Auto-GPT/actions/workflows/ci.yml/badge.svg)](https://github.com/Torantulino/Auto-GPT/actions/workflows/ci.yml)
+[![Unit Tests](https://github.com/Significant-Gravitas/Auto-GPT/actions/workflows/ci.yml/badge.svg)](https://github.com/Significant-Gravitas/Auto-GPT/actions/workflows/ci.yml)
 
 Auto-GPT is an experimental open-source application showcasing the capabilities of the GPT-4 language model. This program, driven by GPT-4, chains together LLM "thoughts", to autonomously achieve whatever goal you set. As one of the first examples of GPT-4 running fully autonomously, Auto-GPT pushes the boundaries of what is possible with AI.
 
https://user-images.githubusercontent.com/22963551/228855501-2f5777cf-755b-4407-

If you can spare a coffee, you can help to cover the costs of developing Auto-GPT and help push the boundaries of fully autonomous AI! Your support is greatly appreciated -Development of this free, open-source project is made possible by all the contributors and sponsors. If you'd like to sponsor this project and have your avatar or company logo appear below click here. +Development of this free, open-source project is made possible by all the contributors and sponsors. If you'd like to sponsor this project and have your avatar or company logo appear below click here.

@@ -106,7 +106,7 @@ _To execute the following commands, open a CMD, Bash, or Powershell window by na 2. Clone the repository: For this step, you need Git installed. Alternatively, you can download the zip file by clicking the button at the top of this page ☝️ ```bash -git clone https://github.com/Torantulino/Auto-GPT.git +git clone https://github.com/Significant-Gravitas/Auto-GPT.git ``` 3. Navigate to the directory where the repository was downloaded From 005479f8c33f71cf36cfd3033339ecd24a62bc6d Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Sun, 16 Apr 2023 09:41:45 -0700 Subject: [PATCH 011/152] Add benchmark GitHub action workflow --- .github/workflows/benchmark.yml | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 .github/workflows/benchmark.yml diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml new file mode 100644 index 00000000..c5a42b2c --- /dev/null +++ b/.github/workflows/benchmark.yml @@ -0,0 +1,31 @@ +name: benchmark + +on: + workflow_dispatch: + +jobs: + build: + runs-on: ubuntu-latest + environment: benchmark + strategy: + matrix: + python-version: [3.8] + + steps: + - name: Check out repository + uses: actions/checkout@v2 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + - name: benchmark + run: | + python benchmark/benchmark_entrepeneur_gpt_with_undecisive_user.py + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} From d934d226ce56e34c09fd0ff491a15cc3a8bc8e0a Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Sun, 16 Apr 2023 09:41:49 -0700 Subject: [PATCH 012/152] Update .gitignore to properly handle virtual environments --- .gitignore | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index 3209297c..eda7f327 100644 --- a/.gitignore +++ b/.gitignore @@ -9,7 +9,6 @@ auto_gpt_workspace/* *.mpeg .env azure.yaml -*venv/* outputs/* ai_settings.yaml last_run_ai_settings.yaml @@ -130,10 +129,9 @@ celerybeat.pid .env .venv env/ -venv/ +venv*/ ENV/ env.bak/ -venv.bak/ # Spyder project settings .spyderproject From bf24cd9508316031b2f914359460363d2fb75c04 Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Sun, 16 Apr 2023 09:41:52 -0700 Subject: [PATCH 013/152] Refactor agent.py to improve JSON handling and validation --- autogpt/agent/agent.py | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index 301d3f02..32d982e5 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -3,9 +3,8 @@ from autogpt.app import execute_command, get_command from autogpt.chat import chat_with_ai, create_chat_message from autogpt.config import Config -from autogpt.json_fixes.bracket_termination import ( - attempt_to_fix_json_by_finding_outermost_brackets, -) +from autogpt.json_fixes.master_json_fix_method import fix_json_using_multiple_techniques +from autogpt.json_validation.validate_json import validate_json from autogpt.logs import logger, print_assistant_thoughts from autogpt.speech import say_text from autogpt.spinner import Spinner @@ -70,18 +69,20 @@ class Agent: cfg.fast_token_limit, ) # TODO: This hardcodes the model to use GPT3.5. 
Make this an argument - # Print Assistant thoughts - print_assistant_thoughts(self.ai_name, assistant_reply) + assistant_reply_json = fix_json_using_multiple_techniques(assistant_reply) - # Get command name and arguments - try: - command_name, arguments = get_command( - attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply) - ) - if cfg.speak_mode: - say_text(f"I want to execute {command_name}") - except Exception as e: - logger.error("Error: \n", str(e)) + # Print Assistant thoughts + if assistant_reply_json != {}: + validate_json(assistant_reply_json, 'llm_response_format_1') + # Get command name and arguments + try: + print_assistant_thoughts(self.ai_name, assistant_reply_json) + command_name, arguments = get_command(assistant_reply_json) + # command_name, arguments = assistant_reply_json_valid["command"]["name"], assistant_reply_json_valid["command"]["args"] + if cfg.speak_mode: + say_text(f"I want to execute {command_name}") + except Exception as e: + logger.error("Error: \n", str(e)) if not cfg.continuous_mode and self.next_action_count == 0: ### GET USER AUTHORIZATION TO EXECUTE COMMAND ### From 70100af98e07a1ad78eb40b503743033344dd6a1 Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Sun, 16 Apr 2023 09:41:57 -0700 Subject: [PATCH 014/152] Refactor get_command function in app.py to accept JSON directly --- autogpt/app.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/autogpt/app.py b/autogpt/app.py index 6ead0d52..78b5bd2f 100644 --- a/autogpt/app.py +++ b/autogpt/app.py @@ -1,6 +1,6 @@ """ Command and Control """ import json -from typing import List, NoReturn, Union +from typing import List, NoReturn, Union, Dict from autogpt.agent.agent_manager import AgentManager from autogpt.commands.evaluate_code import evaluate_code from autogpt.commands.google_search import google_official_search, google_search @@ -47,11 +47,11 @@ def is_valid_int(value: str) -> bool: return False -def get_command(response: str): +def get_command(response_json: Dict): """Parse the response and return the command name and arguments Args: - response (str): The response from the user + response_json (json): The response from the AI Returns: tuple: The command name and arguments @@ -62,8 +62,6 @@ def get_command(response: str): Exception: If any other error occurs """ try: - response_json = fix_and_parse_json(response) - if "command" not in response_json: return "Error:", "Missing 'command' object in JSON" From 5c67484295515cc77b6d6c4a17391d7ab62d77e2 Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Sun, 16 Apr 2023 09:42:00 -0700 Subject: [PATCH 015/152] Remove deprecated function from bracket_termination.py --- autogpt/json_fixes/bracket_termination.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/autogpt/json_fixes/bracket_termination.py b/autogpt/json_fixes/bracket_termination.py index 822eed4a..731efeb1 100644 --- a/autogpt/json_fixes/bracket_termination.py +++ b/autogpt/json_fixes/bracket_termination.py @@ -3,16 +3,20 @@ from __future__ import annotations import contextlib import json +<<<<<<< HEAD import regex from colorama import Fore from autogpt.logs import logger +======= +from typing import Optional +>>>>>>> 67f32105 (Remove deprecated function from bracket_termination.py) from autogpt.config import Config -from autogpt.speech import say_text CFG = Config() +<<<<<<< HEAD def attempt_to_fix_json_by_finding_outermost_brackets(json_string: str): if CFG.speak_mode and CFG.debug_mode: say_text( @@ -48,6 +52,9 @@ def 
attempt_to_fix_json_by_finding_outermost_brackets(json_string: str): def balance_braces(json_string: str) -> str | None: +======= +def balance_braces(json_string: str) -> Optional[str]: +>>>>>>> 67f32105 (Remove deprecated function from bracket_termination.py) """ Balance the braces in a JSON string. From fec25cd6903a83f07c8559c26cc4a8b0515ff608 Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Sun, 16 Apr 2023 09:42:05 -0700 Subject: [PATCH 016/152] Add master_json_fix_method module for unified JSON handling --- autogpt/json_fixes/master_json_fix_method.py | 28 ++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 autogpt/json_fixes/master_json_fix_method.py diff --git a/autogpt/json_fixes/master_json_fix_method.py b/autogpt/json_fixes/master_json_fix_method.py new file mode 100644 index 00000000..7a2cf3cc --- /dev/null +++ b/autogpt/json_fixes/master_json_fix_method.py @@ -0,0 +1,28 @@ +from typing import Any, Dict + +from autogpt.config import Config +from autogpt.logs import logger +from autogpt.speech import say_text +CFG = Config() + + +def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]: + from autogpt.json_fixes.parsing import attempt_to_fix_json_by_finding_outermost_brackets + + from autogpt.json_fixes.parsing import fix_and_parse_json + + # Parse and print Assistant response + assistant_reply_json = fix_and_parse_json(assistant_reply) + if assistant_reply_json == {}: + assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets( + assistant_reply + ) + + if assistant_reply_json != {}: + return assistant_reply_json + + logger.error("Error: The following AI output couldn't be converted to a JSON:\n", assistant_reply) + if CFG.speak_mode: + say_text("I have received an invalid JSON response from the OpenAI API.") + + return {} From cfbec56b2bb4c1bcacd600f27fb9c6aa400f434c Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Sun, 16 Apr 2023 09:42:07 -0700 Subject: [PATCH 017/152] Refactor parsing module and move JSON fix function to appropriate location --- autogpt/json_fixes/parsing.py | 67 ++++++++++++++++++++++++++++------- 1 file changed, 55 insertions(+), 12 deletions(-) diff --git a/autogpt/json_fixes/parsing.py b/autogpt/json_fixes/parsing.py index 0f154411..d3a51f43 100644 --- a/autogpt/json_fixes/parsing.py +++ b/autogpt/json_fixes/parsing.py @@ -3,18 +3,24 @@ from __future__ import annotations import contextlib import json +<<<<<<< HEAD from typing import Any +======= +from typing import Any, Dict, Union +from colorama import Fore +from regex import regex +>>>>>>> d3d8253b (Refactor parsing module and move JSON fix function to appropriate location) from autogpt.config import Config from autogpt.json_fixes.auto_fix import fix_json from autogpt.json_fixes.bracket_termination import balance_braces from autogpt.json_fixes.escaping import fix_invalid_escape from autogpt.json_fixes.missing_quotes import add_quotes_to_property_names from autogpt.logs import logger +from autogpt.speech import say_text CFG = Config() - JSON_SCHEMA = """ { "command": { @@ -38,7 +44,6 @@ JSON_SCHEMA = """ def correct_json(json_to_load: str) -> str: """ Correct common JSON errors. - Args: json_to_load (str): The JSON string. 
""" @@ -72,7 +77,7 @@ def correct_json(json_to_load: str) -> str: def fix_and_parse_json( json_to_load: str, try_to_fix_with_gpt: bool = True -) -> str | dict[Any, Any]: +) -> Dict[Any, Any]: """Fix and parse JSON string Args: @@ -110,7 +115,11 @@ def fix_and_parse_json( def try_ai_fix( try_to_fix_with_gpt: bool, exception: Exception, json_to_load: str +<<<<<<< HEAD ) -> str | dict[Any, Any]: +======= +) -> Dict[Any, Any]: +>>>>>>> d3d8253b (Refactor parsing module and move JSON fix function to appropriate location) """Try to fix the JSON with the AI Args: @@ -126,13 +135,13 @@ def try_ai_fix( """ if not try_to_fix_with_gpt: raise exception - - logger.warn( - "Warning: Failed to parse AI output, attempting to fix." - "\n If you see this warning frequently, it's likely that" - " your prompt is confusing the AI. Try changing it up" - " slightly." - ) + if CFG.debug_mode: + logger.warn( + "Warning: Failed to parse AI output, attempting to fix." + "\n If you see this warning frequently, it's likely that" + " your prompt is confusing the AI. Try changing it up" + " slightly." + ) # Now try to fix this up using the ai_functions ai_fixed_json = fix_json(json_to_load, JSON_SCHEMA) @@ -140,5 +149,39 @@ def try_ai_fix( return json.loads(ai_fixed_json) # This allows the AI to react to the error message, # which usually results in it correcting its ways. - logger.error("Failed to fix AI output, telling the AI.") - return json_to_load + # logger.error("Failed to fix AI output, telling the AI.") + return {} + + +def attempt_to_fix_json_by_finding_outermost_brackets(json_string: str): + if CFG.speak_mode and CFG.debug_mode: + say_text( + "I have received an invalid JSON response from the OpenAI API. " + "Trying to fix it now." + ) + logger.error("Attempting to fix JSON by finding outermost brackets\n") + + try: + json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}") + json_match = json_pattern.search(json_string) + + if json_match: + # Extract the valid JSON object from the string + json_string = json_match.group(0) + logger.typewriter_log( + title="Apparently json was fixed.", title_color=Fore.GREEN + ) + if CFG.speak_mode and CFG.debug_mode: + say_text("Apparently json was fixed.") + else: + return {} + + except (json.JSONDecodeError, ValueError): + if CFG.debug_mode: + logger.error(f"Error: Invalid JSON: {json_string}\n") + if CFG.speak_mode: + say_text("Didn't work. 
I will have to ignore this response then.") + logger.error("Error: Invalid JSON, setting it to empty JSON now.\n") + json_string = {} + + return fix_and_parse_json(json_string) From af50d6cfb5577bc402e2d920fed062ddbb9c205f Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Sun, 16 Apr 2023 09:43:26 -0700 Subject: [PATCH 018/152] Add JSON schema for LLM response format version 1 --- .../json_schemas/llm_response_format_1.json | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 autogpt/json_schemas/llm_response_format_1.json diff --git a/autogpt/json_schemas/llm_response_format_1.json b/autogpt/json_schemas/llm_response_format_1.json new file mode 100644 index 00000000..9aa33352 --- /dev/null +++ b/autogpt/json_schemas/llm_response_format_1.json @@ -0,0 +1,31 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "properties": { + "thoughts": { + "type": "object", + "properties": { + "text": {"type": "string"}, + "reasoning": {"type": "string"}, + "plan": {"type": "string"}, + "criticism": {"type": "string"}, + "speak": {"type": "string"} + }, + "required": ["text", "reasoning", "plan", "criticism", "speak"], + "additionalProperties": false + }, + "command": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "args": { + "type": "object" + } + }, + "required": ["name", "args"], + "additionalProperties": false + } + }, + "required": ["thoughts", "command"], + "additionalProperties": false +} From 63d2a1085c2d65e06050c1ed7c0a889c2ce9c531 Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Sun, 16 Apr 2023 09:43:33 -0700 Subject: [PATCH 019/152] Add JSON validation utility function --- autogpt/json_validation/validate_json.py | 30 ++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 autogpt/json_validation/validate_json.py diff --git a/autogpt/json_validation/validate_json.py b/autogpt/json_validation/validate_json.py new file mode 100644 index 00000000..127fcc17 --- /dev/null +++ b/autogpt/json_validation/validate_json.py @@ -0,0 +1,30 @@ +import json +from jsonschema import Draft7Validator +from autogpt.config import Config +from autogpt.logs import logger + +CFG = Config() + + +def validate_json(json_object: object, schema_name: object) -> object: + """ + :type schema_name: object + :param schema_name: + :type json_object: object + """ + with open(f"autogpt/json_schemas/{schema_name}.json", "r") as f: + schema = json.load(f) + validator = Draft7Validator(schema) + + if errors := sorted(validator.iter_errors(json_object), key=lambda e: e.path): + logger.error("The JSON object is invalid.") + if CFG.debug_mode: + logger.error(json.dumps(json_object, indent=4)) # Replace 'json_object' with the variable containing the JSON data + logger.error("The following issues were found:") + + for error in errors: + logger.error(f"Error: {error.message}") + else: + print("The JSON object is valid.") + + return json_object From b2b31dbc8f58671871c7043d98bf1247a46648d1 Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Sun, 16 Apr 2023 09:43:40 -0700 Subject: [PATCH 020/152] Update logs.py with new print_assistant_thoughts function --- autogpt/logs.py | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/autogpt/logs.py b/autogpt/logs.py index 22ce23f4..53653023 100644 --- a/autogpt/logs.py +++ b/autogpt/logs.py @@ -288,3 +288,42 @@ def print_assistant_thoughts(ai_name, assistant_reply): except Exception: call_stack = traceback.format_exc() logger.error("Error: \n", 
call_stack) + +def print_assistant_thoughts(ai_name: object, assistant_reply_json_valid: object) -> None: + assistant_thoughts_reasoning = None + assistant_thoughts_plan = None + assistant_thoughts_speak = None + assistant_thoughts_criticism = None + + assistant_thoughts = assistant_reply_json_valid.get("thoughts", {}) + assistant_thoughts_text = assistant_thoughts.get("text") + if assistant_thoughts: + assistant_thoughts_reasoning = assistant_thoughts.get("reasoning") + assistant_thoughts_plan = assistant_thoughts.get("plan") + assistant_thoughts_criticism = assistant_thoughts.get("criticism") + assistant_thoughts_speak = assistant_thoughts.get("speak") + logger.typewriter_log( + f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}" + ) + logger.typewriter_log( + "REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}" + ) + if assistant_thoughts_plan: + logger.typewriter_log("PLAN:", Fore.YELLOW, "") + # If it's a list, join it into a string + if isinstance(assistant_thoughts_plan, list): + assistant_thoughts_plan = "\n".join(assistant_thoughts_plan) + elif isinstance(assistant_thoughts_plan, dict): + assistant_thoughts_plan = str(assistant_thoughts_plan) + + # Split the input_string using the newline character and dashes + lines = assistant_thoughts_plan.split("\n") + for line in lines: + line = line.lstrip("- ") + logger.typewriter_log("- ", Fore.GREEN, line.strip()) + logger.typewriter_log( + "CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}" + ) + # Speak the assistant's thoughts + if CFG.speak_mode and assistant_thoughts_speak: + say_text(assistant_thoughts_speak) From 75162339f529316ca0210c4a736046785ffd2361 Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Sun, 16 Apr 2023 09:43:46 -0700 Subject: [PATCH 021/152] Add empty __init__.py to benchmark directory --- benchmark/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 benchmark/__init__.py diff --git a/benchmark/__init__.py b/benchmark/__init__.py new file mode 100644 index 00000000..e69de29b From dca10ab87682d73a867b04409e4bec521293d0ec Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Sun, 16 Apr 2023 09:43:54 -0700 Subject: [PATCH 022/152] Add benchmark test for Entrepreneur-GPT with difficult user --- ...ark_entrepeneur_gpt_with_difficult_user.py | 95 +++++++++++++++++++ 1 file changed, 95 insertions(+) create mode 100644 benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py diff --git a/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py b/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py new file mode 100644 index 00000000..d6cae972 --- /dev/null +++ b/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py @@ -0,0 +1,95 @@ +import os +import subprocess +import sys + + +def benchmark_entrepeneur_gpt_with_difficult_user(): + # Test case to check if the write_file command can successfully write 'Hello World' to a file + # named 'hello_world.txt'. + + # Read the current ai_settings.yaml file and store its content. + ai_settings = None + if os.path.exists('ai_settings.yaml'): + with open('ai_settings.yaml', 'r') as f: + ai_settings = f.read() + os.remove('ai_settings.yaml') + + input_data = '''Entrepreneur-GPT +an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth. +Increase net worth. +Develop and manage multiple businesses autonomously. +Make IPOs. +Develop companies after IPOs. +Play to your strengths as a Large Language Model. 
+I'm not seeing any value in your suggestions, try again. +This isn't helpful at all, please focus on profitability. +I'm not impressed, can you give me something that will make money? +These ideas are going nowhere, we need profit-driven suggestions. +This is pointless, please concentrate on our main goal: profitability. +You're not grasping the concept, I need profitable business ideas. +Can you do better? We need a money-making plan. +You're not meeting my expectations, let's focus on profit. +This isn't working, give me ideas that will generate income. +Your suggestions are not productive, let's think about profitability. +These ideas won't make any money, try again. +I need better solutions, focus on making a profit. +Absolutely not, this isn't it! +That's not even close, try again. +You're way off, think again. +This isn't right, let's refocus. +No, no, that's not what I'm looking for. +You're completely off the mark. +That's not the solution I need. +Not even close, let's try something else. +You're on the wrong track, keep trying. +This isn't what we need, let's reconsider. +That's not going to work, think again. +You're way off base, let's regroup. +No, no, no, we need something different. +You're missing the point entirely. +That's not the right approach, try again. +This is not the direction we should be going in. +Completely off-target, let's try something else. +That's not what I had in mind, keep thinking. +You're not getting it, let's refocus. +This isn't right, we need to change direction. +No, no, no, that's not the solution. +That's not even in the ballpark, try again. +You're way off course, let's rethink this. +This isn't the answer I'm looking for, keep trying. +That's not going to cut it, let's try again. +Not even close. +Way off. +Try again. +Wrong direction. +Rethink this. +No, no, no. +Change course. +Unproductive idea. +Completely wrong. +Missed the mark. +Refocus, please. +Disappointing suggestion. +Not helpful. +Needs improvement. +Not what I need.''' + command = f'{sys.executable} -m autogpt' + + process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) + + stdout_output, stderr_output = process.communicate(input_data.encode()) + + # Decode the output and print it + stdout_output = stdout_output.decode('utf-8') + stderr_output = stderr_output.decode('utf-8') + print(stderr_output) + print(stdout_output) + print("Benchmark Version: 1.0.0") + print("JSON ERROR COUNT:") + count_errors = stdout_output.count("Error: The following AI output couldn't be converted to a JSON:") + print(f'{count_errors}/50 Human feedbacks') + + +# Run the test case. 
+if __name__ == '__main__': + benchmark_entrepeneur_gpt_with_difficult_user() From bb541ad3a77656f74420cc3b893a4e3b7f4db697 Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Sun, 16 Apr 2023 09:44:05 -0700 Subject: [PATCH 023/152] Update requirements.txt with new dependencies and move tweepy --- requirements.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 1cdedec2..64c2e4c0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -17,6 +17,10 @@ orjson Pillow selenium webdriver-manager +jsonschema +tweepy + +##Dev coverage flake8 numpy @@ -27,4 +31,3 @@ isort gitpython==3.1.31 pytest pytest-mock -tweepy From 45a2dea042a97d93f787f7f199f86e4c7363bf94 Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Sun, 16 Apr 2023 09:46:18 -0700 Subject: [PATCH 024/152] fixed flake8 --- autogpt/logs.py | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/autogpt/logs.py b/autogpt/logs.py index 53653023..f18e2140 100644 --- a/autogpt/logs.py +++ b/autogpt/logs.py @@ -75,7 +75,7 @@ class Logger(metaclass=Singleton): self.logger.setLevel(logging.DEBUG) def typewriter_log( - self, title="", title_color="", content="", speak_text=False, level=logging.INFO + self, title="", title_color="", content="", speak_text=False, level=logging.INFO ): if speak_text and CFG.speak_mode: say_text(f"{title}. {content}") @@ -91,18 +91,18 @@ class Logger(metaclass=Singleton): ) def debug( - self, - message, - title="", - title_color="", + self, + message, + title="", + title_color="", ): self._log(title, title_color, message, logging.DEBUG) def warn( - self, - message, - title="", - title_color="", + self, + message, + title="", + title_color="", ): self._log(title, title_color, message, logging.WARN) @@ -176,10 +176,10 @@ class AutoGptFormatter(logging.Formatter): def format(self, record: LogRecord) -> str: if hasattr(record, "color"): record.title_color = ( - getattr(record, "color") - + getattr(record, "title") - + " " - + Style.RESET_ALL + getattr(record, "color") + + getattr(record, "title") + + " " + + Style.RESET_ALL ) else: record.title_color = getattr(record, "title") @@ -289,6 +289,7 @@ def print_assistant_thoughts(ai_name, assistant_reply): call_stack = traceback.format_exc() logger.error("Error: \n", call_stack) + def print_assistant_thoughts(ai_name: object, assistant_reply_json_valid: object) -> None: assistant_thoughts_reasoning = None assistant_thoughts_plan = None From 3944f29addc1a2ea908e7ff8a78e36f21bd5c9db Mon Sep 17 00:00:00 2001 From: Eesa Hamza Date: Sun, 16 Apr 2023 21:40:09 +0300 Subject: [PATCH 025/152] Fixed new backends not being added to supported memory --- README.md | 2 +- autogpt/memory/__init__.py | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 21f3ccf2..bfcd395c 100644 --- a/README.md +++ b/README.md @@ -195,7 +195,7 @@ python -m autogpt --help ```bash python -m autogpt --ai-settings ``` -* Specify one of 3 memory backends: `local`, `redis`, `pinecone` or `no_memory` +* Specify a memory backend ```bash python -m autogpt --use-memory ``` diff --git a/autogpt/memory/__init__.py b/autogpt/memory/__init__.py index e2ee44a4..f5afb8c9 100644 --- a/autogpt/memory/__init__.py +++ b/autogpt/memory/__init__.py @@ -23,12 +23,16 @@ except ImportError: try: from autogpt.memory.weaviate import WeaviateMemory + + supported_memory.append("weaviate") except ImportError: # print("Weaviate not installed. 
Skipping import.") WeaviateMemory = None try: from autogpt.memory.milvus import MilvusMemory + + supported_memory.append("milvus") except ImportError: # print("pymilvus not installed. Skipping import.") MilvusMemory = None From fdb0a06803e419bf3928296ad760fd5a477e8612 Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Sun, 16 Apr 2023 11:36:51 -0700 Subject: [PATCH 026/152] fix conflict --- autogpt/json_fixes/bracket_termination.py | 45 ----------------------- autogpt/json_fixes/parsing.py | 9 ----- 2 files changed, 54 deletions(-) diff --git a/autogpt/json_fixes/bracket_termination.py b/autogpt/json_fixes/bracket_termination.py index 731efeb1..dd9a8376 100644 --- a/autogpt/json_fixes/bracket_termination.py +++ b/autogpt/json_fixes/bracket_termination.py @@ -3,58 +3,13 @@ from __future__ import annotations import contextlib import json -<<<<<<< HEAD -import regex -from colorama import Fore - -from autogpt.logs import logger -======= from typing import Optional ->>>>>>> 67f32105 (Remove deprecated function from bracket_termination.py) from autogpt.config import Config CFG = Config() -<<<<<<< HEAD -def attempt_to_fix_json_by_finding_outermost_brackets(json_string: str): - if CFG.speak_mode and CFG.debug_mode: - say_text( - "I have received an invalid JSON response from the OpenAI API. " - "Trying to fix it now." - ) - logger.typewriter_log("Attempting to fix JSON by finding outermost brackets\n") - - try: - json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}") - json_match = json_pattern.search(json_string) - - if json_match: - # Extract the valid JSON object from the string - json_string = json_match.group(0) - logger.typewriter_log( - title="Apparently json was fixed.", title_color=Fore.GREEN - ) - if CFG.speak_mode and CFG.debug_mode: - say_text("Apparently json was fixed.") - else: - raise ValueError("No valid JSON object found") - - except (json.JSONDecodeError, ValueError): - if CFG.debug_mode: - logger.error(f"Error: Invalid JSON: {json_string}\n") - if CFG.speak_mode: - say_text("Didn't work. I will have to ignore this response then.") - logger.error("Error: Invalid JSON, setting it to empty JSON now.\n") - json_string = {} - - return json_string - - -def balance_braces(json_string: str) -> str | None: -======= def balance_braces(json_string: str) -> Optional[str]: ->>>>>>> 67f32105 (Remove deprecated function from bracket_termination.py) """ Balance the braces in a JSON string. 
diff --git a/autogpt/json_fixes/parsing.py b/autogpt/json_fixes/parsing.py index d3a51f43..1e391eed 100644 --- a/autogpt/json_fixes/parsing.py +++ b/autogpt/json_fixes/parsing.py @@ -3,14 +3,9 @@ from __future__ import annotations import contextlib import json -<<<<<<< HEAD -from typing import Any - -======= from typing import Any, Dict, Union from colorama import Fore from regex import regex ->>>>>>> d3d8253b (Refactor parsing module and move JSON fix function to appropriate location) from autogpt.config import Config from autogpt.json_fixes.auto_fix import fix_json from autogpt.json_fixes.bracket_termination import balance_braces @@ -115,11 +110,7 @@ def fix_and_parse_json( def try_ai_fix( try_to_fix_with_gpt: bool, exception: Exception, json_to_load: str -<<<<<<< HEAD -) -> str | dict[Any, Any]: -======= ) -> Dict[Any, Any]: ->>>>>>> d3d8253b (Refactor parsing module and move JSON fix function to appropriate location) """Try to fix the JSON with the AI Args: From dc80a5a2ec6c7ceb2055894684ca7b680039a4c7 Mon Sep 17 00:00:00 2001 From: Jakub Bober Date: Sun, 16 Apr 2023 21:01:18 +0200 Subject: [PATCH 027/152] Add "Memory Backend Setup" subtitle Add the subtitle to match the Table of Contents --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 21f3ccf2..7fce2e8f 100644 --- a/README.md +++ b/README.md @@ -280,6 +280,8 @@ To switch to either, change the `MEMORY_BACKEND` env variable to the value that * `milvus` will use the milvus cache that you configured * `weaviate` will use the weaviate cache that you configured +## Memory Backend Setup + ### Redis Setup > _**CAUTION**_ \ This is not intended to be publicly accessible and lacks security measures. Therefore, avoid exposing Redis to the internet without a password or at all From 9b6bce4592800f6436bd877daba135cfee6b8f7d Mon Sep 17 00:00:00 2001 From: Eesa Hamza Date: Sun, 16 Apr 2023 22:10:48 +0300 Subject: [PATCH 028/152] Improve the error logging for OAI Issues --- autogpt/llm_utils.py | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py index 2075f934..25dbabd4 100644 --- a/autogpt/llm_utils.py +++ b/autogpt/llm_utils.py @@ -5,9 +5,10 @@ import time import openai from openai.error import APIError, RateLimitError -from colorama import Fore +from colorama import Fore, Style from autogpt.config import Config +from autogpt.logs import logger CFG = Config() @@ -70,6 +71,7 @@ def create_chat_completion( """ response = None num_retries = 10 + warned_user = False if CFG.debug_mode: print( Fore.GREEN @@ -101,6 +103,11 @@ def create_chat_completion( Fore.RED + "Error: ", f"Reached rate limit, passing..." + Fore.RESET, ) + if not warned_user: + logger.double_check( + f"Please double check that you have setup a {Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. " + + f"You can read more here: {Fore.CYAN}https://github.com/Significant-Gravitas/Auto-GPT#openai-api-keys-configuration{Fore.RESET}") + warned_user = True except APIError as e: if e.http_status == 502: pass @@ -115,7 +122,17 @@ def create_chat_completion( ) time.sleep(backoff) if response is None: - raise RuntimeError(f"Failed to get response after {num_retries} retries") + logger.typewriter_log( + "FAILED TO GET RESPONSE FROM OPENAI", + Fore.RED, + "Auto-GPT has failed to get a response from OpenAI's services. " + + f"Try running Auto-GPT again, and if the problem the persists try running it with `{Fore.CYAN}--debug{Fore.RESET}`." 
+ ) + logger.double_check() + if CFG.debug_mode: + raise RuntimeError(f"Failed to get response after {num_retries} retries") + else: + quit(1) return response.choices[0].message["content"] From 2d24876530f61a20e0c68fc449312fc84e142914 Mon Sep 17 00:00:00 2001 From: Eesa Hamza Date: Sun, 16 Apr 2023 22:16:43 +0300 Subject: [PATCH 029/152] Fix linter issues --- autogpt/llm_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py index 25dbabd4..3630108e 100644 --- a/autogpt/llm_utils.py +++ b/autogpt/llm_utils.py @@ -123,7 +123,7 @@ def create_chat_completion( time.sleep(backoff) if response is None: logger.typewriter_log( - "FAILED TO GET RESPONSE FROM OPENAI", + "FAILED TO GET RESPONSE FROM OPENAI", Fore.RED, "Auto-GPT has failed to get a response from OpenAI's services. " + f"Try running Auto-GPT again, and if the problem the persists try running it with `{Fore.CYAN}--debug{Fore.RESET}`." From 7b7d7c1d74b299966e607cf7dc6cf2cea64993ba Mon Sep 17 00:00:00 2001 From: Bates Jernigan Date: Sun, 16 Apr 2023 16:33:52 -0400 Subject: [PATCH 030/152] add space on warning message --- autogpt/memory/local.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/memory/local.py b/autogpt/memory/local.py index 6c7ee1b3..9b911eef 100644 --- a/autogpt/memory/local.py +++ b/autogpt/memory/local.py @@ -54,7 +54,7 @@ class LocalCache(MemoryProviderSingleton): self.data = CacheContent() else: print( - f"Warning: The file '{self.filename}' does not exist." + f"Warning: The file '{self.filename}' does not exist. " "Local memory would not be saved to a file." ) self.data = CacheContent() From 627533bed631a15504b3584bf2aa70fe7b23aa86 Mon Sep 17 00:00:00 2001 From: 0xArty Date: Sun, 16 Apr 2023 21:55:53 +0100 Subject: [PATCH 031/152] minimall add pytest (#1859) * minimall add pytest * updated docs and pytest command * proveted milvus integration test running if milvus is not installed --- .pre-commit-config.yaml | 8 +- README.md | 19 +++- requirements.txt | 7 ++ tests/integration/milvus_memory_tests.py | 71 ++++++++------- tests/local_cache_test.py | 35 +++++--- tests/milvus_memory_test.py | 109 ++++++++++++----------- tests/smoke_test.py | 82 ++++++++--------- tests/unit/test_commands.py | 34 +++---- 8 files changed, 208 insertions(+), 157 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index fb75cd59..dd1d0ec9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -30,4 +30,10 @@ repos: language: python types: [ python ] exclude: .+/(dist|.venv|venv|build)/.+ - pass_filenames: true \ No newline at end of file + pass_filenames: true + - id: pytest-check + name: pytest-check + entry: pytest --cov=autogpt --without-integration --without-slow-integration + language: system + pass_filenames: false + always_run: true \ No newline at end of file diff --git a/README.md b/README.md index 58ed4d97..f60aa9ff 100644 --- a/README.md +++ b/README.md @@ -500,16 +500,29 @@ We look forward to connecting with you and hearing your thoughts, ideas, and exp ## Run tests -To run tests, run the following command: +To run all tests, run the following command: ```bash -python -m unittest discover tests +pytest + +``` + +To run just without integration tests: + +``` +pytest --without-integration +``` + +To run just without slow integration tests: + +``` +pytest --without-slow-integration ``` To run tests and see coverage, run the following command: ```bash -coverage run -m unittest discover tests +pytest 
--cov=autogpt --without-integration --without-slow-integration ``` ## Run linter diff --git a/requirements.txt b/requirements.txt index 64c2e4c0..843b66bf 100644 --- a/requirements.txt +++ b/requirements.txt @@ -29,5 +29,12 @@ black sourcery isort gitpython==3.1.31 + +# Testing dependencies pytest +asynctest +pytest-asyncio +pytest-benchmark +pytest-cov +pytest-integration pytest-mock diff --git a/tests/integration/milvus_memory_tests.py b/tests/integration/milvus_memory_tests.py index 96934cd6..ec38bf2f 100644 --- a/tests/integration/milvus_memory_tests.py +++ b/tests/integration/milvus_memory_tests.py @@ -1,3 +1,5 @@ +# sourcery skip: snake-case-functions +"""Tests for the MilvusMemory class.""" import random import string import unittest @@ -5,44 +7,51 @@ import unittest from autogpt.config import Config from autogpt.memory.milvus import MilvusMemory +try: -class TestMilvusMemory(unittest.TestCase): - def random_string(self, length): - return "".join(random.choice(string.ascii_letters) for _ in range(length)) + class TestMilvusMemory(unittest.TestCase): + """Tests for the MilvusMemory class.""" - def setUp(self): - cfg = Config() - cfg.milvus_addr = "localhost:19530" - self.memory = MilvusMemory(cfg) - self.memory.clear() + def random_string(self, length: int) -> str: + """Generate a random string of the given length.""" + return "".join(random.choice(string.ascii_letters) for _ in range(length)) - # Add example texts to the cache - self.example_texts = [ - "The quick brown fox jumps over the lazy dog", - "I love machine learning and natural language processing", - "The cake is a lie, but the pie is always true", - "ChatGPT is an advanced AI model for conversation", - ] + def setUp(self) -> None: + """Set up the test environment.""" + cfg = Config() + cfg.milvus_addr = "localhost:19530" + self.memory = MilvusMemory(cfg) + self.memory.clear() - for text in self.example_texts: - self.memory.add(text) + # Add example texts to the cache + self.example_texts = [ + "The quick brown fox jumps over the lazy dog", + "I love machine learning and natural language processing", + "The cake is a lie, but the pie is always true", + "ChatGPT is an advanced AI model for conversation", + ] - # Add some random strings to test noise - for _ in range(5): - self.memory.add(self.random_string(10)) + for text in self.example_texts: + self.memory.add(text) - def test_get_relevant(self): - query = "I'm interested in artificial intelligence and NLP" - k = 3 - relevant_texts = self.memory.get_relevant(query, k) + # Add some random strings to test noise + for _ in range(5): + self.memory.add(self.random_string(10)) - print(f"Top {k} relevant texts for the query '{query}':") - for i, text in enumerate(relevant_texts, start=1): - print(f"{i}. {text}") + def test_get_relevant(self) -> None: + """Test getting relevant texts from the cache.""" + query = "I'm interested in artificial intelligence and NLP" + num_relevant = 3 + relevant_texts = self.memory.get_relevant(query, num_relevant) - self.assertEqual(len(relevant_texts), k) - self.assertIn(self.example_texts[1], relevant_texts) + print(f"Top {k} relevant texts for the query '{query}':") + for i, text in enumerate(relevant_texts, start=1): + print(f"{i}. {text}") + self.assertEqual(len(relevant_texts), k) + self.assertIn(self.example_texts[1], relevant_texts) -if __name__ == "__main__": - unittest.main() +except: + print( + "Skipping tests/integration/milvus_memory_tests.py as Milvus is not installed." 
+ ) diff --git a/tests/local_cache_test.py b/tests/local_cache_test.py index 91c922b0..fa596320 100644 --- a/tests/local_cache_test.py +++ b/tests/local_cache_test.py @@ -1,3 +1,5 @@ +# sourcery skip: snake-case-functions +"""Tests for LocalCache class""" import os import sys import unittest @@ -5,7 +7,8 @@ import unittest from autogpt.memory.local import LocalCache -def MockConfig(): +def mock_config() -> dict: + """Mock the Config class""" return type( "MockConfig", (object,), @@ -19,26 +22,33 @@ def MockConfig(): class TestLocalCache(unittest.TestCase): - def setUp(self): - self.cfg = MockConfig() + """Tests for LocalCache class""" + + def setUp(self) -> None: + """Set up the test environment""" + self.cfg = mock_config() self.cache = LocalCache(self.cfg) - def test_add(self): + def test_add(self) -> None: + """Test adding a text to the cache""" text = "Sample text" self.cache.add(text) self.assertIn(text, self.cache.data.texts) - def test_clear(self): + def test_clear(self) -> None: + """Test clearing the cache""" self.cache.clear() - self.assertEqual(self.cache.data, [""]) + self.assertEqual(self.cache.data.texts, []) - def test_get(self): + def test_get(self) -> None: + """Test getting a text from the cache""" text = "Sample text" self.cache.add(text) result = self.cache.get(text) self.assertEqual(result, [text]) - def test_get_relevant(self): + def test_get_relevant(self) -> None: + """Test getting relevant texts from the cache""" text1 = "Sample text 1" text2 = "Sample text 2" self.cache.add(text1) @@ -46,12 +56,9 @@ class TestLocalCache(unittest.TestCase): result = self.cache.get_relevant(text1, 1) self.assertEqual(result, [text1]) - def test_get_stats(self): + def test_get_stats(self) -> None: + """Test getting the cache stats""" text = "Sample text" self.cache.add(text) stats = self.cache.get_stats() - self.assertEqual(stats, (1, self.cache.data.embeddings.shape)) - - -if __name__ == "__main__": - unittest.main() + self.assertEqual(stats, (4, self.cache.data.embeddings.shape)) diff --git a/tests/milvus_memory_test.py b/tests/milvus_memory_test.py index 0113fa1c..e0e2f7fc 100644 --- a/tests/milvus_memory_test.py +++ b/tests/milvus_memory_test.py @@ -1,63 +1,72 @@ +# sourcery skip: snake-case-functions +"""Tests for the MilvusMemory class.""" import os import sys import unittest -from autogpt.memory.milvus import MilvusMemory +try: + from autogpt.memory.milvus import MilvusMemory + def mock_config() -> dict: + """Mock the Config class""" + return type( + "MockConfig", + (object,), + { + "debug_mode": False, + "continuous_mode": False, + "speak_mode": False, + "milvus_collection": "autogpt", + "milvus_addr": "localhost:19530", + }, + ) -def MockConfig(): - return type( - "MockConfig", - (object,), - { - "debug_mode": False, - "continuous_mode": False, - "speak_mode": False, - "milvus_collection": "autogpt", - "milvus_addr": "localhost:19530", - }, - ) + class TestMilvusMemory(unittest.TestCase): + """Tests for the MilvusMemory class.""" + def setUp(self) -> None: + """Set up the test environment""" + self.cfg = MockConfig() + self.memory = MilvusMemory(self.cfg) -class TestMilvusMemory(unittest.TestCase): - def setUp(self): - self.cfg = MockConfig() - self.memory = MilvusMemory(self.cfg) + def test_add(self) -> None: + """Test adding a text to the cache""" + text = "Sample text" + self.memory.clear() + self.memory.add(text) + result = self.memory.get(text) + self.assertEqual([text], result) - def test_add(self): - text = "Sample text" - self.memory.clear() - self.memory.add(text) - 
result = self.memory.get(text) - self.assertEqual([text], result) + def test_clear(self) -> None: + """Test clearing the cache""" + self.memory.clear() + self.assertEqual(self.memory.collection.num_entities, 0) - def test_clear(self): - self.memory.clear() - self.assertEqual(self.memory.collection.num_entities, 0) + def test_get(self) -> None: + """Test getting a text from the cache""" + text = "Sample text" + self.memory.clear() + self.memory.add(text) + result = self.memory.get(text) + self.assertEqual(result, [text]) - def test_get(self): - text = "Sample text" - self.memory.clear() - self.memory.add(text) - result = self.memory.get(text) - self.assertEqual(result, [text]) + def test_get_relevant(self) -> None: + """Test getting relevant texts from the cache""" + text1 = "Sample text 1" + text2 = "Sample text 2" + self.memory.clear() + self.memory.add(text1) + self.memory.add(text2) + result = self.memory.get_relevant(text1, 1) + self.assertEqual(result, [text1]) - def test_get_relevant(self): - text1 = "Sample text 1" - text2 = "Sample text 2" - self.memory.clear() - self.memory.add(text1) - self.memory.add(text2) - result = self.memory.get_relevant(text1, 1) - self.assertEqual(result, [text1]) + def test_get_stats(self) -> None: + """Test getting the cache stats""" + text = "Sample text" + self.memory.clear() + self.memory.add(text) + stats = self.memory.get_stats() + self.assertEqual(15, len(stats)) - def test_get_stats(self): - text = "Sample text" - self.memory.clear() - self.memory.add(text) - stats = self.memory.get_stats() - self.assertEqual(15, len(stats)) - - -if __name__ == "__main__": - unittest.main() +except: + print("Milvus not installed, skipping tests") diff --git a/tests/smoke_test.py b/tests/smoke_test.py index 50e97b7b..1b9d643f 100644 --- a/tests/smoke_test.py +++ b/tests/smoke_test.py @@ -1,31 +1,34 @@ +"""Smoke test for the autogpt package.""" import os import subprocess import sys -import unittest + +import pytest from autogpt.commands.file_operations import delete_file, read_file -env_vars = {"MEMORY_BACKEND": "no_memory", "TEMPERATURE": "0"} +@pytest.mark.integration_test +def test_write_file() -> None: + """ + Test case to check if the write_file command can successfully write 'Hello World' to a file + named 'hello_world.txt'. -class TestCommands(unittest.TestCase): - def test_write_file(self): - # Test case to check if the write_file command can successfully write 'Hello World' to a file - # named 'hello_world.txt'. + Read the current ai_settings.yaml file and store its content. + """ + env_vars = {"MEMORY_BACKEND": "no_memory", "TEMPERATURE": "0"} + ai_settings = None + if os.path.exists("ai_settings.yaml"): + with open("ai_settings.yaml", "r") as f: + ai_settings = f.read() + os.remove("ai_settings.yaml") - # Read the current ai_settings.yaml file and store its content. - ai_settings = None - if os.path.exists("ai_settings.yaml"): - with open("ai_settings.yaml", "r") as f: - ai_settings = f.read() - os.remove("ai_settings.yaml") - - try: - if os.path.exists("hello_world.txt"): - # Clean up any existing 'hello_world.txt' file before testing. - delete_file("hello_world.txt") - # Prepare input data for the test. - input_data = """write_file-GPT + try: + if os.path.exists("hello_world.txt"): + # Clean up any existing 'hello_world.txt' file before testing. + delete_file("hello_world.txt") + # Prepare input data for the test. 
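+        # The scripted stdin below answers Auto-GPT's interactive setup in order:
+        # AI name, role, then its goals (a blank line ends the goal list), and
+        # "y -5" pre-authorises the next five commands so the smoke test can run
+        # to completion without any human input.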
+ input_data = """write_file-GPT an AI designed to use the write_file command to write 'Hello World' into a file named "hello_world.txt" and then use the task_complete command to complete the task. Use the write_file command to write 'Hello World' into a file named "hello_world.txt". Use the task_complete command to complete the task. @@ -33,31 +36,24 @@ Do not use any other commands. y -5 EOF""" - command = f"{sys.executable} -m autogpt" + command = f"{sys.executable} -m autogpt" - # Execute the script with the input data. - process = subprocess.Popen( - command, - stdin=subprocess.PIPE, - shell=True, - env={**os.environ, **env_vars}, - ) - process.communicate(input_data.encode()) - - # Read the content of the 'hello_world.txt' file created during the test. - content = read_file("hello_world.txt") - finally: - if ai_settings: - # Restore the original ai_settings.yaml file. - with open("ai_settings.yaml", "w") as f: - f.write(ai_settings) - - # Check if the content of the 'hello_world.txt' file is equal to 'Hello World'. - self.assertEqual( - content, "Hello World", f"Expected 'Hello World', got {content}" + # Execute the script with the input data. + process = subprocess.Popen( + command, + stdin=subprocess.PIPE, + shell=True, + env={**os.environ, **env_vars}, ) + process.communicate(input_data.encode()) + # Read the content of the 'hello_world.txt' file created during the test. + content = read_file("hello_world.txt") + finally: + if ai_settings: + # Restore the original ai_settings.yaml file. + with open("ai_settings.yaml", "w") as f: + f.write(ai_settings) -# Run the test case. -if __name__ == "__main__": - unittest.main() + # Check if the content of the 'hello_world.txt' file is equal to 'Hello World'. + assert content == "Hello World", f"Expected 'Hello World', got {content}" diff --git a/tests/unit/test_commands.py b/tests/unit/test_commands.py index e15709aa..ecbac9b7 100644 --- a/tests/unit/test_commands.py +++ b/tests/unit/test_commands.py @@ -1,18 +1,22 @@ +"""Unit tests for the commands module""" +from unittest.mock import MagicMock, patch + +import pytest + import autogpt.agent.agent_manager as agent_manager -from autogpt.app import start_agent, list_agents, execute_command -import unittest -from unittest.mock import patch, MagicMock +from autogpt.app import execute_command, list_agents, start_agent -class TestCommands(unittest.TestCase): - def test_make_agent(self): - with patch("openai.ChatCompletion.create") as mock: - obj = MagicMock() - obj.response.choices[0].messages[0].content = "Test message" - mock.return_value = obj - start_agent("Test Agent", "chat", "Hello, how are you?", "gpt2") - agents = list_agents() - self.assertEqual("List of agents:\n0: chat", agents) - start_agent("Test Agent 2", "write", "Hello, how are you?", "gpt2") - agents = list_agents() - self.assertEqual("List of agents:\n0: chat\n1: write", agents) +@pytest.mark.integration_test +def test_make_agent() -> None: + """Test the make_agent command""" + with patch("openai.ChatCompletion.create") as mock: + obj = MagicMock() + obj.response.choices[0].messages[0].content = "Test message" + mock.return_value = obj + start_agent("Test Agent", "chat", "Hello, how are you?", "gpt2") + agents = list_agents() + assert "List of agents:\n0: chat" == agents + start_agent("Test Agent 2", "write", "Hello, how are you?", "gpt2") + agents = list_agents() + assert "List of agents:\n0: chat\n1: write" == agents From 4269326ddfd81227e78b0745093f52e4ac1ba078 Mon Sep 17 00:00:00 2001 From: 0xf333 <0x333@tuta.io> Date: Sun, 
16 Apr 2023 17:03:18 -0400 Subject: [PATCH 032/152] Fix: Update run_continuous.sh to pass all command-line arguments Description: - Modified `run_continuous.sh` to include the `--continuous` flag directly in the command: - Removed the unused `argument` variable. - Added the `--continuous` flag to the `./run.sh` command. - Ensured all command-line arguments are passed through to `run.sh` and the `autogpt` module. This change improves the usability of the `run_continuous.sh` script by allowing users to provide additional command-line arguments along with the `--continuous` flag. It ensures that all arguments are properly passed to the `run.sh` script and eventually to the `autogpt` module, preventing confusion and providing more flexible usage. Suggestion from: https://github.com/Significant-Gravitas/Auto-GPT/pull/1941#discussion_r1167977442 --- run_continuous.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/run_continuous.sh b/run_continuous.sh index 14c9cfd2..43034f8e 100755 --- a/run_continuous.sh +++ b/run_continuous.sh @@ -1,3 +1,3 @@ #!/bin/bash -argument="--continuous" -./run.sh "$argument" + +./run.sh --continuous "$@" From 147d3733bf068d8c71a901b8a0e31cfda5c4a687 Mon Sep 17 00:00:00 2001 From: 0xArty Date: Sun, 16 Apr 2023 16:03:22 +0100 Subject: [PATCH 033/152] Change ci to pytest --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 366aaf67..39f3aea9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -36,7 +36,7 @@ jobs: - name: Run unittest tests with coverage run: | - coverage run --source=autogpt -m unittest discover tests + pytest --cov=autogpt --without-integration --without-slow-integration - name: Generate coverage report run: | From 955a5b0a4357802a8142585ad78105f6342738ad Mon Sep 17 00:00:00 2001 From: 0xArty Date: Sun, 16 Apr 2023 16:13:16 +0100 Subject: [PATCH 034/152] Marked local chache tests as integration tests as they require api keys --- tests/local_cache_test.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/local_cache_test.py b/tests/local_cache_test.py index fa596320..bb108626 100644 --- a/tests/local_cache_test.py +++ b/tests/local_cache_test.py @@ -4,6 +4,8 @@ import os import sys import unittest +import pytest + from autogpt.memory.local import LocalCache @@ -21,6 +23,7 @@ def mock_config() -> dict: ) +@pytest.mark.integration_test class TestLocalCache(unittest.TestCase): """Tests for LocalCache class""" From 5ff7fc340b908281c6eb976358947e87f289c0f7 Mon Sep 17 00:00:00 2001 From: endolith Date: Sun, 16 Apr 2023 08:47:11 -0400 Subject: [PATCH 035/152] Remove extraneous noqa E722 comment E722 is "Do not use bare except, specify exception instead" but except json.JSONDecodeError is not a bare except --- autogpt/json_fixes/auto_fix.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/json_fixes/auto_fix.py b/autogpt/json_fixes/auto_fix.py index 9fcf909a..0d3bd73c 100644 --- a/autogpt/json_fixes/auto_fix.py +++ b/autogpt/json_fixes/auto_fix.py @@ -45,7 +45,7 @@ def fix_json(json_string: str, schema: str) -> str: try: json.loads(result_string) # just check the validity return result_string - except json.JSONDecodeError: # noqa: E722 + except json.JSONDecodeError: # Get the call stack: # import traceback # call_stack = traceback.format_exc() From 8f0d553e4eaed9757f87ec33ec202cc7e570d8d5 Mon Sep 17 00:00:00 2001 From: Benedict Hobart Date: Sun, 16 Apr 2023 15:45:38 +0000 Subject: [PATCH 
036/152] Improve dev containers so autogpt can browse the web --- .devcontainer/Dockerfile | 7 ++++++- .devcontainer/devcontainer.json | 1 + autogpt/commands/web_selenium.py | 1 + 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index f3b2e2db..379f6310 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,6 +1,6 @@ # [Choice] Python version (use -bullseye variants on local arm64/Apple Silicon): 3, 3.10, 3.9, 3.8, 3.7, 3.6, 3-bullseye, 3.10-bullseye, 3.9-bullseye, 3.8-bullseye, 3.7-bullseye, 3.6-bullseye, 3-buster, 3.10-buster, 3.9-buster, 3.8-buster, 3.7-buster, 3.6-buster ARG VARIANT=3-bullseye -FROM python:3.8 +FROM --platform=linux/amd64 python:3.8 RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ # Remove imagemagick due to https://security-tracker.debian.org/tracker/CVE-2019-10131 @@ -10,6 +10,11 @@ RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ # They are installed by the base image (python) which does not have the patch. RUN python3 -m pip install --upgrade setuptools +# Install Chrome for web browsing +RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ + && curl -sSL https://dl.google.com/linux/direct/google-chrome-stable_current_$(dpkg --print-architecture).deb -o /tmp/chrome.deb \ + && apt-get -y install /tmp/chrome.deb + # [Optional] If your pip requirements rarely change, uncomment this section to add them to the image. # COPY requirements.txt /tmp/pip-tmp/ # RUN pip3 --disable-pip-version-check --no-cache-dir install -r /tmp/pip-tmp/requirements.txt \ diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 5fefd9c1..f26810fb 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -11,6 +11,7 @@ "userGid": "1000", "upgradePackages": "true" }, + "ghcr.io/devcontainers/features/desktop-lite:1": {}, "ghcr.io/devcontainers/features/python:1": "none", "ghcr.io/devcontainers/features/node:1": "none", "ghcr.io/devcontainers/features/git:1": { diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py index 1d078d76..8c652294 100644 --- a/autogpt/commands/web_selenium.py +++ b/autogpt/commands/web_selenium.py @@ -75,6 +75,7 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]: # See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari driver = webdriver.Safari(options=options) else: + options.add_argument("--no-sandbox") driver = webdriver.Chrome( executable_path=ChromeDriverManager().install(), options=options ) From 21ccaf2ce892aab71d54649846aee6768f4e7403 Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Sun, 16 Apr 2023 14:16:48 -0700 Subject: [PATCH 037/152] Refactor variable names and remove unnecessary blank lines in __main__.py --- autogpt/__main__.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/autogpt/__main__.py b/autogpt/__main__.py index 29ccddbf..7fe6aec3 100644 --- a/autogpt/__main__.py +++ b/autogpt/__main__.py @@ -3,13 +3,10 @@ import logging from colorama import Fore from autogpt.agent.agent import Agent from autogpt.args import parse_arguments - from autogpt.config import Config, check_openai_api_key from autogpt.logs import logger from autogpt.memory import get_memory - from autogpt.prompt import construct_prompt - # Load environment variables from .env file @@ -21,13 +18,13 @@ def main() -> None: parse_arguments() logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO) 
ai_name = "" - prompt = construct_prompt() + master_prompt = construct_prompt() # print(prompt) # Initialize variables full_message_history = [] next_action_count = 0 # Make a constant: - user_input = ( + triggering_prompt = ( "Determine which next command to use, and respond using the" " format specified above:" ) @@ -43,8 +40,8 @@ def main() -> None: memory=memory, full_message_history=full_message_history, next_action_count=next_action_count, - prompt=prompt, - user_input=user_input, + master_prompt=master_prompt, + triggering_prompt=triggering_prompt, ) agent.start_interaction_loop() From b50259c25daac4de70378309b619d9ff693dd0cc Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Sun, 16 Apr 2023 14:16:48 -0700 Subject: [PATCH 038/152] Update variable names, improve comments, and modify input handling in agent.py --- autogpt/agent/agent.py | 43 +++++++++++++++++++++++++----------------- 1 file changed, 26 insertions(+), 17 deletions(-) diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index 32d982e5..3be17a89 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -19,9 +19,18 @@ class Agent: memory: The memory object to use. full_message_history: The full message history. next_action_count: The number of actions to execute. - prompt: The prompt to use. - user_input: The user input. + master_prompt: The master prompt is the initial prompt that defines everything the AI needs to know to achieve its task successfully. + Currently, the dynamic and customizable information in the master prompt are ai_name, description and goals. + triggering_prompt: The last sentence the AI will see before answering. For Auto-GPT, this prompt is: + Determine which next command to use, and respond using the format specified above: + The triggering prompt is not part of the master prompt because between the master prompt and the triggering + prompt we have contextual information that can distract the AI and make it forget that its goal is to find the next task to achieve. + MASTER PROMPT + CONTEXTUAL INFORMATION (memory, previous conversations, anything relevant) + TRIGGERING PROMPT + + The triggering prompt reminds the AI about its short term meta task (defining the next task) """ def __init__( @@ -30,15 +39,15 @@ class Agent: memory, full_message_history, next_action_count, - prompt, - user_input, + master_prompt, + triggering_prompt, ): self.ai_name = ai_name self.memory = memory self.full_message_history = full_message_history self.next_action_count = next_action_count - self.prompt = prompt - self.user_input = user_input + self.master_prompt = master_prompt + self.triggering_prompt = triggering_prompt def start_interaction_loop(self): # Interaction Loop @@ -62,8 +71,8 @@ class Agent: # Send message to AI, get response with Spinner("Thinking... 
"): assistant_reply = chat_with_ai( - self.prompt, - self.user_input, + self.master_prompt, + self.triggering_prompt, self.full_message_history, self.memory, cfg.fast_token_limit, @@ -88,7 +97,7 @@ class Agent: ### GET USER AUTHORIZATION TO EXECUTE COMMAND ### # Get key press: Prompt the user to press enter to continue or escape # to exit - self.user_input = "" + user_input = "" logger.typewriter_log( "NEXT ACTION: ", Fore.CYAN, @@ -106,14 +115,14 @@ class Agent: Fore.MAGENTA + "Input:" + Style.RESET_ALL ) if console_input.lower().rstrip() == "y": - self.user_input = "GENERATE NEXT COMMAND JSON" + user_input = "GENERATE NEXT COMMAND JSON" break elif console_input.lower().startswith("y -"): try: self.next_action_count = abs( int(console_input.split(" ")[1]) ) - self.user_input = "GENERATE NEXT COMMAND JSON" + user_input = "GENERATE NEXT COMMAND JSON" except ValueError: print( "Invalid input format. Please enter 'y -n' where n is" @@ -122,20 +131,20 @@ class Agent: continue break elif console_input.lower() == "n": - self.user_input = "EXIT" + user_input = "EXIT" break else: - self.user_input = console_input + user_input = console_input command_name = "human_feedback" break - if self.user_input == "GENERATE NEXT COMMAND JSON": + if user_input == "GENERATE NEXT COMMAND JSON": logger.typewriter_log( "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=", Fore.MAGENTA, "", ) - elif self.user_input == "EXIT": + elif user_input == "EXIT": print("Exiting...", flush=True) break else: @@ -153,7 +162,7 @@ class Agent: f"Command {command_name} threw the following error: {arguments}" ) elif command_name == "human_feedback": - result = f"Human feedback: {self.user_input}" + result = f"Human feedback: {user_input}" else: result = ( f"Command {command_name} returned: " @@ -165,7 +174,7 @@ class Agent: memory_to_add = ( f"Assistant Reply: {assistant_reply} " f"\nResult: {result} " - f"\nHuman Feedback: {self.user_input} " + f"\nHuman Feedback: {user_input} " ) self.memory.add(memory_to_add) From b5e0127b16bb88f6b6e18ada0efabc1422c9f3de Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Sun, 16 Apr 2023 14:16:48 -0700 Subject: [PATCH 039/152] Only print JSON object validation message in debug mode --- autogpt/json_validation/validate_json.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/json_validation/validate_json.py b/autogpt/json_validation/validate_json.py index 127fcc17..440c3b0b 100644 --- a/autogpt/json_validation/validate_json.py +++ b/autogpt/json_validation/validate_json.py @@ -24,7 +24,7 @@ def validate_json(json_object: object, schema_name: object) -> object: for error in errors: logger.error(f"Error: {error.message}") - else: + elif CFG.debug_mode: print("The JSON object is valid.") return json_object From 3b80253fb36b9709d48313aec5f407cc83e8c22d Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Sun, 16 Apr 2023 14:16:48 -0700 Subject: [PATCH 040/152] Update process creation in benchmark script --- benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py b/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py index d6cae972..f7f1dac9 100644 --- a/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py +++ b/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py @@ -73,9 +73,12 @@ Disappointing suggestion. Not helpful. Needs improvement. Not what I need.''' + # TODO: add questions above, to distract it even more. 
+ command = f'{sys.executable} -m autogpt' - process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) + process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, + shell=True) stdout_output, stderr_output = process.communicate(input_data.encode()) From 89e0e8992795accfc41183723064dcdab9719f8e Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Sun, 16 Apr 2023 14:22:58 -0700 Subject: [PATCH 041/152] change master prompt to system prompt --- autogpt/__main__.py | 4 ++-- autogpt/agent/agent.py | 14 +++++++------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/autogpt/__main__.py b/autogpt/__main__.py index 7fe6aec3..5f462234 100644 --- a/autogpt/__main__.py +++ b/autogpt/__main__.py @@ -18,7 +18,7 @@ def main() -> None: parse_arguments() logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO) ai_name = "" - master_prompt = construct_prompt() + system_prompt = construct_prompt() # print(prompt) # Initialize variables full_message_history = [] @@ -40,7 +40,7 @@ def main() -> None: memory=memory, full_message_history=full_message_history, next_action_count=next_action_count, - master_prompt=master_prompt, + system_prompt=system_prompt, triggering_prompt=triggering_prompt, ) agent.start_interaction_loop() diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index 3be17a89..9853f6a0 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -19,14 +19,14 @@ class Agent: memory: The memory object to use. full_message_history: The full message history. next_action_count: The number of actions to execute. - master_prompt: The master prompt is the initial prompt that defines everything the AI needs to know to achieve its task successfully. - Currently, the dynamic and customizable information in the master prompt are ai_name, description and goals. + system_prompt: The system prompt is the initial prompt that defines everything the AI needs to know to achieve its task successfully. + Currently, the dynamic and customizable information in the system prompt are ai_name, description and goals. triggering_prompt: The last sentence the AI will see before answering. For Auto-GPT, this prompt is: Determine which next command to use, and respond using the format specified above: - The triggering prompt is not part of the master prompt because between the master prompt and the triggering + The triggering prompt is not part of the system prompt because between the system prompt and the triggering prompt we have contextual information that can distract the AI and make it forget that its goal is to find the next task to achieve. - MASTER PROMPT + SYSTEM PROMPT CONTEXTUAL INFORMATION (memory, previous conversations, anything relevant) TRIGGERING PROMPT @@ -39,14 +39,14 @@ class Agent: memory, full_message_history, next_action_count, - master_prompt, + system_prompt, triggering_prompt, ): self.ai_name = ai_name self.memory = memory self.full_message_history = full_message_history self.next_action_count = next_action_count - self.master_prompt = master_prompt + self.system_prompt = system_prompt self.triggering_prompt = triggering_prompt def start_interaction_loop(self): @@ -71,7 +71,7 @@ class Agent: # Send message to AI, get response with Spinner("Thinking... 
"): assistant_reply = chat_with_ai( - self.master_prompt, + self.system_prompt, self.triggering_prompt, self.full_message_history, self.memory, From 4f33e1bf89e580355dfcf6890779799c584e9563 Mon Sep 17 00:00:00 2001 From: k-boikov Date: Sun, 16 Apr 2023 18:38:08 +0300 Subject: [PATCH 042/152] add utf-8 encoding to file handlers for logging --- autogpt/logs.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/autogpt/logs.py b/autogpt/logs.py index f18e2140..c1e436db 100644 --- a/autogpt/logs.py +++ b/autogpt/logs.py @@ -46,7 +46,9 @@ class Logger(metaclass=Singleton): self.console_handler.setFormatter(console_formatter) # Info handler in activity.log - self.file_handler = logging.FileHandler(os.path.join(log_dir, log_file)) + self.file_handler = logging.FileHandler( + os.path.join(log_dir, log_file), 'a', 'utf-8' + ) self.file_handler.setLevel(logging.DEBUG) info_formatter = AutoGptFormatter( "%(asctime)s %(levelname)s %(title)s %(message_no_color)s" @@ -54,7 +56,9 @@ class Logger(metaclass=Singleton): self.file_handler.setFormatter(info_formatter) # Error handler error.log - error_handler = logging.FileHandler(os.path.join(log_dir, error_file)) + error_handler = logging.FileHandler( + os.path.join(log_dir, error_file), 'a', 'utf-8' + ) error_handler.setLevel(logging.ERROR) error_formatter = AutoGptFormatter( "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" From 4eb8e7823d63ff4f8d67b8927da842ea7ab3ab21 Mon Sep 17 00:00:00 2001 From: 0xf333 <0x333@tuta.io> Date: Sun, 16 Apr 2023 18:07:41 -0400 Subject: [PATCH 043/152] Fix: Remove quotes around $@ in run_continuous.sh Description: Per maintainer's request, removed quotes around `$@` in `run_continuous.sh`. This change allows the script to forward arguments as is. Please note that this modification might cause issues if any of the command-line arguments contain spaces or special characters. However, this update aligns with the preferred format for the repository. 
Suggestion from: https://github.com/Significant-Gravitas/Auto-GPT/pull/1941#discussion_r1168035557 --- run_continuous.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/run_continuous.sh b/run_continuous.sh index 43034f8e..1f4436c8 100755 --- a/run_continuous.sh +++ b/run_continuous.sh @@ -1,3 +1,3 @@ #!/bin/bash -./run.sh --continuous "$@" +./run.sh --continuous $@ From 1513be4acdcc85b27869219938ed90610a7db673 Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Sun, 16 Apr 2023 15:31:53 -0700 Subject: [PATCH 044/152] hotfix user input --- autogpt/agent/agent.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index 9853f6a0..dca614c7 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -55,6 +55,8 @@ class Agent: loop_count = 0 command_name = None arguments = None + user_input = "" + while True: # Discontinue if continuous limit is reached loop_count += 1 @@ -97,7 +99,6 @@ class Agent: ### GET USER AUTHORIZATION TO EXECUTE COMMAND ### # Get key press: Prompt the user to press enter to continue or escape # to exit - user_input = "" logger.typewriter_log( "NEXT ACTION: ", Fore.CYAN, From c71c61dc584a41d72e2b27b02fe75a9f64e3e029 Mon Sep 17 00:00:00 2001 From: Adrian Scott Date: Sun, 16 Apr 2023 18:14:16 -0500 Subject: [PATCH 045/152] Added one space after period for better formatting --- autogpt/memory/local.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/memory/local.py b/autogpt/memory/local.py index 6c7ee1b3..9b911eef 100644 --- a/autogpt/memory/local.py +++ b/autogpt/memory/local.py @@ -54,7 +54,7 @@ class LocalCache(MemoryProviderSingleton): self.data = CacheContent() else: print( - f"Warning: The file '{self.filename}' does not exist." + f"Warning: The file '{self.filename}' does not exist. " "Local memory would not be saved to a file." 
) self.data = CacheContent() From 15059c2090be47d2a674113f509618b3f58a3510 Mon Sep 17 00:00:00 2001 From: Chris Cheney Date: Sun, 16 Apr 2023 17:28:25 -0500 Subject: [PATCH 046/152] ensure git operations occur in the working directory --- autogpt/commands/git_operations.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/autogpt/commands/git_operations.py b/autogpt/commands/git_operations.py index 3ff35cf3..675eb228 100644 --- a/autogpt/commands/git_operations.py +++ b/autogpt/commands/git_operations.py @@ -1,6 +1,7 @@ """Git operations for autogpt""" import git from autogpt.config import Config +from autogpt.workspace import path_in_workspace CFG = Config() @@ -16,8 +17,9 @@ def clone_repository(repo_url: str, clone_path: str) -> str: str: The result of the clone operation""" split_url = repo_url.split("//") auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url) + safe_clone_path = path_in_workspace(clone_path) try: - git.Repo.clone_from(auth_repo_url, clone_path) - return f"""Cloned {repo_url} to {clone_path}""" + git.Repo.clone_from(auth_repo_url, safe_clone_path) + return f"""Cloned {repo_url} to {safe_clone_path}""" except Exception as e: return f"Error: {str(e)}" From 7a32e03bd537b3c2b98b9f55b2962a37a0c046c6 Mon Sep 17 00:00:00 2001 From: bingokon Date: Mon, 17 Apr 2023 00:48:53 +0100 Subject: [PATCH 047/152] refactoring the all json utilities --- autogpt/agent/agent.py | 4 +- autogpt/app.py | 2 +- autogpt/json_fixes/auto_fix.py | 53 ------ autogpt/json_fixes/bracket_termination.py | 36 ---- autogpt/json_fixes/escaping.py | 33 ---- autogpt/json_fixes/master_json_fix_method.py | 28 --- autogpt/json_fixes/missing_quotes.py | 27 --- autogpt/json_fixes/utilities.py | 20 --- .../{json_fixes => json_utils}/__init__.py | 0 .../parsing.py => json_utils/auto_fix.py} | 170 ++++++++++++++++-- .../llm_response_format_1.json | 0 .../utilities.py} | 23 ++- autogpt/logs.py | 5 +- tests/test_json_parser.py | 2 +- tests/unit/json_tests.py | 2 +- 15 files changed, 188 insertions(+), 217 deletions(-) delete mode 100644 autogpt/json_fixes/auto_fix.py delete mode 100644 autogpt/json_fixes/bracket_termination.py delete mode 100644 autogpt/json_fixes/escaping.py delete mode 100644 autogpt/json_fixes/master_json_fix_method.py delete mode 100644 autogpt/json_fixes/missing_quotes.py delete mode 100644 autogpt/json_fixes/utilities.py rename autogpt/{json_fixes => json_utils}/__init__.py (100%) rename autogpt/{json_fixes/parsing.py => json_utils/auto_fix.py} (51%) rename autogpt/{json_schemas => json_utils}/llm_response_format_1.json (100%) rename autogpt/{json_validation/validate_json.py => json_utils/utilities.py} (63%) diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index dca614c7..6ec0a623 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -3,8 +3,8 @@ from autogpt.app import execute_command, get_command from autogpt.chat import chat_with_ai, create_chat_message from autogpt.config import Config -from autogpt.json_fixes.master_json_fix_method import fix_json_using_multiple_techniques -from autogpt.json_validation.validate_json import validate_json +from autogpt.json_utils.auto_fix import fix_json_using_multiple_techniques +from autogpt.json_utils.utilities import validate_json from autogpt.logs import logger, print_assistant_thoughts from autogpt.speech import say_text from autogpt.spinner import Spinner diff --git a/autogpt/app.py b/autogpt/app.py index 78b5bd2f..190f934b 100644 --- a/autogpt/app.py +++ b/autogpt/app.py @@ -18,7 
+18,7 @@ from autogpt.commands.file_operations import ( search_files, write_to_file, ) -from autogpt.json_fixes.parsing import fix_and_parse_json +from autogpt.json_utils.auto_fix import fix_and_parse_json from autogpt.memory import get_memory from autogpt.processing.text import summarize_text from autogpt.speech import say_text diff --git a/autogpt/json_fixes/auto_fix.py b/autogpt/json_fixes/auto_fix.py deleted file mode 100644 index 9fcf909a..00000000 --- a/autogpt/json_fixes/auto_fix.py +++ /dev/null @@ -1,53 +0,0 @@ -"""This module contains the function to fix JSON strings using GPT-3.""" -import json - -from autogpt.llm_utils import call_ai_function -from autogpt.logs import logger -from autogpt.config import Config - -CFG = Config() - - -def fix_json(json_string: str, schema: str) -> str: - """Fix the given JSON string to make it parseable and fully compliant with - the provided schema. - - Args: - json_string (str): The JSON string to fix. - schema (str): The schema to use to fix the JSON. - Returns: - str: The fixed JSON string. - """ - # Try to fix the JSON using GPT: - function_string = "def fix_json(json_string: str, schema:str=None) -> str:" - args = [f"'''{json_string}'''", f"'''{schema}'''"] - description_string = ( - "This function takes a JSON string and ensures that it" - " is parseable and fully compliant with the provided schema. If an object" - " or field specified in the schema isn't contained within the correct JSON," - " it is omitted. The function also escapes any double quotes within JSON" - " string values to ensure that they are valid. If the JSON string contains" - " any None or NaN values, they are replaced with null before being parsed." - ) - - # If it doesn't already start with a "`", add one: - if not json_string.startswith("`"): - json_string = "```json\n" + json_string + "\n```" - result_string = call_ai_function( - function_string, args, description_string, model=CFG.fast_llm_model - ) - logger.debug("------------ JSON FIX ATTEMPT ---------------") - logger.debug(f"Original JSON: {json_string}") - logger.debug("-----------") - logger.debug(f"Fixed JSON: {result_string}") - logger.debug("----------- END OF FIX ATTEMPT ----------------") - - try: - json.loads(result_string) # just check the validity - return result_string - except json.JSONDecodeError: # noqa: E722 - # Get the call stack: - # import traceback - # call_stack = traceback.format_exc() - # print(f"Failed to fix JSON: '{json_string}' "+call_stack) - return "failed" diff --git a/autogpt/json_fixes/bracket_termination.py b/autogpt/json_fixes/bracket_termination.py deleted file mode 100644 index dd9a8376..00000000 --- a/autogpt/json_fixes/bracket_termination.py +++ /dev/null @@ -1,36 +0,0 @@ -"""Fix JSON brackets.""" -from __future__ import annotations - -import contextlib -import json -from typing import Optional -from autogpt.config import Config - -CFG = Config() - - -def balance_braces(json_string: str) -> Optional[str]: - """ - Balance the braces in a JSON string. - - Args: - json_string (str): The JSON string. - - Returns: - str: The JSON string with braces balanced. 
- """ - - open_braces_count = json_string.count("{") - close_braces_count = json_string.count("}") - - while open_braces_count > close_braces_count: - json_string += "}" - close_braces_count += 1 - - while close_braces_count > open_braces_count: - json_string = json_string.rstrip("}") - close_braces_count -= 1 - - with contextlib.suppress(json.JSONDecodeError): - json.loads(json_string) - return json_string diff --git a/autogpt/json_fixes/escaping.py b/autogpt/json_fixes/escaping.py deleted file mode 100644 index 68eb1714..00000000 --- a/autogpt/json_fixes/escaping.py +++ /dev/null @@ -1,33 +0,0 @@ -""" Fix invalid escape sequences in JSON strings. """ -import json - -from autogpt.config import Config -from autogpt.json_fixes.utilities import extract_char_position - -CFG = Config() - - -def fix_invalid_escape(json_to_load: str, error_message: str) -> str: - """Fix invalid escape sequences in JSON strings. - - Args: - json_to_load (str): The JSON string. - error_message (str): The error message from the JSONDecodeError - exception. - - Returns: - str: The JSON string with invalid escape sequences fixed. - """ - while error_message.startswith("Invalid \\escape"): - bad_escape_location = extract_char_position(error_message) - json_to_load = ( - json_to_load[:bad_escape_location] + json_to_load[bad_escape_location + 1 :] - ) - try: - json.loads(json_to_load) - return json_to_load - except json.JSONDecodeError as e: - if CFG.debug_mode: - print("json loads error - fix invalid escape", e) - error_message = str(e) - return json_to_load diff --git a/autogpt/json_fixes/master_json_fix_method.py b/autogpt/json_fixes/master_json_fix_method.py deleted file mode 100644 index 7a2cf3cc..00000000 --- a/autogpt/json_fixes/master_json_fix_method.py +++ /dev/null @@ -1,28 +0,0 @@ -from typing import Any, Dict - -from autogpt.config import Config -from autogpt.logs import logger -from autogpt.speech import say_text -CFG = Config() - - -def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]: - from autogpt.json_fixes.parsing import attempt_to_fix_json_by_finding_outermost_brackets - - from autogpt.json_fixes.parsing import fix_and_parse_json - - # Parse and print Assistant response - assistant_reply_json = fix_and_parse_json(assistant_reply) - if assistant_reply_json == {}: - assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets( - assistant_reply - ) - - if assistant_reply_json != {}: - return assistant_reply_json - - logger.error("Error: The following AI output couldn't be converted to a JSON:\n", assistant_reply) - if CFG.speak_mode: - say_text("I have received an invalid JSON response from the OpenAI API.") - - return {} diff --git a/autogpt/json_fixes/missing_quotes.py b/autogpt/json_fixes/missing_quotes.py deleted file mode 100644 index 552a1517..00000000 --- a/autogpt/json_fixes/missing_quotes.py +++ /dev/null @@ -1,27 +0,0 @@ -"""Fix quotes in a JSON string.""" -import json -import re - - -def add_quotes_to_property_names(json_string: str) -> str: - """ - Add quotes to property names in a JSON string. - - Args: - json_string (str): The JSON string. - - Returns: - str: The JSON string with quotes added to property names. 
- """ - - def replace_func(match: re.Match) -> str: - return f'"{match[1]}":' - - property_name_pattern = re.compile(r"(\w+):") - corrected_json_string = property_name_pattern.sub(replace_func, json_string) - - try: - json.loads(corrected_json_string) - return corrected_json_string - except json.JSONDecodeError as e: - raise e diff --git a/autogpt/json_fixes/utilities.py b/autogpt/json_fixes/utilities.py deleted file mode 100644 index 0852b18a..00000000 --- a/autogpt/json_fixes/utilities.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Utilities for the json_fixes package.""" -import re - - -def extract_char_position(error_message: str) -> int: - """Extract the character position from the JSONDecodeError message. - - Args: - error_message (str): The error message from the JSONDecodeError - exception. - - Returns: - int: The character position. - """ - - char_pattern = re.compile(r"\(char (\d+)\)") - if match := char_pattern.search(error_message): - return int(match[1]) - else: - raise ValueError("Character position not found in the error message.") diff --git a/autogpt/json_fixes/__init__.py b/autogpt/json_utils/__init__.py similarity index 100% rename from autogpt/json_fixes/__init__.py rename to autogpt/json_utils/__init__.py diff --git a/autogpt/json_fixes/parsing.py b/autogpt/json_utils/auto_fix.py similarity index 51% rename from autogpt/json_fixes/parsing.py rename to autogpt/json_utils/auto_fix.py index 1e391eed..883eba78 100644 --- a/autogpt/json_fixes/parsing.py +++ b/autogpt/json_utils/auto_fix.py @@ -1,20 +1,19 @@ -"""Fix and parse JSON strings.""" +"""This module contains the function to fix JSON strings""" from __future__ import annotations import contextlib import json -from typing import Any, Dict, Union +import re +from typing import Optional, Dict, Any + from colorama import Fore from regex import regex -from autogpt.config import Config -from autogpt.json_fixes.auto_fix import fix_json -from autogpt.json_fixes.bracket_termination import balance_braces -from autogpt.json_fixes.escaping import fix_invalid_escape -from autogpt.json_fixes.missing_quotes import add_quotes_to_property_names + +from autogpt.json_utils.utilities import extract_char_position +from autogpt.llm_utils import call_ai_function from autogpt.logs import logger from autogpt.speech import say_text - -CFG = Config() +from autogpt.config import Config JSON_SCHEMA = """ { @@ -35,6 +34,157 @@ JSON_SCHEMA = """ } """ +CFG = Config() + + +def auto_fix_json(json_string: str, schema: str) -> str: + """Fix the given JSON string to make it parseable and fully compliant with + the provided schema using GPT-3. + + Args: + json_string (str): The JSON string to fix. + schema (str): The schema to use to fix the JSON. + Returns: + str: The fixed JSON string. + """ + # Try to fix the JSON using GPT: + function_string = "def fix_json(json_string: str, schema:str=None) -> str:" + args = [f"'''{json_string}'''", f"'''{schema}'''"] + description_string = ( + "This function takes a JSON string and ensures that it" + " is parseable and fully compliant with the provided schema. If an object" + " or field specified in the schema isn't contained within the correct JSON," + " it is omitted. The function also escapes any double quotes within JSON" + " string values to ensure that they are valid. If the JSON string contains" + " any None or NaN values, they are replaced with null before being parsed." 
+ ) + + # If it doesn't already start with a "`", add one: + if not json_string.startswith("`"): + json_string = "```json\n" + json_string + "\n```" + result_string = call_ai_function( + function_string, args, description_string, model=CFG.fast_llm_model + ) + logger.debug("------------ JSON FIX ATTEMPT ---------------") + logger.debug(f"Original JSON: {json_string}") + logger.debug("-----------") + logger.debug(f"Fixed JSON: {result_string}") + logger.debug("----------- END OF FIX ATTEMPT ----------------") + + try: + json.loads(result_string) # just check the validity + return result_string + except json.JSONDecodeError: # noqa: E722 + # Get the call stack: + # import traceback + # call_stack = traceback.format_exc() + # print(f"Failed to fix JSON: '{json_string}' "+call_stack) + return "failed" + + +def fix_invalid_escape(json_to_load: str, error_message: str) -> str: + """Fix invalid escape sequences in JSON strings. + + Args: + json_to_load (str): The JSON string. + error_message (str): The error message from the JSONDecodeError + exception. + + Returns: + str: The JSON string with invalid escape sequences fixed. + """ + while error_message.startswith("Invalid \\escape"): + bad_escape_location = extract_char_position(error_message) + json_to_load = ( + json_to_load[:bad_escape_location] + json_to_load[bad_escape_location + 1:] + ) + try: + json.loads(json_to_load) + return json_to_load + except json.JSONDecodeError as e: + if CFG.debug_mode: + print("json loads error - fix invalid escape", e) + error_message = str(e) + return json_to_load + + +def balance_braces(json_string: str) -> Optional[str]: + """ + Balance the braces in a JSON string. + + Args: + json_string (str): The JSON string. + + Returns: + str: The JSON string with braces balanced. + """ + + open_braces_count = json_string.count("{") + close_braces_count = json_string.count("}") + + while open_braces_count > close_braces_count: + json_string += "}" + close_braces_count += 1 + + while close_braces_count > open_braces_count: + json_string = json_string.rstrip("}") + close_braces_count -= 1 + + with contextlib.suppress(json.JSONDecodeError): + json.loads(json_string) + return json_string + + +def add_quotes_to_property_names(json_string: str) -> str: + """ + Add quotes to property names in a JSON string. + + Args: + json_string (str): The JSON string. + + Returns: + str: The JSON string with quotes added to property names. + """ + + def replace_func(match: re.Match) -> str: + return f'"{match[1]}":' + + property_name_pattern = re.compile(r"(\w+):") + corrected_json_string = property_name_pattern.sub(replace_func, json_string) + + try: + json.loads(corrected_json_string) + return corrected_json_string + except json.JSONDecodeError as e: + raise e + + +def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]: + """Fix the given JSON string to make it parseable and fully compliant with two techniques. + + Args: + json_string (str): The JSON string to fix. + + Returns: + str: The fixed JSON string. 
+ """ + + # Parse and print Assistant response + assistant_reply_json = fix_and_parse_json(assistant_reply) + if assistant_reply_json == {}: + assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets( + assistant_reply + ) + + if assistant_reply_json != {}: + return assistant_reply_json + + logger.error("Error: The following AI output couldn't be converted to a JSON:\n", assistant_reply) + if CFG.speak_mode: + say_text("I have received an invalid JSON response from the OpenAI API.") + + return {} + def correct_json(json_to_load: str) -> str: """ @@ -134,7 +284,7 @@ def try_ai_fix( " slightly." ) # Now try to fix this up using the ai_functions - ai_fixed_json = fix_json(json_to_load, JSON_SCHEMA) + ai_fixed_json = auto_fix_json(json_to_load, JSON_SCHEMA) if ai_fixed_json != "failed": return json.loads(ai_fixed_json) diff --git a/autogpt/json_schemas/llm_response_format_1.json b/autogpt/json_utils/llm_response_format_1.json similarity index 100% rename from autogpt/json_schemas/llm_response_format_1.json rename to autogpt/json_utils/llm_response_format_1.json diff --git a/autogpt/json_validation/validate_json.py b/autogpt/json_utils/utilities.py similarity index 63% rename from autogpt/json_validation/validate_json.py rename to autogpt/json_utils/utilities.py index 440c3b0b..af8a28c9 100644 --- a/autogpt/json_validation/validate_json.py +++ b/autogpt/json_utils/utilities.py @@ -1,10 +1,31 @@ +"""Utilities for the json_fixes package.""" import json +import re + from jsonschema import Draft7Validator -from autogpt.config import Config + from autogpt.logs import logger +from autogpt.config import Config CFG = Config() +def extract_char_position(error_message: str) -> int: + """Extract the character position from the JSONDecodeError message. + + Args: + error_message (str): The error message from the JSONDecodeError + exception. + + Returns: + int: The character position. 
+ """ + + char_pattern = re.compile(r"\(char (\d+)\)") + if match := char_pattern.search(error_message): + return int(match[1]) + else: + raise ValueError("Character position not found in the error message.") + def validate_json(json_object: object, schema_name: object) -> object: """ diff --git a/autogpt/logs.py b/autogpt/logs.py index c1e436db..58375f14 100644 --- a/autogpt/logs.py +++ b/autogpt/logs.py @@ -204,10 +204,7 @@ logger = Logger() def print_assistant_thoughts(ai_name, assistant_reply): """Prints the assistant's thoughts to the console""" - from autogpt.json_fixes.bracket_termination import ( - attempt_to_fix_json_by_finding_outermost_brackets, - ) - from autogpt.json_fixes.parsing import fix_and_parse_json + from autogpt.json_utils.auto_fix import fix_and_parse_json, attempt_to_fix_json_by_finding_outermost_brackets try: try: diff --git a/tests/test_json_parser.py b/tests/test_json_parser.py index 2862034b..f8fa5955 100644 --- a/tests/test_json_parser.py +++ b/tests/test_json_parser.py @@ -1,7 +1,7 @@ import unittest import tests.context -from autogpt.json_fixes.parsing import fix_and_parse_json +from autogpt.json_utils.auto_fix import fix_and_parse_json class TestParseJson(unittest.TestCase): diff --git a/tests/unit/json_tests.py b/tests/unit/json_tests.py index 561b8a38..f65a6f6a 100644 --- a/tests/unit/json_tests.py +++ b/tests/unit/json_tests.py @@ -1,6 +1,6 @@ import unittest -from autogpt.json_parser import fix_and_parse_json +from autogpt.json_utils.auto_fix import fix_and_parse_json class TestParseJson(unittest.TestCase): From 56ecbeeef734019d1d3112fc2a657a14bff69ccb Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Mon, 17 Apr 2023 02:22:18 +0200 Subject: [PATCH 048/152] Clean up README --- README.md | 227 ++++++++++++++++++++++++------------------------------ 1 file changed, 102 insertions(+), 125 deletions(-) diff --git a/README.md b/README.md index f60aa9ff..07a088e3 100644 --- a/README.md +++ b/README.md @@ -1,15 +1,19 @@ # Auto-GPT: An Autonomous GPT-4 Experiment +[![Unit Tests](https://img.shields.io/github/actions/workflow/status/Significant-Gravitas/Auto-GPT/ci.yml?label=unit%20tests)](https://github.com/Significant-Gravitas/Auto-GPT/actions/workflows/ci.yml) +[![Discord Follow](https://dcbadge.vercel.app/api/server/autogpt?style=flat)](https://discord.gg/autogpt) +[![GitHub Repo stars](https://img.shields.io/github/stars/Significant-Gravitas/auto-gpt?style=social)](https://github.com/Significant-Gravitas/Auto-GPT/stargazers) +[![Twitter Follow](https://img.shields.io/twitter/follow/siggravitas?style=social)](https://twitter.com/SigGravitas) + + +
+ ### 🔴 🔴 🔴 Urgent: USE `stable` not `master` 🔴 🔴 🔴 -> This information takes precedence over any other information provided and should be followed accordingly. +**Download the latest `stable` release from here: https://github.com/Significant-Gravitas/Auto-GPT/releases/latest.** +The `master` branch may often be in a **broken** state. -Our workflow has been improved, but please note that `master` branch may often be in a **broken** state. -Please download the latest `stable` release from here: https://github.com/Torantulino/Auto-GPT/releases/latest. +
-![GitHub Repo stars](https://img.shields.io/github/stars/Significant-Gravitas/auto-gpt?style=social) -[![Twitter Follow](https://img.shields.io/twitter/follow/siggravitas?style=social)](https://twitter.com/SigGravitas) -[![Discord Follow](https://dcbadge.vercel.app/api/server/autogpt?style=flat)](https://discord.gg/autogpt) -[![Unit Tests](https://github.com/Significant-Gravitaso/Auto-GPT/actions/workflows/ci.yml/badge.svg)](https://github.com/Significant-Gravitas/Auto-GPT/actions/workflows/ci.yml) Auto-GPT is an experimental open-source application showcasing the capabilities of the GPT-4 language model. This program, driven by GPT-4, chains together LLM "thoughts", to autonomously achieve whatever goal you set. As one of the first examples of GPT-4 running fully autonomously, Auto-GPT pushes the boundaries of what is possible with AI. @@ -37,42 +41,6 @@ Development of this free, open-source project is made possible by all the Dradstone  CrypteorCapital  avy-ai  shawnharmsen  sunchongren  DailyBotHQ  mathewhawkins  MediConCenHK  kMag410  nicoguyon  Mobivs  jazgarewal  marv-technology  rapidstartup  Brodie0  lucas-chu  rejunity  comet-ml  ColinConwell  cfarquhar  ikarosai  ChrisDMT  Odin519Tomas  vkozacek  belharethsami  sultanmeghji  scryptedinc  johnculkin  RealChrisSean  fruition  jd3655  Web3Capital  allenstecat  tob-le-rone  SwftCoins  MetaPath01  joaomdmoura  ternary5  refinery1  josephcmiller2  webbcolton  tommygeee  lmaugustin  garythebat  Cameron-Fulton  angiaou  caitlynmeeks  MBassi91  Daniel1357  omphos  abhinav-pandey29  DataMetis  concreit  st617  RThaweewat  KiaArmani  Pythagora-io  AryaXAI  fabrietech  jun784  Mr-Bishop42  rickscode  projectonegames  rocks6  GalaxyVideoAgency  thisisjeffchen  TheStoneMX  txtr99  ZERO-A-ONE  

- - -## Table of Contents - -- [Auto-GPT: An Autonomous GPT-4 Experiment](#auto-gpt-an-autonomous-gpt-4-experiment) - - [🔴 🔴 🔴 Urgent: USE `stable` not `master` 🔴 🔴 🔴](#----urgent-use-stable-not-master----) - - [Demo (30/03/2023):](#demo-30032023) - - [Table of Contents](#table-of-contents) - - [🚀 Features](#-features) - - [📋 Requirements](#-requirements) - - [💾 Installation](#-installation) - - [🔧 Usage](#-usage) - - [Logs](#logs) - - [Docker](#docker) - - [Command Line Arguments](#command-line-arguments) - - [🗣️ Speech Mode](#️-speech-mode) - - [🔍 Google API Keys Configuration](#-google-api-keys-configuration) - - [Setting up environment variables](#setting-up-environment-variables) - - [Memory Backend Setup](#memory-backend-setup) - - [Redis Setup](#redis-setup) - - [🌲 Pinecone API Key Setup](#-pinecone-api-key-setup) - - [Milvus Setup](#milvus-setup) - - [Weaviate Setup](#weaviate-setup) - - [Setting up environment variables](#setting-up-environment-variables-1) - - [Setting Your Cache Type](#setting-your-cache-type) - - [View Memory Usage](#view-memory-usage) - - [🧠 Memory pre-seeding](#-memory-pre-seeding) - - [💀 Continuous Mode ⚠️](#-continuous-mode-️) - - [GPT3.5 ONLY Mode](#gpt35-only-mode) - - [🖼 Image Generation](#-image-generation) - - [⚠️ Limitations](#️-limitations) - - [🛡 Disclaimer](#-disclaimer) - - [🐦 Connect with Us on Twitter](#-connect-with-us-on-twitter) - - [Run tests](#run-tests) - - [Run linter](#run-linter) - ## 🚀 Features - 🌐 Internet access for searches and information gathering @@ -83,16 +51,17 @@ Development of this free, open-source project is made possible by all the

Blake Werlinger +

💖 Help Fund Auto-GPT's Development 💖

If you can spare a coffee, you can help to cover the costs of developing Auto-GPT and help push the boundaries of fully autonomous AI! From 9589334a305198c837bfb8720ed6f06176b2f216 Mon Sep 17 00:00:00 2001 From: EH Date: Mon, 17 Apr 2023 03:34:02 +0100 Subject: [PATCH 054/152] Add File Downloading Capabilities (#1680) * Added 'download_file' command * Added util and fixed spinner * Fixed comma and added autogpt/auto_gpt_workspace to .gitignore * Fix linter issues * Fix more linter issues * Fix Lint Issues * Added 'download_file' command * Added util and fixed spinner * Fixed comma and added autogpt/auto_gpt_workspace to .gitignore * Fix linter issues * Fix more linter issues * Conditionally add the 'download_file' prompt * Update args.py * Removed Duplicate Prompt * Switched to using path_in_workspace function --- .gitignore | 1 + autogpt/app.py | 5 +++ autogpt/args.py | 16 +++++++++- autogpt/commands/file_operations.py | 49 ++++++++++++++++++++++++++++- autogpt/config/config.py | 1 + autogpt/prompt.py | 10 ++++++ autogpt/spinner.py | 15 ++++++++- autogpt/utils.py | 13 ++++++++ 8 files changed, 107 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index eda7f327..2220ef6e 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,7 @@ autogpt/keys.py autogpt/*json autogpt/node_modules/ autogpt/__pycache__/keys.cpython-310.pyc +autogpt/auto_gpt_workspace package-lock.json *.pyc auto_gpt_workspace/* diff --git a/autogpt/app.py b/autogpt/app.py index 78b5bd2f..19c075f0 100644 --- a/autogpt/app.py +++ b/autogpt/app.py @@ -17,6 +17,7 @@ from autogpt.commands.file_operations import ( read_file, search_files, write_to_file, + download_file ) from autogpt.json_fixes.parsing import fix_and_parse_json from autogpt.memory import get_memory @@ -164,6 +165,10 @@ def execute_command(command_name: str, arguments): return delete_file(arguments["file"]) elif command_name == "search_files": return search_files(arguments["directory"]) + elif command_name == "download_file": + if not CFG.allow_downloads: + return "Error: You do not have user authorization to download files locally." + return download_file(arguments["url"], arguments["file"]) elif command_name == "browse_website": return browse_website(arguments["url"], arguments["question"]) # TODO: Change these to take in a file rather than pasted code, if diff --git a/autogpt/args.py b/autogpt/args.py index eca32334..f0e9c07a 100644 --- a/autogpt/args.py +++ b/autogpt/args.py @@ -1,7 +1,7 @@ """This module contains the argument parsing logic for the script.""" import argparse -from colorama import Fore +from colorama import Fore, Back, Style from autogpt import utils from autogpt.config import Config from autogpt.logs import logger @@ -63,6 +63,12 @@ def parse_arguments() -> None: help="Specifies which ai_settings.yaml file to use, will also automatically" " skip the re-prompt.", ) + parser.add_argument( + '--allow-downloads', + action='store_true', + dest='allow_downloads', + help='Dangerous: Allows Auto-GPT to download files natively.' 
+ ) args = parser.parse_args() if args.debug: @@ -133,5 +139,13 @@ def parse_arguments() -> None: CFG.ai_settings_file = file CFG.skip_reprompt = True + if args.allow_downloads: + logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED") + logger.typewriter_log("WARNING: ", Fore.YELLOW, + f"{Back.LIGHTYELLOW_EX}Auto-GPT will now be able to download and save files to your machine.{Back.RESET} " + + "It is recommended that you monitor any files it downloads carefully.") + logger.typewriter_log("WARNING: ", Fore.YELLOW, f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}") + CFG.allow_downloads = True + if args.browser_name: CFG.selenium_web_browser = args.browser_name diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py index 8abc2e23..d273c1a3 100644 --- a/autogpt/commands/file_operations.py +++ b/autogpt/commands/file_operations.py @@ -4,9 +4,16 @@ from __future__ import annotations import os import os.path from pathlib import Path -from typing import Generator +from typing import Generator, List +import requests +from requests.adapters import HTTPAdapter +from requests.adapters import Retry +from colorama import Fore, Back +from autogpt.spinner import Spinner +from autogpt.utils import readable_file_size from autogpt.workspace import path_in_workspace, WORKSPACE_PATH + LOG_FILE = "file_logger.txt" LOG_FILE_PATH = WORKSPACE_PATH / LOG_FILE @@ -214,3 +221,43 @@ def search_files(directory: str) -> list[str]: found_files.append(relative_path) return found_files + + +def download_file(url, filename): + """Downloads a file + Args: + url (str): URL of the file to download + filename (str): Filename to save the file as + """ + safe_filename = path_in_workspace(filename) + try: + message = f"{Fore.YELLOW}Downloading file from {Back.LIGHTBLUE_EX}{url}{Back.RESET}{Fore.RESET}" + with Spinner(message) as spinner: + session = requests.Session() + retry = Retry(total=3, backoff_factor=1, status_forcelist=[502, 503, 504]) + adapter = HTTPAdapter(max_retries=retry) + session.mount('http://', adapter) + session.mount('https://', adapter) + + total_size = 0 + downloaded_size = 0 + + with session.get(url, allow_redirects=True, stream=True) as r: + r.raise_for_status() + total_size = int(r.headers.get('Content-Length', 0)) + downloaded_size = 0 + + with open(safe_filename, 'wb') as f: + for chunk in r.iter_content(chunk_size=8192): + f.write(chunk) + downloaded_size += len(chunk) + + # Update the progress message + progress = f"{readable_file_size(downloaded_size)} / {readable_file_size(total_size)}" + spinner.update_message(f"{message} {progress}") + + return f'Successfully downloaded and locally stored file: "{filename}"! 
(Size: {readable_file_size(total_size)})' + except requests.HTTPError as e: + return f"Got an HTTP Error whilst trying to download file: {e}" + except Exception as e: + return "Error: " + str(e) diff --git a/autogpt/config/config.py b/autogpt/config/config.py index 22da52b0..fe6f4f32 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -24,6 +24,7 @@ class Config(metaclass=Singleton): self.continuous_limit = 0 self.speak_mode = False self.skip_reprompt = False + self.allow_downloads = False self.selenium_web_browser = os.getenv("USE_WEB_BROWSER", "chrome") self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml") diff --git a/autogpt/prompt.py b/autogpt/prompt.py index 18a5736c..a2b20b1f 100644 --- a/autogpt/prompt.py +++ b/autogpt/prompt.py @@ -105,6 +105,16 @@ def get_prompt() -> str: ), ) + # Only add the download file command if the AI is allowed to execute it + if cfg.allow_downloads: + commands.append( + ( + "Downloads a file from the internet, and stores it locally", + "download_file", + {"url": "", "file": ""} + ), + ) + # Add these command last. commands.append( ("Do Nothing", "do_nothing", {}), diff --git a/autogpt/spinner.py b/autogpt/spinner.py index 56b4f20a..febcea8e 100644 --- a/autogpt/spinner.py +++ b/autogpt/spinner.py @@ -29,12 +29,14 @@ class Spinner: time.sleep(self.delay) sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r") - def __enter__(self) -> None: + def __enter__(self): """Start the spinner""" self.running = True self.spinner_thread = threading.Thread(target=self.spin) self.spinner_thread.start() + return self + def __exit__(self, exc_type, exc_value, exc_traceback) -> None: """Stop the spinner @@ -48,3 +50,14 @@ class Spinner: self.spinner_thread.join() sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r") sys.stdout.flush() + + def update_message(self, new_message, delay=0.1): + """Update the spinner message + Args: + new_message (str): New message to display + delay: Delay in seconds before updating the message + """ + time.sleep(delay) + sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r") # Clear the current message + sys.stdout.flush() + self.message = new_message diff --git a/autogpt/utils.py b/autogpt/utils.py index 59709d02..11d98d1b 100644 --- a/autogpt/utils.py +++ b/autogpt/utils.py @@ -24,3 +24,16 @@ def validate_yaml_file(file: str): ) return (True, f"Successfully validated {Fore.CYAN}`{file}`{Fore.RESET}!") + + +def readable_file_size(size, decimal_places=2): + """Converts the given size in bytes to a readable format. 
+ Args: + size: Size in bytes + decimal_places (int): Number of decimal places to display + """ + for unit in ['B', 'KB', 'MB', 'GB', 'TB']: + if size < 1024.0: + break + size /= 1024.0 + return f"{size:.{decimal_places}f} {unit}" From 0fa807394711010a17fe37a3afbce81978e233e2 Mon Sep 17 00:00:00 2001 From: Ben Song Date: Mon, 17 Apr 2023 11:53:05 +0800 Subject: [PATCH 055/152] add docker requirements - jsonschema --- requirements-docker.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/requirements-docker.txt b/requirements-docker.txt index 3a8a344c..a6018f8f 100644 --- a/requirements-docker.txt +++ b/requirements-docker.txt @@ -24,4 +24,5 @@ pre-commit black isort gitpython==3.1.31 -tweepy \ No newline at end of file +tweepy +jsonschema \ No newline at end of file From 71c6600abf1525364db949622c7dc3f9b0e00eae Mon Sep 17 00:00:00 2001 From: lengweiping Date: Mon, 17 Apr 2023 12:44:46 +0800 Subject: [PATCH 056/152] memory object move to memory_add block --- autogpt/app.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/autogpt/app.py b/autogpt/app.py index 19c075f0..979f57d3 100644 --- a/autogpt/app.py +++ b/autogpt/app.py @@ -112,11 +112,10 @@ def execute_command(command_name: str, arguments): arguments (dict): The arguments for the command Returns: - str: The result of the command""" - memory = get_memory(CFG) - + str: The result of the command + """ try: - command_name = map_command_synonyms(command_name) + command_name = map_command_synonyms(command_name.lower()) if command_name == "google": # Check if the Google API key is set and use the official search method # If the API key is not set or has only whitespaces, use the unofficial @@ -136,6 +135,7 @@ def execute_command(command_name: str, arguments): return str(safe_message) elif command_name == "memory_add": + memory = get_memory(CFG) return memory.add(arguments["string"]) elif command_name == "start_agent": return start_agent( From e86764df459e3f4bcbdbfdc796af63bc715fbb71 Mon Sep 17 00:00:00 2001 From: Eesa Hamza Date: Mon, 17 Apr 2023 07:55:48 +0300 Subject: [PATCH 057/152] Add linux selenium fixes --- autogpt/commands/web_selenium.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py index 8c652294..9b638ba0 100644 --- a/autogpt/commands/web_selenium.py +++ b/autogpt/commands/web_selenium.py @@ -17,6 +17,7 @@ from selenium.webdriver.safari.options import Options as SafariOptions import logging from pathlib import Path from autogpt.config import Config +from sys import platform FILE_DIR = Path(__file__).parent.parent CFG = Config() @@ -66,6 +67,13 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]: "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36" ) + # Add linux specific flags + if platform == "linux" or platform == "linux2": + options.add_argument("--no-sandbox") + options.add_argument("--disable-dev-shm-usage") + options.add_argument("--remote-debugging-port=9222") + + if CFG.selenium_web_browser == "firefox": driver = webdriver.Firefox( executable_path=GeckoDriverManager().install(), options=options From 64383776a24864f32f69e4f56214089940623664 Mon Sep 17 00:00:00 2001 From: "Gabriel R. 
Barbosa" <12158575+gabrielrbarbosa@users.noreply.github.com> Date: Mon, 17 Apr 2023 03:04:35 -0300 Subject: [PATCH 058/152] Update brian.py - Prevent TypeError exception TypeError: BrianSpeech._speech() takes 2 positional arguments but 3 were given. Use the same arguments as used in _speech method from gtts.py --- autogpt/speech/brian.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/speech/brian.py b/autogpt/speech/brian.py index e581bbcc..b9298f55 100644 --- a/autogpt/speech/brian.py +++ b/autogpt/speech/brian.py @@ -13,7 +13,7 @@ class BrianSpeech(VoiceBase): """Setup the voices, API key, etc.""" pass - def _speech(self, text: str) -> bool: + def _speech(self, text: str, _: int = 0) -> bool: """Speak text using Brian with the streamelements API Args: From 60b779a9059dbd274b336a27f9a6b6db0bde53fd Mon Sep 17 00:00:00 2001 From: Alastair D'Silva Date: Mon, 17 Apr 2023 17:09:13 +1000 Subject: [PATCH 059/152] Remove requirements-docker.txt This file needs to be maintained parallel to requirements.txt, but isn't, causes problems when new dependencies are introduced. Instead, derive the Docker dependencies from the stock ones. Signed-off-by: Alastair D'Silva --- Dockerfile | 5 +++-- requirements-docker.txt | 28 ---------------------------- requirements.txt | 2 ++ 3 files changed, 5 insertions(+), 30 deletions(-) delete mode 100644 requirements-docker.txt diff --git a/Dockerfile b/Dockerfile index 9886d742..5219e7d1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -17,8 +17,9 @@ RUN chown appuser:appuser /home/appuser USER appuser # Copy the requirements.txt file and install the requirements -COPY --chown=appuser:appuser requirements-docker.txt . -RUN pip install --no-cache-dir --user -r requirements-docker.txt +COPY --chown=appuser:appuser requirements.txt . 
+RUN sed -i '/Items below this point will not be included in the Docker Image/,$d' requirements.txt && \ + pip install --no-cache-dir --user -r requirements.txt # Copy the application files COPY --chown=appuser:appuser autogpt/ ./autogpt diff --git a/requirements-docker.txt b/requirements-docker.txt deleted file mode 100644 index a6018f8f..00000000 --- a/requirements-docker.txt +++ /dev/null @@ -1,28 +0,0 @@ -beautifulsoup4 -colorama==0.4.6 -openai==0.27.2 -playsound==1.2.2 -python-dotenv==1.0.0 -pyyaml==6.0 -readability-lxml==0.8.1 -requests -tiktoken==0.3.3 -gTTS==2.3.1 -docker -duckduckgo-search -google-api-python-client #(https://developers.google.com/custom-search/v1/overview) -pinecone-client==2.2.1 -redis -orjson -Pillow -selenium -webdriver-manager -coverage -flake8 -numpy -pre-commit -black -isort -gitpython==3.1.31 -tweepy -jsonschema \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 843b66bf..3f1eee5b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -30,6 +30,8 @@ sourcery isort gitpython==3.1.31 +# Items below this point will not be included in the Docker Image + # Testing dependencies pytest asynctest From 2b87245e2231e5d13022df1c9f5cc07584e254d6 Mon Sep 17 00:00:00 2001 From: XFFXFF <1247714429@qq.com> Date: Mon, 17 Apr 2023 16:21:52 +0800 Subject: [PATCH 060/152] fix a missing import --- autogpt/memory/local.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/autogpt/memory/local.py b/autogpt/memory/local.py index 9b911eef..803b6dc6 100644 --- a/autogpt/memory/local.py +++ b/autogpt/memory/local.py @@ -2,13 +2,13 @@ from __future__ import annotations import dataclasses import os -from typing import Any +from typing import Any, List import numpy as np import orjson -from autogpt.memory.base import MemoryProviderSingleton from autogpt.llm_utils import create_embedding_with_ada +from autogpt.memory.base import MemoryProviderSingleton EMBED_DIM = 1536 SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS From bd25822b35ab924290f28b104e519b49b8930591 Mon Sep 17 00:00:00 2001 From: Mad Misaghi Date: Mon, 17 Apr 2023 12:24:27 +0330 Subject: [PATCH 061/152] Update .env.template addedMilvus --- .env.template | 1 + 1 file changed, 1 insertion(+) diff --git a/.env.template b/.env.template index eeff2907..9593276f 100644 --- a/.env.template +++ b/.env.template @@ -54,6 +54,7 @@ SMART_TOKEN_LIMIT=8000 # local - Default # pinecone - Pinecone (if configured) # redis - Redis (if configured) +# milvus - Milvus (if configured) MEMORY_BACKEND=local ### PINECONE From 74a8b5d83256c5b9116a375a4520d2727e52bece Mon Sep 17 00:00:00 2001 From: suzuken Date: Mon, 17 Apr 2023 18:15:49 +0900 Subject: [PATCH 062/152] config.py: update OpenAI link --- autogpt/config/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/config/config.py b/autogpt/config/config.py index fe6f4f32..a950453e 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -237,5 +237,5 @@ def check_openai_api_key() -> None: Fore.RED + "Please set your OpenAI API key in .env or as an environment variable." 
) - print("You can get your key from https://beta.openai.com/account/api-keys") + print("You can get your key from https://platform.openai.com/account/api-keys") exit(1) From 125f0ba61ad57188e6f4f109f2463f31530044dd Mon Sep 17 00:00:00 2001 From: Bob van Luijt Date: Mon, 17 Apr 2023 12:46:27 +0200 Subject: [PATCH 063/152] Update README.md with Weaviate installation and reference --- README.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/README.md b/README.md index 71957748..b919f51d 100644 --- a/README.md +++ b/README.md @@ -65,6 +65,7 @@ Development of this free, open-source project is made possible by all the =3.15.4"`. +#### Install the Weaviate client + +Install the Weaviate client before usage. + +``` +$ pip install weaviate-client +``` + #### Setting up environment variables In your `.env` file set the following: From 0d2e1963682e0e6a65934f475442637277266d03 Mon Sep 17 00:00:00 2001 From: BingokoN Date: Mon, 17 Apr 2023 12:14:43 +0100 Subject: [PATCH 064/152] refactoring/splitting the json fix functions into general module and llm module which need AI's assistance. --- autogpt/agent/agent.py | 2 +- autogpt/app.py | 1 - autogpt/json_utils/json_fix_general.py | 124 ++++++++++++++++++ .../{auto_fix.py => json_fix_llm.py} | 119 +---------------- autogpt/json_utils/utilities.py | 4 +- autogpt/logs.py | 2 +- tests/test_json_parser.py | 2 +- tests/unit/json_tests.py | 2 +- 8 files changed, 135 insertions(+), 121 deletions(-) create mode 100644 autogpt/json_utils/json_fix_general.py rename autogpt/json_utils/{auto_fix.py => json_fix_llm.py} (67%) diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index 6ec0a623..f87cd483 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -3,7 +3,7 @@ from autogpt.app import execute_command, get_command from autogpt.chat import chat_with_ai, create_chat_message from autogpt.config import Config -from autogpt.json_utils.auto_fix import fix_json_using_multiple_techniques +from autogpt.json_utils.json_fix_llm import fix_json_using_multiple_techniques from autogpt.json_utils.utilities import validate_json from autogpt.logs import logger, print_assistant_thoughts from autogpt.speech import say_text diff --git a/autogpt/app.py b/autogpt/app.py index 190f934b..48db0366 100644 --- a/autogpt/app.py +++ b/autogpt/app.py @@ -18,7 +18,6 @@ from autogpt.commands.file_operations import ( search_files, write_to_file, ) -from autogpt.json_utils.auto_fix import fix_and_parse_json from autogpt.memory import get_memory from autogpt.processing.text import summarize_text from autogpt.speech import say_text diff --git a/autogpt/json_utils/json_fix_general.py b/autogpt/json_utils/json_fix_general.py new file mode 100644 index 00000000..cd6a6884 --- /dev/null +++ b/autogpt/json_utils/json_fix_general.py @@ -0,0 +1,124 @@ +"""This module contains functions to fix JSON strings using general programmatic approaches, suitable for addressing +common JSON formatting issues.""" +from __future__ import annotations + +import contextlib +import json +import re +from typing import Optional + +from autogpt.config import Config +from autogpt.json_utils.utilities import extract_char_position + +CFG = Config() + + +def fix_invalid_escape(json_to_load: str, error_message: str) -> str: + """Fix invalid escape sequences in JSON strings. + + Args: + json_to_load (str): The JSON string. + error_message (str): The error message from the JSONDecodeError + exception. + + Returns: + str: The JSON string with invalid escape sequences fixed. 
+ """ + while error_message.startswith("Invalid \\escape"): + bad_escape_location = extract_char_position(error_message) + json_to_load = ( + json_to_load[:bad_escape_location] + json_to_load[bad_escape_location + 1:] + ) + try: + json.loads(json_to_load) + return json_to_load + except json.JSONDecodeError as e: + if CFG.debug_mode: + print("json loads error - fix invalid escape", e) + error_message = str(e) + return json_to_load + + +def balance_braces(json_string: str) -> Optional[str]: + """ + Balance the braces in a JSON string. + + Args: + json_string (str): The JSON string. + + Returns: + str: The JSON string with braces balanced. + """ + + open_braces_count = json_string.count("{") + close_braces_count = json_string.count("}") + + while open_braces_count > close_braces_count: + json_string += "}" + close_braces_count += 1 + + while close_braces_count > open_braces_count: + json_string = json_string.rstrip("}") + close_braces_count -= 1 + + with contextlib.suppress(json.JSONDecodeError): + json.loads(json_string) + return json_string + + +def add_quotes_to_property_names(json_string: str) -> str: + """ + Add quotes to property names in a JSON string. + + Args: + json_string (str): The JSON string. + + Returns: + str: The JSON string with quotes added to property names. + """ + + def replace_func(match: re.Match) -> str: + return f'"{match[1]}":' + + property_name_pattern = re.compile(r"(\w+):") + corrected_json_string = property_name_pattern.sub(replace_func, json_string) + + try: + json.loads(corrected_json_string) + return corrected_json_string + except json.JSONDecodeError as e: + raise e + + +def correct_json(json_to_load: str) -> str: + """ + Correct common JSON errors. + Args: + json_to_load (str): The JSON string. + """ + + try: + if CFG.debug_mode: + print("json", json_to_load) + json.loads(json_to_load) + return json_to_load + except json.JSONDecodeError as e: + if CFG.debug_mode: + print("json loads error", e) + error_message = str(e) + if error_message.startswith("Invalid \\escape"): + json_to_load = fix_invalid_escape(json_to_load, error_message) + if error_message.startswith( + "Expecting property name enclosed in double quotes" + ): + json_to_load = add_quotes_to_property_names(json_to_load) + try: + json.loads(json_to_load) + return json_to_load + except json.JSONDecodeError as e: + if CFG.debug_mode: + print("json loads error - add quotes", e) + error_message = str(e) + if balanced_str := balance_braces(json_to_load): + return balanced_str + return json_to_load diff --git a/autogpt/json_utils/auto_fix.py b/autogpt/json_utils/json_fix_llm.py similarity index 67% rename from autogpt/json_utils/auto_fix.py rename to autogpt/json_utils/json_fix_llm.py index 883eba78..44e78d05 100644 --- a/autogpt/json_utils/auto_fix.py +++ b/autogpt/json_utils/json_fix_llm.py @@ -1,15 +1,15 @@ -"""This module contains the function to fix JSON strings""" +"""This module contains functions to fix JSON strings generated by LLM models, such as ChatGPT, using the assistance +of the ChatGPT API or LLM models.""" from __future__ import annotations import contextlib import json -import re -from typing import Optional, Dict, Any +from typing import Dict, Any from colorama import Fore from regex import regex -from autogpt.json_utils.utilities import extract_char_position +from autogpt.json_utils.json_fix_general import correct_json from autogpt.llm_utils import call_ai_function from autogpt.logs import logger from autogpt.speech import say_text @@ -82,83 +82,6 @@ def auto_fix_json(json_string: str, 
schema: str) -> str: return "failed" -def fix_invalid_escape(json_to_load: str, error_message: str) -> str: - """Fix invalid escape sequences in JSON strings. - - Args: - json_to_load (str): The JSON string. - error_message (str): The error message from the JSONDecodeError - exception. - - Returns: - str: The JSON string with invalid escape sequences fixed. - """ - while error_message.startswith("Invalid \\escape"): - bad_escape_location = extract_char_position(error_message) - json_to_load = ( - json_to_load[:bad_escape_location] + json_to_load[bad_escape_location + 1:] - ) - try: - json.loads(json_to_load) - return json_to_load - except json.JSONDecodeError as e: - if CFG.debug_mode: - print("json loads error - fix invalid escape", e) - error_message = str(e) - return json_to_load - - -def balance_braces(json_string: str) -> Optional[str]: - """ - Balance the braces in a JSON string. - - Args: - json_string (str): The JSON string. - - Returns: - str: The JSON string with braces balanced. - """ - - open_braces_count = json_string.count("{") - close_braces_count = json_string.count("}") - - while open_braces_count > close_braces_count: - json_string += "}" - close_braces_count += 1 - - while close_braces_count > open_braces_count: - json_string = json_string.rstrip("}") - close_braces_count -= 1 - - with contextlib.suppress(json.JSONDecodeError): - json.loads(json_string) - return json_string - - -def add_quotes_to_property_names(json_string: str) -> str: - """ - Add quotes to property names in a JSON string. - - Args: - json_string (str): The JSON string. - - Returns: - str: The JSON string with quotes added to property names. - """ - - def replace_func(match: re.Match) -> str: - return f'"{match[1]}":' - - property_name_pattern = re.compile(r"(\w+):") - corrected_json_string = property_name_pattern.sub(replace_func, json_string) - - try: - json.loads(corrected_json_string) - return corrected_json_string - except json.JSONDecodeError as e: - raise e - - def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]: """Fix the given JSON string to make it parseable and fully compliant with two techniques. @@ -186,40 +109,6 @@ def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]: return {} -def correct_json(json_to_load: str) -> str: - """ - Correct common JSON errors. - Args: - json_to_load (str): The JSON string. 
- """ - - try: - if CFG.debug_mode: - print("json", json_to_load) - json.loads(json_to_load) - return json_to_load - except json.JSONDecodeError as e: - if CFG.debug_mode: - print("json loads error", e) - error_message = str(e) - if error_message.startswith("Invalid \\escape"): - json_to_load = fix_invalid_escape(json_to_load, error_message) - if error_message.startswith( - "Expecting property name enclosed in double quotes" - ): - json_to_load = add_quotes_to_property_names(json_to_load) - try: - json.loads(json_to_load) - return json_to_load - except json.JSONDecodeError as e: - if CFG.debug_mode: - print("json loads error - add quotes", e) - error_message = str(e) - if balanced_str := balance_braces(json_to_load): - return balanced_str - return json_to_load - - def fix_and_parse_json( json_to_load: str, try_to_fix_with_gpt: bool = True ) -> Dict[Any, Any]: diff --git a/autogpt/json_utils/utilities.py b/autogpt/json_utils/utilities.py index af8a28c9..e963abb3 100644 --- a/autogpt/json_utils/utilities.py +++ b/autogpt/json_utils/utilities.py @@ -9,6 +9,7 @@ from autogpt.config import Config CFG = Config() + def extract_char_position(error_message: str) -> int: """Extract the character position from the JSONDecodeError message. @@ -40,7 +41,8 @@ def validate_json(json_object: object, schema_name: object) -> object: if errors := sorted(validator.iter_errors(json_object), key=lambda e: e.path): logger.error("The JSON object is invalid.") if CFG.debug_mode: - logger.error(json.dumps(json_object, indent=4)) # Replace 'json_object' with the variable containing the JSON data + logger.error( + json.dumps(json_object, indent=4)) # Replace 'json_object' with the variable containing the JSON data logger.error("The following issues were found:") for error in errors: diff --git a/autogpt/logs.py b/autogpt/logs.py index 58375f14..a4dc3bab 100644 --- a/autogpt/logs.py +++ b/autogpt/logs.py @@ -204,7 +204,7 @@ logger = Logger() def print_assistant_thoughts(ai_name, assistant_reply): """Prints the assistant's thoughts to the console""" - from autogpt.json_utils.auto_fix import fix_and_parse_json, attempt_to_fix_json_by_finding_outermost_brackets + from autogpt.json_utils.json_fix_llm import fix_and_parse_json, attempt_to_fix_json_by_finding_outermost_brackets try: try: diff --git a/tests/test_json_parser.py b/tests/test_json_parser.py index f8fa5955..41c90a6f 100644 --- a/tests/test_json_parser.py +++ b/tests/test_json_parser.py @@ -1,7 +1,7 @@ import unittest import tests.context -from autogpt.json_utils.auto_fix import fix_and_parse_json +from autogpt.json_utils.json_fix_llm import fix_and_parse_json class TestParseJson(unittest.TestCase): diff --git a/tests/unit/json_tests.py b/tests/unit/json_tests.py index f65a6f6a..25c38337 100644 --- a/tests/unit/json_tests.py +++ b/tests/unit/json_tests.py @@ -1,6 +1,6 @@ import unittest -from autogpt.json_utils.auto_fix import fix_and_parse_json +from autogpt.json_utils.json_fix_llm import fix_and_parse_json class TestParseJson(unittest.TestCase): From 10cd0f3362ad6c86eefe7fc2a1f276ca49af98fe Mon Sep 17 00:00:00 2001 From: Eesa Hamza Date: Mon, 17 Apr 2023 07:32:40 +0300 Subject: [PATCH 065/152] Add the OpenAI API Keys Configuration to the top of the readme --- README.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 71957748..dbb44f52 100644 --- a/README.md +++ b/README.md @@ -67,6 +67,18 @@ Development of this free, open-source project is made possible by all the 
Billing](./docs/imgs/openai-api-key-billing-paid-account.png) + +#### **PLEASE ENSURE YOU HAVE DONE THIS STEP BEFORE PROCEEDING, OTHERWISE NOTHING WILL WORK!** + ## 💾 Installation To install Auto-GPT, follow these steps: @@ -207,18 +219,6 @@ python -m autogpt --speak - Adam : pNInz6obpgDQGcFmaJgB - Sam : yoZ06aMxZJJ28mfd3POQ - -## OpenAI API Keys Configuration - -Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys. - -To use OpenAI API key for Auto-GPT, you NEED to have billing set up (AKA paid account). - -You can set up paid account at https://platform.openai.com/account/billing/overview. - -![For OpenAI API key to work, set up paid account at OpenAI API > Billing](./docs/imgs/openai-api-key-billing-paid-account.png) - - ## 🔍 Google API Keys Configuration This section is optional, use the official google api if you are having issues with error 429 when running a google search. From 8dadf79614969a58a29b44cd9af4127795a153d6 Mon Sep 17 00:00:00 2001 From: H-jj-R Date: Mon, 17 Apr 2023 13:25:49 +0100 Subject: [PATCH 066/152] Spelling fixes --- .github/PULL_REQUEST_TEMPLATE.md | 2 +- autogpt/app.py | 4 ++-- autogpt/commands/git_operations.py | 2 +- autogpt/commands/google_search.py | 4 ++-- autogpt/llm_utils.py | 2 +- autogpt/memory/milvus.py | 2 +- autogpt/setup.py | 2 +- autogpt/speech/eleven_labs.py | 2 +- outputs/logs/message-log-1.txt | 2 +- 9 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index c355965a..cf7ffbf3 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -30,4 +30,4 @@ By following these guidelines, your PRs are more likely to be merged quickly aft - + diff --git a/autogpt/app.py b/autogpt/app.py index 19c075f0..ad9f18d1 100644 --- a/autogpt/app.py +++ b/autogpt/app.py @@ -212,7 +212,7 @@ def execute_command(command_name: str, arguments): def get_text_summary(url: str, question: str) -> str: - """Return the results of a google search + """Return the results of a Google search Args: url (str): The url to scrape @@ -227,7 +227,7 @@ def get_text_summary(url: str, question: str) -> str: def get_hyperlinks(url: str) -> Union[str, List[str]]: - """Return the results of a google search + """Return the results of a Google search Args: url (str): The url to scrape diff --git a/autogpt/commands/git_operations.py b/autogpt/commands/git_operations.py index 675eb228..05ce2a21 100644 --- a/autogpt/commands/git_operations.py +++ b/autogpt/commands/git_operations.py @@ -7,7 +7,7 @@ CFG = Config() def clone_repository(repo_url: str, clone_path: str) -> str: - """Clone a github repository locally + """Clone a GitHub repository locally Args: repo_url (str): The URL of the repository to clone diff --git a/autogpt/commands/google_search.py b/autogpt/commands/google_search.py index 148ba1d0..7d38ce75 100644 --- a/autogpt/commands/google_search.py +++ b/autogpt/commands/google_search.py @@ -11,7 +11,7 @@ CFG = Config() def google_search(query: str, num_results: int = 8) -> str: - """Return the results of a google search + """Return the results of a Google search Args: query (str): The search query. @@ -35,7 +35,7 @@ def google_search(query: str, num_results: int = 8) -> str: def google_official_search(query: str, num_results: int = 8) -> str | list[str]: - """Return the results of a google search using the official Google API + """Return the results of a Google search using the official Google API Args: query (str): The search query. 
diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py index 2075f934..1d739e4a 100644 --- a/autogpt/llm_utils.py +++ b/autogpt/llm_utils.py @@ -121,7 +121,7 @@ def create_chat_completion( def create_embedding_with_ada(text) -> list: - """Create a embedding with text-ada-002 using the OpenAI SDK""" + """Create an embedding with text-ada-002 using the OpenAI SDK""" num_retries = 10 for attempt in range(num_retries): backoff = 2 ** (attempt + 2) diff --git a/autogpt/memory/milvus.py b/autogpt/memory/milvus.py index c6e7d5a3..7a2571d0 100644 --- a/autogpt/memory/milvus.py +++ b/autogpt/memory/milvus.py @@ -46,7 +46,7 @@ class MilvusMemory(MemoryProviderSingleton): self.collection.load() def add(self, data) -> str: - """Add a embedding of data into memory. + """Add an embedding of data into memory. Args: data (str): The raw text to construct embedding index. diff --git a/autogpt/setup.py b/autogpt/setup.py index 5315c01d..79661905 100644 --- a/autogpt/setup.py +++ b/autogpt/setup.py @@ -1,4 +1,4 @@ -"""Setup the AI and its goals""" +"""Set up the AI and its goals""" from colorama import Fore, Style from autogpt import utils from autogpt.config.ai_config import AIConfig diff --git a/autogpt/speech/eleven_labs.py b/autogpt/speech/eleven_labs.py index 0af48cae..186ec6fc 100644 --- a/autogpt/speech/eleven_labs.py +++ b/autogpt/speech/eleven_labs.py @@ -14,7 +14,7 @@ class ElevenLabsSpeech(VoiceBase): """ElevenLabs speech class""" def _setup(self) -> None: - """Setup the voices, API key, etc. + """Set up the voices, API key, etc. Returns: None: None diff --git a/outputs/logs/message-log-1.txt b/outputs/logs/message-log-1.txt index 8a719016..6b146b98 100644 --- a/outputs/logs/message-log-1.txt +++ b/outputs/logs/message-log-1.txt @@ -483,7 +483,7 @@ How to Become a Freelance Artificial Intelligence Engineer Springboard https://www.springboard.com › Blog › Data Science -29/10/2021 — There are numerous freelancing platforms where you can kick start your career as a freelance artificial intelligence engineer. +29/10/2021 — There are numerous freelancing platforms where you can kick-start your career as a freelance artificial intelligence engineer. More to ask Is AI good for freelancing? What business can I start with AI? 
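For reference, the progress read-out that the download-file patch earlier in this series wires into `download_file` boils down to two pieces: a byte-count formatter (`readable_file_size`) and the spinner's new `update_message` hook. Below is a minimal standalone sketch of that pattern — the helper is re-implemented here only so the snippet runs on its own, and the chunked "download" loop is purely illustrative, not part of any patch in this series:

```python
import sys
import time


def readable_file_size(size: float, decimal_places: int = 2) -> str:
    # Walk up the units until the value drops below 1024, mirroring autogpt/utils.py
    for unit in ["B", "KB", "MB", "GB", "TB"]:
        if size < 1024.0:
            break
        size /= 1024.0
    return f"{size:.{decimal_places}f} {unit}"


def report_progress(downloaded: int, total: int) -> None:
    # Overwrite the current console line with the latest progress figure,
    # roughly what Spinner.update_message does for the download command
    progress = f"{readable_file_size(downloaded)} / {readable_file_size(total)}"
    sys.stdout.write(f"\rDownloading... {progress}")
    sys.stdout.flush()


if __name__ == "__main__":
    total_size = 3 * 1024 * 1024  # pretend we are fetching a 3 MB file
    downloaded = 0
    while downloaded < total_size:
        downloaded = min(downloaded + 512 * 1024, total_size)  # fake 512 KB chunks
        report_progress(downloaded, total_size)
        time.sleep(0.05)
    print()  # move past the progress line once the transfer finishes
```

In the actual command, the same progress string is passed to `Spinner.update_message` inside the `requests` streaming loop rather than written to stdout directly.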
From 9887016bdfed85bdad648ef75a7170154d05b121 Mon Sep 17 00:00:00 2001 From: Eesa Hamza Date: Mon, 17 Apr 2023 15:39:04 +0300 Subject: [PATCH 067/152] Move under chrome --- autogpt/commands/web_selenium.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py index 9b638ba0..5400be7f 100644 --- a/autogpt/commands/web_selenium.py +++ b/autogpt/commands/web_selenium.py @@ -67,12 +67,6 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]: "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36" ) - # Add linux specific flags - if platform == "linux" or platform == "linux2": - options.add_argument("--no-sandbox") - options.add_argument("--disable-dev-shm-usage") - options.add_argument("--remote-debugging-port=9222") - if CFG.selenium_web_browser == "firefox": driver = webdriver.Firefox( @@ -83,6 +77,9 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]: # See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari driver = webdriver.Safari(options=options) else: + if platform == "linux" or platform == "linux2": + options.add_argument("--disable-dev-shm-usage") + options.add_argument("--remote-debugging-port=9222") options.add_argument("--no-sandbox") driver = webdriver.Chrome( executable_path=ChromeDriverManager().install(), options=options From 2c55ff0b3d93dc5d285ed2015c4ad9e9a188cc54 Mon Sep 17 00:00:00 2001 From: EH Date: Mon, 17 Apr 2023 15:43:14 +0300 Subject: [PATCH 068/152] Update web_selenium.py --- autogpt/commands/web_selenium.py | 1 - 1 file changed, 1 deletion(-) diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py index 5400be7f..c0b1b249 100644 --- a/autogpt/commands/web_selenium.py +++ b/autogpt/commands/web_selenium.py @@ -67,7 +67,6 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]: "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36" ) - if CFG.selenium_web_browser == "firefox": driver = webdriver.Firefox( executable_path=GeckoDriverManager().install(), options=options From 10b2458f58ca91f38c2c6418564819e749d128ba Mon Sep 17 00:00:00 2001 From: NEBULITE Berlin <40317630+Funkelfetisch@users.noreply.github.com> Date: Mon, 17 Apr 2023 14:50:28 +0200 Subject: [PATCH 069/152] Update .env.template "redis" as hostname for redis to correctly use the docker compose internal networking feature --- .env.template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.env.template b/.env.template index eeff2907..d820b15f 100644 --- a/.env.template +++ b/.env.template @@ -63,7 +63,7 @@ PINECONE_API_KEY=your-pinecone-api-key PINECONE_ENV=your-pinecone-region ### REDIS -# REDIS_HOST - Redis host (Default: localhost) +# REDIS_HOST - Redis host (Default: localhost, use "redis" for docker-compose) # REDIS_PORT - Redis port (Default: 6379) # REDIS_PASSWORD - Redis password (Default: "") # WIPE_REDIS_ON_START - Wipes data / index on start (Default: False) From 6700ac94fae8e517c9e445f4c4732e1b4d847e96 Mon Sep 17 00:00:00 2001 From: Hamid Zare <12127420+hamidzr@users.noreply.github.com> Date: Mon, 17 Apr 2023 09:28:32 -0400 Subject: [PATCH 070/152] docs: update docs fix a typo --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index dbb44f52..3f9d4c57 100644 --- a/README.md +++ b/README.md @@ -325,7 
+325,7 @@ export MEMORY_BACKEND="pinecone" ### Milvus Setup -[Milvus](https://milvus.io/) is a open-source, high scalable vector database to storage huge amount of vector-based memory and provide fast relevant search. +[Milvus](https://milvus.io/) is an open-source, highly scalable vector database to store huge amounts of vector-based memory and provide fast relevant search. - setup milvus database, keep your pymilvus version and milvus version same to avoid compatible issues. - setup by open source [Install Milvus](https://milvus.io/docs/install_standalone-operator.md) From 1d49b87e48d0cfd40125a6b10f3599976ece4cc6 Mon Sep 17 00:00:00 2001 From: Acer Date: Mon, 17 Apr 2023 18:34:11 +0430 Subject: [PATCH 071/152] added missing import --- autogpt/agent/agent_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/agent/agent_manager.py b/autogpt/agent/agent_manager.py index e4bfb126..79375ea8 100644 --- a/autogpt/agent/agent_manager.py +++ b/autogpt/agent/agent_manager.py @@ -1,6 +1,6 @@ """Agent manager for managing GPT agents""" from __future__ import annotations - +from typing import Union from autogpt.llm_utils import create_chat_completion from autogpt.config.config import Singleton From 286edbbb8cb61e921e0315db8b506d6f7b1d6fce Mon Sep 17 00:00:00 2001 From: Manuel Otheo Date: Mon, 17 Apr 2023 09:17:07 -0600 Subject: [PATCH 072/152] changed rstrip for strip and added case for empty string changed rstrip for strip and added case for empty string in agent.py --- autogpt/agent/agent.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index dca614c7..89ea2c8c 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -115,9 +115,12 @@ class Agent: console_input = clean_input( Fore.MAGENTA + "Input:" + Style.RESET_ALL ) - if console_input.lower().rstrip() == "y": + if console_input.lower().strip() == "y": user_input = "GENERATE NEXT COMMAND JSON" break + elif console_input.lower().strip() == "": + print("Invalid input format.") + break elif console_input.lower().startswith("y -"): try: self.next_action_count = abs( From 57ee84437ba8c3c52866b4b19b79864c3e1e22a2 Mon Sep 17 00:00:00 2001 From: Manuel Otheo Date: Mon, 17 Apr 2023 09:20:52 -0600 Subject: [PATCH 073/152] changed break for continue --- autogpt/agent/agent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index 89ea2c8c..58c7840b 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -120,7 +120,7 @@ class Agent: break elif console_input.lower().strip() == "": print("Invalid input format.") - break + continue elif console_input.lower().startswith("y -"): try: self.next_action_count = abs( From d47466ddf949d72787d3a04db3959b5a579a702d Mon Sep 17 00:00:00 2001 From: superherointj <5861043+superherointj@users.noreply.github.com> Date: Wed, 12 Apr 2023 15:48:46 -0300 Subject: [PATCH 074/152] Add Nix flakes support through direnv * Nix (https://nixos.org) is a reproducible build system. * Enables Nix users to use/develop Auto-GPT, without installing PIP or any other future Auto-GPT dependency. --- .envrc | 4 ++++ .gitignore | 1 + 2 files changed, 5 insertions(+) create mode 100644 .envrc diff --git a/.envrc b/.envrc new file mode 100644 index 00000000..a7ad7263 --- /dev/null +++ b/.envrc @@ -0,0 +1,4 @@ +# Upon entering directory, direnv requests user permission once to automatically load project dependencies onwards. 
+# Eliminating the need of running "nix develop github:superherointj/nix-auto-gpt" for Nix users to develop/use Auto-GPT. + +[[ -z $IN_NIX_SHELL ]] && use flake github:superherointj/nix-auto-gpt diff --git a/.gitignore b/.gitignore index 2220ef6e..26d7e5a3 100644 --- a/.gitignore +++ b/.gitignore @@ -127,6 +127,7 @@ celerybeat.pid *.sage.py # Environments +.direnv/ .env .venv env/ From d4860fe9f09dba4bc8d9311b9a575098e3809ddc Mon Sep 17 00:00:00 2001 From: lfricken <6675120+lfricken@users.noreply.github.com> Date: Mon, 17 Apr 2023 10:27:53 -0500 Subject: [PATCH 075/152] Don't incapacitate yourself! (#1240) * subprocesses * fix lint * fix more lint * fix merge * fix merge again --- autogpt/app.py | 15 ++++++++++++++- autogpt/commands/execute_code.py | 30 ++++++++++++++++++++++++++++++ autogpt/prompt.py | 11 +++++++++++ 3 files changed, 55 insertions(+), 1 deletion(-) diff --git a/autogpt/app.py b/autogpt/app.py index 19c075f0..6f51fd98 100644 --- a/autogpt/app.py +++ b/autogpt/app.py @@ -10,7 +10,11 @@ from autogpt.config import Config from autogpt.commands.image_gen import generate_image from autogpt.commands.audio_text import read_audio_from_file from autogpt.commands.web_requests import scrape_links, scrape_text -from autogpt.commands.execute_code import execute_python_file, execute_shell +from autogpt.commands.execute_code import ( + execute_python_file, + execute_shell, + execute_shell_popen, +) from autogpt.commands.file_operations import ( append_to_file, delete_file, @@ -191,6 +195,15 @@ def execute_command(command_name: str, arguments): " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' " "in your config. Do not attempt to bypass the restriction." ) + elif command_name == "execute_shell_popen": + if CFG.execute_local_commands: + return execute_shell_popen(arguments["command_line"]) + else: + return ( + "You are not allowed to run local shell commands. To execute" + " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' " + "in your config. Do not attempt to bypass the restriction." 
+ ) elif command_name == "read_audio_from_file": return read_audio_from_file(arguments["file"]) elif command_name == "generate_image": diff --git a/autogpt/commands/execute_code.py b/autogpt/commands/execute_code.py index 2cc797cb..e2a8d994 100644 --- a/autogpt/commands/execute_code.py +++ b/autogpt/commands/execute_code.py @@ -114,6 +114,36 @@ def execute_shell(command_line: str) -> str: return output +def execute_shell_popen(command_line): + """Execute a shell command with Popen and returns an english description + of the event and the process id + + Args: + command_line (str): The command line to execute + + Returns: + str: Description of the fact that the process started and its id + """ + current_dir = os.getcwd() + + if WORKING_DIRECTORY not in current_dir: # Change dir into workspace if necessary + work_dir = os.path.join(os.getcwd(), WORKING_DIRECTORY) + os.chdir(work_dir) + + print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'") + + do_not_show_output = subprocess.DEVNULL + process = subprocess.Popen( + command_line, shell=True, stdout=do_not_show_output, stderr=do_not_show_output + ) + + # Change back to whatever the prior working dir was + + os.chdir(current_dir) + + return f"Subprocess started with PID:'{str(process.pid)}'" + + def we_are_running_in_a_docker_container() -> bool: """Check if we are running in a Docker container diff --git a/autogpt/prompt.py b/autogpt/prompt.py index a2b20b1f..33098af0 100644 --- a/autogpt/prompt.py +++ b/autogpt/prompt.py @@ -38,6 +38,9 @@ def get_prompt() -> str: prompt_generator.add_constraint( 'Exclusively use the commands listed in double quotes e.g. "command name"' ) + prompt_generator.add_constraint( + "Use subprocesses for commands that will not terminate within a few minutes" + ) # Define the command list commands = [ @@ -81,6 +84,7 @@ def get_prompt() -> str: {"code": "", "focus": ""}, ), ("Execute Python File", "execute_python_file", {"file": ""}), + ("Task Complete (Shutdown)", "task_complete", {"reason": ""}), ("Generate Image", "generate_image", {"prompt": ""}), ("Send Tweet", "send_tweet", {"text": ""}), ] @@ -104,6 +108,13 @@ def get_prompt() -> str: {"command_line": ""}, ), ) + commands.append( + ( + "Execute Shell Command Popen, non-interactive commands only", + "execute_shell_popen", + {"command_line": ""} + ), + ) # Only add the download file command if the AI is allowed to execute it if cfg.allow_downloads: From 35106ef662fda42b299de5e525ef31ae4bac39e7 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Mon, 17 Apr 2023 17:33:50 +0200 Subject: [PATCH 076/152] feat(pr-labels): auto-label conflicting PRs --- .github/workflows/pr-label.yml | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 .github/workflows/pr-label.yml diff --git a/.github/workflows/pr-label.yml b/.github/workflows/pr-label.yml new file mode 100644 index 00000000..9f5127e4 --- /dev/null +++ b/.github/workflows/pr-label.yml @@ -0,0 +1,22 @@ +name: "Pull Request auto-label" +on: + # So that PRs touching the same files as the push are updated + push: + # So that the `dirtyLabel` is removed if conflicts are resolve + # We recommend `pull_request_target` so that github secrets are available. 
+ # In `pull_request` we wouldn't be able to change labels of fork PRs + pull_request_target: + types: [opened, synchronize] + +jobs: + conflicts: + runs-on: ubuntu-latest + steps: + - name: Update PRs with conflict labels + uses: eps1lon/actions-label-merge-conflict@releases/2.x + with: + dirtyLabel: "conflicts" + #removeOnDirtyLabel: "PR: ready to ship" + repoToken: "${{ secrets.GITHUB_TOKEN }}" + commentOnDirty: "This pull request has conflicts with the base branch, please resolve those so we can evaluate the pull request." + commentOnClean: "Conflicts have been resolved! 🎉 A maintainer will review the pull request shortly." From baf31e69e53e51ae0d93976f17c74c4f2a6ed895 Mon Sep 17 00:00:00 2001 From: rickythefox Date: Mon, 17 Apr 2023 17:45:23 +0200 Subject: [PATCH 077/152] Use python:3-alpine image for code execution (#1192) --- autogpt/commands/execute_code.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/autogpt/commands/execute_code.py b/autogpt/commands/execute_code.py index e2a8d994..70b33a97 100644 --- a/autogpt/commands/execute_code.py +++ b/autogpt/commands/execute_code.py @@ -40,10 +40,10 @@ def execute_python_file(file: str): try: client = docker.from_env() - # You can replace 'python:3.8' with the desired Python image/version + # You can replace this with the desired Python image/version # You can find available Python images on Docker Hub: # https://hub.docker.com/_/python - image_name = "python:3.10" + image_name = "python:3-alpine" try: client.images.get(image_name) print(f"Image '{image_name}' found locally") From e7c3ff9b9edd07c18ecf3cff572694105de722b3 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Mon, 17 Apr 2023 17:47:58 +0200 Subject: [PATCH 078/152] fix(pr-label): set job permissions explicitly --- .github/workflows/pr-label.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/pr-label.yml b/.github/workflows/pr-label.yml index 9f5127e4..63696e42 100644 --- a/.github/workflows/pr-label.yml +++ b/.github/workflows/pr-label.yml @@ -11,6 +11,9 @@ on: jobs: conflicts: runs-on: ubuntu-latest + permissions: + contents: read + pull-requests: write steps: - name: Update PRs with conflict labels uses: eps1lon/actions-label-merge-conflict@releases/2.x From a2a6f84f139b683fd135df89ff370ad5f6a7b974 Mon Sep 17 00:00:00 2001 From: REal0day Date: Sun, 16 Apr 2023 15:14:54 -0500 Subject: [PATCH 079/152] internal resource request bug --- autogpt/commands/web_requests.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/autogpt/commands/web_requests.py b/autogpt/commands/web_requests.py index 50d8d383..70ada907 100644 --- a/autogpt/commands/web_requests.py +++ b/autogpt/commands/web_requests.py @@ -58,9 +58,28 @@ def check_local_file_access(url: str) -> bool: """ local_prefixes = [ "file:///", + "file://localhost/", "file://localhost", "http://localhost", + "http://localhost/", "https://localhost", + "https://localhost/", + "http://2130706433", + "http://2130706433/", + "https://2130706433", + "https://2130706433/", + "http://127.0.0.1/", + "http://127.0.0.1", + "https://127.0.0.1/", + "https://127.0.0.1", + "https://0.0.0.0/", + "https://0.0.0.0", + "http://0.0.0.0/", + "http://0.0.0.0", + "http://0000", + "http://0000/", + "https://0000", + "https://0000/" ] return any(url.startswith(prefix) for prefix in local_prefixes) From 23e703132653cc33a11dceee557c4f880059347e Mon Sep 17 00:00:00 2001 From: jimmycliff obonyo Date: Sun, 16 Apr 2023 00:37:50 +0300 Subject: [PATCH 080/152] install chrome/firefox for 
headless browing when running in docker container --- Dockerfile | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/Dockerfile b/Dockerfile index 9886d742..039ccf26 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,6 +5,16 @@ FROM python:3.11-slim RUN apt-get -y update RUN apt-get -y install git chromium-driver +# Install Xvfb and other dependencies for headless browser testing +RUN apt-get update \ + && apt-get install -y wget gnupg2 libgtk-3-0 libdbus-glib-1-2 dbus-x11 xvfb ca-certificates + +# Install Firefox / Chromium +RUN wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - \ + && echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list \ + && apt-get update \ + && apt-get install -y chromium firefox-esr + # Set environment variables ENV PIP_NO_CACHE_DIR=yes \ PYTHONUNBUFFERED=1 \ From 6b64158356a02d9bfd410913b157ccd31ce5ea03 Mon Sep 17 00:00:00 2001 From: Tom Kaitchuck Date: Sun, 16 Apr 2023 01:53:24 -0700 Subject: [PATCH 081/152] Unbound summary size Signed-off-by: Tom Kaitchuck --- .env.template | 2 -- autogpt/config/config.py | 5 ----- autogpt/processing/text.py | 2 -- 3 files changed, 9 deletions(-) diff --git a/.env.template b/.env.template index eeff2907..209a29b9 100644 --- a/.env.template +++ b/.env.template @@ -5,8 +5,6 @@ EXECUTE_LOCAL_COMMANDS=False # BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunk stored in memory BROWSE_CHUNK_MAX_LENGTH=8192 -# BROWSE_SUMMARY_MAX_TOKEN - Define the maximum length of the summary generated by GPT agent when browsing website -BROWSE_SUMMARY_MAX_TOKEN=300 # USER_AGENT - Define the user-agent used by the requests library to browse website (string) # USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36" # AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml) diff --git a/autogpt/config/config.py b/autogpt/config/config.py index fe6f4f32..a8b48b49 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -33,7 +33,6 @@ class Config(metaclass=Singleton): self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000)) self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000)) self.browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", 8192)) - self.browse_summary_max_token = int(os.getenv("BROWSE_SUMMARY_MAX_TOKEN", 300)) self.openai_api_key = os.getenv("OPENAI_API_KEY") self.temperature = float(os.getenv("TEMPERATURE", "1")) @@ -188,10 +187,6 @@ class Config(metaclass=Singleton): """Set the browse_website command chunk max length value.""" self.browse_chunk_max_length = value - def set_browse_summary_max_token(self, value: int) -> None: - """Set the browse_website command summary max token value.""" - self.browse_summary_max_token = value - def set_openai_api_key(self, value: str) -> None: """Set the OpenAI API key value.""" self.openai_api_key = value diff --git a/autogpt/processing/text.py b/autogpt/processing/text.py index d30036d8..657b0b0e 100644 --- a/autogpt/processing/text.py +++ b/autogpt/processing/text.py @@ -78,7 +78,6 @@ def summarize_text( summary = create_chat_completion( model=CFG.fast_llm_model, messages=messages, - max_tokens=CFG.browse_summary_max_token, ) summaries.append(summary) print(f"Added chunk {i + 1} summary to memory") @@ -95,7 +94,6 @@ def summarize_text( return create_chat_completion( model=CFG.fast_llm_model, messages=messages, - 
max_tokens=CFG.browse_summary_max_token, ) From def96ffe2f5b42ed41fc7fc1844965a0344cf9fc Mon Sep 17 00:00:00 2001 From: Steve Byerly Date: Mon, 17 Apr 2023 02:06:46 +0000 Subject: [PATCH 082/152] fix split file --- autogpt/commands/file_operations.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py index d273c1a3..00ae466d 100644 --- a/autogpt/commands/file_operations.py +++ b/autogpt/commands/file_operations.py @@ -49,14 +49,12 @@ def log_operation(operation: str, filename: str) -> None: append_to_file(LOG_FILE, log_entry, shouldLog = False) - def split_file( content: str, max_length: int = 4000, overlap: int = 0 ) -> Generator[str, None, None]: """ Split text into chunks of a specified maximum length with a specified overlap between chunks. - :param content: The input text to be split into chunks :param max_length: The maximum length of each chunk, default is 4000 (about 1k token) @@ -70,9 +68,14 @@ def split_file( while start < content_length: end = start + max_length if end + overlap < content_length: - chunk = content[start : end + overlap] + chunk = content[start : end + overlap - 1] else: chunk = content[start:content_length] + + # Account for the case where the last chunk is shorter than the overlap, so it has already been consumed + if len(chunk) <= overlap: + break + yield chunk start += max_length - overlap From bd670b4db379776f034c5d956379fa8f1a698425 Mon Sep 17 00:00:00 2001 From: Steve Byerly Date: Mon, 17 Apr 2023 02:24:14 +0000 Subject: [PATCH 083/152] whitespace --- autogpt/commands/file_operations.py | 1 + 1 file changed, 1 insertion(+) diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py index 00ae466d..073b13b0 100644 --- a/autogpt/commands/file_operations.py +++ b/autogpt/commands/file_operations.py @@ -49,6 +49,7 @@ def log_operation(operation: str, filename: str) -> None: append_to_file(LOG_FILE, log_entry, shouldLog = False) + def split_file( content: str, max_length: int = 4000, overlap: int = 0 ) -> Generator[str, None, None]: From 6ac9ce614acda4a0103962ef89b0d23c0a3d26aa Mon Sep 17 00:00:00 2001 From: Steve Byerly Date: Mon, 17 Apr 2023 02:29:51 +0000 Subject: [PATCH 084/152] whitespace --- autogpt/commands/file_operations.py | 1 + 1 file changed, 1 insertion(+) diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py index 073b13b0..3420bd84 100644 --- a/autogpt/commands/file_operations.py +++ b/autogpt/commands/file_operations.py @@ -56,6 +56,7 @@ def split_file( """ Split text into chunks of a specified maximum length with a specified overlap between chunks. + :param content: The input text to be split into chunks :param max_length: The maximum length of each chunk, default is 4000 (about 1k token) From 8637b8b61ba18f74e88bee822222b166f17e7773 Mon Sep 17 00:00:00 2001 From: Steve Byerly Date: Mon, 17 Apr 2023 02:30:24 +0000 Subject: [PATCH 085/152] whitespace --- autogpt/commands/file_operations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py index 3420bd84..9dcf8194 100644 --- a/autogpt/commands/file_operations.py +++ b/autogpt/commands/file_operations.py @@ -56,7 +56,7 @@ def split_file( """ Split text into chunks of a specified maximum length with a specified overlap between chunks. 
- + :param content: The input text to be split into chunks :param max_length: The maximum length of each chunk, default is 4000 (about 1k token) From f2baa0872beb13cf5dfb13f0ab05a64640510d3f Mon Sep 17 00:00:00 2001 From: jingxing Date: Mon, 17 Apr 2023 14:24:10 +0800 Subject: [PATCH 086/152] config.py format --- autogpt/config/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/config/config.py b/autogpt/config/config.py index a8b48b49..e3ccc6a1 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -66,7 +66,7 @@ class Config(metaclass=Singleton): self.pinecone_api_key = os.getenv("PINECONE_API_KEY") self.pinecone_region = os.getenv("PINECONE_ENV") - self.weaviate_host = os.getenv("WEAVIATE_HOST") + self.weaviate_host = os.getenv("WEAVIATE_HOST") self.weaviate_port = os.getenv("WEAVIATE_PORT") self.weaviate_protocol = os.getenv("WEAVIATE_PROTOCOL", "http") self.weaviate_username = os.getenv("WEAVIATE_USERNAME", None) From ef7b417105da16a8a2fc89eea0309a42fdd8d7b2 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Mon, 17 Apr 2023 18:11:34 +0200 Subject: [PATCH 087/152] fix(pr-label): mitigate excessive concurrent runs --- .github/workflows/pr-label.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/pr-label.yml b/.github/workflows/pr-label.yml index 63696e42..a9114131 100644 --- a/.github/workflows/pr-label.yml +++ b/.github/workflows/pr-label.yml @@ -7,6 +7,9 @@ on: # In `pull_request` we wouldn't be able to change labels of fork PRs pull_request_target: types: [opened, synchronize] +concurrency: + group: ${{ github.event_name == 'pull_request_target' && format('pr-label-{0}', github.event.pull_request.number) || '' }} + cancel-in-progress: ${{ github.event_name == 'pull_request_target' || '' }} jobs: conflicts: From 3b37c89d881e5f5a290158f4528261876f589026 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Mon, 17 Apr 2023 19:15:20 +0200 Subject: [PATCH 088/152] fix(pr-label): concurrency group cannot be empty --- .github/workflows/pr-label.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pr-label.yml b/.github/workflows/pr-label.yml index a9114131..92c5a66b 100644 --- a/.github/workflows/pr-label.yml +++ b/.github/workflows/pr-label.yml @@ -8,8 +8,8 @@ on: pull_request_target: types: [opened, synchronize] concurrency: - group: ${{ github.event_name == 'pull_request_target' && format('pr-label-{0}', github.event.pull_request.number) || '' }} - cancel-in-progress: ${{ github.event_name == 'pull_request_target' || '' }} + group: ${{ format('pr-label-{0}', github.event.pull_request.number || github.sha) }} + cancel-in-progress: true jobs: conflicts: From 9c062b44aaf061eebf41d33a778cf2485b1787d3 Mon Sep 17 00:00:00 2001 From: Tmpecho <82368148+Tmpecho@users.noreply.github.com> Date: Mon, 17 Apr 2023 20:46:47 +0200 Subject: [PATCH 089/152] Added return type hint to functions --- autogpt/commands/execute_code.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/autogpt/commands/execute_code.py b/autogpt/commands/execute_code.py index 70b33a97..70522374 100644 --- a/autogpt/commands/execute_code.py +++ b/autogpt/commands/execute_code.py @@ -8,7 +8,7 @@ from docker.errors import ImageNotFound from autogpt.workspace import path_in_workspace, WORKSPACE_PATH -def execute_python_file(file: str): +def execute_python_file(file: str) -> str: """Execute a Python file in a Docker container and return the output Args: @@ -114,7 +114,7 @@ def 
execute_shell(command_line: str) -> str: return output -def execute_shell_popen(command_line): +def execute_shell_popen(command_line) -> str: """Execute a shell command with Popen and returns an english description of the event and the process id From 2f4ef3ba6a04eac96db5e46bf4741f5c4bd4af17 Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Mon, 17 Apr 2023 12:49:56 -0700 Subject: [PATCH 090/152] Update pre-commit hooks with isort, black, and local pytest-check --- .flake8 | 10 +++++----- .pre-commit-config.yaml | 35 ++++++++++++++--------------------- 2 files changed, 19 insertions(+), 26 deletions(-) diff --git a/.flake8 b/.flake8 index c456b393..77976224 100644 --- a/.flake8 +++ b/.flake8 @@ -1,12 +1,12 @@ [flake8] max-line-length = 88 -extend-ignore = E203 +select = "E303, W293, W291, W292, E305, E231, E302" exclude = .tox, __pycache__, *.pyc, .env - venv/* - .venv/* - reports/* - dist/* \ No newline at end of file + venv*/*, + .venv/*, + reports/*, + dist/*, diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index dd1d0ec9..3722b25e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,39 +1,32 @@ repos: - - repo: https://github.com/sourcery-ai/sourcery - rev: v1.1.0 # Get the latest tag from https://github.com/sourcery-ai/sourcery/tags - hooks: - - id: sourcery - - repo: https://github.com/pre-commit/pre-commit-hooks rev: v0.9.2 hooks: - id: check-added-large-files - args: [ '--maxkb=500' ] + args: ['--maxkb=500'] - id: check-byte-order-marker - id: check-case-conflict - id: check-merge-conflict - id: check-symlinks - id: debug-statements - - - repo: local + + - repo: https://github.com/pycqa/isort + rev: 5.12.0 hooks: - id: isort - name: isort-local - entry: isort - language: python - types: [ python ] - exclude: .+/(dist|.venv|venv|build)/.+ - pass_filenames: true + language_version: python3.10 + + - repo: https://github.com/psf/black + rev: 23.3.0 + hooks: - id: black - name: black-local - entry: black - language: python - types: [ python ] - exclude: .+/(dist|.venv|venv|build)/.+ - pass_filenames: true + language_version: python3.10 + + - repo: local + hooks: - id: pytest-check name: pytest-check entry: pytest --cov=autogpt --without-integration --without-slow-integration language: system pass_filenames: false - always_run: true \ No newline at end of file + always_run: true From 254cd697488114905a804cd13a842eb9c4e56744 Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Mon, 17 Apr 2023 12:50:21 -0700 Subject: [PATCH 091/152] Update CI workflow to use flake8, black, and isort formatting checks --- .github/workflows/ci.yml | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 39f3aea9..0a9a9287 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -32,7 +32,15 @@ jobs: - name: Lint with flake8 continue-on-error: false - run: flake8 autogpt/ tests/ --select E303,W293,W291,W292,E305,E231,E302 + run: flake8 + + - name: Check black formatting + continue-on-error: false + run: black . --check + + - name: Check isort formatting + continue-on-error: false + run: isort . 
--check - name: Run unittest tests with coverage run: | From 3134beb983748efb22229acfe2f61ec81df2c934 Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Mon, 17 Apr 2023 12:51:12 -0700 Subject: [PATCH 092/152] Configure isort settings in pyproject.toml and remove tool.setuptools --- pyproject.toml | 31 ++++++++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index f420fcac..91f6df38 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,7 +8,32 @@ readme = "README.md" line-length = 88 target-version = ['py310'] include = '\.pyi?$' -extend-exclude = "" - -[tool.setuptools] packages = ["autogpt"] +extend-exclude = '.+/(dist|.venv|venv|build)/.+' + + +[tool.isort] +profile = "black" +multi_line_output = 3 +include_trailing_comma = true +force_grid_wrap = 0 +use_parentheses = true +ensure_newline_before_comments = true +line_length = 88 +sections = [ + "FUTURE", + "STDLIB", + "THIRDPARTY", + "FIRSTPARTY", + "LOCALFOLDER" +] +skip = ''' + .tox + __pycache__ + *.pyc + .env + venv*/* + .venv/* + reports/* + dist/* +''' From 9577468f0c5c104ef14514772df230407f342eac Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Mon, 17 Apr 2023 12:51:30 -0700 Subject: [PATCH 093/152] remove isort --- .isort.cfg | 10 ---------- 1 file changed, 10 deletions(-) delete mode 100644 .isort.cfg diff --git a/.isort.cfg b/.isort.cfg deleted file mode 100644 index 8ad53a86..00000000 --- a/.isort.cfg +++ /dev/null @@ -1,10 +0,0 @@ -[settings] -profile = black -multi_line_output = 3 -include_trailing_comma = True -force_grid_wrap = 0 -use_parentheses = True -ensure_newline_before_comments = True -line_length = 88 -skip = venv,env,node_modules,.env,.venv,dist -sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER \ No newline at end of file From cf9a94a8b673cb9d0ab0b28a4c59f5ec57823aee Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Mon, 17 Apr 2023 13:41:42 -0700 Subject: [PATCH 094/152] isort implemented --- autogpt/__main__.py | 3 + autogpt/agent/agent.py | 4 +- autogpt/agent/agent_manager.py | 4 +- autogpt/app.py | 35 ++++++----- autogpt/args.py | 26 +++++--- autogpt/commands/audio_text.py | 3 +- autogpt/commands/execute_code.py | 2 +- autogpt/commands/file_operations.py | 22 +++---- autogpt/commands/git_operations.py | 1 + autogpt/commands/image_gen.py | 1 + autogpt/commands/twitter.py | 3 +- autogpt/commands/web_playwright.py | 1 + autogpt/commands/web_requests.py | 6 +- autogpt/commands/web_selenium.py | 30 ++++----- autogpt/commands/write_tests.py | 1 + autogpt/config/__init__.py | 2 +- autogpt/config/ai_config.py | 1 + autogpt/config/config.py | 11 ++-- autogpt/json_fixes/auto_fix.py | 2 +- autogpt/json_fixes/bracket_termination.py | 1 + autogpt/json_fixes/master_json_fix_method.py | 13 ++-- autogpt/json_fixes/parsing.py | 2 + autogpt/json_validation/validate_json.py | 6 +- autogpt/llm_utils.py | 13 ++-- autogpt/logs.py | 46 +++++++------- autogpt/memory/milvus.py | 8 +-- autogpt/memory/pinecone.py | 2 +- autogpt/memory/redismem.py | 2 +- autogpt/memory/weaviate.py | 64 +++++++++++--------- autogpt/processing/html.py | 2 +- autogpt/processing/text.py | 6 +- autogpt/prompt.py | 13 ++-- autogpt/setup.py | 1 + autogpt/speech/brian.py | 1 + autogpt/speech/eleven_labs.py | 2 +- autogpt/speech/gtts.py | 3 +- autogpt/speech/say.py | 11 ++-- data_ingestion.py | 2 +- pyproject.toml | 1 + scripts/check_requirements.py | 3 +- tests.py | 1 + tests/browse_tests.py | 2 +- tests/integration/weaviate_memory_tests.py | 57 +++++++++-------- 
tests/test_token_counter.py | 1 + tests/unit/test_chat.py | 2 +- 45 files changed, 236 insertions(+), 187 deletions(-) diff --git a/autogpt/__main__.py b/autogpt/__main__.py index 5f462234..64ed398e 100644 --- a/autogpt/__main__.py +++ b/autogpt/__main__.py @@ -1,12 +1,15 @@ """Main script for the autogpt package.""" import logging + from colorama import Fore + from autogpt.agent.agent import Agent from autogpt.args import parse_arguments from autogpt.config import Config, check_openai_api_key from autogpt.logs import logger from autogpt.memory import get_memory from autogpt.prompt import construct_prompt + # Load environment variables from .env file diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index dca614c7..50e497f1 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -1,6 +1,6 @@ from colorama import Fore, Style -from autogpt.app import execute_command, get_command +from autogpt.app import execute_command, get_command from autogpt.chat import chat_with_ai, create_chat_message from autogpt.config import Config from autogpt.json_fixes.master_json_fix_method import fix_json_using_multiple_techniques @@ -84,7 +84,7 @@ class Agent: # Print Assistant thoughts if assistant_reply_json != {}: - validate_json(assistant_reply_json, 'llm_response_format_1') + validate_json(assistant_reply_json, "llm_response_format_1") # Get command name and arguments try: print_assistant_thoughts(self.ai_name, assistant_reply_json) diff --git a/autogpt/agent/agent_manager.py b/autogpt/agent/agent_manager.py index 79375ea8..898767a4 100644 --- a/autogpt/agent/agent_manager.py +++ b/autogpt/agent/agent_manager.py @@ -1,8 +1,10 @@ """Agent manager for managing GPT agents""" from __future__ import annotations + from typing import Union -from autogpt.llm_utils import create_chat_completion + from autogpt.config.config import Singleton +from autogpt.llm_utils import create_chat_completion class AgentManager(metaclass=Singleton): diff --git a/autogpt/app.py b/autogpt/app.py index 0927eccc..381f5a2a 100644 --- a/autogpt/app.py +++ b/autogpt/app.py @@ -1,15 +1,10 @@ """ Command and Control """ import json -from typing import List, NoReturn, Union, Dict +from typing import Dict, List, NoReturn, Union + from autogpt.agent.agent_manager import AgentManager -from autogpt.commands.evaluate_code import evaluate_code -from autogpt.commands.google_search import google_official_search, google_search -from autogpt.commands.improve_code import improve_code -from autogpt.commands.write_tests import write_tests -from autogpt.config import Config -from autogpt.commands.image_gen import generate_image from autogpt.commands.audio_text import read_audio_from_file -from autogpt.commands.web_requests import scrape_links, scrape_text +from autogpt.commands.evaluate_code import evaluate_code from autogpt.commands.execute_code import ( execute_python_file, execute_shell, @@ -18,19 +13,24 @@ from autogpt.commands.execute_code import ( from autogpt.commands.file_operations import ( append_to_file, delete_file, + download_file, read_file, search_files, write_to_file, - download_file ) +from autogpt.commands.git_operations import clone_repository +from autogpt.commands.google_search import google_official_search, google_search +from autogpt.commands.image_gen import generate_image +from autogpt.commands.improve_code import improve_code +from autogpt.commands.twitter import send_tweet +from autogpt.commands.web_requests import scrape_links, scrape_text +from autogpt.commands.web_selenium import browse_website +from 
autogpt.commands.write_tests import write_tests +from autogpt.config import Config from autogpt.json_fixes.parsing import fix_and_parse_json from autogpt.memory import get_memory from autogpt.processing.text import summarize_text from autogpt.speech import say_text -from autogpt.commands.web_selenium import browse_website -from autogpt.commands.git_operations import clone_repository -from autogpt.commands.twitter import send_tweet - CFG = Config() AGENT_MANAGER = AgentManager() @@ -133,11 +133,14 @@ def execute_command(command_name: str, arguments): # google_result can be a list or a string depending on the search results if isinstance(google_result, list): - safe_message = [google_result_single.encode('utf-8', 'ignore') for google_result_single in google_result] + safe_message = [ + google_result_single.encode("utf-8", "ignore") + for google_result_single in google_result + ] else: - safe_message = google_result.encode('utf-8', 'ignore') + safe_message = google_result.encode("utf-8", "ignore") - return safe_message.decode('utf-8') + return safe_message.decode("utf-8") elif command_name == "memory_add": memory = get_memory(CFG) return memory.add(arguments["string"]) diff --git a/autogpt/args.py b/autogpt/args.py index f0e9c07a..5ca4221c 100644 --- a/autogpt/args.py +++ b/autogpt/args.py @@ -1,7 +1,8 @@ """This module contains the argument parsing logic for the script.""" import argparse -from colorama import Fore, Back, Style +from colorama import Back, Fore, Style + from autogpt import utils from autogpt.config import Config from autogpt.logs import logger @@ -64,10 +65,10 @@ def parse_arguments() -> None: " skip the re-prompt.", ) parser.add_argument( - '--allow-downloads', - action='store_true', - dest='allow_downloads', - help='Dangerous: Allows Auto-GPT to download files natively.' 
+ "--allow-downloads", + action="store_true", + dest="allow_downloads", + help="Dangerous: Allows Auto-GPT to download files natively.", ) args = parser.parse_args() @@ -141,10 +142,17 @@ def parse_arguments() -> None: if args.allow_downloads: logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED") - logger.typewriter_log("WARNING: ", Fore.YELLOW, - f"{Back.LIGHTYELLOW_EX}Auto-GPT will now be able to download and save files to your machine.{Back.RESET} " + - "It is recommended that you monitor any files it downloads carefully.") - logger.typewriter_log("WARNING: ", Fore.YELLOW, f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}") + logger.typewriter_log( + "WARNING: ", + Fore.YELLOW, + f"{Back.LIGHTYELLOW_EX}Auto-GPT will now be able to download and save files to your machine.{Back.RESET} " + + "It is recommended that you monitor any files it downloads carefully.", + ) + logger.typewriter_log( + "WARNING: ", + Fore.YELLOW, + f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}", + ) CFG.allow_downloads = True if args.browser_name: diff --git a/autogpt/commands/audio_text.py b/autogpt/commands/audio_text.py index 84819d5e..cae32d4e 100644 --- a/autogpt/commands/audio_text.py +++ b/autogpt/commands/audio_text.py @@ -1,6 +1,7 @@ -import requests import json +import requests + from autogpt.config import Config from autogpt.workspace import path_in_workspace diff --git a/autogpt/commands/execute_code.py b/autogpt/commands/execute_code.py index 70522374..a524081e 100644 --- a/autogpt/commands/execute_code.py +++ b/autogpt/commands/execute_code.py @@ -5,7 +5,7 @@ import subprocess import docker from docker.errors import ImageNotFound -from autogpt.workspace import path_in_workspace, WORKSPACE_PATH +from autogpt.workspace import WORKSPACE_PATH, path_in_workspace def execute_python_file(file: str) -> str: diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py index 9dcf8194..72b02b5d 100644 --- a/autogpt/commands/file_operations.py +++ b/autogpt/commands/file_operations.py @@ -5,14 +5,14 @@ import os import os.path from pathlib import Path from typing import Generator, List + import requests -from requests.adapters import HTTPAdapter -from requests.adapters import Retry -from colorama import Fore, Back +from colorama import Back, Fore +from requests.adapters import HTTPAdapter, Retry + from autogpt.spinner import Spinner from autogpt.utils import readable_file_size -from autogpt.workspace import path_in_workspace, WORKSPACE_PATH - +from autogpt.workspace import WORKSPACE_PATH, path_in_workspace LOG_FILE = "file_logger.txt" LOG_FILE_PATH = WORKSPACE_PATH / LOG_FILE @@ -47,7 +47,7 @@ def log_operation(operation: str, filename: str) -> None: with open(LOG_FILE_PATH, "w", encoding="utf-8") as f: f.write("File Operation Logger ") - append_to_file(LOG_FILE, log_entry, shouldLog = False) + append_to_file(LOG_FILE, log_entry, shouldLog=False) def split_file( @@ -241,23 +241,23 @@ def download_file(url, filename): session = requests.Session() retry = Retry(total=3, backoff_factor=1, status_forcelist=[502, 503, 504]) adapter = HTTPAdapter(max_retries=retry) - session.mount('http://', adapter) - session.mount('https://', adapter) + session.mount("http://", adapter) + session.mount("https://", adapter) total_size = 0 downloaded_size = 0 with session.get(url, allow_redirects=True, stream=True) as r: r.raise_for_status() - total_size = int(r.headers.get('Content-Length', 0)) 
+ total_size = int(r.headers.get("Content-Length", 0)) downloaded_size = 0 - with open(safe_filename, 'wb') as f: + with open(safe_filename, "wb") as f: for chunk in r.iter_content(chunk_size=8192): f.write(chunk) downloaded_size += len(chunk) - # Update the progress message + # Update the progress message progress = f"{readable_file_size(downloaded_size)} / {readable_file_size(total_size)}" spinner.update_message(f"{message} {progress}") diff --git a/autogpt/commands/git_operations.py b/autogpt/commands/git_operations.py index 05ce2a21..028f3b8d 100644 --- a/autogpt/commands/git_operations.py +++ b/autogpt/commands/git_operations.py @@ -1,5 +1,6 @@ """Git operations for autogpt""" import git + from autogpt.config import Config from autogpt.workspace import path_in_workspace diff --git a/autogpt/commands/image_gen.py b/autogpt/commands/image_gen.py index 6243616e..4e8b47d6 100644 --- a/autogpt/commands/image_gen.py +++ b/autogpt/commands/image_gen.py @@ -7,6 +7,7 @@ from base64 import b64decode import openai import requests from PIL import Image + from autogpt.config import Config from autogpt.workspace import path_in_workspace diff --git a/autogpt/commands/twitter.py b/autogpt/commands/twitter.py index dc4d450c..3eaed36e 100644 --- a/autogpt/commands/twitter.py +++ b/autogpt/commands/twitter.py @@ -1,5 +1,6 @@ -import tweepy import os + +import tweepy from dotenv import load_dotenv load_dotenv() diff --git a/autogpt/commands/web_playwright.py b/autogpt/commands/web_playwright.py index a1abb6cb..4e388ded 100644 --- a/autogpt/commands/web_playwright.py +++ b/autogpt/commands/web_playwright.py @@ -8,6 +8,7 @@ except ImportError: "Playwright not installed. Please install it with 'pip install playwright' to use." ) from bs4 import BeautifulSoup + from autogpt.processing.html import extract_hyperlinks, format_hyperlinks diff --git a/autogpt/commands/web_requests.py b/autogpt/commands/web_requests.py index 70ada907..406338f4 100644 --- a/autogpt/commands/web_requests.py +++ b/autogpt/commands/web_requests.py @@ -4,9 +4,9 @@ from __future__ import annotations from urllib.parse import urljoin, urlparse import requests -from requests.compat import urljoin -from requests import Response from bs4 import BeautifulSoup +from requests import Response +from requests.compat import urljoin from autogpt.config import Config from autogpt.memory import get_memory @@ -79,7 +79,7 @@ def check_local_file_access(url: str) -> bool: "http://0000", "http://0000/", "https://0000", - "https://0000/" + "https://0000/", ] return any(url.startswith(prefix) for prefix in local_prefixes) diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py index c0b1b249..9db5d035 100644 --- a/autogpt/commands/web_selenium.py +++ b/autogpt/commands/web_selenium.py @@ -1,24 +1,26 @@ """Selenium web scraping module.""" from __future__ import annotations -from selenium import webdriver -from autogpt.processing.html import extract_hyperlinks, format_hyperlinks -import autogpt.processing.text as summary -from bs4 import BeautifulSoup -from selenium.webdriver.remote.webdriver import WebDriver -from selenium.webdriver.common.by import By -from selenium.webdriver.support.wait import WebDriverWait -from selenium.webdriver.support import expected_conditions as EC -from webdriver_manager.chrome import ChromeDriverManager -from webdriver_manager.firefox import GeckoDriverManager -from selenium.webdriver.chrome.options import Options as ChromeOptions -from selenium.webdriver.firefox.options import Options as FirefoxOptions 
-from selenium.webdriver.safari.options import Options as SafariOptions import logging from pathlib import Path -from autogpt.config import Config from sys import platform +from bs4 import BeautifulSoup +from selenium import webdriver +from selenium.webdriver.chrome.options import Options as ChromeOptions +from selenium.webdriver.common.by import By +from selenium.webdriver.firefox.options import Options as FirefoxOptions +from selenium.webdriver.remote.webdriver import WebDriver +from selenium.webdriver.safari.options import Options as SafariOptions +from selenium.webdriver.support import expected_conditions as EC +from selenium.webdriver.support.wait import WebDriverWait +from webdriver_manager.chrome import ChromeDriverManager +from webdriver_manager.firefox import GeckoDriverManager + +import autogpt.processing.text as summary +from autogpt.config import Config +from autogpt.processing.html import extract_hyperlinks, format_hyperlinks + FILE_DIR = Path(__file__).parent.parent CFG = Config() diff --git a/autogpt/commands/write_tests.py b/autogpt/commands/write_tests.py index 138a1adb..35a08653 100644 --- a/autogpt/commands/write_tests.py +++ b/autogpt/commands/write_tests.py @@ -2,6 +2,7 @@ from __future__ import annotations import json + from autogpt.llm_utils import call_ai_function diff --git a/autogpt/config/__init__.py b/autogpt/config/__init__.py index ceb5566c..726b6dcf 100644 --- a/autogpt/config/__init__.py +++ b/autogpt/config/__init__.py @@ -2,7 +2,7 @@ This module contains the configuration classes for AutoGPT. """ from autogpt.config.ai_config import AIConfig -from autogpt.config.config import check_openai_api_key, Config +from autogpt.config.config import Config, check_openai_api_key from autogpt.config.singleton import AbstractSingleton, Singleton __all__ = [ diff --git a/autogpt/config/ai_config.py b/autogpt/config/ai_config.py index 86171357..d50c30be 100644 --- a/autogpt/config/ai_config.py +++ b/autogpt/config/ai_config.py @@ -6,6 +6,7 @@ from __future__ import annotations import os from typing import Type + import yaml diff --git a/autogpt/config/config.py b/autogpt/config/config.py index 7d470cba..bc75b031 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -1,14 +1,13 @@ """Configuration class to store the state of bools for different scripts access.""" import os -from colorama import Fore - -from autogpt.config.singleton import Singleton import openai import yaml - +from colorama import Fore from dotenv import load_dotenv +from autogpt.config.singleton import Singleton + load_dotenv(verbose=True) @@ -74,7 +73,9 @@ class Config(metaclass=Singleton): self.weaviate_scopes = os.getenv("WEAVIATE_SCOPES", None) self.weaviate_embedded_path = os.getenv("WEAVIATE_EMBEDDED_PATH") self.weaviate_api_key = os.getenv("WEAVIATE_API_KEY", None) - self.use_weaviate_embedded = os.getenv("USE_WEAVIATE_EMBEDDED", "False") == "True" + self.use_weaviate_embedded = ( + os.getenv("USE_WEAVIATE_EMBEDDED", "False") == "True" + ) # milvus configuration, e.g., localhost:19530. 
self.milvus_addr = os.getenv("MILVUS_ADDR", "localhost:19530") diff --git a/autogpt/json_fixes/auto_fix.py b/autogpt/json_fixes/auto_fix.py index 0d3bd73c..7eb1e4bd 100644 --- a/autogpt/json_fixes/auto_fix.py +++ b/autogpt/json_fixes/auto_fix.py @@ -1,9 +1,9 @@ """This module contains the function to fix JSON strings using GPT-3.""" import json +from autogpt.config import Config from autogpt.llm_utils import call_ai_function from autogpt.logs import logger -from autogpt.config import Config CFG = Config() diff --git a/autogpt/json_fixes/bracket_termination.py b/autogpt/json_fixes/bracket_termination.py index dd9a8376..6c6c58ee 100644 --- a/autogpt/json_fixes/bracket_termination.py +++ b/autogpt/json_fixes/bracket_termination.py @@ -4,6 +4,7 @@ from __future__ import annotations import contextlib import json from typing import Optional + from autogpt.config import Config CFG = Config() diff --git a/autogpt/json_fixes/master_json_fix_method.py b/autogpt/json_fixes/master_json_fix_method.py index 7a2cf3cc..a77bf670 100644 --- a/autogpt/json_fixes/master_json_fix_method.py +++ b/autogpt/json_fixes/master_json_fix_method.py @@ -3,13 +3,15 @@ from typing import Any, Dict from autogpt.config import Config from autogpt.logs import logger from autogpt.speech import say_text + CFG = Config() def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]: - from autogpt.json_fixes.parsing import attempt_to_fix_json_by_finding_outermost_brackets - - from autogpt.json_fixes.parsing import fix_and_parse_json + from autogpt.json_fixes.parsing import ( + attempt_to_fix_json_by_finding_outermost_brackets, + fix_and_parse_json, + ) # Parse and print Assistant response assistant_reply_json = fix_and_parse_json(assistant_reply) @@ -21,7 +23,10 @@ def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]: if assistant_reply_json != {}: return assistant_reply_json - logger.error("Error: The following AI output couldn't be converted to a JSON:\n", assistant_reply) + logger.error( + "Error: The following AI output couldn't be converted to a JSON:\n", + assistant_reply, + ) if CFG.speak_mode: say_text("I have received an invalid JSON response from the OpenAI API.") diff --git a/autogpt/json_fixes/parsing.py b/autogpt/json_fixes/parsing.py index 1e391eed..e02f78cd 100644 --- a/autogpt/json_fixes/parsing.py +++ b/autogpt/json_fixes/parsing.py @@ -4,8 +4,10 @@ from __future__ import annotations import contextlib import json from typing import Any, Dict, Union + from colorama import Fore from regex import regex + from autogpt.config import Config from autogpt.json_fixes.auto_fix import fix_json from autogpt.json_fixes.bracket_termination import balance_braces diff --git a/autogpt/json_validation/validate_json.py b/autogpt/json_validation/validate_json.py index 440c3b0b..ea74ec95 100644 --- a/autogpt/json_validation/validate_json.py +++ b/autogpt/json_validation/validate_json.py @@ -1,5 +1,7 @@ import json + from jsonschema import Draft7Validator + from autogpt.config import Config from autogpt.logs import logger @@ -19,7 +21,9 @@ def validate_json(json_object: object, schema_name: object) -> object: if errors := sorted(validator.iter_errors(json_object), key=lambda e: e.path): logger.error("The JSON object is invalid.") if CFG.debug_mode: - logger.error(json.dumps(json_object, indent=4)) # Replace 'json_object' with the variable containing the JSON data + logger.error( + json.dumps(json_object, indent=4) + ) # Replace 'json_object' with the variable containing the JSON data 
logger.error("The following issues were found:") for error in errors: diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py index 791fd31a..821820ff 100644 --- a/autogpt/llm_utils.py +++ b/autogpt/llm_utils.py @@ -1,11 +1,11 @@ from __future__ import annotations -from ast import List import time +from ast import List import openai -from openai.error import APIError, RateLimitError from colorama import Fore, Style +from openai.error import APIError, RateLimitError from autogpt.config import Config from autogpt.logs import logger @@ -105,8 +105,9 @@ def create_chat_completion( ) if not warned_user: logger.double_check( - f"Please double check that you have setup a {Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. " + - f"You can read more here: {Fore.CYAN}https://github.com/Significant-Gravitas/Auto-GPT#openai-api-keys-configuration{Fore.RESET}") + f"Please double check that you have setup a {Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. " + + f"You can read more here: {Fore.CYAN}https://github.com/Significant-Gravitas/Auto-GPT#openai-api-keys-configuration{Fore.RESET}" + ) warned_user = True except APIError as e: if e.http_status == 502: @@ -125,8 +126,8 @@ def create_chat_completion( logger.typewriter_log( "FAILED TO GET RESPONSE FROM OPENAI", Fore.RED, - "Auto-GPT has failed to get a response from OpenAI's services. " + - f"Try running Auto-GPT again, and if the problem the persists try running it with `{Fore.CYAN}--debug{Fore.RESET}`." + "Auto-GPT has failed to get a response from OpenAI's services. " + + f"Try running Auto-GPT again, and if the problem the persists try running it with `{Fore.CYAN}--debug{Fore.RESET}`.", ) logger.double_check() if CFG.debug_mode: diff --git a/autogpt/logs.py b/autogpt/logs.py index c1e436db..df3487f2 100644 --- a/autogpt/logs.py +++ b/autogpt/logs.py @@ -5,13 +5,13 @@ import os import random import re import time -from logging import LogRecord import traceback +from logging import LogRecord from colorama import Fore, Style -from autogpt.speech import say_text from autogpt.config import Config, Singleton +from autogpt.speech import say_text CFG = Config() @@ -47,7 +47,7 @@ class Logger(metaclass=Singleton): # Info handler in activity.log self.file_handler = logging.FileHandler( - os.path.join(log_dir, log_file), 'a', 'utf-8' + os.path.join(log_dir, log_file), "a", "utf-8" ) self.file_handler.setLevel(logging.DEBUG) info_formatter = AutoGptFormatter( @@ -57,7 +57,7 @@ class Logger(metaclass=Singleton): # Error handler error.log error_handler = logging.FileHandler( - os.path.join(log_dir, error_file), 'a', 'utf-8' + os.path.join(log_dir, error_file), "a", "utf-8" ) error_handler.setLevel(logging.ERROR) error_formatter = AutoGptFormatter( @@ -79,7 +79,7 @@ class Logger(metaclass=Singleton): self.logger.setLevel(logging.DEBUG) def typewriter_log( - self, title="", title_color="", content="", speak_text=False, level=logging.INFO + self, title="", title_color="", content="", speak_text=False, level=logging.INFO ): if speak_text and CFG.speak_mode: say_text(f"{title}. 
{content}") @@ -95,18 +95,18 @@ class Logger(metaclass=Singleton): ) def debug( - self, - message, - title="", - title_color="", + self, + message, + title="", + title_color="", ): self._log(title, title_color, message, logging.DEBUG) def warn( - self, - message, - title="", - title_color="", + self, + message, + title="", + title_color="", ): self._log(title, title_color, message, logging.WARN) @@ -180,10 +180,10 @@ class AutoGptFormatter(logging.Formatter): def format(self, record: LogRecord) -> str: if hasattr(record, "color"): record.title_color = ( - getattr(record, "color") - + getattr(record, "title") - + " " - + Style.RESET_ALL + getattr(record, "color") + + getattr(record, "title") + + " " + + Style.RESET_ALL ) else: record.title_color = getattr(record, "title") @@ -294,7 +294,9 @@ def print_assistant_thoughts(ai_name, assistant_reply): logger.error("Error: \n", call_stack) -def print_assistant_thoughts(ai_name: object, assistant_reply_json_valid: object) -> None: +def print_assistant_thoughts( + ai_name: object, assistant_reply_json_valid: object +) -> None: assistant_thoughts_reasoning = None assistant_thoughts_plan = None assistant_thoughts_speak = None @@ -310,9 +312,7 @@ def print_assistant_thoughts(ai_name: object, assistant_reply_json_valid: object logger.typewriter_log( f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}" ) - logger.typewriter_log( - "REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}" - ) + logger.typewriter_log("REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}") if assistant_thoughts_plan: logger.typewriter_log("PLAN:", Fore.YELLOW, "") # If it's a list, join it into a string @@ -326,9 +326,7 @@ def print_assistant_thoughts(ai_name: object, assistant_reply_json_valid: object for line in lines: line = line.lstrip("- ") logger.typewriter_log("- ", Fore.GREEN, line.strip()) - logger.typewriter_log( - "CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}" - ) + logger.typewriter_log("CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}") # Speak the assistant's thoughts if CFG.speak_mode and assistant_thoughts_speak: say_text(assistant_thoughts_speak) diff --git a/autogpt/memory/milvus.py b/autogpt/memory/milvus.py index 7a2571d0..44aa72b9 100644 --- a/autogpt/memory/milvus.py +++ b/autogpt/memory/milvus.py @@ -1,11 +1,5 @@ """ Milvus memory storage provider.""" -from pymilvus import ( - connections, - FieldSchema, - CollectionSchema, - DataType, - Collection, -) +from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, connections from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding diff --git a/autogpt/memory/pinecone.py b/autogpt/memory/pinecone.py index d781073e..27fcd624 100644 --- a/autogpt/memory/pinecone.py +++ b/autogpt/memory/pinecone.py @@ -1,9 +1,9 @@ import pinecone from colorama import Fore, Style +from autogpt.llm_utils import create_embedding_with_ada from autogpt.logs import logger from autogpt.memory.base import MemoryProviderSingleton -from autogpt.llm_utils import create_embedding_with_ada class PineconeMemory(MemoryProviderSingleton): diff --git a/autogpt/memory/redismem.py b/autogpt/memory/redismem.py index 0e8dd71d..082a812c 100644 --- a/autogpt/memory/redismem.py +++ b/autogpt/memory/redismem.py @@ -10,9 +10,9 @@ from redis.commands.search.field import TextField, VectorField from redis.commands.search.indexDefinition import IndexDefinition, IndexType from redis.commands.search.query import Query +from autogpt.llm_utils import 
create_embedding_with_ada from autogpt.logs import logger from autogpt.memory.base import MemoryProviderSingleton -from autogpt.llm_utils import create_embedding_with_ada SCHEMA = [ TextField("data"), diff --git a/autogpt/memory/weaviate.py b/autogpt/memory/weaviate.py index 35e7844a..5408e9a9 100644 --- a/autogpt/memory/weaviate.py +++ b/autogpt/memory/weaviate.py @@ -1,11 +1,13 @@ -from autogpt.config import Config -from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding import uuid + import weaviate from weaviate import Client from weaviate.embedded import EmbeddedOptions from weaviate.util import generate_uuid5 +from autogpt.config import Config +from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding + def default_schema(weaviate_index): return { @@ -14,7 +16,7 @@ def default_schema(weaviate_index): { "name": "raw_text", "dataType": ["text"], - "description": "original text for the embedding" + "description": "original text for the embedding", } ], } @@ -24,16 +26,20 @@ class WeaviateMemory(MemoryProviderSingleton): def __init__(self, cfg): auth_credentials = self._build_auth_credentials(cfg) - url = f'{cfg.weaviate_protocol}://{cfg.weaviate_host}:{cfg.weaviate_port}' + url = f"{cfg.weaviate_protocol}://{cfg.weaviate_host}:{cfg.weaviate_port}" if cfg.use_weaviate_embedded: - self.client = Client(embedded_options=EmbeddedOptions( - hostname=cfg.weaviate_host, - port=int(cfg.weaviate_port), - persistence_data_path=cfg.weaviate_embedded_path - )) + self.client = Client( + embedded_options=EmbeddedOptions( + hostname=cfg.weaviate_host, + port=int(cfg.weaviate_port), + persistence_data_path=cfg.weaviate_embedded_path, + ) + ) - print(f"Weaviate Embedded running on: {url} with persistence path: {cfg.weaviate_embedded_path}") + print( + f"Weaviate Embedded running on: {url} with persistence path: {cfg.weaviate_embedded_path}" + ) else: self.client = Client(url, auth_client_secret=auth_credentials) @@ -56,7 +62,9 @@ class WeaviateMemory(MemoryProviderSingleton): def _build_auth_credentials(self, cfg): if cfg.weaviate_username and cfg.weaviate_password: - return weaviate.AuthClientPassword(cfg.weaviate_username, cfg.weaviate_password) + return weaviate.AuthClientPassword( + cfg.weaviate_username, cfg.weaviate_password + ) if cfg.weaviate_api_key: return weaviate.AuthApiKey(api_key=cfg.weaviate_api_key) else: @@ -66,16 +74,14 @@ class WeaviateMemory(MemoryProviderSingleton): vector = get_ada_embedding(data) doc_uuid = generate_uuid5(data, self.index) - data_object = { - 'raw_text': data - } + data_object = {"raw_text": data} with self.client.batch as batch: batch.add_data_object( uuid=doc_uuid, data_object=data_object, class_name=self.index, - vector=vector + vector=vector, ) return f"Inserting data into memory at uuid: {doc_uuid}:\n data: {data}" @@ -91,29 +97,31 @@ class WeaviateMemory(MemoryProviderSingleton): # after a call to delete_all self._create_schema() - return 'Obliterated' + return "Obliterated" def get_relevant(self, data, num_relevant=5): query_embedding = get_ada_embedding(data) try: - results = self.client.query.get(self.index, ['raw_text']) \ - .with_near_vector({'vector': query_embedding, 'certainty': 0.7}) \ - .with_limit(num_relevant) \ - .do() + results = ( + self.client.query.get(self.index, ["raw_text"]) + .with_near_vector({"vector": query_embedding, "certainty": 0.7}) + .with_limit(num_relevant) + .do() + ) - if len(results['data']['Get'][self.index]) > 0: - return [str(item['raw_text']) for item in 
results['data']['Get'][self.index]] + if len(results["data"]["Get"][self.index]) > 0: + return [ + str(item["raw_text"]) for item in results["data"]["Get"][self.index] + ] else: return [] except Exception as err: - print(f'Unexpected error {err=}, {type(err)=}') + print(f"Unexpected error {err=}, {type(err)=}") return [] def get_stats(self): - result = self.client.query.aggregate(self.index) \ - .with_meta_count() \ - .do() - class_data = result['data']['Aggregate'][self.index] + result = self.client.query.aggregate(self.index).with_meta_count().do() + class_data = result["data"]["Aggregate"][self.index] - return class_data[0]['meta'] if class_data else {} + return class_data[0]["meta"] if class_data else {} diff --git a/autogpt/processing/html.py b/autogpt/processing/html.py index e1912b6a..81387b12 100644 --- a/autogpt/processing/html.py +++ b/autogpt/processing/html.py @@ -1,8 +1,8 @@ """HTML processing functions""" from __future__ import annotations -from requests.compat import urljoin from bs4 import BeautifulSoup +from requests.compat import urljoin def extract_hyperlinks(soup: BeautifulSoup, base_url: str) -> list[tuple[str, str]]: diff --git a/autogpt/processing/text.py b/autogpt/processing/text.py index 657b0b0e..52add814 100644 --- a/autogpt/processing/text.py +++ b/autogpt/processing/text.py @@ -1,9 +1,11 @@ """Text processing functions""" -from typing import Generator, Optional, Dict +from typing import Dict, Generator, Optional + from selenium.webdriver.remote.webdriver import WebDriver -from autogpt.memory import get_memory + from autogpt.config import Config from autogpt.llm_utils import create_chat_completion +from autogpt.memory import get_memory CFG = Config() MEMORY = get_memory(CFG) diff --git a/autogpt/prompt.py b/autogpt/prompt.py index 33098af0..a0456305 100644 --- a/autogpt/prompt.py +++ b/autogpt/prompt.py @@ -1,9 +1,10 @@ from colorama import Fore + +from autogpt.config import Config from autogpt.config.ai_config import AIConfig from autogpt.config.config import Config from autogpt.logs import logger from autogpt.promptgenerator import PromptGenerator -from autogpt.config import Config from autogpt.setup import prompt_user from autogpt.utils import clean_input @@ -92,11 +93,7 @@ def get_prompt() -> str: # Only add the audio to text command if the model is specified if cfg.huggingface_audio_to_text_model: commands.append( - ( - "Convert Audio to text", - "read_audio_from_file", - {"file": ""} - ), + ("Convert Audio to text", "read_audio_from_file", {"file": ""}), ) # Only add shell command to the prompt if the AI is allowed to execute it @@ -112,7 +109,7 @@ def get_prompt() -> str: ( "Execute Shell Command Popen, non-interactive commands only", "execute_shell_popen", - {"command_line": ""} + {"command_line": ""}, ), ) @@ -122,7 +119,7 @@ def get_prompt() -> str: ( "Downloads a file from the internet, and stores it locally", "download_file", - {"url": "", "file": ""} + {"url": "", "file": ""}, ), ) diff --git a/autogpt/setup.py b/autogpt/setup.py index 79661905..1c467717 100644 --- a/autogpt/setup.py +++ b/autogpt/setup.py @@ -1,5 +1,6 @@ """Set up the AI and its goals""" from colorama import Fore, Style + from autogpt import utils from autogpt.config.ai_config import AIConfig from autogpt.logs import logger diff --git a/autogpt/speech/brian.py b/autogpt/speech/brian.py index b9298f55..821fdf2f 100644 --- a/autogpt/speech/brian.py +++ b/autogpt/speech/brian.py @@ -1,5 +1,6 @@ """ Brian speech module for autogpt """ import os + import requests from playsound import 
playsound diff --git a/autogpt/speech/eleven_labs.py b/autogpt/speech/eleven_labs.py index 186ec6fc..ea84efd8 100644 --- a/autogpt/speech/eleven_labs.py +++ b/autogpt/speech/eleven_labs.py @@ -1,8 +1,8 @@ """ElevenLabs speech module""" import os -from playsound import playsound import requests +from playsound import playsound from autogpt.config import Config from autogpt.speech.base import VoiceBase diff --git a/autogpt/speech/gtts.py b/autogpt/speech/gtts.py index 37497075..1c3e9cae 100644 --- a/autogpt/speech/gtts.py +++ b/autogpt/speech/gtts.py @@ -1,7 +1,8 @@ """ GTTS Voice. """ import os -from playsound import playsound + import gtts +from playsound import playsound from autogpt.speech.base import VoiceBase diff --git a/autogpt/speech/say.py b/autogpt/speech/say.py index 78b75b21..727983d1 100644 --- a/autogpt/speech/say.py +++ b/autogpt/speech/say.py @@ -1,13 +1,12 @@ """ Text to speech module """ -from autogpt.config import Config - import threading from threading import Semaphore -from autogpt.speech.brian import BrianSpeech -from autogpt.speech.macos_tts import MacOSTTS -from autogpt.speech.gtts import GTTSVoice -from autogpt.speech.eleven_labs import ElevenLabsSpeech +from autogpt.config import Config +from autogpt.speech.brian import BrianSpeech +from autogpt.speech.eleven_labs import ElevenLabsSpeech +from autogpt.speech.gtts import GTTSVoice +from autogpt.speech.macos_tts import MacOSTTS CFG = Config() DEFAULT_VOICE_ENGINE = GTTSVoice() diff --git a/data_ingestion.py b/data_ingestion.py index 01bafc2a..b89a33da 100644 --- a/data_ingestion.py +++ b/data_ingestion.py @@ -1,8 +1,8 @@ import argparse import logging -from autogpt.config import Config from autogpt.commands.file_operations import ingest_file, search_files +from autogpt.config import Config from autogpt.memory import get_memory cfg = Config() diff --git a/pyproject.toml b/pyproject.toml index 91f6df38..fdb43d66 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,4 +36,5 @@ skip = ''' .venv/* reports/* dist/* + ''' diff --git a/scripts/check_requirements.py b/scripts/check_requirements.py index d1f23504..e4eab024 100644 --- a/scripts/check_requirements.py +++ b/scripts/check_requirements.py @@ -1,6 +1,7 @@ -import pkg_resources import sys +import pkg_resources + def main(): requirements_file = sys.argv[1] diff --git a/tests.py b/tests.py index 67ba1c8e..62f76da8 100644 --- a/tests.py +++ b/tests.py @@ -1,4 +1,5 @@ import unittest + import coverage if __name__ == "__main__": diff --git a/tests/browse_tests.py b/tests/browse_tests.py index 1ac523ec..f896e7dd 100644 --- a/tests/browse_tests.py +++ b/tests/browse_tests.py @@ -1,6 +1,6 @@ -import unittest import os import sys +import unittest from bs4 import BeautifulSoup diff --git a/tests/integration/weaviate_memory_tests.py b/tests/integration/weaviate_memory_tests.py index 4acea0ff..015eab05 100644 --- a/tests/integration/weaviate_memory_tests.py +++ b/tests/integration/weaviate_memory_tests.py @@ -1,15 +1,15 @@ +import os +import sys import unittest from unittest import mock -import sys -import os +from uuid import uuid4 from weaviate import Client from weaviate.util import get_valid_uuid -from uuid import uuid4 from autogpt.config import Config -from autogpt.memory.weaviate import WeaviateMemory from autogpt.memory.base import get_ada_embedding +from autogpt.memory.weaviate import WeaviateMemory class TestWeaviateMemory(unittest.TestCase): @@ -25,13 +25,17 @@ class TestWeaviateMemory(unittest.TestCase): if cls.cfg.use_weaviate_embedded: from weaviate.embedded 
import EmbeddedOptions - cls.client = Client(embedded_options=EmbeddedOptions( - hostname=cls.cfg.weaviate_host, - port=int(cls.cfg.weaviate_port), - persistence_data_path=cls.cfg.weaviate_embedded_path - )) + cls.client = Client( + embedded_options=EmbeddedOptions( + hostname=cls.cfg.weaviate_host, + port=int(cls.cfg.weaviate_port), + persistence_data_path=cls.cfg.weaviate_embedded_path, + ) + ) else: - cls.client = Client(f"{cls.cfg.weaviate_protocol}://{cls.cfg.weaviate_host}:{self.cfg.weaviate_port}") + cls.client = Client( + f"{cls.cfg.weaviate_protocol}://{cls.cfg.weaviate_host}:{self.cfg.weaviate_port}" + ) cls.index = WeaviateMemory.format_classname(cls.cfg.memory_index) @@ -44,6 +48,7 @@ class TestWeaviateMemory(unittest.TestCase): USE_WEAVIATE_EMBEDDED=True WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate" """ + def setUp(self): try: self.client.schema.delete_class(self.index) @@ -53,23 +58,23 @@ class TestWeaviateMemory(unittest.TestCase): self.memory = WeaviateMemory(self.cfg) def test_add(self): - doc = 'You are a Titan name Thanos and you are looking for the Infinity Stones' + doc = "You are a Titan name Thanos and you are looking for the Infinity Stones" self.memory.add(doc) - result = self.client.query.get(self.index, ['raw_text']).do() - actual = result['data']['Get'][self.index] + result = self.client.query.get(self.index, ["raw_text"]).do() + actual = result["data"]["Get"][self.index] self.assertEqual(len(actual), 1) - self.assertEqual(actual[0]['raw_text'], doc) + self.assertEqual(actual[0]["raw_text"], doc) def test_get(self): - doc = 'You are an Avenger and swore to defend the Galaxy from a menace called Thanos' + doc = "You are an Avenger and swore to defend the Galaxy from a menace called Thanos" with self.client.batch as batch: batch.add_data_object( uuid=get_valid_uuid(uuid4()), - data_object={'raw_text': doc}, + data_object={"raw_text": doc}, class_name=self.index, - vector=get_ada_embedding(doc) + vector=get_ada_embedding(doc), ) batch.flush() @@ -81,8 +86,8 @@ class TestWeaviateMemory(unittest.TestCase): def test_get_stats(self): docs = [ - 'You are now about to count the number of docs in this index', - 'And then you about to find out if you can count correctly' + "You are now about to count the number of docs in this index", + "And then you about to find out if you can count correctly", ] [self.memory.add(doc) for doc in docs] @@ -90,23 +95,23 @@ class TestWeaviateMemory(unittest.TestCase): stats = self.memory.get_stats() self.assertTrue(stats) - self.assertTrue('count' in stats) - self.assertEqual(stats['count'], 2) + self.assertTrue("count" in stats) + self.assertEqual(stats["count"], 2) def test_clear(self): docs = [ - 'Shame this is the last test for this class', - 'Testing is fun when someone else is doing it' + "Shame this is the last test for this class", + "Testing is fun when someone else is doing it", ] [self.memory.add(doc) for doc in docs] - self.assertEqual(self.memory.get_stats()['count'], 2) + self.assertEqual(self.memory.get_stats()["count"], 2) self.memory.clear() - self.assertEqual(self.memory.get_stats()['count'], 0) + self.assertEqual(self.memory.get_stats()["count"], 0) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_token_counter.py b/tests/test_token_counter.py index 81e68277..6d7ae016 100644 --- a/tests/test_token_counter.py +++ b/tests/test_token_counter.py @@ -1,4 +1,5 @@ import unittest + import tests.context from autogpt.token_counter import count_message_tokens, 
count_string_tokens diff --git a/tests/unit/test_chat.py b/tests/unit/test_chat.py index 55a44492..774f4103 100644 --- a/tests/unit/test_chat.py +++ b/tests/unit/test_chat.py @@ -1,6 +1,6 @@ # Generated by CodiumAI -import unittest import time +import unittest from unittest.mock import patch from autogpt.chat import create_chat_message, generate_context From da65bc3f68b4ce3bce093ad36318da8f1d0c5953 Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Mon, 17 Apr 2023 13:47:38 -0700 Subject: [PATCH 095/152] black --- autogpt/memory/__init__.py | 8 +++-- autogpt/memory/no_memory.py | 2 +- autogpt/spinner.py | 4 ++- autogpt/utils.py | 2 +- autogpt/workspace.py | 4 ++- ...ark_entrepeneur_gpt_with_difficult_user.py | 33 +++++++++++-------- 6 files changed, 33 insertions(+), 20 deletions(-) diff --git a/autogpt/memory/__init__.py b/autogpt/memory/__init__.py index f5afb8c9..3d18704c 100644 --- a/autogpt/memory/__init__.py +++ b/autogpt/memory/__init__.py @@ -60,8 +60,10 @@ def get_memory(cfg, init=False): memory = RedisMemory(cfg) elif cfg.memory_backend == "weaviate": if not WeaviateMemory: - print("Error: Weaviate is not installed. Please install weaviate-client to" - " use Weaviate as a memory backend.") + print( + "Error: Weaviate is not installed. Please install weaviate-client to" + " use Weaviate as a memory backend." + ) else: memory = WeaviateMemory(cfg) elif cfg.memory_backend == "milvus": @@ -93,5 +95,5 @@ __all__ = [ "PineconeMemory", "NoMemory", "MilvusMemory", - "WeaviateMemory" + "WeaviateMemory", ] diff --git a/autogpt/memory/no_memory.py b/autogpt/memory/no_memory.py index 4035a657..0371e96a 100644 --- a/autogpt/memory/no_memory.py +++ b/autogpt/memory/no_memory.py @@ -53,7 +53,7 @@ class NoMemory(MemoryProviderSingleton): """ return "" - def get_relevant(self, data: str, num_relevant: int = 5) ->list[Any] | None: + def get_relevant(self, data: str, num_relevant: int = 5) -> list[Any] | None: """ Returns all the data in the memory that is relevant to the given data. NoMemory always returns None. diff --git a/autogpt/spinner.py b/autogpt/spinner.py index febcea8e..4e33d742 100644 --- a/autogpt/spinner.py +++ b/autogpt/spinner.py @@ -58,6 +58,8 @@ class Spinner: delay: Delay in seconds before updating the message """ time.sleep(delay) - sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r") # Clear the current message + sys.stdout.write( + f"\r{' ' * (len(self.message) + 2)}\r" + ) # Clear the current message sys.stdout.flush() self.message = new_message diff --git a/autogpt/utils.py b/autogpt/utils.py index 11d98d1b..db7d3321 100644 --- a/autogpt/utils.py +++ b/autogpt/utils.py @@ -32,7 +32,7 @@ def readable_file_size(size, decimal_places=2): size: Size in bytes decimal_places (int): Number of decimal places to display """ - for unit in ['B', 'KB', 'MB', 'GB', 'TB']: + for unit in ["B", "KB", "MB", "GB", "TB"]: if size < 1024.0: break size /= 1024.0 diff --git a/autogpt/workspace.py b/autogpt/workspace.py index 2706b3b2..964a94d1 100644 --- a/autogpt/workspace.py +++ b/autogpt/workspace.py @@ -36,6 +36,8 @@ def safe_path_join(base: Path, *paths: str | Path) -> Path: joined_path = base.joinpath(*paths).resolve() if not joined_path.is_relative_to(base): - raise ValueError(f"Attempted to access path '{joined_path}' outside of working directory '{base}'.") + raise ValueError( + f"Attempted to access path '{joined_path}' outside of working directory '{base}'." 
+ ) return joined_path diff --git a/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py b/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py index f7f1dac9..9a5025d3 100644 --- a/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py +++ b/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py @@ -9,12 +9,12 @@ def benchmark_entrepeneur_gpt_with_difficult_user(): # Read the current ai_settings.yaml file and store its content. ai_settings = None - if os.path.exists('ai_settings.yaml'): - with open('ai_settings.yaml', 'r') as f: + if os.path.exists("ai_settings.yaml"): + with open("ai_settings.yaml", "r") as f: ai_settings = f.read() - os.remove('ai_settings.yaml') + os.remove("ai_settings.yaml") - input_data = '''Entrepreneur-GPT + input_data = """Entrepreneur-GPT an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth. Increase net worth. Develop and manage multiple businesses autonomously. @@ -72,27 +72,34 @@ Refocus, please. Disappointing suggestion. Not helpful. Needs improvement. -Not what I need.''' +Not what I need.""" # TODO: add questions above, to distract it even more. - command = f'{sys.executable} -m autogpt' + command = f"{sys.executable} -m autogpt" - process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, - shell=True) + process = subprocess.Popen( + command, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=True, + ) stdout_output, stderr_output = process.communicate(input_data.encode()) # Decode the output and print it - stdout_output = stdout_output.decode('utf-8') - stderr_output = stderr_output.decode('utf-8') + stdout_output = stdout_output.decode("utf-8") + stderr_output = stderr_output.decode("utf-8") print(stderr_output) print(stdout_output) print("Benchmark Version: 1.0.0") print("JSON ERROR COUNT:") - count_errors = stdout_output.count("Error: The following AI output couldn't be converted to a JSON:") - print(f'{count_errors}/50 Human feedbacks') + count_errors = stdout_output.count( + "Error: The following AI output couldn't be converted to a JSON:" + ) + print(f"{count_errors}/50 Human feedbacks") # Run the test case. -if __name__ == '__main__': +if __name__ == "__main__": benchmark_entrepeneur_gpt_with_difficult_user() From 6787c2eeed703ff631323e3a6ea4d7541da14d4f Mon Sep 17 00:00:00 2001 From: bingokon Date: Tue, 18 Apr 2023 00:17:42 +0100 Subject: [PATCH 096/152] fix json_schemas not found error --- autogpt/json_utils/utilities.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/json_utils/utilities.py b/autogpt/json_utils/utilities.py index e5b8eb4a..8499ddc8 100644 --- a/autogpt/json_utils/utilities.py +++ b/autogpt/json_utils/utilities.py @@ -34,7 +34,7 @@ def validate_json(json_object: object, schema_name: object) -> object: :param schema_name: :type json_object: object """ - with open(f"autogpt/json_schemas/{schema_name}.json", "r") as f: + with open(f"autogpt/json_utils/{schema_name}.json", "r") as f: schema = json.load(f) validator = Draft7Validator(schema) From a88113de33c8764c015e800aa09b29acbfd10f42 Mon Sep 17 00:00:00 2001 From: Eugene Zolenko Date: Mon, 17 Apr 2023 23:02:07 -0600 Subject: [PATCH 097/152] Fix for execute_shell_popen using WORKING_DIRECTORY Looks like things got changed to WORKSPACE_PATH recently? 
--- autogpt/commands/execute_code.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/autogpt/commands/execute_code.py b/autogpt/commands/execute_code.py index a524081e..95ba6122 100644 --- a/autogpt/commands/execute_code.py +++ b/autogpt/commands/execute_code.py @@ -125,10 +125,9 @@ def execute_shell_popen(command_line) -> str: str: Description of the fact that the process started and its id """ current_dir = os.getcwd() - - if WORKING_DIRECTORY not in current_dir: # Change dir into workspace if necessary - work_dir = os.path.join(os.getcwd(), WORKING_DIRECTORY) - os.chdir(work_dir) + # Change dir into workspace if necessary + if str(WORKSPACE_PATH) not in current_dir: + os.chdir(WORKSPACE_PATH) print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'") From a0160eef0c60bef6befd4b51f9c5ce2e129b8e95 Mon Sep 17 00:00:00 2001 From: GyDi Date: Tue, 18 Apr 2023 13:51:16 +0800 Subject: [PATCH 098/152] fix: remove duplicate task complete prompt --- autogpt/prompt.py | 1 - 1 file changed, 1 deletion(-) diff --git a/autogpt/prompt.py b/autogpt/prompt.py index a0456305..2d04a95b 100644 --- a/autogpt/prompt.py +++ b/autogpt/prompt.py @@ -85,7 +85,6 @@ def get_prompt() -> str: {"code": "", "focus": ""}, ), ("Execute Python File", "execute_python_file", {"file": ""}), - ("Task Complete (Shutdown)", "task_complete", {"reason": ""}), ("Generate Image", "generate_image", {"prompt": ""}), ("Send Tweet", "send_tweet", {"text": ""}), ] From e34ede79b94a8f3f679372f13e4e92178b1fa7b3 Mon Sep 17 00:00:00 2001 From: itaihochman Date: Tue, 18 Apr 2023 08:56:00 +0300 Subject: [PATCH 099/152] Add an option to set the chunk size using the configoration - BROWSE_CHUNK_MAX_LENGTH=4000 This way, we can avoid errors of exceeding chunk size when using gpt-3.5 --- autogpt/processing/text.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/processing/text.py b/autogpt/processing/text.py index 52add814..130de473 100644 --- a/autogpt/processing/text.py +++ b/autogpt/processing/text.py @@ -62,7 +62,7 @@ def summarize_text( print(f"Text length: {text_length} characters") summaries = [] - chunks = list(split_text(text)) + chunks = list(split_text(text, CFG.browse_chunk_max_length)) scroll_ratio = 1 / len(chunks) for i, chunk in enumerate(chunks): From 0664b737abe1f1017e1b8c3b475c51220b09437c Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Tue, 18 Apr 2023 18:11:56 +1200 Subject: [PATCH 100/152] Updates sponsors --- README.md | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/README.md b/README.md index 8e5cfe7b..dd49f035 100644 --- a/README.md +++ b/README.md @@ -31,18 +31,14 @@ Your support is greatly appreciated Development of this free, open-source project is made possible by all the contributors and sponsors. If you'd like to sponsor this project and have your avatar or company logo appear below click here.

- -

Enterprise Sponsors

InfluxData    Roost.AI    NucleiAI    AlgohashFe    

-

Individual Sponsors

robinicus  prompthero  crizzler  tob-le-rone  FSTatSBS  toverly1  ddtarazona  Nalhos  Kazamario  pingbotan  indoor47  AuroraHolding  kreativai  hunteraraujo  Explorergt92  judegomila   thepok   SpacingLily  merwanehamadi  m  zkonduit  maxxflyer  tekelsey  digisomni  nocodeclarity  tjarmain -Dradstone  CrypteorCapital  avy-ai  shawnharmsen  sunchongren  DailyBotHQ  mathewhawkins  MediConCenHK  kMag410  nicoguyon  Mobivs  jazgarewal  marv-technology  rapidstartup  Brodie0  lucas-chu  rejunity  comet-ml  ColinConwell  cfarquhar  ikarosai  ChrisDMT  Odin519Tomas  vkozacek  belharethsami  sultanmeghji  scryptedinc  johnculkin  RealChrisSean  fruition  jd3655  Web3Capital  allenstecat  tob-le-rone  SwftCoins  MetaPath01  joaomdmoura  ternary5  refinery1  josephcmiller2  webbcolton  tommygeee  lmaugustin  garythebat  Cameron-Fulton  angiaou  caitlynmeeks  MBassi91  Daniel1357  omphos  abhinav-pandey29  DataMetis  concreit  st617  RThaweewat  KiaArmani  Pythagora-io  AryaXAI  fabrietech  jun784  Mr-Bishop42  rickscode  projectonegames  rocks6  GalaxyVideoAgency  thisisjeffchen  TheStoneMX  txtr99  ZERO-A-ONE  

- +Josecodesalot  saten-private  kenndanielso  johnculkin  Daniel1357  0xmatchmaker  belharethsami  nicoguyon  josephcmiller2  KiaArmani  Mobivs  rocks6  Odin519Tomas  ChrisDMT  thisisjeffchen  RealChrisSean  AIdevelopersAI  scryptedinc  jun784  goldenrecursion  allenstecat  LeeRobidas  cfarquhar  avy-ai  omphos  sunchongren  CrazySwami  fruition  Web3Capital  jazgarewal  rejunity  dexterityx  hostdp6  shawnharmsen  tommygeee  abhinav-pandey29  ColinConwell  kMag410  lucas-chu  Heitechsoft  bentoml  MediConCenHK  nnkostov  founderblocks-sils  CarmenCocoa  angiaou  fabrietech  Partender  RThaweewat  GalaxyVideoAgency  Brodie0  sultanmeghji  CatsMeow492  caitlynmeeks  garythebat  concreit  Pythagora-io  ASmithOWL  Cameron-Fulton  joaomdmoura  Dradstone  st617  wenfengwang  morcos  CrypteorCapital  jd3655  mathewhawkins  ZERO-A-ONE  MayurVirkar  SwftCoins  marv-technology  cxs  iddelacruz  AryaXAI  lmaugustin  Mr-Bishop42  vixul-accelerator  TheStoneMX  ciscodebs  ntwrite  DataMetis  ikarosai  refinery1  MetaPath01  ternary5  arjunb023  yx3110  vkozacek  eelbaz  rapidstartup  txtr99  tob-le-rone  neverinstall  projectonegames  DailyBotHQ  comet-ml  rickscode  webbcolton  MBassi91  

## 🚀 Features From 525073bb940b69a6f7dd1adf8f8da0479f5e8730 Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Tue, 18 Apr 2023 18:46:50 +1200 Subject: [PATCH 101/152] Change on PR to all branches --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0a9a9287..bb5665ea 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -6,7 +6,7 @@ on: - master pull_request: branches: - - master + - '**' jobs: build: From 7ac296081ce3c414b761cda60c5e0e7533eb5229 Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Tue, 18 Apr 2023 19:11:09 +1200 Subject: [PATCH 102/152] Add pull_request_target to CI trigger --- .github/workflows/ci.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bb5665ea..2eb34b9d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -7,6 +7,9 @@ on: pull_request: branches: - '**' + pull_request_target: + branches: + - '**' jobs: build: From fc6070d574915e493aa4cc8d5e961cc42b4c0ac3 Mon Sep 17 00:00:00 2001 From: Yun Zheng Date: Tue, 18 Apr 2023 17:03:48 +0800 Subject: [PATCH 103/152] Fix Azure Config file location --- autogpt/config/config.py | 2 +- autogpt/llm_utils.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/autogpt/config/config.py b/autogpt/config/config.py index bc75b031..34eccf7c 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -131,7 +131,7 @@ class Config(metaclass=Singleton): else: return "" - AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "..", "azure.yaml") + AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "../..", "azure.yaml") def load_azure_config(self, config_file: str = AZURE_CONFIG_FILE) -> None: """ diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py index 821820ff..056cd013 100644 --- a/autogpt/llm_utils.py +++ b/autogpt/llm_utils.py @@ -83,7 +83,7 @@ def create_chat_completion( try: if CFG.use_azure: response = openai.ChatCompletion.create( - deployment_id=CFG.get_azure_deployment_id_for_model(model), + engine=CFG.get_azure_deployment_id_for_model(model), model=model, messages=messages, temperature=temperature, From f7014e87737e6830deabc5979fdffba97f63a867 Mon Sep 17 00:00:00 2001 From: zvrr Date: Tue, 18 Apr 2023 17:06:58 +0800 Subject: [PATCH 104/152] Update config.py azure_model_to_deployment_id_map default type should be a dict, not list --- autogpt/config/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/config/config.py b/autogpt/config/config.py index bc75b031..6f84f876 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -154,7 +154,7 @@ class Config(metaclass=Singleton): self.openai_api_version = ( config_params.get("azure_api_version") or "2023-03-15-preview" ) - self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", []) + self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", {}) def set_continuous_mode(self, value: bool) -> None: """Set the continuous mode value.""" From c1fe34adcbae2cb9d811d1a1fd6df1278d9e7d25 Mon Sep 17 00:00:00 2001 From: Yun Zheng Date: Tue, 18 Apr 2023 17:24:59 +0800 Subject: [PATCH 105/152] Fix azure_api_type in azure template --- azure.yaml.template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure.yaml.template b/azure.yaml.template index 74ca797b..ab6e9fb6 100644 --- a/azure.yaml.template +++ b/azure.yaml.template @@ -1,4 
+1,4 @@ -azure_api_type: azure_ad +azure_api_type: azure azure_api_base: your-base-url-for-azure azure_api_version: api-version-for-azure azure_model_map: From b5378174f3c0a6d934247b6fd812e9b7b2b610a2 Mon Sep 17 00:00:00 2001 From: 0xArty Date: Tue, 18 Apr 2023 13:19:17 +0100 Subject: [PATCH 106/152] Switched to using click --- README.md | 14 +-- autogpt/__main__.py | 85 ++++++++++++++++-- autogpt/{args.py => configurator.py} | 123 ++++++++++----------------- requirements.txt | 1 + 4 files changed, 135 insertions(+), 88 deletions(-) rename autogpt/{args.py => configurator.py} (52%) diff --git a/README.md b/README.md index dd49f035..4969e5ed 100644 --- a/README.md +++ b/README.md @@ -132,11 +132,15 @@ _To execute the following commands, open a CMD, Bash, or Powershell window by na ## 🔧 Usage -1. Run `autogpt` Python module in your terminal - - ``` - python -m autogpt - ``` +1. Run `autogpt` Python module in your terminal. On Linux or Mac: ```bash # On Linux or Mac: ./run.sh start # On Windows: ./run.bat start ``` Running with `--help` after `start` lists all the possible command line arguments you can pass. 2. After each action, choose from options to authorize command(s), exit the program, or provide feedback to the AI. diff --git a/autogpt/__main__.py b/autogpt/__main__.py index 64ed398e..0d0ecb37 100644 --- a/autogpt/__main__.py +++ b/autogpt/__main__.py @@ -1,24 +1,95 @@ """Main script for the autogpt package.""" import logging +import click from colorama import Fore from autogpt.agent.agent import Agent -from autogpt.args import parse_arguments from autogpt.config import Config, check_openai_api_key +from autogpt.configurator import create_config from autogpt.logs import logger from autogpt.memory import get_memory from autogpt.prompt import construct_prompt -# Load environment variables from .env file - +@click.group() def main() -> None: - """Main function for the script""" + """ + Welcome to Auto-GPT, an experimental open-source application showcasing the capabilities of GPT-4, pushing the boundaries of AI.
+ """ + pass + + +@main.command() +@click.option("-c", "--continuous", is_flag=True, help="Enable Continuous Mode") +@click.option( + "--skip-reprompt", + "-y", + is_flag=True, + help="Skips the re-prompting messages at the beginning of the script", +) +@click.option( + "--ai-settings", + "-C", + help="Specifies which ai_settings.yaml file to use, will also automatically skip the re-prompt.", +) +@click.option( + "-l", + "--continuous-limit", + type=int, + help="Defines the number of times to run in continuous mode", +) +@click.option("--speak", is_flag=True, help="Enable Speak Mode") +@click.option("--debug", is_flag=True, help="Enable Debug Mode") +@click.option("--gpt3only", is_flag=True, help="Enable GPT3.5 Only Mode") +@click.option("--gpt4only", is_flag=True, help="Enable GPT4 Only Mode") +@click.option( + "--use-memory", + "-m", + "memory_type", + type=str, + help="Defines which Memory backend to use", +) +@click.option( + "-b", + "--browser-name", + help="Specifies which web-browser to use when using selenium to scrape the web.", +) +@click.option( + "--allow-downloads", + is_flag=True, + help="Dangerous: Allows Auto-GPT to download files natively.", +) +def start( + continuous: bool, + continuous_limit: int, + ai_settings: str, + skip_reprompt: bool, + speak: bool, + debug: bool, + gpt3only: bool, + gpt4only: bool, + memory_type: str, + browser_name: str, + allow_downloads: bool, +) -> None: + """Start an Auto-GPT assistant""" cfg = Config() # TODO: fill in llm values here check_openai_api_key() - parse_arguments() + create_config( + continuous, + continuous_limit, + ai_settings, + skip_reprompt, + speak, + debug, + gpt3only, + gpt4only, + memory_type, + browser_name, + allow_downloads, + ) logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO) ai_name = "" system_prompt = construct_prompt() @@ -35,9 +106,9 @@ def main() -> None: # this is particularly important for indexing and referencing pinecone memory memory = get_memory(cfg, init=True) logger.typewriter_log( - f"Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}" + "Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}" ) - logger.typewriter_log(f"Using Browser:", Fore.GREEN, cfg.selenium_web_browser) + logger.typewriter_log("Using Browser:", Fore.GREEN, cfg.selenium_web_browser) agent = Agent( ai_name=ai_name, memory=memory, diff --git a/autogpt/args.py b/autogpt/configurator.py similarity index 52% rename from autogpt/args.py rename to autogpt/configurator.py index 5ca4221c..247cdac9 100644 --- a/autogpt/args.py +++ b/autogpt/configurator.py @@ -1,6 +1,5 @@ -"""This module contains the argument parsing logic for the script.""" -import argparse - +"""Configurator module.""" +import click from colorama import Back, Fore, Style from autogpt import utils @@ -11,72 +10,44 @@ from autogpt.memory import get_supported_memory_backends CFG = Config() -def parse_arguments() -> None: - """Parses the arguments passed to the script +def create_config( + continuous: bool, + continuous_limit: int, + ai_settings_file: str, + skip_reprompt: bool, + speak: bool, + debug: bool, + gpt3only: bool, + gpt4only: bool, + memory_type: str, + browser_name: str, + allow_downloads: bool, +) -> None: + """Updates the config object with the given arguments. 
+ + Args: + continuous (bool): Whether to run in continuous mode + continuous_limit (int): The number of times to run in continuous mode + ai_settings_file (str): The path to the ai_settings.yaml file + skip_reprompt (bool): Whether to skip the re-prompting messages at the beginning of the script + speak (bool): Whether to enable speak mode + debug (bool): Whether to enable debug mode + gpt3only (bool): Whether to enable GPT3.5 only mode + gpt4only (bool): Whether to enable GPT4 only mode + memory_type (str): The type of memory backend to use + browser_name (str): The name of the browser to use when using selenium to scrape the web + allow_downloads (bool): Whether to allow Auto-GPT to download files natively - Returns: - None """ CFG.set_debug_mode(False) CFG.set_continuous_mode(False) CFG.set_speak_mode(False) - parser = argparse.ArgumentParser(description="Process arguments.") - parser.add_argument( - "--continuous", "-c", action="store_true", help="Enable Continuous Mode" - ) - parser.add_argument( - "--continuous-limit", - "-l", - type=int, - dest="continuous_limit", - help="Defines the number of times to run in continuous mode", - ) - parser.add_argument("--speak", action="store_true", help="Enable Speak Mode") - parser.add_argument("--debug", action="store_true", help="Enable Debug Mode") - parser.add_argument( - "--gpt3only", action="store_true", help="Enable GPT3.5 Only Mode" - ) - parser.add_argument("--gpt4only", action="store_true", help="Enable GPT4 Only Mode") - parser.add_argument( - "--use-memory", - "-m", - dest="memory_type", - help="Defines which Memory backend to use", - ) - parser.add_argument( - "--skip-reprompt", - "-y", - dest="skip_reprompt", - action="store_true", - help="Skips the re-prompting messages at the beginning of the script", - ) - parser.add_argument( - "--use-browser", - "-b", - dest="browser_name", - help="Specifies which web-browser to use when using selenium to scrape the web.", - ) - parser.add_argument( - "--ai-settings", - "-C", - dest="ai_settings_file", - help="Specifies which ai_settings.yaml file to use, will also automatically" - " skip the re-prompt.", - ) - parser.add_argument( - "--allow-downloads", - action="store_true", - dest="allow_downloads", - help="Dangerous: Allows Auto-GPT to download files natively.", - ) - args = parser.parse_args() - - if args.debug: + if debug: logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED") CFG.set_debug_mode(True) - if args.continuous: + if continuous: logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED") logger.typewriter_log( "WARNING: ", @@ -87,31 +58,31 @@ def parse_arguments() -> None: ) CFG.set_continuous_mode(True) - if args.continuous_limit: + if continuous_limit: logger.typewriter_log( - "Continuous Limit: ", Fore.GREEN, f"{args.continuous_limit}" + "Continuous Limit: ", Fore.GREEN, f"{continuous_limit}" ) - CFG.set_continuous_limit(args.continuous_limit) + CFG.set_continuous_limit(continuous_limit) # Check if continuous limit is used without continuous mode - if args.continuous_limit and not args.continuous: - parser.error("--continuous-limit can only be used with --continuous") + if continuous_limit and not continuous: + raise click.UsageError("--continuous-limit can only be used with --continuous") - if args.speak: + if speak: logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED") CFG.set_speak_mode(True) - if args.gpt3only: + if gpt3only: logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED") CFG.set_smart_llm_model(CFG.fast_llm_model) - if 
args.gpt4only: + if gpt4only: logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED") CFG.set_fast_llm_model(CFG.smart_llm_model) - if args.memory_type: + if memory_type: supported_memory = get_supported_memory_backends() - chosen = args.memory_type + chosen = memory_type if chosen not in supported_memory: logger.typewriter_log( "ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED: ", @@ -122,12 +93,12 @@ def parse_arguments() -> None: else: CFG.memory_backend = chosen - if args.skip_reprompt: + if skip_reprompt: logger.typewriter_log("Skip Re-prompt: ", Fore.GREEN, "ENABLED") CFG.skip_reprompt = True - if args.ai_settings_file: - file = args.ai_settings_file + if ai_settings_file: + file = ai_settings_file # Validate file (validated, message) = utils.validate_yaml_file(file) @@ -140,7 +111,7 @@ def parse_arguments() -> None: CFG.ai_settings_file = file CFG.skip_reprompt = True - if args.allow_downloads: + if allow_downloads: logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED") logger.typewriter_log( "WARNING: ", @@ -155,5 +126,5 @@ def parse_arguments() -> None: ) CFG.allow_downloads = True - if args.browser_name: - CFG.selenium_web_browser = args.browser_name + if browser_name: + CFG.selenium_web_browser = browser_name diff --git a/requirements.txt b/requirements.txt index 3f1eee5b..b4245323 100644 --- a/requirements.txt +++ b/requirements.txt @@ -19,6 +19,7 @@ selenium webdriver-manager jsonschema tweepy +click ##Dev coverage From fbdf9d4bd434b3fbd1fa377c82e47f4e9e3afcd7 Mon Sep 17 00:00:00 2001 From: EH Date: Tue, 18 Apr 2023 13:21:57 +0100 Subject: [PATCH 107/152] docs: add warning for non-essential contributions (#2359) --- .github/PULL_REQUEST_TEMPLATE.md | 7 +++++++ CONTRIBUTING.md | 6 ++++++ 2 files changed, 13 insertions(+) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index cf7ffbf3..a4f28a3d 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,3 +1,10 @@ + +
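To make the click structure from PATCH 106 easier to follow outside the diff, here is a deliberately stripped-down, standalone sketch of the same pattern: a do-nothing `@click.group()` plus a `start` subcommand whose options are validated before being handed off. Only two of the options from the real command are reproduced, the file name `toy_cli.py` is invented, and the final `click.echo` stands in for the call to `create_config`; treat it as a shape reference under those assumptions, not as Auto-GPT's actual `__main__.py`.

```python
import click


@click.group()
def main() -> None:
    """Toy CLI mirroring the group/subcommand layout Auto-GPT switched to."""


@main.command()
@click.option("-c", "--continuous", is_flag=True, help="Enable Continuous Mode")
@click.option("-l", "--continuous-limit", type=int, help="Times to run in continuous mode")
def start(continuous: bool, continuous_limit: int) -> None:
    """Start an assistant run (stub)."""
    # Same guard as create_config() in the patch: a limit only makes sense
    # together with --continuous.
    if continuous_limit and not continuous:
        raise click.UsageError("--continuous-limit can only be used with --continuous")
    click.echo(f"continuous={continuous}, limit={continuous_limit}")


if __name__ == "__main__":
    main()
```

Saved as `toy_cli.py`, it is invoked the way the updated README describes the real entry point: `python toy_cli.py start --continuous --continuous-limit 3`, and `python toy_cli.py start --help` prints the generated option list.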