From 1001e5489e2f4d4e134f0593eab3a76e5a3a849a Mon Sep 17 00:00:00 2001 From: jingxing Date: Mon, 17 Apr 2023 14:24:10 +0800 Subject: [PATCH 001/222] config.py format --- autogpt/config/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/config/config.py b/autogpt/config/config.py index fe6f4f32..8dc3b487 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -67,7 +67,7 @@ class Config(metaclass=Singleton): self.pinecone_api_key = os.getenv("PINECONE_API_KEY") self.pinecone_region = os.getenv("PINECONE_ENV") - self.weaviate_host = os.getenv("WEAVIATE_HOST") + self.weaviate_host = os.getenv("WEAVIATE_HOST") self.weaviate_port = os.getenv("WEAVIATE_PORT") self.weaviate_protocol = os.getenv("WEAVIATE_PROTOCOL", "http") self.weaviate_username = os.getenv("WEAVIATE_USERNAME", None) From 275b2eaae15c21339154371abdf8822e064168be Mon Sep 17 00:00:00 2001 From: lengweiping Date: Tue, 18 Apr 2023 13:06:09 +0800 Subject: [PATCH 002/222] only adjust the order, so argument definitions are consistent with the logical order --- autogpt/args.py | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/autogpt/args.py b/autogpt/args.py index 5ca4221c..bafd41fe 100644 --- a/autogpt/args.py +++ b/autogpt/args.py @@ -22,6 +22,8 @@ def parse_arguments() -> None: CFG.set_speak_mode(False) parser = argparse.ArgumentParser(description="Process arguments.") + parser.add_argument("--debug", action="store_true", help="Enable Debug Mode") + parser.add_argument( "--continuous", "-c", action="store_true", help="Enable Continuous Mode" ) @@ -32,18 +34,21 @@ def parse_arguments() -> None: dest="continuous_limit", help="Defines the number of times to run in continuous mode", ) + parser.add_argument("--speak", action="store_true", help="Enable Speak Mode") - parser.add_argument("--debug", action="store_true", help="Enable Debug Mode") + parser.add_argument( "--gpt3only", action="store_true", help="Enable GPT3.5 Only Mode" ) parser.add_argument("--gpt4only", action="store_true", help="Enable GPT4 Only Mode") + parser.add_argument( "--use-memory", "-m", dest="memory_type", help="Defines which Memory backend to use", ) + parser.add_argument( "--skip-reprompt", "-y", @@ -51,19 +56,20 @@ def parse_arguments() -> None: action="store_true", help="Skips the re-prompting messages at the beginning of the script", ) + + parser.add_argument( + "--ai-settings", + "-C", + dest="ai_settings_file", + help="Specifies which ai_settings.yaml file to use, will also automatically skip the re-prompt.", + ) + parser.add_argument( "--use-browser", "-b", dest="browser_name", help="Specifies which web-browser to use when using selenium to scrape the web.", ) - parser.add_argument( - "--ai-settings", - "-C", - dest="ai_settings_file", - help="Specifies which ai_settings.yaml file to use, will also automatically" - " skip the re-prompt.", - ) parser.add_argument( "--allow-downloads", action="store_true", @@ -104,7 +110,6 @@ def parse_arguments() -> None: if args.gpt3only: logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED") CFG.set_smart_llm_model(CFG.fast_llm_model) - if args.gpt4only: logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED") CFG.set_fast_llm_model(CFG.smart_llm_model) @@ -140,6 +145,9 @@ def parse_arguments() -> None: CFG.ai_settings_file = file CFG.skip_reprompt = True + if args.browser_name: + CFG.selenium_web_browser = args.browser_name + if args.allow_downloads: logger.typewriter_log("Native Downloading:", Fore.GREEN, 
"ENABLED") logger.typewriter_log( @@ -154,6 +162,3 @@ def parse_arguments() -> None: f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}", ) CFG.allow_downloads = True - - if args.browser_name: - CFG.selenium_web_browser = args.browser_name From 4cc90b8eb4c86320cb5f86a06f6ef6e199eb328c Mon Sep 17 00:00:00 2001 From: pkqs90 Date: Wed, 19 Apr 2023 00:01:26 +0800 Subject: [PATCH 003/222] Fix docker usage readme --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 4969e5ed..70b52840 100644 --- a/README.md +++ b/README.md @@ -165,7 +165,7 @@ You can also build this into a docker image and run it: ```bash docker build -t autogpt . -docker run -it --env-file=./.env -v $PWD/auto_gpt_workspace:/app/auto_gpt_workspace autogpt +docker run -it --env-file=./.env -v $PWD/auto_gpt_workspace:/home/appuser/auto_gpt_workspace autogpt ``` Or if you have `docker-compose`: @@ -175,7 +175,7 @@ docker-compose run --build --rm auto-gpt You can pass extra arguments, for instance, running with `--gpt3only` and `--continuous` mode: ```bash -docker run -it --env-file=./.env -v $PWD/auto_gpt_workspace:/app/auto_gpt_workspace autogpt --gpt3only --continuous +docker run -it --env-file=./.env -v $PWD/auto_gpt_workspace:/home/appuser/auto_gpt_workspace autogpt --gpt3only --continuous ``` ```bash docker-compose run --build --rm auto-gpt --gpt3only --continuous From fa8562bc0c36d0eeede9efd304f9b74789dc018b Mon Sep 17 00:00:00 2001 From: jazelly Date: Wed, 19 Apr 2023 20:47:36 +0930 Subject: [PATCH 004/222] misc: fix typo in benchmark --- .github/workflows/benchmark.yml | 2 +- ...r.py => benchmark_entrepreneur_gpt_with_difficult_user.py} | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) rename benchmark/{benchmark_entrepeneur_gpt_with_difficult_user.py => benchmark_entrepreneur_gpt_with_difficult_user.py} (97%) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 53b6e7a0..0d9822f7 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -26,6 +26,6 @@ jobs: pip install -r requirements.txt - name: benchmark run: | - python benchmark/benchmark_entrepeneur_gpt_with_undecisive_user.py + python benchmark/benchmark_entrepreneur_gpt_with_undecisive_user.py env: OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} diff --git a/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py b/benchmark/benchmark_entrepreneur_gpt_with_difficult_user.py similarity index 97% rename from benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py rename to benchmark/benchmark_entrepreneur_gpt_with_difficult_user.py index 9a5025d3..608e9630 100644 --- a/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py +++ b/benchmark/benchmark_entrepreneur_gpt_with_difficult_user.py @@ -3,7 +3,7 @@ import subprocess import sys -def benchmark_entrepeneur_gpt_with_difficult_user(): +def benchmark_entrepreneur_gpt_with_difficult_user(): # Test case to check if the write_file command can successfully write 'Hello World' to a file # named 'hello_world.txt'. @@ -102,4 +102,4 @@ Not what I need.""" # Run the test case. 
if __name__ == "__main__": - benchmark_entrepeneur_gpt_with_difficult_user() + benchmark_entrepreneur_gpt_with_difficult_user() From 9240a554f1219a0836de9682eeb9b256c217dd67 Mon Sep 17 00:00:00 2001 From: Eesa Hamza Date: Wed, 19 Apr 2023 17:55:36 +0300 Subject: [PATCH 005/222] Added version select to bug template --- .github/ISSUE_TEMPLATE/1.bug.yml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/.github/ISSUE_TEMPLATE/1.bug.yml b/.github/ISSUE_TEMPLATE/1.bug.yml index 6645142e..ba388ac8 100644 --- a/.github/ISSUE_TEMPLATE/1.bug.yml +++ b/.github/ISSUE_TEMPLATE/1.bug.yml @@ -57,6 +57,20 @@ body: - Other (Please specify in your problem) validations: required: true + - type: dropdown + attributes: + label: Which version of Auto-GPT are you using? + description: | + Please select which version of Auto-GPT you were using when this issue occurred. + If you downloaded the code from the [releases page](https://github.com/Significant-Gravitas/Auto-GPT/issues/2536) make sure you were using the latest code. + **If you weren't please try with the latest code**. + If installed with git you can run `git branch` to see which version of Auto-GPT you are running. + options: + - Latest Release + - Stable (branch) + - Master (branch) + validations: + required: true - type: dropdown attributes: label: GPT-3 or GPT-4? From ec945d1022471f867ea97573d33cc4f096f80cae Mon Sep 17 00:00:00 2001 From: Eesa Hamza Date: Wed, 19 Apr 2023 17:59:17 +0300 Subject: [PATCH 006/222] Fixed links --- .github/ISSUE_TEMPLATE/1.bug.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/1.bug.yml b/.github/ISSUE_TEMPLATE/1.bug.yml index ba388ac8..6d6e9d68 100644 --- a/.github/ISSUE_TEMPLATE/1.bug.yml +++ b/.github/ISSUE_TEMPLATE/1.bug.yml @@ -62,8 +62,8 @@ body: label: Which version of Auto-GPT are you using? description: | Please select which version of Auto-GPT you were using when this issue occurred. - If you downloaded the code from the [releases page](https://github.com/Significant-Gravitas/Auto-GPT/issues/2536) make sure you were using the latest code. - **If you weren't please try with the latest code**. + If you downloaded the code from the [releases page](https://github.com/Significant-Gravitas/Auto-GPT/releases/) make sure you were using the latest code. + **If you weren't please try with the [latest code](https://github.com/Significant-Gravitas/Auto-GPT/releases/)**. If installed with git you can run `git branch` to see which version of Auto-GPT you are running. 
options: - Latest Release From 7c45b21aa731f5e0c3316219b67494e2b0973d2c Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 20 Apr 2023 00:11:43 -0500 Subject: [PATCH 007/222] Update ci.yml --- .github/workflows/ci.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b37e7f00..e4599c64 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -70,3 +70,9 @@ jobs: coverage report coverage xml if: success() || failure() + + - name: Publish coverage report + uses: orgoro/coverage@v3 + with: + coverageFile: coverage.xml + token: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file From b9623ed424751f57989784c5a076ab44ee0ccb2c Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 20 Apr 2023 00:21:20 -0500 Subject: [PATCH 008/222] fix: add new line back --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e4599c64..1ef02453 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -75,4 +75,4 @@ jobs: uses: orgoro/coverage@v3 with: coverageFile: coverage.xml - token: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file + token: ${{ secrets.GITHUB_TOKEN }} From 9cb4739e4a19fc34b934c1836e0a150d2c39712a Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 20 Apr 2023 00:22:10 -0500 Subject: [PATCH 009/222] fix: syntax --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1ef02453..45482e35 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -73,6 +73,6 @@ jobs: - name: Publish coverage report uses: orgoro/coverage@v3 - with: - coverageFile: coverage.xml - token: ${{ secrets.GITHUB_TOKEN }} + with: + coverageFile: coverage.xml + token: ${{ secrets.GITHUB_TOKEN }} From 51550561984c80feb71de56d9c381bdf7335e4d4 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 20 Apr 2023 00:25:48 -0500 Subject: [PATCH 010/222] feat: permissions --- .github/workflows/ci.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 45482e35..1ec7e851 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -42,6 +42,9 @@ jobs: if: success() || failure() test: + permissions: + issues: write + pull-requests: write runs-on: ubuntu-latest strategy: matrix: From c561fe8925d56c1d2fdcbeff19a99c8669abd492 Mon Sep 17 00:00:00 2001 From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com> Date: Thu, 20 Apr 2023 02:19:20 -0500 Subject: [PATCH 011/222] Update app.py --- autogpt/app.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/app.py b/autogpt/app.py index cf8e29a3..11238a05 100644 --- a/autogpt/app.py +++ b/autogpt/app.py @@ -125,7 +125,7 @@ def execute_command( else: for command in prompt.commands: if command_name == command["label"] or command_name == command["name"]: - return command["function"](*arguments.values()) + return command["function"](**arguments) return ( f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'" " list for available commands and only respond in the specified JSON" From 744c94c96ad920fb6f6532d6a6f3406689b73306 Mon Sep 17 00:00:00 2001 From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com> Date: Thu, 20 Apr 2023 02:22:54 -0500 Subject: [PATCH 012/222] Lower label and command provided. 
--- autogpt/app.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/app.py b/autogpt/app.py index 11238a05..5e037975 100644 --- a/autogpt/app.py +++ b/autogpt/app.py @@ -124,7 +124,7 @@ def execute_command( shutdown() else: for command in prompt.commands: - if command_name == command["label"] or command_name == command["name"]: + if command_name == command["label"].lower() or command_name == command["name"].lower(): return command["function"](**arguments) return ( f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'" From c963a209ab716e4f42f7dd76ca3d1d19deb340b1 Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Thu, 20 Apr 2023 20:23:03 +1200 Subject: [PATCH 013/222] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 3e811a1c..a4fe976a 100644 --- a/README.md +++ b/README.md @@ -34,12 +34,12 @@ Development of this free, open-source project is made possible by all the

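Editor's note on patches 011 and 012 above: together they change how `execute_command` dispatches to a registered command, unpacking `arguments` as keyword arguments instead of positional values, and lower-casing both `label` and `name` before matching. A minimal, self-contained sketch of the combined behaviour (the `commands` registry and its single entry are hypothetical stand-ins, not the project's actual prompt object):

```python
# Hedged sketch of command dispatch after patches 011-012; only the matching
# and keyword unpacking mirror the diffs, the registry itself is made up.
commands = [
    {
        "label": "Write File",
        "name": "write_file",
        "function": lambda filename, text: f"wrote {len(text)} chars to {filename}",
    },
]

def execute_command(command_name: str, arguments: dict) -> str:
    for command in commands:
        # case-insensitive match against either the display label or the name
        if command_name in (command["label"].lower(), command["name"].lower()):
            # keyword unpacking: argument order in the dict no longer matters
            return command["function"](**arguments)
    return f"Unknown command '{command_name}'."

print(execute_command("write_file", {"text": "Hello World", "filename": "hello_world.txt"}))
```

Passing keywords also turns a mismatched argument name into an immediate `TypeError`, where the old `*arguments.values()` form silently relied on dict ordering.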
From 000389c76220f90a1f921f405e757725250971f8 Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Thu, 20 Apr 2023 20:55:55 +1200 Subject: [PATCH 014/222] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index a4fe976a..2771a18e 100644 --- a/README.md +++ b/README.md @@ -35,6 +35,7 @@ Development of this free, open-source project is made possible by all the
+Zilliz Roost.AI NucleiAI Algohash From 9f6d6f32a6fecbd4647545dc5cc77562a723422d Mon Sep 17 00:00:00 2001 From: riensen <3340218+riensen@users.noreply.github.com> Date: Thu, 20 Apr 2023 11:17:47 +0200 Subject: [PATCH 015/222] Update plugin instructions and improve clarity --- README.md | 35 +++++++++++++++++++++++++++-------- 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 2771a18e..3e8685c3 100644 --- a/README.md +++ b/README.md @@ -268,19 +268,38 @@ export CUSTOM_SEARCH_ENGINE_ID="YOUR_CUSTOM_SEARCH_ENGINE_ID" ## Plugins -See https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template for the template of the plugins. +Use the [Auto-GPT Plugin Template](https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template) as a starting point for creating your own plugins. -⚠️💀 WARNING 💀⚠️: Review the code of any plugin you use, this allows for any Python to be executed and do malicious things. Like stealing your API keys. +⚠️💀 **WARNING** 💀⚠️: Review the code of any plugin you use thoroughly, as plugins can execute any Python code, potentially leading to malicious activities, such as stealing your API keys. -Drop the repo's zipfile in the plugins folder. +### Plugin Installation Steps -![Download Zip](https://raw.githubusercontent.com/BillSchumacher/Auto-GPT/master/plugin.png) +1. **Clone or download the plugin repository:** + Clone the plugin repository, or download the repository as a zip file. + + ![Download Zip](https://raw.githubusercontent.com/BillSchumacher/Auto-GPT/master/plugin.png) -If you add the plugins class name to the `ALLOWLISTED_PLUGINS` in the `.env` you will not be prompted otherwise you'll be warned before loading the plugin: +2. **Install the plugin's dependencies (if any):** + Navigate to the plugin's folder in your terminal, and run the following command to install any required dependencies: -``` -ALLOWLISTED_PLUGINS=example-plugin1,example-plugin2,example-plugin3 -``` + ```bash + pip install -r requirements.txt + ``` + +3. **Package the plugin as a Zip file:** + If you cloned the repository, compress the plugin folder as a Zip file. + +4. **Copy the plugin's Zip file:** + Place the plugin's Zip file in the `plugins` folder of the Auto-GPT repository. + +5. **Allowlist the plugin (optional):** + Add the plugin's class name to the `ALLOWLISTED_PLUGINS` in the `.env` file to avoid being prompted with a warning when loading the plugin: + + ``` + ALLOWLISTED_PLUGINS=example-plugin1,example-plugin2,example-plugin3 + ``` + + If the plugin is not allowlisted, you will be warned before it's loaded. ## Setting Your Cache Type From c09ed61abac6c69a204077635edefacf22c068cc Mon Sep 17 00:00:00 2001 From: Egon Meijers Date: Thu, 20 Apr 2023 14:22:24 +0200 Subject: [PATCH 016/222] doc: update git clone command to use stable branch Since master should not be used for installation as described in the readme, it would be better to checkout the stable branch immediately when cloning to prevent people from reporting issues that are not in the stable environment. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 2771a18e..8f980037 100644 --- a/README.md +++ b/README.md @@ -100,7 +100,7 @@ _To execute the following commands, open a CMD, Bash, or Powershell window by na you can download the [latest stable release](https://github.com/Significant-Gravitas/Auto-GPT/releases/latest) (`Source code (zip)`, bottom of the page). 
```bash - git clone https://github.com/Significant-Gravitas/Auto-GPT.git + git clone -b stable https://github.com/Significant-Gravitas/Auto-GPT.git ``` 3. Navigate to the directory where the repository was downloaded From bb7ca692e35d889723a63da45da34980e7ebf1e9 Mon Sep 17 00:00:00 2001 From: k-boikov <64261260+k-boikov@users.noreply.github.com> Date: Thu, 20 Apr 2023 15:45:26 +0300 Subject: [PATCH 017/222] include openapi-python-client in docker build (#2669) Fixes #2658 "Docker image crashes on start" --- requirements.txt | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/requirements.txt b/requirements.txt index 2052a9ac..92a963e8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -33,6 +33,9 @@ isort gitpython==3.1.31 auto-gpt-plugin-template +# OpenAI and Generic plugins import +openapi-python-client==0.13.4 + # Items below this point will not be included in the Docker Image # Testing dependencies @@ -43,7 +46,3 @@ pytest-benchmark pytest-cov pytest-integration pytest-mock - - -# OpenAI and Generic plugins import -openapi-python-client==0.13.4 From f99c37aede577291aef30dabc45207610c00a984 Mon Sep 17 00:00:00 2001 From: Andres Caicedo Date: Thu, 20 Apr 2023 16:42:34 +0200 Subject: [PATCH 018/222] Update milvus_memory_test.py The 'err' variable in the except block is an instance of the ImportError class. --- tests/milvus_memory_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/milvus_memory_test.py b/tests/milvus_memory_test.py index 84fd6e6d..ba1e4fc7 100644 --- a/tests/milvus_memory_test.py +++ b/tests/milvus_memory_test.py @@ -68,5 +68,5 @@ try: stats = self.memory.get_stats() self.assertEqual(15, len(stats)) -except: - print("Milvus not installed, skipping tests") +except ImportError as err: + print(f"Skipping tests for MilvusMemory: {err}") From 6699a8ef3864703e4ba64cc334197a91126dec55 Mon Sep 17 00:00:00 2001 From: Andres Caicedo Date: Thu, 20 Apr 2023 16:49:11 +0200 Subject: [PATCH 019/222] Update .pre-commit-config.yaml Update pre-commit-hooks to latest version v4.4.0 --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3722b25e..a2ea6114 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v0.9.2 + rev: v4.4.0 hooks: - id: check-added-large-files args: ['--maxkb=500'] From 2a20ea638e3166f02e55be49ac4b80166660c938 Mon Sep 17 00:00:00 2001 From: Jartto Date: Fri, 21 Apr 2023 00:07:53 +0800 Subject: [PATCH 020/222] Fix README `./run.sh start` -> `./run.sh` (#2523) Co-authored-by: Reinier van der Leer --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 2771a18e..cab7a317 100644 --- a/README.md +++ b/README.md @@ -145,10 +145,10 @@ _To execute the following commands, open a CMD, Bash, or Powershell window by na ## 🔧 Usage 1. Run `autogpt` Python module in your terminal. 
- On linux or mac: - ```bash - # On Linux or Mac: - ./run.sh start + ```bash + # On Linux of Mac: + ./run.sh + # On Windows: .\run.bat ``` From cf7544c146cbe96891312f6400caa82bfd5394dd Mon Sep 17 00:00:00 2001 From: Eddie Cohen <31940792+edcohen08@users.noreply.github.com> Date: Thu, 20 Apr 2023 12:09:20 -0400 Subject: [PATCH 021/222] Cancel in-progress docker CI on outdate (#2619) Co-authored-by: Reinier van der Leer --- .github/workflows/docker-image.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index 9c77098e..2fcf6401 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -5,6 +5,9 @@ on: branches: [ "master" ] pull_request: branches: [ "master" ] +concurrency: + group: ${{ format('docker-ci-{0}', github.event.pull_request.number || github.sha) }} + cancel-in-progress: true jobs: From 9b78e71d1633b1c6ffc9a3eb2027df1f812b7328 Mon Sep 17 00:00:00 2001 From: riensen Date: Thu, 20 Apr 2023 10:46:21 +0200 Subject: [PATCH 022/222] Use allowlist and denylist naming --- autogpt/config/config.py | 7 ++--- autogpt/plugins.py | 14 ++++----- tests/unit/test_plugins.py | 64 ++++++++++++++++++-------------------- 3 files changed, 41 insertions(+), 44 deletions(-) diff --git a/autogpt/config/config.py b/autogpt/config/config.py index 801df2bb..80551514 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -131,11 +131,10 @@ class Config(metaclass=Singleton): plugins_allowlist = os.getenv("ALLOWLISTED_PLUGINS") if plugins_allowlist: - plugins_allowlist = plugins_allowlist.split(",") - self.plugins_whitelist = plugins_allowlist + self.plugins_allowlist = plugins_allowlist.split(",") else: - self.plugins_whitelist = [] - self.plugins_blacklist = [] + self.plugins_allowlist = [] + self.plugins_denylist = [] def get_azure_deployment_id_for_model(self, model: str) -> str: """ diff --git a/autogpt/plugins.py b/autogpt/plugins.py index b536acbd..32e29acb 100644 --- a/autogpt/plugins.py +++ b/autogpt/plugins.py @@ -222,7 +222,7 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate if ( "_abc_impl" in a_keys and a_module.__name__ != "AutoGPTPluginTemplate" - and blacklist_whitelist_check(a_module.__name__, cfg) + and denylist_allowlist_check(a_module.__name__, cfg) ): loaded_plugins.append(a_module()) # OpenAI plugins @@ -233,7 +233,7 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate manifests_specs, cfg, debug ) for url, openai_plugin_meta in manifests_specs_clients.items(): - if blacklist_whitelist_check(url, cfg): + if denylist_allowlist_check(url, cfg): plugin = BaseOpenAIPlugin(openai_plugin_meta) loaded_plugins.append(plugin) @@ -244,8 +244,8 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate return loaded_plugins -def blacklist_whitelist_check(plugin_name: str, cfg: Config) -> bool: - """Check if the plugin is in the whitelist or blacklist. +def denylist_allowlist_check(plugin_name: str, cfg: Config) -> bool: + """Check if the plugin is in the allowlist or denylist. Args: plugin_name (str): Name of the plugin. @@ -254,12 +254,12 @@ def blacklist_whitelist_check(plugin_name: str, cfg: Config) -> bool: Returns: True or False """ - if plugin_name in cfg.plugins_blacklist: + if plugin_name in cfg.plugins_denylist: return False - if plugin_name in cfg.plugins_whitelist: + if plugin_name in cfg.plugins_allowlist: return True ack = input( f"WARNNG Plugin {plugin_name} found. 
But not in the" - " whitelist... Load? (y/n): " + " allowlist... Load? (y/n): " ) return ack.lower() == "y" diff --git a/tests/unit/test_plugins.py b/tests/unit/test_plugins.py index 739e69bb..7c71f69f 100644 --- a/tests/unit/test_plugins.py +++ b/tests/unit/test_plugins.py @@ -2,7 +2,7 @@ import pytest from autogpt.config import Config from autogpt.plugins import ( - blacklist_whitelist_check, + denylist_allowlist_check, inspect_zip_for_module, scan_plugins, ) @@ -19,56 +19,54 @@ def test_inspect_zip_for_module(): @pytest.fixture -def mock_config_blacklist_whitelist_check(): +def mock_config_denylist_allowlist_check(): class MockConfig: - plugins_blacklist = ["BadPlugin"] - plugins_whitelist = ["GoodPlugin"] + plugins_denylist = ["BadPlugin"] + plugins_allowlist = ["GoodPlugin"] return MockConfig() -def test_blacklist_whitelist_check_blacklist( - mock_config_blacklist_whitelist_check, monkeypatch +def test_denylist_allowlist_check_denylist( + mock_config_denylist_allowlist_check, monkeypatch ): monkeypatch.setattr("builtins.input", lambda _: "y") - assert not blacklist_whitelist_check( - "BadPlugin", mock_config_blacklist_whitelist_check + assert not denylist_allowlist_check( + "BadPlugin", mock_config_denylist_allowlist_check ) -def test_blacklist_whitelist_check_whitelist( - mock_config_blacklist_whitelist_check, monkeypatch +def test_denylist_allowlist_check_allowlist( + mock_config_denylist_allowlist_check, monkeypatch ): monkeypatch.setattr("builtins.input", lambda _: "y") - assert blacklist_whitelist_check( - "GoodPlugin", mock_config_blacklist_whitelist_check + assert denylist_allowlist_check("GoodPlugin", mock_config_denylist_allowlist_check) + + +def test_denylist_allowlist_check_user_input_yes( + mock_config_denylist_allowlist_check, monkeypatch +): + monkeypatch.setattr("builtins.input", lambda _: "y") + assert denylist_allowlist_check( + "UnknownPlugin", mock_config_denylist_allowlist_check ) -def test_blacklist_whitelist_check_user_input_yes( - mock_config_blacklist_whitelist_check, monkeypatch -): - monkeypatch.setattr("builtins.input", lambda _: "y") - assert blacklist_whitelist_check( - "UnknownPlugin", mock_config_blacklist_whitelist_check - ) - - -def test_blacklist_whitelist_check_user_input_no( - mock_config_blacklist_whitelist_check, monkeypatch +def test_denylist_allowlist_check_user_input_no( + mock_config_denylist_allowlist_check, monkeypatch ): monkeypatch.setattr("builtins.input", lambda _: "n") - assert not blacklist_whitelist_check( - "UnknownPlugin", mock_config_blacklist_whitelist_check + assert not denylist_allowlist_check( + "UnknownPlugin", mock_config_denylist_allowlist_check ) -def test_blacklist_whitelist_check_user_input_invalid( - mock_config_blacklist_whitelist_check, monkeypatch +def test_denylist_allowlist_check_user_input_invalid( + mock_config_denylist_allowlist_check, monkeypatch ): monkeypatch.setattr("builtins.input", lambda _: "invalid") - assert not blacklist_whitelist_check( - "UnknownPlugin", mock_config_blacklist_whitelist_check + assert not denylist_allowlist_check( + "UnknownPlugin", mock_config_denylist_allowlist_check ) @@ -85,8 +83,8 @@ def mock_config_openai_plugin(): class MockConfig: plugins_dir = PLUGINS_TEST_DIR plugins_openai = [PLUGIN_TEST_OPENAI] - plugins_blacklist = ["AutoGPTPVicuna"] - plugins_whitelist = [PLUGIN_TEST_OPENAI] + plugins_denylist = ["AutoGPTPVicuna"] + plugins_allowlist = [PLUGIN_TEST_OPENAI] return MockConfig() @@ -101,8 +99,8 @@ def mock_config_generic_plugin(): class MockConfig: plugins_dir = 
PLUGINS_TEST_DIR plugins_openai = [] - plugins_blacklist = [] - plugins_whitelist = ["AutoGPTPVicuna"] + plugins_denylist = [] + plugins_allowlist = ["AutoGPTPVicuna"] return MockConfig() From 5ecb08c8e819face827e44eb06b5a7741f6b01dc Mon Sep 17 00:00:00 2001 From: itsmarble <130370814+itsmarble@users.noreply.github.com> Date: Thu, 20 Apr 2023 19:26:55 +0200 Subject: [PATCH 023/222] add instruction to show .env --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 39f11dcc..02d69c40 100644 --- a/README.md +++ b/README.md @@ -116,9 +116,9 @@ _To execute the following commands, open a CMD, Bash, or Powershell window by na ``` 5. Configure Auto-GPT - 1. Locate the file named `.env.template` in the main `/Auto-GPT` folder. + 1. Locate the file named .env.template in the main /Auto-GPT folder. This file may be hidden by default in some operating systems due to the dot prefix. To reveal hidden files, follow the instructions for your specific operating system (e.g., in Windows, click on the "View" tab in File Explorer and check the "Hidden items" box; in macOS, press Cmd + Shift + .). 2. Create a copy of this file, called `.env` by removing the `template` extension. The easiest way is to do this in a command prompt/terminal window `cp .env.template .env`. - 3. Open the `.env` file in a text editor. _Note: Files starting with a dot might be hidden by your Operating System._ + 3. Open the `.env` file in a text editor. 4. Find the line that says `OPENAI_API_KEY=`. 5. After the `"="`, enter your unique OpenAI API Key (without any quotes or spaces). 6. Enter any other API keys or Tokens for services you would like to utilize. From e006a61c5261d367baab9cedd38881c38fe56b9f Mon Sep 17 00:00:00 2001 From: itsmarble <130370814+itsmarble@users.noreply.github.com> Date: Thu, 20 Apr 2023 19:42:48 +0200 Subject: [PATCH 024/222] hotfix --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 02d69c40..c5a95441 100644 --- a/README.md +++ b/README.md @@ -116,7 +116,7 @@ _To execute the following commands, open a CMD, Bash, or Powershell window by na ``` 5. Configure Auto-GPT - 1. Locate the file named .env.template in the main /Auto-GPT folder. This file may be hidden by default in some operating systems due to the dot prefix. To reveal hidden files, follow the instructions for your specific operating system (e.g., in Windows, click on the "View" tab in File Explorer and check the "Hidden items" box; in macOS, press Cmd + Shift + .). + 1. Locate the file named `.env.template` in the main /Auto-GPT folder. This file may be hidden by default in some operating systems due to the dot prefix. To reveal hidden files, follow the instructions for your specific operating system (e.g., in Windows, click on the "View" tab in File Explorer and check the "Hidden items" box; in macOS, press Cmd + Shift + .). 2. Create a copy of this file, called `.env` by removing the `template` extension. The easiest way is to do this in a command prompt/terminal window `cp .env.template .env`. 3. Open the `.env` file in a text editor. 4. Find the line that says `OPENAI_API_KEY=`. 
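Editor's note on patch 022 above: the rename from whitelist/blacklist to allowlist/denylist also pins down the gate's precedence. A rough, self-contained sketch of that check (the `Config` class below is a stub standing in for the project's singleton config):

```python
# Stub config: only the two plugin lists matter for this sketch.
class Config:
    plugins_denylist = ["BadPlugin"]
    plugins_allowlist = ["GoodPlugin"]

def denylist_allowlist_check(plugin_name: str, cfg: Config) -> bool:
    """Same precedence as the diff: deny wins, allow skips the prompt."""
    if plugin_name in cfg.plugins_denylist:
        return False  # explicitly blocked: never load
    if plugin_name in cfg.plugins_allowlist:
        return True   # explicitly trusted: load without asking
    # unknown plugins fall through to an interactive confirmation
    ack = input(f"WARNING: Plugin {plugin_name} found. But not in the allowlist... Load? (y/n): ")
    return ack.lower() == "y"

if __name__ == "__main__":
    assert denylist_allowlist_check("BadPlugin", Config()) is False
    assert denylist_allowlist_check("GoodPlugin", Config()) is True
```

Checking the denylist first means a plugin that somehow lands in both lists stays blocked.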
From 25db6e56b09877d5ed858b1aa8566b7965e2dd5a Mon Sep 17 00:00:00 2001
From: Bsodoge
Date: Thu, 20 Apr 2023 18:49:15 +0100
Subject: [PATCH 025/222] Fix typo

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 39f11dcc..c1686763 100644
--- a/README.md
+++ b/README.md
@@ -27,7 +27,7 @@ Demo made by Blake Werlinger

💖 Help Fund Auto-GPT's Development 💖

If you can spare a coffee, you can help to cover the costs of developing Auto-GPT and help push the boundaries of fully autonomous AI!
-Your support is greatly appreciated
+Your support is greatly appreciated.
Development of this free, open-source project is made possible by all the contributors and sponsors. If you'd like to sponsor this project and have your avatar or company logo appear below click here.

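Editor's note: patches 023 and 024 above document creating a `.env` file by hand. As a purely illustrative sketch of how such a file is consumed at runtime (this assumes the `python-dotenv` package and is not code from the repository):

```python
# Illustrative only: read key=value pairs from .env into the process
# environment, then fetch the key the setup steps above tell you to add.
import os

from dotenv import load_dotenv  # assumption: python-dotenv is installed

load_dotenv()  # quietly does nothing if no .env file is found
openai_api_key = os.getenv("OPENAI_API_KEY")
if not openai_api_key:
    raise SystemExit("OPENAI_API_KEY is not set; add it to your .env file")
print(f"API key loaded ({len(openai_api_key)} chars)")
```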
From 14d3ecaae75a80f03cd118b5767e1c6affb4e3cc Mon Sep 17 00:00:00 2001 From: Peter Banda Date: Thu, 20 Apr 2023 19:51:52 +0200 Subject: [PATCH 026/222] Pin BeautifulSoup version to fix `browse_website` (#2680) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 92a963e8..d233f5e4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -beautifulsoup4 +beautifulsoup4>=4.12.2 colorama==0.4.6 openai==0.27.2 playsound==1.2.2 From 0efa0d11853ed8bca1d94b307d0cf20b1202a73b Mon Sep 17 00:00:00 2001 From: Ugo <111749144+ugobok@users.noreply.github.com> Date: Thu, 20 Apr 2023 20:52:45 +0300 Subject: [PATCH 027/222] Replace print statements with logging.error This commit replaces two print statements in the _speech method of the BrianSpeech class with a single call to logging.error. This will log error messages with more detail and make it easier to diagnose issues. The changes are backward compatible and should not affect the functionality of the code. --- autogpt/speech/brian.py | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/autogpt/speech/brian.py b/autogpt/speech/brian.py index 821fdf2f..b25b2ee2 100644 --- a/autogpt/speech/brian.py +++ b/autogpt/speech/brian.py @@ -1,12 +1,9 @@ -""" Brian speech module for autogpt """ import os - import requests +import logging from playsound import playsound - from autogpt.speech.base import VoiceBase - class BrianSpeech(VoiceBase): """Brian speech module for autogpt""" @@ -23,9 +20,7 @@ class BrianSpeech(VoiceBase): Returns: bool: True if the request was successful, False otherwise """ - tts_url = ( - f"https://api.streamelements.com/kappa/v2/speech?voice=Brian&text={text}" - ) + tts_url = f"https://api.streamelements.com/kappa/v2/speech?voice=Brian&text={text}" response = requests.get(tts_url) if response.status_code == 200: @@ -35,6 +30,5 @@ class BrianSpeech(VoiceBase): os.remove("speech.mp3") return True else: - print("Request failed with status code:", response.status_code) - print("Response content:", response.content) + logging.error("Request failed with status code: %s, response content: %s", response.status_code, response.content) return False From c145d953127346d9c9dfb3a3534f48be0369f13b Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Thu, 20 Apr 2023 23:53:06 +0200 Subject: [PATCH 028/222] Fix #2654 spacy language model installation --- requirements.txt | 2 +- scripts/check_requirements.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index d233f5e4..d5a1c63b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -21,7 +21,7 @@ jsonschema tweepy click spacy>=3.0.0,<4.0.0 -en_core_web_sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.4.0/en_core_web_sm-3.4.0-py3-none-any.whl +en-core-web-sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.5.0/en_core_web_sm-3.5.0-py3-none-any.whl ##Dev coverage diff --git a/scripts/check_requirements.py b/scripts/check_requirements.py index e4eab024..3323697b 100644 --- a/scripts/check_requirements.py +++ b/scripts/check_requirements.py @@ -1,3 +1,4 @@ +import re import sys import pkg_resources @@ -16,7 +17,7 @@ def main(): for package in required_packages: if not package: # Skip empty lines continue - package_name = package.strip().split("==")[0] + package_name = re.split("[<>=@ ]+", package.strip())[0] if package_name.lower() not in installed_packages: 
            missing_packages.append(package_name)

From 785c90ddb7c5f385f37974b210b90b47ee98530d Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Fri, 21 Apr 2023 12:19:20 +1200
Subject: [PATCH 029/222] Remove hardcoded sponsors

---
 README.md | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/README.md b/README.md
index 39f11dcc..479dbe06 100644
--- a/README.md
+++ b/README.md
@@ -44,12 +44,11 @@ Development of this free, open-source project is made possible by all the
-
+*Sponsors Section Under Construction*

-robinicus  prompthero  crizzler  tob-le-rone  FSTatSBS  toverly1  ddtarazona  Nalhos  Kazamario  pingbotan  indoor47  AuroraHolding  kreativai  hunteraraujo  Explorergt92  judegomila   -thepok -  SpacingLily  merwanehamadi  m  zkonduit  maxxflyer  tekelsey  digisomni  nocodeclarity  tjarmain -CrypteorCapital  eelbaz  lucas-chu  tob-le-rone  jazgarewal  yx3110  MetaPath01  tommi-joentakanen  GalaxyVideoAgency  ciscodebs  josephjacks  mathewhawkins  cxs  txtr99  ChrisDMT  MayurVirkar  comet-ml  RThaweewat  DailyBotHQ  jacobyoby  jd3655  MBassi91  chatgpt-prompts  marv-technology  CrazySwami  tullytim  sultanmeghji  cfarquhar  goldenrecursion  ikarosai  avy-ai  MediConCenHK  RealChrisSean  DataMetis  CarmenCocoa  Cameron-Fulton  rejunity  belharethsami  AcountoOU  CatsMeow492  fabrietech  kenndanielso  arthur-x88  Heitechsoft  knifour  rickscode  Brodie0  ternary5  dexterityx  turintech  projectonegames  ZERO-A-ONE  KiaArmani  caitlynmeeks  tommygeee  st617  webbcolton  jondwillis  Mr-Bishop42  ASmithOWL  omphos  neverinstall  lmaugustin  vkozacek  abhinav-pandey29  ColinConwell  Partender  shawnharmsen  TheStoneMX  wenfengwang  allenstecat  johnculkin  Web3Capital  sunchongren  bentoml  angiaou  Dradstone  jun784  ZoneSixGames  Pythagora-io  nnkostov  AIdevelopersAI  josephcmiller2  VoiceBeer  AryaXAI  saten-private  refinery1  Mobivs  ntwrite  Josecodesalot  horazius  nicoguyon  0xmatchmaker  rocks6  SwftCoins  iddelacruz  scryptedinc  thisisjeffchen  kMag410  rapidstartup  founderblocks-sils  concreit  Odin519Tomas  Daniel1357  fruition  SparkplanAI  anvarazizov  joaomdmoura  morcos  LeeRobidas  arjunb023  garythebat  humungasaurus  

+ + +

## 🚀 Features From 121e08c18eed3f144b0f6a696c9ad0f9075b447e Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Fri, 21 Apr 2023 12:19:30 +1200 Subject: [PATCH 030/222] Create sponsors_readme.yml --- .github/workflows/sponsors_readme.yml | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 .github/workflows/sponsors_readme.yml diff --git a/.github/workflows/sponsors_readme.yml b/.github/workflows/sponsors_readme.yml new file mode 100644 index 00000000..94ac3230 --- /dev/null +++ b/.github/workflows/sponsors_readme.yml @@ -0,0 +1,25 @@ +name: Generate Sponsors README +on: + workflow_dispatch: + schedule: + - cron: '0 */12 * * *' +jobs: + deploy: + runs-on: ubuntu-latest + steps: + - name: Checkout 🛎️ + uses: actions/checkout@v2 + + - name: Generate Sponsors 💖 + uses: JamesIves/github-sponsors-readme-action@v1 + with: + token: ${{ secrets.PAT }} + file: 'README.md' + minimum: 2500 + maximum: 99999 + + - name: Deploy to GitHub Pages 🚀 + uses: JamesIves/github-pages-deploy-action@v4 + with: + branch: main + folder: '.' From 511b0212c625f2521c94813a643b2e8b974a0b49 Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Fri, 21 Apr 2023 12:22:32 +1200 Subject: [PATCH 031/222] Update sponsors_readme.yml --- .github/workflows/sponsors_readme.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sponsors_readme.yml b/.github/workflows/sponsors_readme.yml index 94ac3230..12091474 100644 --- a/.github/workflows/sponsors_readme.yml +++ b/.github/workflows/sponsors_readme.yml @@ -13,7 +13,7 @@ jobs: - name: Generate Sponsors 💖 uses: JamesIves/github-sponsors-readme-action@v1 with: - token: ${{ secrets.PAT }} + token: ${{ secrets.README_UPDATER_PAT }} file: 'README.md' minimum: 2500 maximum: 99999 From 1dd8e570a5752319a6b1b54768f1973be066cb9f Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Fri, 21 Apr 2023 12:24:18 +1200 Subject: [PATCH 032/222] Update sponsors_readme.yml --- .github/workflows/sponsors_readme.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sponsors_readme.yml b/.github/workflows/sponsors_readme.yml index 12091474..576b1062 100644 --- a/.github/workflows/sponsors_readme.yml +++ b/.github/workflows/sponsors_readme.yml @@ -21,5 +21,5 @@ jobs: - name: Deploy to GitHub Pages 🚀 uses: JamesIves/github-pages-deploy-action@v4 with: - branch: main + branch: master folder: '.' From 68e479bdbd70bb0c9f1e712eb60a4da0a999281b Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Fri, 21 Apr 2023 12:26:04 +1200 Subject: [PATCH 033/222] Update sponsors_readme.yml --- .github/workflows/sponsors_readme.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/sponsors_readme.yml b/.github/workflows/sponsors_readme.yml index 576b1062..ab34825e 100644 --- a/.github/workflows/sponsors_readme.yml +++ b/.github/workflows/sponsors_readme.yml @@ -23,3 +23,4 @@ jobs: with: branch: master folder: '.' 
+ token: ${{ secrets.README_UPDATER_PAT }} From ad6f18b7373e8350d1b9ade7a47c26c5e1c88dce Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Fri, 21 Apr 2023 12:31:37 +1200 Subject: [PATCH 034/222] Update sponsors_readme.yml --- .github/workflows/sponsors_readme.yml | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/.github/workflows/sponsors_readme.yml b/.github/workflows/sponsors_readme.yml index ab34825e..61ebb1ba 100644 --- a/.github/workflows/sponsors_readme.yml +++ b/.github/workflows/sponsors_readme.yml @@ -17,10 +17,13 @@ jobs: file: 'README.md' minimum: 2500 maximum: 99999 - - - name: Deploy to GitHub Pages 🚀 - uses: JamesIves/github-pages-deploy-action@v4 - with: - branch: master - folder: '.' - token: ${{ secrets.README_UPDATER_PAT }} + + - name: Commit and push changes 🚀 + env: + GITHUB_TOKEN: ${{ secrets.README_UPDATER_PAT }} + run: | + git config --global user.name "github-actions[bot]" + git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com" + git add . + git diff --quiet && git diff --staged --quiet || git commit -m "Update Sponsors in README" + git push From 48f4119fb77f45068c2234904aadde3cba372639 Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Fri, 21 Apr 2023 12:38:18 +1200 Subject: [PATCH 035/222] Update sponsors_readme.yml --- .github/workflows/sponsors_readme.yml | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/.github/workflows/sponsors_readme.yml b/.github/workflows/sponsors_readme.yml index 61ebb1ba..fffdbd4d 100644 --- a/.github/workflows/sponsors_readme.yml +++ b/.github/workflows/sponsors_readme.yml @@ -9,7 +9,6 @@ jobs: steps: - name: Checkout 🛎️ uses: actions/checkout@v2 - - name: Generate Sponsors 💖 uses: JamesIves/github-sponsors-readme-action@v1 with: @@ -17,13 +16,10 @@ jobs: file: 'README.md' minimum: 2500 maximum: 99999 - - - name: Commit and push changes 🚀 - env: - GITHUB_TOKEN: ${{ secrets.README_UPDATER_PAT }} - run: | - git config --global user.name "github-actions[bot]" - git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com" - git add . - git diff --quiet && git diff --staged --quiet || git commit -m "Update Sponsors in README" - git push + + - name: Deploy to GitHub Pages 🚀 + uses: JamesIves/github-pages-deploy-action@v4 + with: + branch: master + folder: '.' + token: ${{ secrets.README_UPDATER_PAT }} From 8faf4f5f79e1aaba4de40676ea7491c9f9e703fa Mon Sep 17 00:00:00 2001 From: Torantulino Date: Fri, 21 Apr 2023 00:40:07 +0000 Subject: [PATCH 036/222] =?UTF-8?q?Deploying=20to=20master=20from=20@=20Si?= =?UTF-8?q?gnificant-Gravitas/Auto-GPT@48f4119fb77f45068c2234904aadde3cba3?= =?UTF-8?q?72639=20=F0=9F=9A=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 479dbe06..ae8eb4ee 100644 --- a/README.md +++ b/README.md @@ -46,8 +46,7 @@ Development of this free, open-source project is made possible by all the *Sponsors Section Under Construction*

- - +NoCodeClarityKalilaRobzkonduitNovaLandMatt MullenwegmerwanehamadiLilyArloJude GomilaJohnRosebud AISuperMarioNasser AlhosaniDavidT_AdvanceAISArawnCam FultonPromptHeroWeirenlanCharles FarquharLucas CJun784MARVLOscar A. RangelNathan Rapid Startups Sudhanshu Thakur

From 1e3bcc3f8bf82366122b598c003f794ffbdefd89 Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Fri, 21 Apr 2023 12:52:22 +1200 Subject: [PATCH 037/222] Update README.md --- README.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index ae8eb4ee..c16d0fde 100644 --- a/README.md +++ b/README.md @@ -44,10 +44,11 @@ Development of this free, open-source project is made possible by all the
-*Sponsors Section Under Construction* +

-NoCodeClarityKalilaRobzkonduitNovaLandMatt MullenwegmerwanehamadiLilyArloJude GomilaJohnRosebud AISuperMarioNasser AlhosaniDavidT_AdvanceAISArawnCam FultonPromptHeroWeirenlanCharles FarquharLucas CJun784MARVLOscar A. RangelNathan Rapid Startups Sudhanshu Thakur -

+robinicus  prompthero +Pythagora-io  refinery1  rickscode  pingbotan  CrazySwami  tullytim  SparkplanAI  comet-ml  anvarazizov  Odin519Tomas  Mobivs  Heitechsoft  crizzler  st617  humungasaurus  turintech  horazius  rocks6  thisisjeffchen  DataMetis  angiaou  ternary5  CatsMeow492  rapidstartup  jondwillis  VoiceBeer  garythebat  belharethsami  omphos  Dradstone  CrypteorCapital  ikarosai  Cameron-Fulton  maxxflyer  sultanmeghji  projectonegames  ddtarazona  sunchongren  AuroraHolding  marv-technology  kMag410  0xmatchmaker  MediConCenHK  abhinav-pandey29  avy-ai  jacobyoby  KiaArmani  scryptedinc  tjarmain  tob-le-rone  goldenrecursion  ZERO-A-ONE  webbcolton  kreativai  bentoml  arjunb023  hunteraraujo  Nalhos  digisomni  josephjacks  FSTatSBS  ChrisDMT  MetaPath01  CarmenCocoa  Kazamario  AryaXAI  thepok  SwftCoins  lmaugustin  tommygeee  AIdevelopersAI  arthur-x88  toverly1  Daniel1357  concreit  caitlynmeeks  johnculkin  Web3Capital  founderblocks-sils  ZoneSixGames  zkonduit  AcountoOU  fabrietech  ciscodebs  DailyBotHQ  indoor47  shawnharmsen  saten-private  jazgarewal  TheStoneMX  m  lucas-chu  allenstecat  knifour  josephcmiller2  ntwrite  Mr-Bishop42  iddelacruz  Josecodesalot  jd3655  kenndanielso  RThaweewat  judegomila  nnkostov  Brodie0  morcos  cfarquhar  nocodeclarity  LeeRobidas  GalaxyVideoAgency  ColinConwell  neverinstall  vkozacek  chatgpt-prompts  rejunity  MBassi91  RealChrisSean  wenfengwang  cxs  mathewhawkins  eelbaz  MayurVirkar  Explorergt92  dexterityx  nicoguyon  txtr99  jun784  tekelsey  ASmithOWL  tommi-joentakanen  Partender  joaomdmoura  fruition  merwanehamadi  SpacingLily  yx3110  

+ ## 🚀 Features From f26541188b4c62982044cb3705caa7c246b14644 Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Fri, 21 Apr 2023 13:53:31 +1200 Subject: [PATCH 038/222] Update README.md --- README.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/README.md b/README.md index c16d0fde..35bbd2fb 100644 --- a/README.md +++ b/README.md @@ -45,10 +45,8 @@ Development of this free, open-source project is made possible by all the
-

-robinicus  prompthero -Pythagora-io  refinery1  rickscode  pingbotan  CrazySwami  tullytim  SparkplanAI  comet-ml  anvarazizov  Odin519Tomas  Mobivs  Heitechsoft  crizzler  st617  humungasaurus  turintech  horazius  rocks6  thisisjeffchen  DataMetis  angiaou  ternary5  CatsMeow492  rapidstartup  jondwillis  VoiceBeer  garythebat  belharethsami  omphos  Dradstone  CrypteorCapital  ikarosai  Cameron-Fulton  maxxflyer  sultanmeghji  projectonegames  ddtarazona  sunchongren  AuroraHolding  marv-technology  kMag410  0xmatchmaker  MediConCenHK  abhinav-pandey29  avy-ai  jacobyoby  KiaArmani  scryptedinc  tjarmain  tob-le-rone  goldenrecursion  ZERO-A-ONE  webbcolton  kreativai  bentoml  arjunb023  hunteraraujo  Nalhos  digisomni  josephjacks  FSTatSBS  ChrisDMT  MetaPath01  CarmenCocoa  Kazamario  AryaXAI  thepok  SwftCoins  lmaugustin  tommygeee  AIdevelopersAI  arthur-x88  toverly1  Daniel1357  concreit  caitlynmeeks  johnculkin  Web3Capital  founderblocks-sils  ZoneSixGames  zkonduit  AcountoOU  fabrietech  ciscodebs  DailyBotHQ  indoor47  shawnharmsen  saten-private  jazgarewal  TheStoneMX  m  lucas-chu  allenstecat  knifour  josephcmiller2  ntwrite  Mr-Bishop42  iddelacruz  Josecodesalot  jd3655  kenndanielso  RThaweewat  judegomila  nnkostov  Brodie0  morcos  cfarquhar  nocodeclarity  LeeRobidas  GalaxyVideoAgency  ColinConwell  neverinstall  vkozacek  chatgpt-prompts  rejunity  MBassi91  RealChrisSean  wenfengwang  cxs  mathewhawkins  eelbaz  MayurVirkar  Explorergt92  dexterityx  nicoguyon  txtr99  jun784  tekelsey  ASmithOWL  tommi-joentakanen  Partender  joaomdmoura  fruition  merwanehamadi  SpacingLily  yx3110  

+

robinicus  arthur-x88  knifour  prompthero  digisomni  sultanmeghji  allenstecat  jd3655  tullytim  shawnharmsen  DailyBotHQ  thepok  jacobyoby  MediConCenHK  concreit  tob-le-rone  Heitechsoft  jsolejr  angiaou  kenndanielso  FSTatSBS  bentoml  cfarquhar  MBassi91  Daniel1357  iddelacruz  josephjacks  arjunb023  johnculkin  indoor47  rapidstartup  RawBanana  comet-ml  MetaPath01  maxxflyer  Nalhos  KiaArmani  SparkplanAI  jenius-eagle  toverly1  st617  judegomila  tekelsey  morcos  turintech  Mr-Bishop42  yx3110  rocks6  Odin519Tomas  Cameron-Fulton  ddtarazona  saten-private  cxs  fruition  Kazamario  MayurVirkar  ikarosai  RThaweewat  founderblocks-sils  RealChrisSean  marv-technology  merwanehamadi  fabrietech  tommi-joentakanen  CrypteorCapital  tommygeee  Josecodesalot  thisisjeffchen  ternary5  CrazySwami  omphos  Explorergt92  DataMetis  belharethsami  rickscode  Brodie0  ChrisDMT  humungasaurus  lucas-chu  avy-ai  ASmithOWL  chatgpt-prompts  ZERO-A-ONE  jazgarewal  mathewhawkins  crizzler  Pythagora-io  neverinstall  Dradstone  sunchongren  pingbotan  dexterityx  CatsMeow492  joaomdmoura  zkonduit  Mobivs  doverhq  projectonegames  nnkostov  TheStoneMX  ColinConwell  hunteraraujo  ciscodebs  rejunity  SpacingLily  lazzacapital  SwftCoins  ZoneSixGames  eelbaz  m  caitlynmeeks  AcountoOU  vkozacek  goldenrecursion  scryptedinc  lmaugustin  webbcolton  VoiceBeer  Web3Capital  garythebat  txtr99  ntwrite  AryaXAI  kreativai  abhinav-pandey29  anvarazizov  AuroraHolding  0xmatchmaker  nicoguyon  MatthewAgs  kMag410  josephcmiller2  AIdevelopersAI  GalaxyVideoAgency  jun784  Partender  tjarmain  horazius  nocodeclarity  refinery1  jondwillis  CarmenCocoa  LeeRobidas  wenfengwang  

## 🚀 Features From 00ecb983e72717b3d986ecd1e58f6f026b4f2697 Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Fri, 21 Apr 2023 13:56:59 +1200 Subject: [PATCH 039/222] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 35bbd2fb..743584e5 100644 --- a/README.md +++ b/README.md @@ -46,7 +46,7 @@ Development of this free, open-source project is made possible by all the -

robinicus  arthur-x88  knifour  prompthero  digisomni  sultanmeghji  allenstecat  jd3655  tullytim  shawnharmsen  DailyBotHQ  thepok  jacobyoby  MediConCenHK  concreit  tob-le-rone  Heitechsoft  jsolejr  angiaou  kenndanielso  FSTatSBS  bentoml  cfarquhar  MBassi91  Daniel1357  iddelacruz  josephjacks  arjunb023  johnculkin  indoor47  rapidstartup  RawBanana  comet-ml  MetaPath01  maxxflyer  Nalhos  KiaArmani  SparkplanAI  jenius-eagle  toverly1  st617  judegomila  tekelsey  morcos  turintech  Mr-Bishop42  yx3110  rocks6  Odin519Tomas  Cameron-Fulton  ddtarazona  saten-private  cxs  fruition  Kazamario  MayurVirkar  ikarosai  RThaweewat  founderblocks-sils  RealChrisSean  marv-technology  merwanehamadi  fabrietech  tommi-joentakanen  CrypteorCapital  tommygeee  Josecodesalot  thisisjeffchen  ternary5  CrazySwami  omphos  Explorergt92  DataMetis  belharethsami  rickscode  Brodie0  ChrisDMT  humungasaurus  lucas-chu  avy-ai  ASmithOWL  chatgpt-prompts  ZERO-A-ONE  jazgarewal  mathewhawkins  crizzler  Pythagora-io  neverinstall  Dradstone  sunchongren  pingbotan  dexterityx  CatsMeow492  joaomdmoura  zkonduit  Mobivs  doverhq  projectonegames  nnkostov  TheStoneMX  ColinConwell  hunteraraujo  ciscodebs  rejunity  SpacingLily  lazzacapital  SwftCoins  ZoneSixGames  eelbaz  m  caitlynmeeks  AcountoOU  vkozacek  goldenrecursion  scryptedinc  lmaugustin  webbcolton  VoiceBeer  Web3Capital  garythebat  txtr99  ntwrite  AryaXAI  kreativai  abhinav-pandey29  anvarazizov  AuroraHolding  0xmatchmaker  nicoguyon  MatthewAgs  kMag410  josephcmiller2  AIdevelopersAI  GalaxyVideoAgency  jun784  Partender  tjarmain  horazius  nocodeclarity  refinery1  jondwillis  CarmenCocoa  LeeRobidas  wenfengwang  

+

robinicus  arthur-x88  knifour  prompthero  digisomni  sultanmeghji  allenstecat  jd3655  tullytim  shawnharmsen  DailyBotHQ  thepok  jacobyoby  MediConCenHK  concreit  tob-le-rone  Heitechsoft  jsolejr  angiaou  kenndanielso  FSTatSBS  bentoml  cfarquhar  MBassi91  Daniel1357  iddelacruz  josephjacks  arjunb023  johnculkin  indoor47  rapidstartup  RawBanana  comet-ml  MetaPath01  maxxflyer  Nalhos  KiaArmani  SparkplanAI  jenius-eagle  toverly1  st617  judegomila  tekelsey  morcos  turintech  Mr-Bishop42  yx3110  rocks6  Odin519Tomas  Cameron-Fulton  ddtarazona  saten-private  cxs  fruition  Kazamario  MayurVirkar  ikarosai  RThaweewat  founderblocks-sils  RealChrisSean  marv-technology  merwanehamadi  fabrietech  tommi-joentakanen  CrypteorCapital  tommygeee  Josecodesalot  thisisjeffchen  ternary5  CrazySwami  omphos  Explorergt92  DataMetis  belharethsami  rickscode  Brodie0  ChrisDMT  humungasaurus  lucas-chu  avy-ai  ASmithOWL  chatgpt-prompts  ZERO-A-ONE  jazgarewal  mathewhawkins  crizzler  Pythagora-io  neverinstall  Dradstone  sunchongren  pingbotan  dexterityx  CatsMeow492  joaomdmoura  zkonduit  Mobivs  doverhq  projectonegames  nnkostov  TheStoneMX  ColinConwell  hunteraraujo  ciscodebs  rejunity  SpacingLily  lazzacapital  SwftCoins  ZoneSixGames  eelbaz  m  caitlynmeeks  AcountoOU  vkozacek  goldenrecursion  scryptedinc  lmaugustin  webbcolton  VoiceBeer  Web3Capital  garythebat  txtr99  ntwrite  AryaXAI  kreativai  abhinav-pandey29  anvarazizov  AuroraHolding  0xmatchmaker  nicoguyon  MatthewAgs  kMag410  josephcmiller2  AIdevelopersAI  GalaxyVideoAgency  jun784  Partender  tjarmain  horazius  nocodeclarity  refinery1  jondwillis  CarmenCocoa  LeeRobidas  wenfengwang  

## 🚀 Features From 558003704e0427a08a16516ab93c6e4260a39a10 Mon Sep 17 00:00:00 2001 From: Andres Caicedo <73312784+AndresCdo@users.noreply.github.com> Date: Fri, 21 Apr 2023 04:00:44 +0200 Subject: [PATCH 040/222] Add missing `size` param to generate_image_with_dalle (#2691) --- autogpt/commands/image_gen.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/autogpt/commands/image_gen.py b/autogpt/commands/image_gen.py index 60cdaec0..962c41fb 100644 --- a/autogpt/commands/image_gen.py +++ b/autogpt/commands/image_gen.py @@ -77,12 +77,13 @@ def generate_image_with_hf(prompt: str, filename: str) -> str: return f"Saved to disk:{filename}" -def generate_image_with_dalle(prompt: str, filename: str) -> str: +def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str: """Generate an image with DALL-E. Args: prompt (str): The prompt to use filename (str): The filename to save the image to + size (int): The size of the image Returns: str: The filename of the image From 6b7787ce99030afb68d356e4cb312e41d59c0f19 Mon Sep 17 00:00:00 2001 From: Chris Villa Date: Fri, 21 Apr 2023 14:19:00 +1200 Subject: [PATCH 041/222] docs: fix small typo in README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 743584e5..63e8f288 100644 --- a/README.md +++ b/README.md @@ -143,7 +143,7 @@ _To execute the following commands, open a CMD, Bash, or Powershell window by na 1. Run `autogpt` Python module in your terminal. ```bash - # On Linux of Mac: + # On Linux or Mac: ./run.sh # On Windows: From a2e75aabdd41c40653bbbc4ed7a8937a0b9ddfb4 Mon Sep 17 00:00:00 2001 From: gklab Date: Fri, 21 Apr 2023 10:19:28 +0800 Subject: [PATCH 042/222] adjust file_operations.py code format --- autogpt/commands/file_operations.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py index b73fb987..00ddcd02 100644 --- a/autogpt/commands/file_operations.py +++ b/autogpt/commands/file_operations.py @@ -49,7 +49,7 @@ def log_operation(operation: str, filename: str) -> None: with open(LOG_FILE_PATH, "w", encoding="utf-8") as f: f.write("File Operation Logger ") - append_to_file(LOG_FILE, log_entry, shouldLog=False) + append_to_file(LOG_FILE, log_entry, should_log=False) def split_file( @@ -72,7 +72,7 @@ def split_file( while start < content_length: end = start + max_length if end + overlap < content_length: - chunk = content[start : end + overlap - 1] + chunk = content[start:end + overlap - 1] else: chunk = content[start:content_length] @@ -166,12 +166,13 @@ def write_to_file(filename: str, text: str) -> str: @command( "append_to_file", "Append to file", '"filename": "", "text": ""' ) -def append_to_file(filename: str, text: str, shouldLog: bool = True) -> str: +def append_to_file(filename: str, text: str, should_log: bool = True) -> str: """Append text to a file Args: filename (str): The name of the file to append to text (str): The text to append to the file + should_log (bool): Should log output Returns: str: A message indicating success or failure @@ -181,7 +182,7 @@ def append_to_file(filename: str, text: str, shouldLog: bool = True) -> str: with open(filepath, "a") as f: f.write(text) - if shouldLog: + if should_log: log_operation("append", filename) return "Text appended successfully." 
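Editor's note on patch 042 above: the reflowed slice in `split_file` is easier to read in isolation. A standalone sketch of that loop, same arithmetic as the diff but trimmed of file handling (patches 046 and 047 below churn the whitespace of this exact expression again):

```python
# Sliding-window chunking as in autogpt/commands/file_operations.py:
# each window advances by max_length - overlap, so consecutive
# chunks share a little context at their boundaries.
def split_file(content: str, max_length: int = 4000, overlap: int = 0):
    start = 0
    content_length = len(content)
    while start < content_length:
        end = start + max_length
        if end + overlap < content_length:
            chunk = content[start : end + overlap - 1]
        else:
            chunk = content[start:content_length]
        yield chunk
        start += max_length - overlap

print(list(split_file("abcdefghij", max_length=4, overlap=1)))
# -> ['abcd', 'defg', 'ghij', 'j']
```

One quirk worth noticing: because the upper bound is `end + overlap - 1`, calling this with `overlap=0` appears to drop one character per window; the diff only reformats the expression, it does not change that behaviour.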
From ec27d5729ccfd84faf1c8e46a2e9c83379f08f53 Mon Sep 17 00:00:00 2001 From: Nick Foster Date: Thu, 20 Apr 2023 21:55:20 -0500 Subject: [PATCH 043/222] Fix label of `download_file` command (#2753) Co-authored-by: Reinier van der Leer --- autogpt/commands/file_operations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py index b73fb987..7dfd6004 100644 --- a/autogpt/commands/file_operations.py +++ b/autogpt/commands/file_operations.py @@ -239,7 +239,7 @@ def search_files(directory: str) -> list[str]: @command( "download_file", - "Search Files", + "Download File", '"url": "", "filename": ""', CFG.allow_downloads, "Error: You do not have user authorization to download files locally.", From b49ef913a8f380585ceb47ebe01dc0341d0ab85a Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Fri, 21 Apr 2023 05:15:39 +0200 Subject: [PATCH 044/222] Make `load_azure_config` throw if `azure.yaml` does not exist --- autogpt/config/config.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/autogpt/config/config.py b/autogpt/config/config.py index 80551514..be741fb3 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -174,11 +174,8 @@ class Config(metaclass=Singleton): Returns: None """ - try: - with open(config_file) as file: - config_params = yaml.load(file, Loader=yaml.FullLoader) - except FileNotFoundError: - config_params = {} + with open(config_file) as file: + config_params = yaml.load(file, Loader=yaml.FullLoader) self.openai_api_type = config_params.get("azure_api_type") or "azure" self.openai_api_base = config_params.get("azure_api_base") or "" self.openai_api_version = ( From 63c218287085db3bfbdbaa9b263ad3048cea32a6 Mon Sep 17 00:00:00 2001 From: lengweiping1983 Date: Fri, 21 Apr 2023 12:09:17 +0800 Subject: [PATCH 045/222] Fix typo's (#2735) Co-authored-by: lengweiping --- autogpt/commands/analyze_code.py | 4 ++-- autogpt/commands/git_operations.py | 11 ++++++----- autogpt/commands/improve_code.py | 4 ++-- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/autogpt/commands/analyze_code.py b/autogpt/commands/analyze_code.py index b87d73e1..47cfc1e7 100644 --- a/autogpt/commands/analyze_code.py +++ b/autogpt/commands/analyze_code.py @@ -22,10 +22,10 @@ def analyze_code(code: str) -> list[str]: improve the code. """ - function_string = "def analyze_code(code: str) -> List[str]:" + function_string = "def analyze_code(code: str) -> list[str]:" args = [code] description_string = ( - "Analyzes the given code and returns a list of suggestions" " for improvements." + "Analyzes the given code and returns a list of suggestions for improvements." ) return call_ai_function(function_string, args, description_string) diff --git a/autogpt/commands/git_operations.py b/autogpt/commands/git_operations.py index 1fb99e5b..1a6beb95 100644 --- a/autogpt/commands/git_operations.py +++ b/autogpt/commands/git_operations.py @@ -10,20 +10,21 @@ CFG = Config() @command( "clone_repository", - "Clone Repositoryy", + "Clone Repository", '"repository_url": "", "clone_path": ""', CFG.github_username and CFG.github_api_key, "Configure github_username and github_api_key.", ) def clone_repository(repository_url: str, clone_path: str) -> str: - """Clone a GitHub repository locally + """Clone a GitHub repository locally. 
Args: - repository_url (str): The URL of the repository to clone - clone_path (str): The path to clone the repository to + repository_url (str): The URL of the repository to clone. + clone_path (str): The path to clone the repository to. Returns: - str: The result of the clone operation""" + str: The result of the clone operation. + """ split_url = repository_url.split("//") auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url) safe_clone_path = path_in_workspace(clone_path) diff --git a/autogpt/commands/improve_code.py b/autogpt/commands/improve_code.py index 41a369b4..f953cf2b 100644 --- a/autogpt/commands/improve_code.py +++ b/autogpt/commands/improve_code.py @@ -17,14 +17,14 @@ def improve_code(suggestions: list[str], code: str) -> str: chat completion api call. Parameters: - suggestions (List): A list of suggestions around what needs to be improved. + suggestions (list): A list of suggestions around what needs to be improved. code (str): Code to be improved. Returns: A result string from create chat completion. Improved code in response. """ function_string = ( - "def generate_improved_code(suggestions: List[str], code: str) -> str:" + "def generate_improved_code(suggestions: list[str], code: str) -> str:" ) args = [json.dumps(suggestions), code] description_string = ( From 45a9ff6e74e1e978698e8fec31da66486c2c30e8 Mon Sep 17 00:00:00 2001 From: ZHAOKAI WANG Date: Fri, 21 Apr 2023 13:03:52 +0800 Subject: [PATCH 046/222] Update file_operations.py --- autogpt/commands/file_operations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py index 00ddcd02..7b95d972 100644 --- a/autogpt/commands/file_operations.py +++ b/autogpt/commands/file_operations.py @@ -72,7 +72,7 @@ def split_file( while start < content_length: end = start + max_length if end + overlap < content_length: - chunk = content[start:end + overlap - 1] + chunk = content[start: end + overlap - 1] else: chunk = content[start:content_length] From e7c2a4068eb7d3c1e9bdb4acac1c65b951e7cb65 Mon Sep 17 00:00:00 2001 From: ZHAOKAI WANG Date: Fri, 21 Apr 2023 13:06:44 +0800 Subject: [PATCH 047/222] Update file_operations.py --- autogpt/commands/file_operations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py index 7b95d972..7e17a1b0 100644 --- a/autogpt/commands/file_operations.py +++ b/autogpt/commands/file_operations.py @@ -72,7 +72,7 @@ def split_file( while start < content_length: end = start + max_length if end + overlap < content_length: - chunk = content[start: end + overlap - 1] + chunk = content[start : end + overlap - 1] else: chunk = content[start:content_length] From c58cf15565296148c2a4ca98c721561bf4c4f40d Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Fri, 21 Apr 2023 00:27:19 -0500 Subject: [PATCH 048/222] hotfix: don't upload results on push --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1ec7e851..f426e60c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -75,6 +75,7 @@ jobs: if: success() || failure() - name: Publish coverage report + if: ${{ github.event_name == 'pull_request' }} uses: orgoro/coverage@v3 with: coverageFile: coverage.xml From 7f99fa3da8afee007bd6714862493fab2889b0ab Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Fri, 21 Apr 2023 00:30:39 -0500 Subject: [PATCH 049/222] Update 
ci.yml --- .github/workflows/ci.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f426e60c..ef3c5e56 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -45,6 +45,8 @@ jobs: permissions: issues: write pull-requests: write + statuses: write + checks: write runs-on: ubuntu-latest strategy: matrix: From c2f86f6934a76b6522d0350a5820c72d92547300 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Fri, 21 Apr 2023 00:34:11 -0500 Subject: [PATCH 050/222] Update ci.yml --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ef3c5e56..e22c6178 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -47,6 +47,7 @@ jobs: pull-requests: write statuses: write checks: write + comments: write runs-on: ubuntu-latest strategy: matrix: From 185429287e768c74a8f1683e44506a314536d32c Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Fri, 21 Apr 2023 00:35:46 -0500 Subject: [PATCH 051/222] Update ci.yml --- .github/workflows/ci.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e22c6178..645a8160 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -47,7 +47,8 @@ jobs: pull-requests: write statuses: write checks: write - comments: write + contents: write + actions: read runs-on: ubuntu-latest strategy: matrix: From 81de438569421ee26915b5be3fced995284c7ff9 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Fri, 21 Apr 2023 00:41:44 -0500 Subject: [PATCH 052/222] try something new --- .github/workflows/ci.yml | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 645a8160..98978970 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -43,12 +43,13 @@ jobs: test: permissions: - issues: write + # Gives the action the necessary permissions for publishing new + # comments in pull requests. 
pull-requests: write - statuses: write - checks: write + # Gives the action the necessary permissions for pushing data to the + # python-coverage-comment-action branch, and for editing existing + # comments (to avoid publishing multiple comments in the same PR) contents: write - actions: read runs-on: ubuntu-latest strategy: matrix: @@ -78,9 +79,17 @@ jobs: coverage xml if: success() || failure() - - name: Publish coverage report - if: ${{ github.event_name == 'pull_request' }} - uses: orgoro/coverage@v3 + - name: Coverage comment + id: coverage_comment + uses: py-cov-action/python-coverage-comment-action@v3 with: - coverageFile: coverage.xml - token: ${{ secrets.GITHUB_TOKEN }} + GITHUB_TOKEN: ${{ github.token }} + + - name: Store Pull Request comment to be posted + uses: actions/upload-artifact@v3 + if: steps.coverage_comment.outputs.COMMENT_FILE_WRITTEN == 'true' + with: + # If you use a different name, update COMMENT_ARTIFACT_NAME accordingly + name: python-coverage-comment-action + # If you use a different name, update COMMENT_FILENAME accordingly + path: python-coverage-comment-action.txt From 7a9c6a52fa6a3455e202e36d518af2350224fb33 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Fri, 21 Apr 2023 00:49:07 -0500 Subject: [PATCH 053/222] Update ci.yml --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 98978970..e6cb82c6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -71,7 +71,7 @@ jobs: - name: Run unittest tests with coverage run: | - pytest --cov=autogpt --without-integration --without-slow-integration + pytest --cov=autogpt --without-integration --without-slow-integration --cov-report term-missing --cov-branch --cov-report xml --cov-report term - name: Generate coverage report run: | From a63fc643c8dce5d956682d087674d803d7688f08 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Fri, 21 Apr 2023 00:55:52 -0500 Subject: [PATCH 054/222] fix:? --- .coveragerc | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .coveragerc diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 00000000..05986577 --- /dev/null +++ b/.coveragerc @@ -0,0 +1,2 @@ +[run] +relative_files = true \ No newline at end of file From 68803d559ce95e9d3b5662d7128d3dedc25b6555 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Fri, 21 Apr 2023 01:00:02 -0500 Subject: [PATCH 055/222] comment the stuff --- .github/workflows/coverage.yml | 36 ++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 .github/workflows/coverage.yml diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml new file mode 100644 index 00000000..b4ee5ea3 --- /dev/null +++ b/.github/workflows/coverage.yml @@ -0,0 +1,36 @@ +# .github/workflows/coverage.yml +name: Post coverage comment + +on: + workflow_run: + workflows: ["CI"] + types: + - completed + +jobs: + test: + name: Run tests & display coverage + runs-on: ubuntu-latest + if: github.event.workflow_run.event == 'pull_request' && github.event.workflow_run.conclusion == 'success' + permissions: + # Gives the action the necessary permissions for publishing new + # comments in pull requests. 
+ pull-requests: write + # Gives the action the necessary permissions for editing existing + # comments (to avoid publishing multiple comments in the same PR) + contents: write + # Gives the action the necessary permissions for looking up the + # workflow that launched this workflow, and download the related + # artifact that contains the comment to be published + actions: read + steps: + # DO NOT run actions/checkout here, for security reasons + # For details, refer to https://securitylab.github.com/research/github-actions-preventing-pwn-requests/ + - name: Post comment + uses: py-cov-action/python-coverage-comment-action@v3 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GITHUB_PR_RUN_ID: ${{ github.event.workflow_run.id }} + # Update those if you changed the default values: + # COMMENT_ARTIFACT_NAME: python-coverage-comment-action + # COMMENT_FILENAME: python-coverage-comment-action.txt \ No newline at end of file From 3dd07d311997be5e142f4f948485c66fc8f0fe9c Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Fri, 21 Apr 2023 01:02:10 -0500 Subject: [PATCH 056/222] fix: workflow name --- .github/workflows/coverage.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index b4ee5ea3..5dc9b7bd 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -3,7 +3,7 @@ name: Post coverage comment on: workflow_run: - workflows: ["CI"] + workflows: ["Python CI"] types: - completed @@ -33,4 +33,4 @@ jobs: GITHUB_PR_RUN_ID: ${{ github.event.workflow_run.id }} # Update those if you changed the default values: # COMMENT_ARTIFACT_NAME: python-coverage-comment-action - # COMMENT_FILENAME: python-coverage-comment-action.txt \ No newline at end of file + # COMMENT_FILENAME: python-coverage-comment-action.txt From 83644264207a60d7750f60785732a87e4247ee96 Mon Sep 17 00:00:00 2001 From: pkqs90 Date: Fri, 21 Apr 2023 14:18:09 +0800 Subject: [PATCH 057/222] docker-compose --- docker-compose.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index be225d7d..50fa1a07 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -10,8 +10,8 @@ services: env_file: - .env volumes: - - "./autogpt:/app" - - ".env:/app/.env" + - "./autogpt:/home/appuser" + - ".env:/home/appuser/.env" profiles: ["exclude-from-up"] redis: From 04093e9517e55f533b7146688ed285d2d73e8a8c Mon Sep 17 00:00:00 2001 From: Tony H <53189296+T-Higgins@users.noreply.github.com> Date: Fri, 21 Apr 2023 08:37:58 +0100 Subject: [PATCH 058/222] Update README.md Made steps clearer, made some sentences clearer, and generally fixed grammar and punctuation. Reason: I'm a Knowledge Base writer for software products. --- README.md | 97 +++++++++++++++++++++++++++++++++---------------------- 1 file changed, 59 insertions(+), 38 deletions(-) diff --git a/README.md b/README.md index 3081c394..af90eb62 100644 --- a/README.md +++ b/README.md @@ -26,9 +26,8 @@ Demo made by Blake Werlinger

💖 Help Fund Auto-GPT's Development 💖

-If you can spare a coffee, you can help to cover the costs of developing Auto-GPT and help push the boundaries of fully autonomous AI! -Your support is greatly appreciated. -Development of this free, open-source project is made possible by all the contributors and sponsors. If you'd like to sponsor this project and have your avatar or company logo appear below click here. +If you can spare a coffee, you can help to cover the costs of developing Auto-GPT and help to push the boundaries of fully autonomous AI! +Your support is greatly appreciated. Development of this free, open-source project is made possible by all the contributors and sponsors. If you'd like to sponsor this project and have your avatar or company logo appear below click here.

@@ -52,7 +51,7 @@ Development of this free, open-source project is made possible by all the Billing](./docs/imgs/openai-api-key-billing-paid-account.png) -#### **PLEASE ENSURE YOU HAVE DONE THIS STEP BEFORE PROCEEDING, OTHERWISE NOTHING WILL WORK!** +#### **PLEASE ENSURE YOU HAVE DONE THIS STEP BEFORE PROCEEDING. OTHERWISE, NOTHING WILL WORK!** ## 💾 Installation To install Auto-GPT, follow these steps: -1. Make sure you have all the **requirements** listed above, if not, install/get them +1. Make sure you have all the **requirements** listed above. If not, install/get them. _To execute the following commands, open a CMD, Bash, or Powershell window by navigating to a folder on your computer and typing `CMD` in the folder path at the top, then press enter._ -2. Clone the repository: For this step, you need Git installed. Alternatively, - you can download the [latest stable release](https://github.com/Significant-Gravitas/Auto-GPT/releases/latest) (`Source code (zip)`, bottom of the page). +2. Clone the repository: For this step, you need Git installed. +Note: If you don't have Git, you can just download the [latest stable release](https://github.com/Significant-Gravitas/Auto-GPT/releases/latest) instead (`Source code (zip)`, at the bottom of the page). ```bash git clone -b stable https://github.com/Significant-Gravitas/Auto-GPT.git ``` -3. Navigate to the directory where the repository was downloaded +3. Navigate to the directory where you downloaded the repository. ```bash cd Auto-GPT ``` -4. Install the required dependencies +4. Install the required dependencies. ```bash pip install -r requirements.txt ``` -5. Configure Auto-GPT - 1. Locate the file named `.env.template` in the main /Auto-GPT folder. This file may be hidden by default in some operating systems due to the dot prefix. To reveal hidden files, follow the instructions for your specific operating system (e.g., in Windows, click on the "View" tab in File Explorer and check the "Hidden items" box; in macOS, press Cmd + Shift + .). - 2. Create a copy of this file, called `.env` by removing the `template` extension. The easiest way is to do this in a command prompt/terminal window `cp .env.template .env`. +5. Configure Auto-GPT: + 1. Find the file named `.env.template` in the main /Auto-GPT folder. This file may be hidden by default in some operating systems due to the dot prefix. To reveal hidden files, follow the instructions for your specific operating system (e.g., in Windows, click on the "View" tab in File Explorer and check the "Hidden items" box; in macOS, press Cmd + Shift + .). + 2. Create a copy of this file and call it `.env` by removing the `template` extension. The easiest way is to do this in a command prompt/terminal window `cp .env.template .env`. 3. Open the `.env` file in a text editor. 4. Find the line that says `OPENAI_API_KEY=`. 5. After the `"="`, enter your unique OpenAI API Key (without any quotes or spaces). - 6. Enter any other API keys or Tokens for services you would like to utilize. + 6. Enter any other API keys or Tokens for services you would like to use. 7. Save and close the `.env` file. - By completing these steps, you have properly configured the API Keys for your project. + After you complete these steps, you'll have properly configured the API keys for your project. - - See [OpenAI API Keys Configuration](#openai-api-keys-configuration) to obtain your OpenAI API key. - - Obtain your ElevenLabs API key from: https://elevenlabs.io. 
You can view your xi-api-key using the "Profile" tab on the website. + Notes: + - See [OpenAI API Keys Configuration](#openai-api-keys-configuration) to get your OpenAI API key. + - Get your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website. - If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and then follow these steps: - Rename `azure.yaml.template` to `azure.yaml` and provide the relevant `azure_api_base`, `azure_api_version` and all the deployment IDs for the relevant models in the `azure_model_map` section: - `fast_llm_model_deployment_id` - your gpt-3.5-turbo or gpt-4 deployment ID @@ -141,7 +144,7 @@ _To execute the following commands, open a CMD, Bash, or Powershell window by na ## 🔧 Usage -1. Run `autogpt` Python module in your terminal. +1. Run the `autogpt` Python module in your terminal. ```bash # On Linux or Mac: ./run.sh @@ -151,11 +154,12 @@ _To execute the following commands, open a CMD, Bash, or Powershell window by na ``` Running with `--help` after `.\run.bat` lists all the possible command line arguments you can pass. -2. After each action, choose from options to authorize command(s), +2. After each response from Auto-GPT, choose from the options to authorize command(s), exit the program, or provide feedback to the AI. - 1. Authorize a single command, enter `y` - 2. Authorize a series of _N_ continuous commands, enter `y -N` - 3. Exit the program, enter `n` + 1. Authorize a single command by entering `y` + 2. Authorize a series of _N_ continuous commands by entering `y -N`. For example, entering `y -10` would run 10 automatic iterations. + 3. Enter any free text to give feedback to Auto-GPT. + 4. Exit the program by entering `n` ### Logs @@ -211,13 +215,13 @@ Here are some common arguments you can use when running Auto-GPT: ## 🗣️ Speech Mode -Use this to use TTS _(Text-to-Speech)_ for Auto-GPT +Enter this command to use TTS _(Text-to-Speech)_ for Auto-GPT ```bash python -m autogpt --speak ``` -### List of IDs with names from eleven labs, you can use the name or ID: +### List of IDs with names from eleven labs. You can use the name or ID: - Rachel : 21m00Tcm4TlvDq8ikWAM - Domi : AZnzlk1XvdvUeBnXmlld @@ -231,19 +235,36 @@ python -m autogpt --speak ## 🔍 Google API Keys Configuration -This section is optional, use the official google api if you are having issues with error 429 when running a google search. +Note: +This section is optional. use the official google api if you are having issues with error 429 when running a google search. To use the `google_official_search` command, you need to set up your Google API keys in your environment variables. +Create your project: 1. Go to the [Google Cloud Console](https://console.cloud.google.com/). 2. If you don't already have an account, create one and log in. -3. Create a new project by clicking on the "Select a Project" dropdown at the top of the page and clicking "New Project". Give it a name and click "Create". -4. Go to the [APIs & Services Dashboard](https://console.cloud.google.com/apis/dashboard) and click "Enable APIs and Services". Search for "Custom Search API" and click on it, then click "Enable". -5. Go to the [Credentials](https://console.cloud.google.com/apis/credentials) page and click "Create Credentials". Choose "API Key". -6. Copy the API key and set it as an environment variable named `GOOGLE_API_KEY` on your machine. See setting up environment variables below. -7. 
[Enable](https://console.developers.google.com/apis/api/customsearch.googleapis.com) the Custom Search API on your project. (Might need to wait few minutes to propagate) -8. Go to the [Custom Search Engine](https://cse.google.com/cse/all) page and click "Add". -9. Set up your search engine by following the prompts. You can choose to search the entire web or specific sites. -10. Once you've created your search engine, click on "Control Panel" and then "Basics". Copy the "Search engine ID" and set it as an environment variable named `CUSTOM_SEARCH_ENGINE_ID` on your machine. See setting up environment variables below. +3. Create a new project by clicking on the "Select a Project" dropdown at the top of the page and clicking "New Project". +4. Give it a name and click "Create". + +Set up a custom search API and add to your .env file: +5. Go to the [APIs & Services Dashboard](https://console.cloud.google.com/apis/dashboard). +6. Click "Enable APIs and Services". +7. Search for "Custom Search API" and click on it. +8. Click "Enable". +9. Go to the [Credentials](https://console.cloud.google.com/apis/credentials) page. +10. Click "Create Credentials". +11. Choose "API Key". +12. Copy the API key. +13. Set it as an environment variable named `GOOGLE_API_KEY` on your machine (see how to set up environment variables below). +14. [Enable](https://console.developers.google.com/apis/api/customsearch.googleapis.com) the Custom Search API on your project. (Might need to wait few minutes to propagate) + +Set up a custom serach engine and add to your .env file: +15. Go to the [Custom Search Engine](https://cse.google.com/cse/all) page. +16. Click "Add". +17. Set up your search engine by following the prompts. You can choose to search the entire web or specific sites. +18. Once you've created your search engine, click on "Control Panel". +19. Click "Basics". +20. Copy the "Search engine ID". +21. Set it as an environment variable named `CUSTOM_SEARCH_ENGINE_ID` on your machine (see how to set up environment variables below). _Remember that your free daily custom search quota allows only up to 100 searches. To increase this limit, you need to assign a billing account to the project to profit from up to 10K daily searches._ @@ -315,13 +336,13 @@ To switch to either, change the `MEMORY_BACKEND` env variable to the value that ### Redis Setup > _**CAUTION**_ \ This is not intended to be publicly accessible and lacks security measures. Therefore, avoid exposing Redis to the internet without a password or at all -1. Install docker (or Docker Desktop on Windows) -2. Launch Redis container +1. Install docker (or Docker Desktop on Windows). +2. Launch Redis container. ```bash docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest ``` > See https://hub.docker.com/r/redis/redis-stack-server for setting a password and additional configuration. -3. Set the following settings in `.env` +3. Set the following settings in `.env`. > Replace **PASSWORD** in angled brackets (<>) ```bash MEMORY_BACKEND=redis @@ -339,7 +360,7 @@ MEMORY_INDEX= ### 🌲 Pinecone API Key Setup -Pinecone enables the storage of vast amounts of vector-based memory, allowing for only relevant memories to be loaded for the agent at any given time. +Pinecone lets you store vast amounts of vector-based memory, allowing the agent to load only relevant memories at any given time. 1. Go to [pinecone](https://app.pinecone.io/) and make an account if you don't already have one. 2. Choose the `Starter` plan to avoid being charged. 
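The Google setup steps rewritten above reduce to two environment variables, `GOOGLE_API_KEY` and `CUSTOM_SEARCH_ENGINE_ID`. For readers wondering how the `google_official_search` command named in that section consumes them, here is a rough sketch built on the `google-api-python-client` package; it illustrates the shape of a Custom Search call and is not the repository's exact code.

```python
import os

from googleapiclient.discovery import build  # pip install google-api-python-client


def custom_search_sketch(query: str, num_results: int = 8) -> list[str]:
    """Look up `query` with Google Custom Search and return the result links.

    Illustrative sketch: reads the two environment variables described
    in the README diff above.
    """
    api_key = os.environ["GOOGLE_API_KEY"]
    cse_id = os.environ["CUSTOM_SEARCH_ENGINE_ID"]

    service = build("customsearch", "v1", developerKey=api_key)
    response = service.cse().list(q=query, cx=cse_id, num=num_results).execute()

    # Each result item carries "title", "link" and "snippet" keys.
    return [item["link"] for item in response.get("items", [])]
```

The quota note in the diff still applies: the free tier allows only 100 queries per day per key.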
From 6c5d21cbfc4b649bcdab13d6e3bb898bfd2e6576 Mon Sep 17 00:00:00 2001 From: coditamar <108689937+coditamar@users.noreply.github.com> Date: Fri, 21 Apr 2023 13:24:26 +0300 Subject: [PATCH 059/222] config.py: make load_dotenv(override=True) (#2788) --- autogpt/config/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/config/config.py b/autogpt/config/config.py index be741fb3..9a84706e 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -10,7 +10,7 @@ from dotenv import load_dotenv from autogpt.config.singleton import Singleton -load_dotenv(verbose=True) +load_dotenv(verbose=True, override=True) class Config(metaclass=Singleton): From 77de428524ed239652db320ca254380bef5359de Mon Sep 17 00:00:00 2001 From: Taka Okunishi Date: Fri, 21 Apr 2023 21:40:57 +0900 Subject: [PATCH 060/222] Update README.md ( `IMAGE_PROVIDER=sd`=> `IMAGE_PROVIDER=huggingface` modify snippet in README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3081c394..506ac132 100644 --- a/README.md +++ b/README.md @@ -486,7 +486,7 @@ By default, Auto-GPT uses DALL-e for image generation. To use Stable Diffusion, Once you have a token, set these variables in your `.env`: ```bash -IMAGE_PROVIDER=sd +IMAGE_PROVIDER=huggingface HUGGINGFACE_API_TOKEN="YOUR_HUGGINGFACE_API_TOKEN" ``` From 72a56acfb8d69b1135220d1aecf7db51eac47159 Mon Sep 17 00:00:00 2001 From: Kris Date: Fri, 21 Apr 2023 20:39:37 +0300 Subject: [PATCH 061/222] fix indentation of bug template yml --- .github/ISSUE_TEMPLATE/1.bug.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/1.bug.yml b/.github/ISSUE_TEMPLATE/1.bug.yml index 6d6e9d68..9c82c63f 100644 --- a/.github/ISSUE_TEMPLATE/1.bug.yml +++ b/.github/ISSUE_TEMPLATE/1.bug.yml @@ -69,8 +69,8 @@ body: - Latest Release - Stable (branch) - Master (branch) - validations: - required: true + validations: + required: true - type: dropdown attributes: label: GPT-3 or GPT-4? From 1520816e6117c25c286f08c3da82aa742e70a4db Mon Sep 17 00:00:00 2001 From: Tommy Brooks Date: Fri, 21 Apr 2023 14:37:03 -0400 Subject: [PATCH 062/222] chore: update readme to more clearly describe usage between platforms --- README.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index b259139a..341bd567 100644 --- a/README.md +++ b/README.md @@ -145,11 +145,12 @@ Note: If you don't have Git, you can just download the [latest stable release](h ## 🔧 Usage 1. Run the `autogpt` Python module in your terminal. +* On Linux/MacOS: ```bash - # On Linux or Mac: ./run.sh - - # On Windows: + ``` +* On Windows: + ```bash .\run.bat ``` Running with `--help` after `.\run.bat` lists all the possible command line arguments you can pass. 
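Of the patches above, 059 is the behavioural one: `load_dotenv(verbose=True, override=True)` makes values from `.env` win over variables already present in the process environment, whereas the python-dotenv default leaves existing variables untouched. A small standalone demonstration (the key name is hypothetical):

```python
import os

from dotenv import load_dotenv  # pip install python-dotenv

# Assume a .env file in the working directory containing:
#   EXAMPLE_KEY=from-dotenv        (hypothetical key, for illustration)
os.environ["EXAMPLE_KEY"] = "from-shell"

load_dotenv()               # default override=False: the shell value survives
print(os.environ["EXAMPLE_KEY"])  # -> from-shell

load_dotenv(override=True)  # the behaviour PATCH 059 switches Auto-GPT to
print(os.environ["EXAMPLE_KEY"])  # -> from-dotenv
```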
From 9dcdb6d6f858d08549b997e0ea5dc3f6d2e8d67a Mon Sep 17 00:00:00 2001 From: Mikel Calvo Date: Fri, 21 Apr 2023 21:44:02 +0200 Subject: [PATCH 063/222] Add OS Info into the initial prompt (#2587) --- autogpt/config/ai_config.py | 13 +++++++++++++ requirements.txt | 1 + 2 files changed, 14 insertions(+) diff --git a/autogpt/config/ai_config.py b/autogpt/config/ai_config.py index 1e48ab4d..a622c963 100644 --- a/autogpt/config/ai_config.py +++ b/autogpt/config/ai_config.py @@ -5,9 +5,11 @@ A module that contains the AIConfig class object that contains the configuration from __future__ import annotations import os +import platform from pathlib import Path from typing import Optional, Type +import distro import yaml from autogpt.prompts.generator import PromptGenerator @@ -130,6 +132,17 @@ class AIConfig: continue prompt_generator = plugin.post_prompt(prompt_generator) + if cfg.execute_local_commands: + # add OS info to prompt + os_name = platform.system() + os_info = ( + platform.platform(terse=True) + if os_name != "Linux" + else distro.name(pretty=True) + ) + + prompt_start += f"\nThe OS you are running on is: {os_info}" + # Construct full prompt full_prompt = f"You are {prompt_generator.name}, {prompt_generator.role}\n{prompt_start}\n\nGOALS:\n\n" for i, goal in enumerate(self.ai_goals): diff --git a/requirements.txt b/requirements.txt index d5a1c63b..5d98e612 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,6 @@ beautifulsoup4>=4.12.2 colorama==0.4.6 +distro==1.8.0 openai==0.27.2 playsound==1.2.2 python-dotenv==1.0.0 From 504a85bbdb57c496971c0af313a8ce84994822b6 Mon Sep 17 00:00:00 2001 From: jlxip Date: Fri, 21 Apr 2023 22:01:06 +0200 Subject: [PATCH 064/222] Use readline if available --- autogpt/utils.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/autogpt/utils.py b/autogpt/utils.py index e93d5ac7..1b4fa204 100644 --- a/autogpt/utils.py +++ b/autogpt/utils.py @@ -5,6 +5,12 @@ import yaml from colorama import Fore from git import Repo +# Use readline if available (for clean_input) +try: + import readline +except: + pass + def clean_input(prompt: str = ""): try: From 3bf5934b20b906f977776e2d41033027ecb5df9b Mon Sep 17 00:00:00 2001 From: Raju Komati Date: Sat, 22 Apr 2023 02:52:13 +0530 Subject: [PATCH 065/222] fixed typo --- autogpt/commands/google_search.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/commands/google_search.py b/autogpt/commands/google_search.py index fcc1a9f4..264daaff 100644 --- a/autogpt/commands/google_search.py +++ b/autogpt/commands/google_search.py @@ -110,7 +110,7 @@ def safe_google_results(results: str | list) -> str: """ if isinstance(results, list): safe_message = json.dumps( - [result.enocde("utf-8", "ignore") for result in results] + [result.encode("utf-8", "ignore") for result in results] ) else: safe_message = results.encode("utf-8", "ignore").decode("utf-8") From 7c4921758c627ae126b7385b35f2897fe8c64e8c Mon Sep 17 00:00:00 2001 From: Omri Grossman Date: Sat, 22 Apr 2023 01:13:32 +0300 Subject: [PATCH 066/222] Added basic spinner tests and modified spinner method docstring --- autogpt/spinner.py | 4 +-- tests/unit/test_spinner.py | 69 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 71 insertions(+), 2 deletions(-) create mode 100644 tests/unit/test_spinner.py diff --git a/autogpt/spinner.py b/autogpt/spinner.py index 4e33d742..ed02eb44 100644 --- a/autogpt/spinner.py +++ b/autogpt/spinner.py @@ -54,8 +54,8 @@ class Spinner: def update_message(self, new_message, delay=0.1): """Update the 
spinner message Args: - new_message (str): New message to display - delay: Delay in seconds before updating the message + new_message (str): New message to display. + delay (float): The delay in seconds between each spinner update. """ time.sleep(delay) sys.stdout.write( diff --git a/tests/unit/test_spinner.py b/tests/unit/test_spinner.py new file mode 100644 index 00000000..55b39c8a --- /dev/null +++ b/tests/unit/test_spinner.py @@ -0,0 +1,69 @@ + +# Generated by CodiumAI +from autogpt.spinner import Spinner +import time + + +import unittest + +""" +Code Analysis + +Main functionalities: +The Spinner class provides a simple way to display a spinning animation while a process is running. It can be used to indicate that a process is ongoing and to provide visual feedback to the user. The class can be used as a context manager, which means that it can be used with the 'with' statement to automatically start and stop the spinner animation. + +Methods: +- __init__(self, message: str = "Loading...", delay: float = 0.1) -> None: Initializes the Spinner class with a message to display and a delay between each spinner update. +- spin(self) -> None: Spins the spinner animation while the process is running. +- __enter__(self): Starts the spinner animation when used as a context manager. +- __exit__(self, exc_type, exc_value, exc_traceback) -> None: Stops the spinner animation when used as a context manager. +- update_message(self, new_message, delay=0.1): Updates the message displayed by the spinner animation. + +Fields: +- spinner: An itertools.cycle object that contains the characters used for the spinner animation. +- delay: The delay between each spinner update. +- message: The message to display. +- running: A boolean value that indicates whether the spinner animation is running. +- spinner_thread: A threading.Thread object that runs the spin method in a separate thread. +""" + +ALMOST_DONE_MESSAGE = "Almost done..." +PLEASE_WAIT = "Please wait..." + +class TestSpinner(unittest.TestCase): + + # Tests that the spinner initializes with default values. + def test_spinner_initializes_with_default_values(self): + with Spinner() as spinner: + self.assertEqual(spinner.message, "Loading...") + self.assertEqual(spinner.delay, 0.1) + + # Tests that the spinner initializes with custom message and delay values. + def test_spinner_initializes_with_custom_values(self): + with Spinner(message=PLEASE_WAIT, delay=0.2) as spinner: + self.assertEqual(spinner.message, PLEASE_WAIT) + self.assertEqual(spinner.delay, 0.2) + + # Tests that the spinner starts spinning and stops spinning without errors. + def test_spinner_stops_spinning(self): + with Spinner() as spinner: + time.sleep(1) + spinner.update_message(ALMOST_DONE_MESSAGE) + time.sleep(1) + self.assertFalse(spinner.running) + + # Tests that the spinner message can be updated while the spinner is running and the spinner continues spinning. + def test_spinner_updates_message_and_still_spins(self): + with Spinner() as spinner: + self.assertTrue(spinner.running) + time.sleep(1) + spinner.update_message(ALMOST_DONE_MESSAGE) + time.sleep(1) + self.assertEqual(spinner.message, ALMOST_DONE_MESSAGE) + self.assertFalse(spinner.running) + + # Tests that the spinner can be used as a context manager. 
+ def test_spinner_can_be_used_as_context_manager(self): + with Spinner() as spinner: + self.assertTrue(spinner.running) + self.assertFalse(spinner.running) From f3ac658dd09f387b6322e937179a2974e749173a Mon Sep 17 00:00:00 2001 From: Omri Grossman Date: Sat, 22 Apr 2023 01:18:03 +0300 Subject: [PATCH 067/222] Reorder imports --- tests/unit/test_spinner.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/unit/test_spinner.py b/tests/unit/test_spinner.py index 55b39c8a..1e6d3b6e 100644 --- a/tests/unit/test_spinner.py +++ b/tests/unit/test_spinner.py @@ -1,10 +1,10 @@ # Generated by CodiumAI -from autogpt.spinner import Spinner import time - - import unittest +from autogpt.spinner import Spinner + + """ Code Analysis From e8aaba9ce2c049e81ade2d39335edb753a6646df Mon Sep 17 00:00:00 2001 From: Omri Grossman Date: Sat, 22 Apr 2023 01:25:20 +0300 Subject: [PATCH 068/222] Run pre commit manually to fix linting and sorting issues --- tests/unit/test_spinner.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tests/unit/test_spinner.py b/tests/unit/test_spinner.py index 1e6d3b6e..ec75a687 100644 --- a/tests/unit/test_spinner.py +++ b/tests/unit/test_spinner.py @@ -1,11 +1,9 @@ - # Generated by CodiumAI import time import unittest + from autogpt.spinner import Spinner - - """ Code Analysis @@ -30,8 +28,8 @@ Fields: ALMOST_DONE_MESSAGE = "Almost done..." PLEASE_WAIT = "Please wait..." -class TestSpinner(unittest.TestCase): +class TestSpinner(unittest.TestCase): # Tests that the spinner initializes with default values. def test_spinner_initializes_with_default_values(self): with Spinner() as spinner: @@ -63,6 +61,7 @@ class TestSpinner(unittest.TestCase): self.assertFalse(spinner.running) # Tests that the spinner can be used as a context manager. 
+ def test_spinner_can_be_used_as_context_manager(self): with Spinner() as spinner: self.assertTrue(spinner.running) From 3ab67e746d9a364c5badca7457009223cf340b63 Mon Sep 17 00:00:00 2001 From: Steve Date: Fri, 21 Apr 2023 19:17:38 -0700 Subject: [PATCH 069/222] Add file op tests (#2205) Co-authored-by: Steven Byerly --- autogpt/commands/file_operations.py | 4 +- tests/unit/__init__.py | 0 tests/unit/test_file_operations.py | 150 ++++++++++++++++++++++++++++ 3 files changed, 152 insertions(+), 2 deletions(-) create mode 100644 tests/unit/__init__.py create mode 100644 tests/unit/test_file_operations.py diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py index 7dfd6004..1c74fcab 100644 --- a/autogpt/commands/file_operations.py +++ b/autogpt/commands/file_operations.py @@ -49,7 +49,7 @@ def log_operation(operation: str, filename: str) -> None: with open(LOG_FILE_PATH, "w", encoding="utf-8") as f: f.write("File Operation Logger ") - append_to_file(LOG_FILE, log_entry, shouldLog=False) + append_to_file(str(LOG_FILE_PATH), log_entry, shouldLog=False) def split_file( @@ -94,8 +94,8 @@ def read_file(filename: str) -> str: Returns: str: The contents of the file """ + filepath = path_in_workspace(filename) try: - filepath = path_in_workspace(filename) with open(filepath, "r", encoding="utf-8") as f: content = f.read() return content diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unit/test_file_operations.py b/tests/unit/test_file_operations.py new file mode 100644 index 00000000..dfcde571 --- /dev/null +++ b/tests/unit/test_file_operations.py @@ -0,0 +1,150 @@ +import os +import shutil +import unittest +from pathlib import Path + +from autogpt.commands.file_operations import ( + LOG_FILE_PATH, + append_to_file, + check_duplicate_operation, + delete_file, + log_operation, + read_file, + search_files, + split_file, + write_to_file, +) +from autogpt.config import Config +from autogpt.workspace import path_in_workspace + + +class TestFileOperations(unittest.TestCase): + """ + This set of unit tests is designed to test the file operations that autoGPT has access to. 
+ """ + + def setUp(self): + self.test_file = "test_file.txt" + self.test_file2 = "test_file2.txt" + self.test_directory = "test_directory" + self.file_content = "This is a test file.\n" + self.file_logger_logs = "file_logger.txt" + + with open(path_in_workspace(self.test_file), "w") as f: + f.write(self.file_content) + + if os.path.exists(LOG_FILE_PATH): + os.remove(LOG_FILE_PATH) + + def tearDown(self): + if os.path.exists(path_in_workspace(self.test_file)): + os.remove(path_in_workspace(self.test_file)) + + if os.path.exists(self.test_directory): + shutil.rmtree(self.test_directory) + + def test_check_duplicate_operation(self): + log_operation("write", self.test_file) + self.assertTrue(check_duplicate_operation("write", self.test_file)) + + # Test logging a file operation + def test_log_operation(self): + if os.path.exists(self.file_logger_logs): + os.remove(self.file_logger_logs) + + log_operation("log_test", self.test_file) + with open(LOG_FILE_PATH, "r") as f: + content = f.read() + self.assertIn("log_test: test_file.txt", content) + + # Test splitting a file into chunks + def test_split_file(self): + content = "abcdefghij" + chunks = list(split_file(content, max_length=4, overlap=1)) + expected = ["abcd", "defg", "ghij"] + self.assertEqual(chunks, expected) + + def test_read_file(self): + content = read_file(self.test_file) + self.assertEqual(content, self.file_content) + + def test_write_to_file(self): + new_content = "This is new content.\n" + write_to_file(self.test_file, new_content) + with open(path_in_workspace(self.test_file), "r") as f: + content = f.read() + self.assertEqual(content, new_content) + + def test_append_to_file(self): + with open(path_in_workspace(self.test_file), "r") as f: + content_before = f.read() + + append_text = "This is appended text.\n" + append_to_file(self.test_file, append_text) + with open(path_in_workspace(self.test_file), "r") as f: + content = f.read() + + self.assertEqual(content, content_before + append_text) + + def test_delete_file(self): + delete_file(self.test_file) + self.assertFalse(os.path.exists(path_in_workspace(self.test_file))) + + def test_search_files(self): + # Case 1: Create files A and B, search for A, and ensure we don't return A and B + file_a = "file_a.txt" + file_b = "file_b.txt" + + with open(path_in_workspace(file_a), "w") as f: + f.write("This is file A.") + + with open(path_in_workspace(file_b), "w") as f: + f.write("This is file B.") + + # Create a subdirectory and place a copy of file_a in it + if not os.path.exists(path_in_workspace(self.test_directory)): + os.makedirs(path_in_workspace(self.test_directory)) + + with open( + path_in_workspace(os.path.join(self.test_directory, file_a)), "w" + ) as f: + f.write("This is file A in the subdirectory.") + + files = search_files(path_in_workspace("")) + self.assertIn(file_a, files) + self.assertIn(file_b, files) + self.assertIn(os.path.join(self.test_directory, file_a), files) + + # Clean up + os.remove(path_in_workspace(file_a)) + os.remove(path_in_workspace(file_b)) + os.remove(path_in_workspace(os.path.join(self.test_directory, file_a))) + os.rmdir(path_in_workspace(self.test_directory)) + + # Case 2: Search for a file that does not exist and make sure we don't throw + non_existent_file = "non_existent_file.txt" + files = search_files("") + self.assertNotIn(non_existent_file, files) + + # Test to ensure we cannot read files out of workspace + def test_restrict_workspace(self): + CFG = Config() + with open(self.test_file2, "w+") as f: + f.write("test text") + + 
CFG.restrict_to_workspace = True + + # Get the absolute path of self.test_file2 + test_file2_abs_path = os.path.abspath(self.test_file2) + + with self.assertRaises(ValueError): + read_file(test_file2_abs_path) + + CFG.restrict_to_workspace = False + read_file(test_file2_abs_path) + + os.remove(test_file2_abs_path) + + +if __name__ == "__main__": + unittest.main() From 6e5df9e9e7c198bbcf0a900d8514d90ee0d24619 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Sat, 22 Apr 2023 00:45:29 -0500 Subject: [PATCH 070/222] feat: add code cov --- .github/workflows/ci.yml | 92 +++++++++++++++------------------- .github/workflows/coverage.yml | 36 ------------- 2 files changed, 40 insertions(+), 88 deletions(-) delete mode 100644 .github/workflows/coverage.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e6cb82c6..d3dee93d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -14,32 +14,32 @@ jobs: lint: runs-on: ubuntu-latest env: - min-python-version: '3.10' + min-python-version: "3.10" steps: - - name: Check out repository - uses: actions/checkout@v3 + - name: Check out repository + uses: actions/checkout@v3 - - name: Set up Python ${{ env.min-python-version }} - uses: actions/setup-python@v2 - with: - python-version: ${{ env.min-python-version }} + - name: Set up Python ${{ env.min-python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ env.min-python-version }} - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install -r requirements.txt + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt - - name: Lint with flake8 - run: flake8 + - name: Lint with flake8 + run: flake8 - - name: Check black formatting - run: black . --check - if: success() || failure() + - name: Check black formatting + run: black . --check + if: success() || failure() - - name: Check isort formatting - run: isort . --check - if: success() || failure() + - name: Check isort formatting + run: isort . 
--check + if: success() || failure() test: permissions: @@ -53,43 +53,31 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: ['3.10', '3.11'] + python-version: ["3.10", "3.11"] steps: - - name: Check out repository - uses: actions/checkout@v3 + - name: Check out repository + uses: actions/checkout@v3 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 - with: - python-version: ${{ matrix.python-version }} + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install -r requirements.txt + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt - - name: Run unittest tests with coverage - run: | - pytest --cov=autogpt --without-integration --without-slow-integration --cov-report term-missing --cov-branch --cov-report xml --cov-report term + - name: Run unittest tests with coverage + run: | + pytest --cov=autogpt --without-integration --without-slow-integration --cov-report term-missing --cov-branch --cov-report xml --cov-report term - - name: Generate coverage report - run: | - coverage report - coverage xml - if: success() || failure() + - name: Generate coverage report + run: | + coverage report + coverage xml + if: success() || failure() - - name: Coverage comment - id: coverage_comment - uses: py-cov-action/python-coverage-comment-action@v3 - with: - GITHUB_TOKEN: ${{ github.token }} - - - name: Store Pull Request comment to be posted - uses: actions/upload-artifact@v3 - if: steps.coverage_comment.outputs.COMMENT_FILE_WRITTEN == 'true' - with: - # If you use a different name, update COMMENT_ARTIFACT_NAME accordingly - name: python-coverage-comment-action - # If you use a different name, update COMMENT_FILENAME accordingly - path: python-coverage-comment-action.txt + - name: Upload coverage reports to Codecov + uses: codecov/codecov-action@v3 diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml deleted file mode 100644 index 5dc9b7bd..00000000 --- a/.github/workflows/coverage.yml +++ /dev/null @@ -1,36 +0,0 @@ -# .github/workflows/coverage.yml -name: Post coverage comment - -on: - workflow_run: - workflows: ["Python CI"] - types: - - completed - -jobs: - test: - name: Run tests & display coverage - runs-on: ubuntu-latest - if: github.event.workflow_run.event == 'pull_request' && github.event.workflow_run.conclusion == 'success' - permissions: - # Gives the action the necessary permissions for publishing new - # comments in pull requests. 
- pull-requests: write - # Gives the action the necessary permissions for editing existing - # comments (to avoid publishing multiple comments in the same PR) - contents: write - # Gives the action the necessary permissions for looking up the - # workflow that launched this workflow, and download the related - # artifact that contains the comment to be published - actions: read - steps: - # DO NOT run actions/checkout here, for security reasons - # For details, refer to https://securitylab.github.com/research/github-actions-preventing-pwn-requests/ - - name: Post comment - uses: py-cov-action/python-coverage-comment-action@v3 - with: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - GITHUB_PR_RUN_ID: ${{ github.event.workflow_run.id }} - # Update those if you changed the default values: - # COMMENT_ARTIFACT_NAME: python-coverage-comment-action - # COMMENT_FILENAME: python-coverage-comment-action.txt From 798d2d697852f770e7c43643e8982064d2340ae6 Mon Sep 17 00:00:00 2001 From: Matthew Lam Date: Fri, 21 Apr 2023 23:51:05 -0700 Subject: [PATCH 071/222] update readme for speech --- README.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/README.md b/README.md index 341bd567..421067f5 100644 --- a/README.md +++ b/README.md @@ -216,6 +216,16 @@ Here are some common arguments you can use when running Auto-GPT: ## 🗣️ Speech Mode +Eleven Labs provides voice technologies such as voice design, speech synthesis, and premade voices that auto gpt can use for speech. + +1. Go to [Eleven Labs](https://beta.elevenlabs.io/) and make an account if you don't already have one. +2. Choose and setup the `Starter` plan. +3. Click the top right icon and find "Profile" to locate your API Key. + +In the `.env` file set: +- `ELEVENLABS_API_KEY` +- `ELEVENLABS_VOICE_1_ID` (example: _"premade/Adam"_) + Enter this command to use TTS _(Text-to-Speech)_ for Auto-GPT ```bash From ba87cb08676b95f1f539c7f63c4e915c11b91002 Mon Sep 17 00:00:00 2001 From: Didier Durand Date: Sat, 22 Apr 2023 09:14:52 +0200 Subject: [PATCH 072/222] Fixing typos in README.md Fixing some typos in README --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 341bd567..7ff660b4 100644 --- a/README.md +++ b/README.md @@ -258,7 +258,7 @@ Set up a custom search API and add to your .env file: 13. Set it as an environment variable named `GOOGLE_API_KEY` on your machine (see how to set up environment variables below). 14. [Enable](https://console.developers.google.com/apis/api/customsearch.googleapis.com) the Custom Search API on your project. (Might need to wait few minutes to propagate) -Set up a custom serach engine and add to your .env file: +Set up a custom search engine and add to your .env file: 15. Go to the [Custom Search Engine](https://cse.google.com/cse/all) page. 16. Click "Add". 17. Set up your search engine by following the prompts. You can choose to search the entire web or specific sites. @@ -472,7 +472,7 @@ Memory pre-seeding is a technique for improving AI accuracy by ingesting relevan ⚠️ If you use Redis as your memory, make sure to run Auto-GPT with the `WIPE_REDIS_ON_START=False` in your `.env` file. -⚠️For other memory backend, we currently forcefully wipe the memory when starting Auto-GPT. To ingest data with those memory backend, you can call the `data_ingestion.py` script anytime during an Auto-GPT run. +⚠️For other memory backends, we currently forcefully wipe the memory when starting Auto-GPT. 
To ingest data with those memory backends, you can call the `data_ingestion.py` script anytime during an Auto-GPT run. Memories will be available to the AI immediately as they are ingested, even if ingested while Auto-GPT is running. From 1b043305c119a56bea860eafda56dd34d99d9ec1 Mon Sep 17 00:00:00 2001 From: Didier Durand Date: Sat, 22 Apr 2023 09:32:35 +0200 Subject: [PATCH 073/222] Fixing header of CONTRIBUTING.md ProjectName -> Auto-GPT --- CONTRIBUTING.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 79169a0c..13a8d452 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,4 +1,4 @@ -# Contributing to ProjectName +# Contributing to Auto-GPT First of all, thank you for considering contributing to our project! We appreciate your time and effort, and we value any contribution, whether it's reporting a bug, suggesting a new feature, or submitting a pull request. @@ -102,4 +102,4 @@ Happy coding, and once again, thank you for your contributions! Maintainers will look at PR that have no merge conflicts when deciding what to add to the project. Make sure your PR shows up here: -https://github.com/Torantulino/Auto-GPT/pulls?q=is%3Apr+is%3Aopen+-is%3Aconflict+ \ No newline at end of file +https://github.com/Torantulino/Auto-GPT/pulls?q=is%3Apr+is%3Aopen+-is%3Aconflict+ From fe0baf233dc22d93646992af3d0b2a152c6c787f Mon Sep 17 00:00:00 2001 From: Didier Durand Date: Sat, 22 Apr 2023 09:39:18 +0200 Subject: [PATCH 074/222] Fixing link --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 13a8d452..a32f9ffa 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -102,4 +102,4 @@ Happy coding, and once again, thank you for your contributions! Maintainers will look at PR that have no merge conflicts when deciding what to add to the project. Make sure your PR shows up here: -https://github.com/Torantulino/Auto-GPT/pulls?q=is%3Apr+is%3Aopen+-is%3Aconflict+ +https://github.com/Significant-Gravitas/Auto-GPT/pulls?q=is%3Apr+is%3Aopen+-is%3Aconflict+ From e39cd1bf57eb23bba79084df0c0b933b52955ba9 Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Sat, 22 Apr 2023 22:14:18 +1200 Subject: [PATCH 075/222] Fix(tests): restore config values after changing them in tests (#2904) --- tests/test_config.py | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/tests/test_config.py b/tests/test_config.py index b472a24c..5dd9e9c6 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -38,47 +38,89 @@ class TestConfig(TestCase): """ Test if the set_continuous_mode() method updates the continuous_mode attribute. """ + # Store continuous mode to reset it after the test + continuous_mode = self.config.continuous_mode + self.config.set_continuous_mode(True) self.assertTrue(self.config.continuous_mode) + # Reset continuous mode + self.config.set_continuous_mode(continuous_mode) + def test_set_speak_mode(self): """ Test if the set_speak_mode() method updates the speak_mode attribute. """ + # Store speak mode to reset it after the test + speak_mode = self.config.speak_mode + self.config.set_speak_mode(True) self.assertTrue(self.config.speak_mode) + # Reset speak mode + self.config.set_speak_mode(speak_mode) + def test_set_fast_llm_model(self): """ Test if the set_fast_llm_model() method updates the fast_llm_model attribute. 
""" + # Store model name to reset it after the test + fast_llm_model = self.config.fast_llm_model + self.config.set_fast_llm_model("gpt-3.5-turbo-test") self.assertEqual(self.config.fast_llm_model, "gpt-3.5-turbo-test") + # Reset model name + self.config.set_fast_llm_model(fast_llm_model) + def test_set_smart_llm_model(self): """ Test if the set_smart_llm_model() method updates the smart_llm_model attribute. """ + # Store model name to reset it after the test + smart_llm_model = self.config.smart_llm_model + self.config.set_smart_llm_model("gpt-4-test") self.assertEqual(self.config.smart_llm_model, "gpt-4-test") + # Reset model name + self.config.set_smart_llm_model(smart_llm_model) + def test_set_fast_token_limit(self): """ Test if the set_fast_token_limit() method updates the fast_token_limit attribute. """ + # Store token limit to reset it after the test + fast_token_limit = self.config.fast_token_limit + self.config.set_fast_token_limit(5000) self.assertEqual(self.config.fast_token_limit, 5000) + # Reset token limit + self.config.set_fast_token_limit(fast_token_limit) + def test_set_smart_token_limit(self): """ Test if the set_smart_token_limit() method updates the smart_token_limit attribute. """ + # Store token limit to reset it after the test + smart_token_limit = self.config.smart_token_limit + self.config.set_smart_token_limit(9000) self.assertEqual(self.config.smart_token_limit, 9000) + # Reset token limit + self.config.set_smart_token_limit(smart_token_limit) + def test_set_debug_mode(self): """ Test if the set_debug_mode() method updates the debug_mode attribute. """ + # Store debug mode to reset it after the test + debug_mode = self.config.debug_mode + self.config.set_debug_mode(True) self.assertTrue(self.config.debug_mode) + + # Reset debug mode + self.config.set_debug_mode(debug_mode) From e9f3f9bd1d7b1e3e2152049962d993e31dcfa793 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Sat, 22 Apr 2023 06:04:39 -0500 Subject: [PATCH 076/222] Add CodeCov CI coverage requirements (#2881) --- CONTRIBUTING.md | 2 +- codecov.yml | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 codecov.yml diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a32f9ffa..b3a1585f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -60,7 +60,7 @@ If you have an idea for a new feature or improvement, please create an issue on When submitting a pull request, please ensure that your changes meet the following criteria: - Your pull request should be atomic and focus on a single change. -- Your pull request should include tests for your change. +- Your pull request should include tests for your change. We automatically enforce this with [CodeCov](https://docs.codecov.com/docs/commit-status) - You should have thoroughly tested your changes with multiple different prompts. - You should have considered potential risks and mitigations for your changes. - You should have documented your changes clearly and comprehensively. 
diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 00000000..61acfc08 --- /dev/null +++ b/codecov.yml @@ -0,0 +1,6 @@ +coverage: + status: + project: + default: + target: auto + threshold: 1% \ No newline at end of file From ad5d8b2341d8fe6da8a1731e9106f0cc51e75a8f Mon Sep 17 00:00:00 2001 From: Richard Beales Date: Sat, 22 Apr 2023 12:56:22 +0100 Subject: [PATCH 077/222] Re-work Docs and split out README (using MkDocs) (#2894) * Initial Documentation re-org * remove testing link from readme * rewrite quickstart * get code blocks working across mkdocs and github * add link to plugins repo * add link to plugins repo and move readme to plugin template repo * Add emoji to "Extensibility with Plugins" in readme Co-authored-by: Reinier van der Leer * Make docs deploy workflow path-selective * Also run workflow when the workflow is updated * fix readme links under configuration subfolder * shrink subheadings in readme --------- Co-authored-by: Toran Bruce Richards Co-authored-by: Reinier van der Leer --- .github/workflows/documentation.yml | 26 ++ .gitignore | 1 + README.md | 512 +--------------------------- docs/configuration/imagegen.md | 15 + docs/configuration/memory.md | 166 +++++++++ docs/configuration/search.md | 50 +++ docs/configuration/voice.md | 29 ++ docs/index.md | 5 + docs/installation.md | 97 ++++++ docs/plugins.md | 8 + docs/testing.md | 39 +++ docs/usage.md | 88 +++++ mkdocs.yml | 20 ++ requirements.txt | 2 + 14 files changed, 564 insertions(+), 494 deletions(-) create mode 100644 .github/workflows/documentation.yml create mode 100644 docs/configuration/imagegen.md create mode 100644 docs/configuration/memory.md create mode 100644 docs/configuration/search.md create mode 100644 docs/configuration/voice.md create mode 100644 docs/index.md create mode 100644 docs/installation.md create mode 100644 docs/plugins.md create mode 100644 docs/testing.md create mode 100644 docs/usage.md create mode 100644 mkdocs.yml diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml new file mode 100644 index 00000000..e1fd422e --- /dev/null +++ b/.github/workflows/documentation.yml @@ -0,0 +1,26 @@ +name: Docs + +on: + push: + branches: [ stable ] + paths: + - 'docs/**' + - 'mkdocs.yml' + - '.github/workflows/documentation.yml' + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 0 + - uses: actions/setup-python@v2 + - run: pip install --upgrade pip && pip install mkdocs mkdocs-gen-files + - run: git config user.name 'github-actions[bot]' && git config user.email 'github-actions[bot]@users.noreply.github.com' + - name: Publish docs + run: mkdocs gh-deploy diff --git a/.gitignore b/.gitignore index 0c277d89..1ac55f6b 100644 --- a/.gitignore +++ b/.gitignore @@ -94,6 +94,7 @@ instance/ # Sphinx documentation docs/_build/ +site/ # PyBuilder target/ diff --git a/README.md b/README.md index c1843df7..497519ad 100644 --- a/README.md +++ b/README.md @@ -55,477 +55,41 @@ Your support is greatly appreciated. 
Development of this free, open-source proje - 🧠 GPT-4 instances for text generation - 🔗 Access to popular websites and platforms - 🗃️ File storage and summarization with GPT-3.5 +- 🔌 Extensibility with Plugins ## 📋 Requirements - Environment (pick one) - - [VSCode + devcontainer](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers): It has been configured in the .devcontainer folder and can be used directly - - Docker - Python 3.10 or later (instructions: [for Windows](https://www.tutorialspoint.com/how-to-install-python-in-windows)) -- [OpenAI API key](https://platform.openai.com/account/api-keys) + - Docker [Docker Hub](https://hub.docker.com/r/significantgravitas/auto-gpt) + - [VSCode + devcontainer](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) -### Optional -- Memory backend (pick one) - - [Pinecone](https://www.pinecone.io/) - - [Milvus](https://milvus.io/) - - [Redis](https://redis.io) - - [Weaviate](https://weaviate.io) -- ElevenLabs Key (If you want the AI to speak) +## Quickstart -## ⚠️ OpenAI API Keys Configuration ⚠️ +1. Set up your OpenAI [API Keys](https://platform.openai.com/account/api-keys) +2. Download the [latest release](https://github.com/Significant-Gravitas/Auto-GPT/releases/latest) +3. Install the [dependencies](/docs/installation.md) and set your [environment variables](/docs/installation.md) +4. Configure any additional features you want, or install some [plugins](/docs/plugins.md) +5. [Run](/docs/usage.md) the app -Get your OpenAI API key from: https://platform.openai.com/account/api-keys. +Please see the [documentation](/docs) linked below for full setup instructions and configuration options. -To use OpenAI API key for Auto-GPT, you **NEED** to have billing set up (AKA paid account). +## 💾 Documentation -You can set up paid account at https://platform.openai.com/account/billing/overview. +[Installation](/docs/installation.md) -Important: It's highly recommended that you track your usage on [the Usage page](https://platform.openai.com/account/usage) -You can also set limits on how much you spend on [the Usage limits page](https://platform.openai.com/account/billing/limits). +[Usage](/docs/usage.md) -![For OpenAI API key to work, set up paid account at OpenAI API > Billing](./docs/imgs/openai-api-key-billing-paid-account.png) +[Speech](/docs/configuration/voice.md) -#### **PLEASE ENSURE YOU HAVE DONE THIS STEP BEFORE PROCEEDING. OTHERWISE, NOTHING WILL WORK!** +[Search](/docs/configuration/search.md) -## 💾 Installation +[Plugins](/docs/plugins.md) -To install Auto-GPT, follow these steps: +[Memory](/docs/configuration/memory.md) -1. Make sure you have all the **requirements** listed above. If not, install/get them. +[Image Generation](/docs/configuration/imagegen.md) -_To execute the following commands, open a CMD, Bash, or Powershell window by navigating to a folder on your computer and typing `CMD` in the folder path at the top, then press enter._ - -2. Clone the repository: For this step, you need Git installed. -Note: If you don't have Git, you can just download the [latest stable release](https://github.com/Significant-Gravitas/Auto-GPT/releases/latest) instead (`Source code (zip)`, at the bottom of the page). - - ```bash - git clone -b stable https://github.com/Significant-Gravitas/Auto-GPT.git - ``` - -3. Navigate to the directory where you downloaded the repository. - - ```bash - cd Auto-GPT - ``` - -4. Install the required dependencies. 
- - ```bash - pip install -r requirements.txt - ``` - -5. Configure Auto-GPT: - 1. Find the file named `.env.template` in the main /Auto-GPT folder. This file may be hidden by default in some operating systems due to the dot prefix. To reveal hidden files, follow the instructions for your specific operating system (e.g., in Windows, click on the "View" tab in File Explorer and check the "Hidden items" box; in macOS, press Cmd + Shift + .). - 2. Create a copy of this file and call it `.env` by removing the `template` extension. The easiest way is to do this in a command prompt/terminal window `cp .env.template .env`. - 3. Open the `.env` file in a text editor. - 4. Find the line that says `OPENAI_API_KEY=`. - 5. After the `"="`, enter your unique OpenAI API Key (without any quotes or spaces). - 6. Enter any other API keys or Tokens for services you would like to use. - 7. Save and close the `.env` file. - - After you complete these steps, you'll have properly configured the API keys for your project. - - Notes: - - See [OpenAI API Keys Configuration](#openai-api-keys-configuration) to get your OpenAI API key. - - Get your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website. - - If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and then follow these steps: - - Rename `azure.yaml.template` to `azure.yaml` and provide the relevant `azure_api_base`, `azure_api_version` and all the deployment IDs for the relevant models in the `azure_model_map` section: - - `fast_llm_model_deployment_id` - your gpt-3.5-turbo or gpt-4 deployment ID - - `smart_llm_model_deployment_id` - your gpt-4 deployment ID - - `embedding_model_deployment_id` - your text-embedding-ada-002 v2 deployment ID - - Please specify all of these values as double-quoted strings - ```yaml - # Replace string in angled brackets (<>) to your own ID - azure_model_map: - fast_llm_model_deployment_id: "" - ... - ``` - - Details can be found here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section and here: https://learn.microsoft.com/en-us/azure/cognitive-services/openai/tutorials/embeddings?tabs=command-line for the embedding model. - -## 🔧 Usage - -1. Run the `autogpt` Python module in your terminal. -* On Linux/MacOS: - ```bash - ./run.sh - ``` -* On Windows: - ```bash - .\run.bat - ``` - Running with `--help` after `.\run.bat` lists all the possible command line arguments you can pass. - -2. After each response from Auto-GPT, choose from the options to authorize command(s), -exit the program, or provide feedback to the AI. - 1. Authorize a single command by entering `y` - 2. Authorize a series of _N_ continuous commands by entering `y -N`. For example, entering `y -10` would run 10 automatic iterations. - 3. Enter any free text to give feedback to Auto-GPT. - 4. Exit the program by entering `n` - - -### Logs - -Activity and error logs are located in the `./output/logs` - -To print out debug logs: - -``` -python -m autogpt --debug -``` - -### Docker - -You can also build this into a docker image and run it: - -```bash -docker build -t autogpt . 
-docker run -it --env-file=./.env -v $PWD/auto_gpt_workspace:/home/appuser/auto_gpt_workspace autogpt -``` - -Or if you have `docker-compose`: -```bash -docker-compose run --build --rm auto-gpt -``` - -You can pass extra arguments, for instance, running with `--gpt3only` and `--continuous` mode: -```bash -docker run -it --env-file=./.env -v $PWD/auto_gpt_workspace:/home/appuser/auto_gpt_workspace autogpt --gpt3only --continuous -``` -```bash -docker-compose run --build --rm auto-gpt --gpt3only --continuous -``` - -### Command Line Arguments -Here are some common arguments you can use when running Auto-GPT: -> Replace anything in angled brackets (<>) to a value you want to specify - -* View all available command line arguments - ```bash - python -m autogpt --help - ``` -* Run Auto-GPT with a different AI Settings file - ```bash - python -m autogpt --ai-settings - ``` -* Specify a memory backend - ```bash - python -m autogpt --use-memory - ``` - -> **NOTE**: There are shorthands for some of these flags, for example `-m` for `--use-memory`. Use `python -m autogpt --help` for more information - -## 🗣️ Speech Mode - -Eleven Labs provides voice technologies such as voice design, speech synthesis, and premade voices that auto gpt can use for speech. - -1. Go to [Eleven Labs](https://beta.elevenlabs.io/) and make an account if you don't already have one. -2. Choose and setup the `Starter` plan. -3. Click the top right icon and find "Profile" to locate your API Key. - -In the `.env` file set: -- `ELEVENLABS_API_KEY` -- `ELEVENLABS_VOICE_1_ID` (example: _"premade/Adam"_) - -Enter this command to use TTS _(Text-to-Speech)_ for Auto-GPT - -```bash -python -m autogpt --speak -``` - -### List of IDs with names from eleven labs. You can use the name or ID: - -- Rachel : 21m00Tcm4TlvDq8ikWAM -- Domi : AZnzlk1XvdvUeBnXmlld -- Bella : EXAVITQu4vr4xnSDxMaL -- Antoni : ErXwobaYiN019PkySvjV -- Elli : MF3mGyEYCl7XYWbV9V6O -- Josh : TxGEqnHWrfWFTfGW9XjX -- Arnold : VR6AewLTigWG4xSOukaG -- Adam : pNInz6obpgDQGcFmaJgB -- Sam : yoZ06aMxZJJ28mfd3POQ - -## 🔍 Google API Keys Configuration - -Note: -This section is optional. use the official google api if you are having issues with error 429 when running a google search. -To use the `google_official_search` command, you need to set up your Google API keys in your environment variables. - -Create your project: -1. Go to the [Google Cloud Console](https://console.cloud.google.com/). -2. If you don't already have an account, create one and log in. -3. Create a new project by clicking on the "Select a Project" dropdown at the top of the page and clicking "New Project". -4. Give it a name and click "Create". - -Set up a custom search API and add to your .env file: -5. Go to the [APIs & Services Dashboard](https://console.cloud.google.com/apis/dashboard). -6. Click "Enable APIs and Services". -7. Search for "Custom Search API" and click on it. -8. Click "Enable". -9. Go to the [Credentials](https://console.cloud.google.com/apis/credentials) page. -10. Click "Create Credentials". -11. Choose "API Key". -12. Copy the API key. -13. Set it as an environment variable named `GOOGLE_API_KEY` on your machine (see how to set up environment variables below). -14. [Enable](https://console.developers.google.com/apis/api/customsearch.googleapis.com) the Custom Search API on your project. (Might need to wait few minutes to propagate) - -Set up a custom search engine and add to your .env file: -15. Go to the [Custom Search Engine](https://cse.google.com/cse/all) page. -16. Click "Add". 
-17. Set up your search engine by following the prompts. You can choose to search the entire web or specific sites. -18. Once you've created your search engine, click on "Control Panel". -19. Click "Basics". -20. Copy the "Search engine ID". -21. Set it as an environment variable named `CUSTOM_SEARCH_ENGINE_ID` on your machine (see how to set up environment variables below). - -_Remember that your free daily custom search quota allows only up to 100 searches. To increase this limit, you need to assign a billing account to the project to profit from up to 10K daily searches._ - -### Setting up environment variables - -For Windows Users: - -```bash -setx GOOGLE_API_KEY "YOUR_GOOGLE_API_KEY" -setx CUSTOM_SEARCH_ENGINE_ID "YOUR_CUSTOM_SEARCH_ENGINE_ID" -``` - -For macOS and Linux users: - -```bash -export GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY" -export CUSTOM_SEARCH_ENGINE_ID="YOUR_CUSTOM_SEARCH_ENGINE_ID" -``` - -## Plugins - -Use the [Auto-GPT Plugin Template](https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template) as a starting point for creating your own plugins. - -⚠️💀 **WARNING** 💀⚠️: Review the code of any plugin you use thoroughly, as plugins can execute any Python code, potentially leading to malicious activities, such as stealing your API keys. - -### Plugin Installation Steps - -1. **Clone or download the plugin repository:** - Clone the plugin repository, or download the repository as a zip file. - - ![Download Zip](https://raw.githubusercontent.com/BillSchumacher/Auto-GPT/master/plugin.png) - -2. **Install the plugin's dependencies (if any):** - Navigate to the plugin's folder in your terminal, and run the following command to install any required dependencies: - - ```bash - pip install -r requirements.txt - ``` - -3. **Package the plugin as a Zip file:** - If you cloned the repository, compress the plugin folder as a Zip file. - -4. **Copy the plugin's Zip file:** - Place the plugin's Zip file in the `plugins` folder of the Auto-GPT repository. - -5. **Allowlist the plugin (optional):** - Add the plugin's class name to the `ALLOWLISTED_PLUGINS` in the `.env` file to avoid being prompted with a warning when loading the plugin: - - ``` - ALLOWLISTED_PLUGINS=example-plugin1,example-plugin2,example-plugin3 - ``` - - If the plugin is not allowlisted, you will be warned before it's loaded. - -## Setting Your Cache Type - -By default, Auto-GPT is going to use LocalCache instead of redis or Pinecone. - -To switch to either, change the `MEMORY_BACKEND` env variable to the value that you want: - -* `local` (default) uses a local JSON cache file -* `pinecone` uses the Pinecone.io account you configured in your ENV settings -* `redis` will use the redis cache that you configured -* `milvus` will use the milvus cache that you configured -* `weaviate` will use the weaviate cache that you configured - -## Memory Backend Setup - -### Redis Setup -> _**CAUTION**_ \ -This is not intended to be publicly accessible and lacks security measures. Therefore, avoid exposing Redis to the internet without a password or at all -1. Install docker (or Docker Desktop on Windows). -2. Launch Redis container. - ```bash - docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest - ``` - > See https://hub.docker.com/r/redis/redis-stack-server for setting a password and additional configuration. -3. Set the following settings in `.env`. 
- > Replace **PASSWORD** in angled brackets (<>) - ```bash - MEMORY_BACKEND=redis - REDIS_HOST=localhost - REDIS_PORT=6379 - REDIS_PASSWORD= - ``` - - You can optionally set `WIPE_REDIS_ON_START=False` to persist memory stored in Redis. - -You can specify the memory index for redis using the following: -```bash -MEMORY_INDEX= -``` - -### 🌲 Pinecone API Key Setup - -Pinecone lets you store vast amounts of vector-based memory, allowing the agent to load only relevant memories at any given time. - -1. Go to [pinecone](https://app.pinecone.io/) and make an account if you don't already have one. -2. Choose the `Starter` plan to avoid being charged. -3. Find your API key and region under the default project in the left sidebar. - -In the `.env` file set: -- `PINECONE_API_KEY` -- `PINECONE_ENV` (example: _"us-east4-gcp"_) -- `MEMORY_BACKEND=pinecone` - -Alternatively, you can set them from the command line (advanced): - -For Windows Users: - -```bash -setx PINECONE_API_KEY "" -setx PINECONE_ENV "" # e.g: "us-east4-gcp" -setx MEMORY_BACKEND "pinecone" -``` - -For macOS and Linux users: - -```bash -export PINECONE_API_KEY="" -export PINECONE_ENV="" # e.g: "us-east4-gcp" -export MEMORY_BACKEND="pinecone" -``` - -### Milvus Setup - -[Milvus](https://milvus.io/) is an open-source, highly scalable vector database to store huge amounts of vector-based memory and provide fast relevant search. - -- setup milvus database, keep your pymilvus version and milvus version same to avoid compatible issues. - - setup by open source [Install Milvus](https://milvus.io/docs/install_standalone-operator.md) - - or setup by [Zilliz Cloud](https://zilliz.com/cloud) -- set `MILVUS_ADDR` in `.env` to your milvus address `host:ip`. -- set `MEMORY_BACKEND` in `.env` to `milvus` to enable milvus as backend. - -**Optional:** -- set `MILVUS_COLLECTION` in `.env` to change milvus collection name as you want, `autogpt` is the default name. - - -### Weaviate Setup -[Weaviate](https://weaviate.io/) is an open-source vector database. It allows to store data objects and vector embeddings from ML-models and scales seamlessly to billion of data objects. [An instance of Weaviate can be created locally (using Docker), on Kubernetes or using Weaviate Cloud Services](https://weaviate.io/developers/weaviate/quickstart). -Although still experimental, [Embedded Weaviate](https://weaviate.io/developers/weaviate/installation/embedded) is supported which allows the Auto-GPT process itself to start a Weaviate instance. To enable it, set `USE_WEAVIATE_EMBEDDED` to `True` and make sure you `pip install "weaviate-client>=3.15.4"`. - -#### Install the Weaviate client - -Install the Weaviate client before usage. 
- -``` -$ pip install weaviate-client -``` - -#### Setting up environment variables - -In your `.env` file set the following: - -``` -MEMORY_BACKEND=weaviate -WEAVIATE_HOST="127.0.0.1" # the IP or domain of the running Weaviate instance -WEAVIATE_PORT="8080" -WEAVIATE_PROTOCOL="http" -WEAVIATE_USERNAME="your username" -WEAVIATE_PASSWORD="your password" -WEAVIATE_API_KEY="your weaviate API key if you have one" -WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate" # this is optional and indicates where the data should be persisted when running an embedded instance -USE_WEAVIATE_EMBEDDED=False # set to True to run Embedded Weaviate -MEMORY_INDEX="Autogpt" # name of the index to create for the application -``` - -## View Memory Usage - -View memory usage by using the `--debug` flag :) - - -## 🧠 Memory pre-seeding -Memory pre-seeding allows you to ingest files into memory and pre-seed it before running Auto-GPT. - -```bash -# python data_ingestion.py -h -usage: data_ingestion.py [-h] (--file FILE | --dir DIR) [--init] [--overlap OVERLAP] [--max_length MAX_LENGTH] - -Ingest a file or a directory with multiple files into memory. Make sure to set your .env before running this script. - -options: - -h, --help show this help message and exit - --file FILE The file to ingest. - --dir DIR The directory containing the files to ingest. - --init Init the memory and wipe its content (default: False) - --overlap OVERLAP The overlap size between chunks when ingesting files (default: 200) - --max_length MAX_LENGTH The max_length of each chunk when ingesting files (default: 4000) - -# python data_ingestion.py --dir DataFolder --init --overlap 100 --max_length 2000 -``` -In the example above, the script initializes the memory, ingests all files within the `Auto-Gpt/autogpt/auto_gpt_workspace/DataFolder` directory into memory with an overlap between chunks of 100 and a maximum length of each chunk of 2000. - -Note that you can also use the `--file` argument to ingest a single file into memory and that data_ingestion.py will only ingest files within the `/auto_gpt_workspace` directory. - -The DIR path is relative to the auto_gpt_workspace directory, so `python data_ingestion.py --dir . --init` will ingest everything in `auto_gpt_workspace` directory. - -You can adjust the `max_length` and overlap parameters to fine-tune the way the docuents are presented to the AI when it "recall" that memory: -- Adjusting the overlap value allows the AI to access more contextual information from each chunk when recalling information, but will result in more chunks being created and therefore increase memory backend usage and OpenAI API requests. -- Reducing the `max_length` value will create more chunks, which can save prompt tokens by allowing for more message history in the context, but will also increase the number of chunks. -- Increasing the `max_length` value will provide the AI with more contextual information from each chunk, reducing the number of chunks created and saving on OpenAI API requests. However, this may also use more prompt tokens and decrease the overall context available to the AI. - -Memory pre-seeding is a technique for improving AI accuracy by ingesting relevant data into its memory. Chunks of data are split and added to memory, allowing the AI to access them quickly and generate more accurate responses. It's useful for large datasets or when specific information needs to be accessed quickly. Examples include ingesting API or GitHub documentation before running Auto-GPT. 
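
The overlap/max_length trade-off described above is easier to see in code. A simplified sketch of overlapping chunking (illustrative only; the actual splitting logic lives in `data_ingestion.py` and may differ):

```python
def chunk_text(text: str, max_length: int = 4000, overlap: int = 200) -> list[str]:
    """Split text into chunks of at most max_length characters, where
    consecutive chunks share `overlap` characters of context."""
    assert 0 <= overlap < max_length, "overlap must be smaller than max_length"
    chunks = []
    start = 0
    while start < len(text):
        chunks.append(text[start : start + max_length])
        # Advance by less than a full chunk so neighbouring chunks overlap.
        start += max_length - overlap
    return chunks


# More overlap (or a smaller max_length) means more chunks, and therefore more
# memory entries and more embedding requests.
print(len(chunk_text("x" * 10_000, max_length=2000, overlap=100)))  # -> 6
```
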
- -⚠️ If you use Redis as your memory, make sure to run Auto-GPT with the `WIPE_REDIS_ON_START=False` in your `.env` file. - -⚠️For other memory backends, we currently forcefully wipe the memory when starting Auto-GPT. To ingest data with those memory backends, you can call the `data_ingestion.py` script anytime during an Auto-GPT run. - -Memories will be available to the AI immediately as they are ingested, even if ingested while Auto-GPT is running. - -## 💀 Continuous Mode ⚠️ - -Run the AI **without** user authorization, 100% automated. -Continuous mode is NOT recommended. -It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorize. -Use at your own risk. - -1. Run the `autogpt` python module in your terminal: - - ```bash - python -m autogpt --speak --continuous - ``` - -2. To exit the program, press Ctrl + C - -## GPT3.5 ONLY Mode - -If you don't have access to the GPT4 api, this mode will allow you to use Auto-GPT! - -```bash -python -m autogpt --speak --gpt3only -``` - -It is recommended to use a virtual machine for tasks that require high security measures to prevent any potential harm to the main computer's system and data. - -## 🖼 Image Generation - -By default, Auto-GPT uses DALL-e for image generation. To use Stable Diffusion, a [Hugging Face API Token](https://huggingface.co/settings/tokens) is required. - -Once you have a token, set these variables in your `.env`: - -```bash -IMAGE_PROVIDER=huggingface -HUGGINGFACE_API_TOKEN="YOUR_HUGGINGFACE_API_TOKEN" -``` - -## Selenium -```bash -sudo Xvfb :10 -ac -screen 0 1024x768x24 & DISPLAY=:10 -``` ## ⚠️ Limitations @@ -562,43 +126,3 @@ We look forward to connecting with you and hearing your thoughts, ideas, and exp Star History Chart

- -## Run tests - -To run all tests, run the following command: - -```bash -pytest - -``` - -To run just without integration tests: - -``` -pytest --without-integration -``` - -To run just without slow integration tests: - -``` -pytest --without-slow-integration -``` - -To run tests and see coverage, run the following command: - -```bash -pytest --cov=autogpt --without-integration --without-slow-integration -``` - -## Run linter - -This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. We currently use the following rules: `E303,W293,W291,W292,E305,E231,E302`. See the [flake8 rules](https://www.flake8rules.com/) for more information. - -To run the linter, run the following command: - -```bash -flake8 autogpt/ tests/ - -# Or, if you want to run flake8 with the same configuration as the CI: -flake8 autogpt/ tests/ --select E303,W293,W291,W292,E305,E231,E302 -``` diff --git a/docs/configuration/imagegen.md b/docs/configuration/imagegen.md new file mode 100644 index 00000000..5d9e3547 --- /dev/null +++ b/docs/configuration/imagegen.md @@ -0,0 +1,15 @@ +## 🖼 Image Generation + +By default, Auto-GPT uses DALL-e for image generation. To use Stable Diffusion, a [Hugging Face API Token](https://huggingface.co/settings/tokens) is required. + +Once you have a token, set these variables in your `.env`: + +``` shell +IMAGE_PROVIDER=huggingface +HUGGINGFACE_API_TOKEN="YOUR_HUGGINGFACE_API_TOKEN" +``` + +## Selenium +``` shell +sudo Xvfb :10 -ac -screen 0 1024x768x24 & DISPLAY=:10 +``` \ No newline at end of file diff --git a/docs/configuration/memory.md b/docs/configuration/memory.md new file mode 100644 index 00000000..8ce713be --- /dev/null +++ b/docs/configuration/memory.md @@ -0,0 +1,166 @@ +## Setting Your Cache Type + +By default, Auto-GPT is going to use LocalCache instead of redis or Pinecone. + +To switch to either, change the `MEMORY_BACKEND` env variable to the value that you want: + +* `local` (default) uses a local JSON cache file +* `pinecone` uses the Pinecone.io account you configured in your ENV settings +* `redis` will use the redis cache that you configured +* `milvus` will use the milvus cache that you configured +* `weaviate` will use the weaviate cache that you configured + +## Memory Backend Setup + +Links to memory backends + +- [Pinecone](https://www.pinecone.io/) +- [Milvus](https://milvus.io/) +- [Redis](https://redis.io) +- [Weaviate](https://weaviate.io) + +### Redis Setup +> _**CAUTION**_ \ +This is not intended to be publicly accessible and lacks security measures. Therefore, avoid exposing Redis to the internet without a password or at all +1. Install docker (or Docker Desktop on Windows). +2. Launch Redis container. + +``` shell + docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest +``` +> See https://hub.docker.com/r/redis/redis-stack-server for setting a password and additional configuration. + +3. Set the following settings in `.env`. + > Replace **PASSWORD** in angled brackets (<>) + +``` shell +MEMORY_BACKEND=redis +REDIS_HOST=localhost +REDIS_PORT=6379 +REDIS_PASSWORD= +``` + + You can optionally set `WIPE_REDIS_ON_START=False` to persist memory stored in Redis. + +You can specify the memory index for redis using the following: +``` shell +MEMORY_INDEX= +``` + +### 🌲 Pinecone API Key Setup + +Pinecone lets you store vast amounts of vector-based memory, allowing the agent to load only relevant memories at any given time. + +1. 
Go to [pinecone](https://app.pinecone.io/) and make an account if you don't already have one.
+2. Choose the `Starter` plan to avoid being charged.
+3. Find your API key and region under the default project in the left sidebar.
+
+In the `.env` file set:
+- `PINECONE_API_KEY`
+- `PINECONE_ENV` (example: _"us-east4-gcp"_)
+- `MEMORY_BACKEND=pinecone`
+
+Alternatively, you can set them from the command line (advanced):
+
+For Windows Users:
+
+``` shell
+setx PINECONE_API_KEY "<YOUR_PINECONE_API_KEY>"
+setx PINECONE_ENV "<YOUR_PINECONE_REGION>" # e.g: "us-east4-gcp"
+setx MEMORY_BACKEND "pinecone"
+```
+
+For macOS and Linux users:
+
+``` shell
+export PINECONE_API_KEY="<YOUR_PINECONE_API_KEY>"
+export PINECONE_ENV="<YOUR_PINECONE_REGION>" # e.g: "us-east4-gcp"
+export MEMORY_BACKEND="pinecone"
+```
+
+### Milvus Setup
+
+[Milvus](https://milvus.io/) is an open-source, highly scalable vector database to store huge amounts of vector-based memory and provide fast relevant search.
+
+- Set up the Milvus database; keep your pymilvus version and your Milvus version the same to avoid compatibility issues.
+  - set it up from open source: [Install Milvus](https://milvus.io/docs/install_standalone-operator.md)
+  - or set it up via [Zilliz Cloud](https://zilliz.com/cloud)
+- set `MILVUS_ADDR` in `.env` to your Milvus address `host:ip`.
+- set `MEMORY_BACKEND` in `.env` to `milvus` to enable Milvus as the backend.
+
+**Optional:**
+- set `MILVUS_COLLECTION` in `.env` to change the Milvus collection name if you want; `autogpt` is the default name.
+
+
+### Weaviate Setup
+[Weaviate](https://weaviate.io/) is an open-source vector database. It allows you to store data objects and vector embeddings from ML models and scales seamlessly to billions of data objects. [An instance of Weaviate can be created locally (using Docker), on Kubernetes or using Weaviate Cloud Services](https://weaviate.io/developers/weaviate/quickstart).
+Although still experimental, [Embedded Weaviate](https://weaviate.io/developers/weaviate/installation/embedded) is supported, which allows the Auto-GPT process itself to start a Weaviate instance. To enable it, set `USE_WEAVIATE_EMBEDDED` to `True` and make sure you `pip install "weaviate-client>=3.15.4"`.
+
+#### Install the Weaviate client
+
+Install the Weaviate client before usage.
+
+``` shell
+$ pip install weaviate-client
+```
+
+#### Setting up environment variables
+
+In your `.env` file set the following:
+
+``` shell
+MEMORY_BACKEND=weaviate
+WEAVIATE_HOST="127.0.0.1" # the IP or domain of the running Weaviate instance
+WEAVIATE_PORT="8080"
+WEAVIATE_PROTOCOL="http"
+WEAVIATE_USERNAME="your username"
+WEAVIATE_PASSWORD="your password"
+WEAVIATE_API_KEY="your weaviate API key if you have one"
+WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate" # this is optional and indicates where the data should be persisted when running an embedded instance
+USE_WEAVIATE_EMBEDDED=False # set to True to run Embedded Weaviate
+MEMORY_INDEX="Autogpt" # name of the index to create for the application
+```
+
+## View Memory Usage
+
+View memory usage by using the `--debug` flag :)
+
+
+## 🧠 Memory pre-seeding
+Memory pre-seeding allows you to ingest files into memory and pre-seed it before running Auto-GPT.
+
+``` shell
+# python data_ingestion.py -h
+usage: data_ingestion.py [-h] (--file FILE | --dir DIR) [--init] [--overlap OVERLAP] [--max_length MAX_LENGTH]
+
+Ingest a file or a directory with multiple files into memory. Make sure to set your .env before running this script.
+
+options:
+  -h, --help           show this help message and exit
+  --file FILE          The file to ingest.
+  --dir DIR            The directory containing the files to ingest.
+  --init               Init the memory and wipe its content (default: False)
+  --overlap OVERLAP    The overlap size between chunks when ingesting files (default: 200)
+  --max_length MAX_LENGTH   The max_length of each chunk when ingesting files (default: 4000)
+
+# python data_ingestion.py --dir DataFolder --init --overlap 100 --max_length 2000
+```
+
+In the example above, the script initializes the memory, ingests all files within the `Auto-Gpt/autogpt/auto_gpt_workspace/DataFolder` directory into memory with an overlap between chunks of 100 and a maximum length of each chunk of 2000.
+
+Note that you can also use the `--file` argument to ingest a single file into memory and that data_ingestion.py will only ingest files within the `/auto_gpt_workspace` directory.
+
+The DIR path is relative to the auto_gpt_workspace directory, so `python data_ingestion.py --dir . --init` will ingest everything in the `auto_gpt_workspace` directory.
+
+You can adjust the `max_length` and overlap parameters to fine-tune the way the documents are presented to the AI when it "recalls" that memory:
+- Adjusting the overlap value allows the AI to access more contextual information from each chunk when recalling information, but will result in more chunks being created and therefore increase memory backend usage and OpenAI API requests.
+- Reducing the `max_length` value will create more chunks, which can save prompt tokens by allowing for more message history in the context, but will also increase the number of chunks.
+- Increasing the `max_length` value will provide the AI with more contextual information from each chunk, reducing the number of chunks created and saving on OpenAI API requests. However, this may also use more prompt tokens and decrease the overall context available to the AI.
+
+Memory pre-seeding is a technique for improving AI accuracy by ingesting relevant data into its memory. Chunks of data are split and added to memory, allowing the AI to access them quickly and generate more accurate responses. It's useful for large datasets or when specific information needs to be accessed quickly. Examples include ingesting API or GitHub documentation before running Auto-GPT.
+
+⚠️ If you use Redis as your memory, make sure to run Auto-GPT with `WIPE_REDIS_ON_START=False` set in your `.env` file.
+
+⚠️ For other memory backends, we currently forcefully wipe the memory when starting Auto-GPT. To ingest data with those memory backends, you can call the `data_ingestion.py` script anytime during an Auto-GPT run.
+
+Memories will be available to the AI immediately as they are ingested, even if ingested while Auto-GPT is running.
\ No newline at end of file
diff --git a/docs/configuration/search.md b/docs/configuration/search.md
new file mode 100644
index 00000000..205a7721
--- /dev/null
+++ b/docs/configuration/search.md
@@ -0,0 +1,50 @@
+## 🔍 Google API Keys Configuration
+
+Note:
+This section is optional. Use the official Google API if you are having issues with error 429 when running a Google search.
+To use the `google_official_search` command, you need to set up your Google API keys in your environment variables.
+
+Create your project:
+1. Go to the [Google Cloud Console](https://console.cloud.google.com/).
+2. If you don't already have an account, create one and log in.
+3. Create a new project by clicking on the "Select a Project" dropdown at the top of the page and clicking "New Project".
+4. Give it a name and click "Create".
+
+Set up a custom search API and add to your .env file:
+5. Go to the [APIs & Services Dashboard](https://console.cloud.google.com/apis/dashboard).
+6. Click "Enable APIs and Services".
+7. Search for "Custom Search API" and click on it.
+8. Click "Enable".
+9. Go to the [Credentials](https://console.cloud.google.com/apis/credentials) page.
+10. Click "Create Credentials".
+11. Choose "API Key".
+12. Copy the API key.
+13. Set it as an environment variable named `GOOGLE_API_KEY` on your machine (see how to set up environment variables below).
+14. [Enable](https://console.developers.google.com/apis/api/customsearch.googleapis.com) the Custom Search API on your project. (Might need to wait a few minutes to propagate.)
+
+Set up a custom search engine and add to your .env file:
+15. Go to the [Custom Search Engine](https://cse.google.com/cse/all) page.
+16. Click "Add".
+17. Set up your search engine by following the prompts. You can choose to search the entire web or specific sites.
+18. Once you've created your search engine, click on "Control Panel".
+19. Click "Basics".
+20. Copy the "Search engine ID".
+21. Set it as an environment variable named `CUSTOM_SEARCH_ENGINE_ID` on your machine (see how to set up environment variables below).
+
+_Remember that your free daily custom search quota allows only up to 100 searches. To increase this limit, you need to assign a billing account to the project to profit from up to 10K daily searches._
+
+### Setting up environment variables
+
+For Windows Users:
+
+```
+setx GOOGLE_API_KEY "YOUR_GOOGLE_API_KEY"
+setx CUSTOM_SEARCH_ENGINE_ID "YOUR_CUSTOM_SEARCH_ENGINE_ID"
+```
+
+For macOS and Linux users:
+
+```
+export GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY"
+export CUSTOM_SEARCH_ENGINE_ID="YOUR_CUSTOM_SEARCH_ENGINE_ID"
+```
\ No newline at end of file
diff --git a/docs/configuration/voice.md b/docs/configuration/voice.md
new file mode 100644
index 00000000..a0b522b6
--- /dev/null
+++ b/docs/configuration/voice.md
@@ -0,0 +1,29 @@
+## Voice
+
+Enter this command to use TTS _(Text-to-Speech)_ for Auto-GPT:
+
+``` shell
+python -m autogpt --speak
+```
+
+Eleven Labs provides voice technologies such as voice design, speech synthesis, and premade voices that Auto-GPT can use for speech.
+
+1. Go to [Eleven Labs](https://beta.elevenlabs.io/) and make an account if you don't already have one.
+2. Choose and set up the `Starter` plan.
+3. Click the top right icon and find "Profile" to locate your API Key.
+
+In the `.env` file set:
+- `ELEVENLABS_API_KEY`
+- `ELEVENLABS_VOICE_1_ID` (example: _"premade/Adam"_)
+
+### List of IDs with names from Eleven Labs. You can use the name or ID:
+
+- Rachel : 21m00Tcm4TlvDq8ikWAM
+- Domi : AZnzlk1XvdvUeBnXmlld
+- Bella : EXAVITQu4vr4xnSDxMaL
+- Antoni : ErXwobaYiN019PkySvjV
+- Elli : MF3mGyEYCl7XYWbV9V6O
+- Josh : TxGEqnHWrfWFTfGW9XjX
+- Arnold : VR6AewLTigWG4xSOukaG
+- Adam : pNInz6obpgDQGcFmaJgB
+- Sam : yoZ06aMxZJJ28mfd3POQ
\ No newline at end of file
diff --git a/docs/index.md b/docs/index.md
new file mode 100644
index 00000000..669577bc
--- /dev/null
+++ b/docs/index.md
@@ -0,0 +1,5 @@
+# Auto-GPT
+
+Welcome to Auto-GPT. Please follow the [Installation](installation.md) guide to get started.
+
+It is recommended to use a virtual machine for tasks that require high security measures to prevent any potential harm to the main computer's system and data.
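
Since the search and voice features above are configured entirely through environment variables, a quick sanity check that they are visible to the process can save a debugging round-trip. A hypothetical helper (not shipped with the repo; extend the tuple for whatever backends you enable):

```python
import os

# Variable names documented in the search and voice sections above.
REQUIRED = ("GOOGLE_API_KEY", "CUSTOM_SEARCH_ENGINE_ID", "ELEVENLABS_API_KEY")

for name in REQUIRED:
    print(f"{name}: {'set' if os.getenv(name) else 'MISSING'}")
```
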
diff --git a/docs/installation.md b/docs/installation.md
new file mode 100644
index 00000000..f80b11f7
--- /dev/null
+++ b/docs/installation.md
@@ -0,0 +1,97 @@
+# 💾 Installation
+
+## ⚠️ OpenAI API Keys Configuration
+
+Get your OpenAI API key from: https://platform.openai.com/account/api-keys.
+
+To use an OpenAI API key with Auto-GPT, you **NEED** to have billing set up (i.e., a paid account).
+
+You can set up a paid account at https://platform.openai.com/account/billing/overview.
+
+Important: It's highly recommended that you track your usage on [the Usage page](https://platform.openai.com/account/usage).
+You can also set limits on how much you spend on [the Usage limits page](https://platform.openai.com/account/billing/limits).
+
+![For OpenAI API key to work, set up paid account at OpenAI API > Billing](./docs/imgs/openai-api-key-billing-paid-account.png)
+
+**PLEASE ENSURE YOU HAVE DONE THIS STEP BEFORE PROCEEDING. OTHERWISE, NOTHING WILL WORK!**
+
+## Steps
+
+To install Auto-GPT, follow these steps:
+
+1. Make sure you have all the **requirements** listed in the [README](../README.md). If not, install/get them.
+
+_To execute the following commands, open a CMD, Bash, or PowerShell window by navigating to a folder on your computer and typing `CMD` in the folder path at the top, then press Enter._
+
+2. Clone the repository: For this step, you need Git installed.
+Note: If you don't have Git, you can just download the [latest stable release](https://github.com/Significant-Gravitas/Auto-GPT/releases/latest) instead (`Source code (zip)`, at the bottom of the page).
+
+``` shell
+    git clone -b stable https://github.com/Significant-Gravitas/Auto-GPT.git
+```
+
+3. Navigate to the directory where you downloaded the repository.
+
+``` shell
+    cd Auto-GPT
+```
+
+4. Install the required dependencies.
+
+``` shell
+    pip install -r requirements.txt
+```
+
+5. Configure Auto-GPT:
+    1. Find the file named `.env.template` in the main /Auto-GPT folder. This file may be hidden by default in some operating systems due to the dot prefix. To reveal hidden files, follow the instructions for your specific operating system (e.g., in Windows, click on the "View" tab in File Explorer and check the "Hidden items" box; in macOS, press Cmd + Shift + .).
+    2. Create a copy of this file and call it `.env` by removing the `template` extension. The easiest way is to do this in a command prompt/terminal window: `cp .env.template .env`.
+    3. Open the `.env` file in a text editor.
+    4. Find the line that says `OPENAI_API_KEY=`.
+    5. After the `"="`, enter your unique OpenAI API Key (without any quotes or spaces).
+    6. Enter any other API keys or Tokens for services you would like to use.
+    7. Save and close the `.env` file.
+
+  After you complete these steps, you'll have properly configured the API keys for your project.
+
+  Notes:
+    - See [OpenAI API Keys Configuration](#openai-api-keys-configuration) to get your OpenAI API key.
+    - Get your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website.
+    - If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and then follow these steps:
+        - Rename `azure.yaml.template` to `azure.yaml` and provide the relevant `azure_api_base`, `azure_api_version` and all the deployment IDs for the relevant models in the `azure_model_map` section:
+            - `fast_llm_model_deployment_id` - your gpt-3.5-turbo or gpt-4 deployment ID
+            - `smart_llm_model_deployment_id` - your gpt-4 deployment ID
+            - `embedding_model_deployment_id` - your text-embedding-ada-002 v2 deployment ID
+        - Please specify all of these values as double-quoted strings
+
+``` shell
+# Replace the string in angled brackets (<>) with your own ID
+azure_model_map:
+    fast_llm_model_deployment_id: "<your-fast-llm-model-deployment-id>"
+    ...
+```
+    - Details can be found here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section and here: https://learn.microsoft.com/en-us/azure/cognitive-services/openai/tutorials/embeddings?tabs=command-line for the embedding model.
+
+## Docker
+
+You can also build this into a Docker image and run it:
+
+``` shell
+docker build -t autogpt .
+docker run -it --env-file=./.env -v $PWD/auto_gpt_workspace:/home/appuser/auto_gpt_workspace autogpt
+```
+
+Or if you have `docker-compose`:
+``` shell
+docker-compose run --build --rm auto-gpt
+```
+
+You can pass extra arguments, for instance, running with `--gpt3only` and `--continuous` mode:
+``` shell
+docker run -it --env-file=./.env -v $PWD/auto_gpt_workspace:/home/appuser/auto_gpt_workspace autogpt --gpt3only --continuous
+```
+
+``` shell
+docker-compose run --build --rm auto-gpt --gpt3only --continuous
+```
+
+Alternatively, you can pull the latest release directly from [Docker Hub](https://hub.docker.com/r/significantgravitas/auto-gpt).
\ No newline at end of file
diff --git a/docs/plugins.md b/docs/plugins.md
new file mode 100644
index 00000000..cc4a3299
--- /dev/null
+++ b/docs/plugins.md
@@ -0,0 +1,8 @@
+## Plugins
+
+⚠️💀 **WARNING** 💀⚠️: Review the code of any plugin you use thoroughly, as plugins can execute any Python code, potentially leading to malicious activities, such as stealing your API keys.
+
+See our [Plugins Repo](https://github.com/Significant-Gravitas/Auto-GPT-Plugins) for more info on how to install all the amazing plugins the community has built!
+
+Alternatively, developers can use the [Auto-GPT Plugin Template](https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template) as a starting point for creating their own plugins.
+
diff --git a/docs/testing.md b/docs/testing.md
new file mode 100644
index 00000000..d87c9acd
--- /dev/null
+++ b/docs/testing.md
@@ -0,0 +1,39 @@
+## Run tests
+
+To run all tests, run the following command:
+
+```
+pytest
+```
+
+To run all tests except integration tests:
+
+```
+pytest --without-integration
+```
+
+To run all tests except slow integration tests:
+
+```
+pytest --without-slow-integration
+```
+
+To run tests and see coverage, run the following command:
+
+```
+pytest --cov=autogpt --without-integration --without-slow-integration
+```
+
+## Run linter
+
+This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. We currently use the following rules: `E303,W293,W291,W292,E305,E231,E302`. See the [flake8 rules](https://www.flake8rules.com/) for more information.
+ +To run the linter, run the following command: + +``` +flake8 autogpt/ tests/ + +# Or, if you want to run flake8 with the same configuration as the CI: + +flake8 autogpt/ tests/ --select E303,W293,W291,W292,E305,E231,E302 +``` \ No newline at end of file diff --git a/docs/usage.md b/docs/usage.md new file mode 100644 index 00000000..9b2185ae --- /dev/null +++ b/docs/usage.md @@ -0,0 +1,88 @@ +# Usage + +1. Run the `autogpt` Python module in your terminal. +* On Linux/MacOS: + ``` + ./run.sh + ``` +* On Windows: + ``` + .\run.bat + ``` + Running with `--help` after `.\run.bat` lists all the possible command line arguments you can pass. + +2. After each response from Auto-GPT, choose from the options to authorize command(s), +exit the program, or provide feedback to the AI. + 1. Authorize a single command by entering `y` + 2. Authorize a series of _N_ continuous commands by entering `y -N`. For example, entering `y -10` would run 10 automatic iterations. + 3. Enter any free text to give feedback to Auto-GPT. + 4. Exit the program by entering `n` + + +## Command Line Arguments +Here are some common arguments you can use when running Auto-GPT: +> Replace anything in angled brackets (<>) to a value you want to specify + +* View all available command line arguments + ``` + python -m autogpt --help + ``` +* Run Auto-GPT with a different AI Settings file + ``` + python -m autogpt --ai-settings + ``` +* Specify a memory backend + ``` + python -m autogpt --use-memory + ``` + +> **NOTE**: There are shorthands for some of these flags, for example `-m` for `--use-memory`. Use `python -m autogpt --help` for more information + +### Speak Mode + +Enter this command to use TTS _(Text-to-Speech)_ for Auto-GPT + +``` +python -m autogpt --speak +``` + +### 💀 Continuous Mode ⚠️ + +Run the AI **without** user authorization, 100% automated. +Continuous mode is NOT recommended. +It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorize. +Use at your own risk. + +1. Run the `autogpt` python module in your terminal: + + ``` + python -m autogpt --continuous + ``` + +2. To exit the program, press Ctrl + C + +### GPT3.5 ONLY Mode + +If you don't have access to the GPT4 api, this mode will allow you to use Auto-GPT! + +``` shell +python -m autogpt --gpt3only +``` + +### GPT4 ONLY Mode + +If you do have access to the GPT4 api, this mode will allow you to use Auto-GPT solely using the GPT-4 API for increased intelligence (and cost!) 
+ +``` shell +python -m autogpt --gpt4only +``` + +## Logs + +Activity and error logs are located in the `./output/logs` + +To print out debug logs: + +``` shell +python -m autogpt --debug +``` \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 00000000..b8e529ea --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,20 @@ +site_name: Auto-GPT +site_url: https://github.com/Significant-Gravitas/Auto-GPT +repo_url: https://github.com/Significant-Gravitas/Auto-GPT +nav: + - Home: index.md + - Installation: installation.md + - Usage: usage.md + - Plugins: plugins.md + - Testing: testing.md + - Configuration: + - Search: configuration/search.md + - Memory: configuration/memory.md + - Voice: configuration/voice.md + - Image Generation: configuration/imagegen.md + + - Code of Conduct: ../CODE_OF_CONDUCT.md + - Contributing: ../CONTRIBUTING.md + - License: ../LICENSE + +theme: readthedocs \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 5d98e612..de550791 100644 --- a/requirements.txt +++ b/requirements.txt @@ -33,6 +33,8 @@ black isort gitpython==3.1.31 auto-gpt-plugin-template +mkdocs + # OpenAI and Generic plugins import openapi-python-client==0.13.4 From 1ffa9b2ebef915dfdb5d02185051ac1ae5f0ec64 Mon Sep 17 00:00:00 2001 From: lee <77390758+dharana77@users.noreply.github.com> Date: Sat, 22 Apr 2023 22:00:23 +0900 Subject: [PATCH 078/222] ci: selenium safari bug fixed ModuleNotFoundError: No module named 'selenium.webdriver.safari.options when install <=4.1.3 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index de550791..2d4624a1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -16,7 +16,7 @@ pinecone-client==2.2.1 redis orjson Pillow -selenium +selenium==4.1.4 webdriver-manager jsonschema tweepy From 894027f5f6ea7e418e8d5ec948041ad0a0feb9e9 Mon Sep 17 00:00:00 2001 From: Richard Beales Date: Sat, 22 Apr 2023 16:25:03 +0100 Subject: [PATCH 079/222] run black and isort on behalf of OP --- autogpt/speech/brian.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/autogpt/speech/brian.py b/autogpt/speech/brian.py index b25b2ee2..ffa4e51e 100644 --- a/autogpt/speech/brian.py +++ b/autogpt/speech/brian.py @@ -1,9 +1,12 @@ -import os -import requests import logging +import os + +import requests from playsound import playsound + from autogpt.speech.base import VoiceBase + class BrianSpeech(VoiceBase): """Brian speech module for autogpt""" @@ -20,7 +23,9 @@ class BrianSpeech(VoiceBase): Returns: bool: True if the request was successful, False otherwise """ - tts_url = f"https://api.streamelements.com/kappa/v2/speech?voice=Brian&text={text}" + tts_url = ( + f"https://api.streamelements.com/kappa/v2/speech?voice=Brian&text={text}" + ) response = requests.get(tts_url) if response.status_code == 200: @@ -30,5 +35,9 @@ class BrianSpeech(VoiceBase): os.remove("speech.mp3") return True else: - logging.error("Request failed with status code: %s, response content: %s", response.status_code, response.content) + logging.error( + "Request failed with status code: %s, response content: %s", + response.status_code, + response.content, + ) return False From ea26988a95f8d9853243dcd4cd0e413e52e24cbf Mon Sep 17 00:00:00 2001 From: Richard Beales Date: Sat, 22 Apr 2023 16:58:21 +0100 Subject: [PATCH 080/222] run black and isort on behalf of OP --- autogpt/app.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/autogpt/app.py 
b/autogpt/app.py index 5e037975..237feae2 100644 --- a/autogpt/app.py +++ b/autogpt/app.py @@ -124,7 +124,10 @@ def execute_command( shutdown() else: for command in prompt.commands: - if command_name == command["label"].lower() or command_name == command["name"].lower(): + if ( + command_name == command["label"].lower() + or command_name == command["name"].lower() + ): return command["function"](**arguments) return ( f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'" From 8173e4d139f16619e79aad3f682537e2b6d8a99b Mon Sep 17 00:00:00 2001 From: riensen <3340218+riensen@users.noreply.github.com> Date: Sat, 22 Apr 2023 18:02:11 +0200 Subject: [PATCH 081/222] Fix: Mulitple plugins per zip for Auto-GPT-Plugins --- autogpt/plugins.py | 48 ++++++++++++++++++++------------------ tests/unit/test_plugins.py | 8 +++---- 2 files changed, 29 insertions(+), 27 deletions(-) diff --git a/autogpt/plugins.py b/autogpt/plugins.py index 32e29acb..abc70446 100644 --- a/autogpt/plugins.py +++ b/autogpt/plugins.py @@ -18,26 +18,27 @@ from autogpt.config import Config from autogpt.models.base_open_ai_plugin import BaseOpenAIPlugin -def inspect_zip_for_module(zip_path: str, debug: bool = False) -> Optional[str]: +def inspect_zip_for_modules(zip_path: str, debug: bool = False) -> list[str]: """ - Inspect a zipfile for a module. + Inspect a zipfile for a modules. Args: zip_path (str): Path to the zipfile. debug (bool, optional): Enable debug logging. Defaults to False. Returns: - Optional[str]: The name of the module if found, else None. + list[str]: The list of module names found or empty list if none were found. """ + result = [] with zipfile.ZipFile(zip_path, "r") as zfile: for name in zfile.namelist(): if name.endswith("__init__.py"): if debug: print(f"Found module '{name}' in the zipfile at: {name}") - return name + result.append(name) if debug: print(f"Module '__init__.py' not found in the zipfile @ {zip_path}.") - return None + return result def write_dict_to_json_file(data: dict, file_path: str) -> None: @@ -207,24 +208,25 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate # Generic plugins plugins_path_path = Path(cfg.plugins_dir) for plugin in plugins_path_path.glob("*.zip"): - if module := inspect_zip_for_module(str(plugin), debug): - plugin = Path(plugin) - module = Path(module) - if debug: - print(f"Plugin: {plugin} Module: {module}") - zipped_package = zipimporter(str(plugin)) - zipped_module = zipped_package.load_module(str(module.parent)) - for key in dir(zipped_module): - if key.startswith("__"): - continue - a_module = getattr(zipped_module, key) - a_keys = dir(a_module) - if ( - "_abc_impl" in a_keys - and a_module.__name__ != "AutoGPTPluginTemplate" - and denylist_allowlist_check(a_module.__name__, cfg) - ): - loaded_plugins.append(a_module()) + if moduleList := inspect_zip_for_modules(str(plugin), debug): + for module in moduleList: + plugin = Path(plugin) + module = Path(module) + if debug: + print(f"Plugin: {plugin} Module: {module}") + zipped_package = zipimporter(str(plugin)) + zipped_module = zipped_package.load_module(str(module.parent)) + for key in dir(zipped_module): + if key.startswith("__"): + continue + a_module = getattr(zipped_module, key) + a_keys = dir(a_module) + if ( + "_abc_impl" in a_keys + and a_module.__name__ != "AutoGPTPluginTemplate" + and denylist_allowlist_check(a_module.__name__, cfg) + ): + loaded_plugins.append(a_module()) # OpenAI plugins if cfg.plugins_openai: manifests_specs = 
fetch_openai_plugins_manifest_and_spec(cfg) diff --git a/tests/unit/test_plugins.py b/tests/unit/test_plugins.py index 7c71f69f..c8bda3cf 100644 --- a/tests/unit/test_plugins.py +++ b/tests/unit/test_plugins.py @@ -3,7 +3,7 @@ import pytest from autogpt.config import Config from autogpt.plugins import ( denylist_allowlist_check, - inspect_zip_for_module, + inspect_zip_for_modules, scan_plugins, ) @@ -13,9 +13,9 @@ PLUGIN_TEST_INIT_PY = "Auto-GPT-Plugin-Test-master/src/auto_gpt_vicuna/__init__. PLUGIN_TEST_OPENAI = "https://weathergpt.vercel.app/" -def test_inspect_zip_for_module(): - result = inspect_zip_for_module(str(f"{PLUGINS_TEST_DIR}/{PLUGIN_TEST_ZIP_FILE}")) - assert result == PLUGIN_TEST_INIT_PY +def test_inspect_zip_for_modules(): + result = inspect_zip_for_modules(str(f"{PLUGINS_TEST_DIR}/{PLUGIN_TEST_ZIP_FILE}")) + assert result == [PLUGIN_TEST_INIT_PY] @pytest.fixture From 996a3b331a999c65c7d3a84f924727691613d76b Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Sun, 23 Apr 2023 00:17:26 +0200 Subject: [PATCH 082/222] Add CI smoke test (#2461) --- .github/workflows/goal_oriented_tasks.yml | 37 ++ autogpt/config/config.py | 8 + requirements.txt | 2 + .../cassettes/write_file.vcr.yml | 616 ++++++++++++++++++ .../goal_oriented/goal_oriented_tasks.md | 10 + .../goal_oriented/test_write_file.py | 109 ++++ tests/integration/goal_oriented/vcr_helper.py | 43 ++ tests/smoke_test.py | 59 -- 8 files changed, 825 insertions(+), 59 deletions(-) create mode 100644 .github/workflows/goal_oriented_tasks.yml create mode 100644 tests/integration/goal_oriented/cassettes/write_file.vcr.yml create mode 100644 tests/integration/goal_oriented/goal_oriented_tasks.md create mode 100644 tests/integration/goal_oriented/test_write_file.py create mode 100644 tests/integration/goal_oriented/vcr_helper.py delete mode 100644 tests/smoke_test.py diff --git a/.github/workflows/goal_oriented_tasks.yml b/.github/workflows/goal_oriented_tasks.yml new file mode 100644 index 00000000..816b64ff --- /dev/null +++ b/.github/workflows/goal_oriented_tasks.yml @@ -0,0 +1,37 @@ +name: Goal Oriented Tasks + +on: + push: + branches: + - master + pull_request: + branches: + - master + +jobs: + build: + runs-on: ubuntu-latest + + strategy: + matrix: + python-version: ['3.10'] + + steps: + - name: Check out repository + uses: actions/checkout@v3 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + + - name: Generate coverage report + run: | + coverage run --source=autogpt -m pytest -s -k tests/integration/goal_oriented + env: + OPENAI_API_KEY: 'dummy_api_key' diff --git a/autogpt/config/config.py b/autogpt/config/config.py index 9a84706e..7ae362a3 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -255,6 +255,14 @@ class Config(metaclass=Singleton): """Set the plugins value.""" self.plugins = value + def set_temperature(self, value: int) -> None: + """Set the temperature value.""" + self.temperature = value + + def set_memory_backend(self, value: int) -> None: + """Set the temperature value.""" + self.memory_backend = value + def check_openai_api_key() -> None: """Check if the OpenAI API key is set in config.py or as an environment variable.""" diff --git a/requirements.txt b/requirements.txt index 2d4624a1..4932ecc3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -49,3 +49,5 @@ pytest-benchmark 
pytest-cov pytest-integration pytest-mock +vcrpy +pytest-vcr diff --git a/tests/integration/goal_oriented/cassettes/write_file.vcr.yml b/tests/integration/goal_oriented/cassettes/write_file.vcr.yml new file mode 100644 index 00000000..ec31a7d6 --- /dev/null +++ b/tests/integration/goal_oriented/cassettes/write_file.vcr.yml @@ -0,0 +1,616 @@ +interactions: +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are write_to_file-GPT, an AI designed to use the write_to_file command to write + ''Hello World'' into a file named \"hello_world.txt\" and then use the task_complete + command to complete the task.\nYour decisions must always be made independently + without seeking user assistance. Play to your strengths as an LLM and pursue + simple strategies with no legal complications.\n\nGOALS:\n\n1. Use the write_to_file + command to write ''Hello World'' into a file named \"hello_world.txt\".\n2. + Use the task_complete command to complete the task.\n3. Do not use any other + commands.\n\n\nConstraints:\n1. ~4000 word limit for short term memory. Your + short term memory is short, so immediately save important information to files.\n2. + If you are unsure how you previously did something or want to recall past events, + thinking about similar events will help you remember.\n3. No user assistance\n4. + Exclusively use the commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. + append_to_file: Append to file, args: \"filename\": \"\", \"text\": + \"\"\n2. delete_file: Delete file, args: \"filename\": \"\"\n3. + read_file: Read file, args: \"filename\": \"\"\n4. search_files: Search + Files, args: \"directory\": \"\"\n5. write_to_file: Write to file, + args: \"filename\": \"\", \"text\": \"\"\n6. delete_agent: Delete + GPT Agent, args: \"key\": \"\"\n7. get_hyperlinks: Get text summary, args: + \"url\": \"\"\n8. get_text_summary: Get text summary, args: \"url\": \"\", + \"question\": \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. + message_agent: Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. + start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. Do Nothing: \"do_nothing\", args: \n13. Task Complete + (Shutdown): \"task_complete\", args: \"reason\": \"\"\n\nResources:\n1. + Internet access for searches and information gathering.\n2. Long Term memory + management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File + output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your + actions to ensure you are performing to the best of your abilities.\n2. Constructively + self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions + and strategies to refine your approach.\n4. Every command has a cost, so be + smart and efficient. Aim to complete tasks in the least number of steps.\n5. 
+ Write all code to a file.\n\nYou should only respond in JSON format as described + below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": + \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- + long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": + \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": + \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} + \nEnsure the response can beparsed by Python json.loads"}, {"role": "system", + "content": "The current time and date is Tue Jan 01 00:00:00 2000"}, {"role": + "system", "content": "This reminds you of these events from your past:\n\n\n"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}], "temperature": 0.0, "max_tokens": 3210}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '3531' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA7yTT4/TMBDF73yK0Vx6cas21bbZ3Lgg9oIEAi2CoMpNpomp4wn2RC2q8t1R/nS7 + G8QJaa/zZt772R5f0OSYYFZqyarazrfx22/H08auyG0fo3fm6zr6dfcpjxv6cPiICnn/kzIZJxYZ + V7UlMexQYeZJC+WYrDZxtIpXm3WksOKcLCZY1DJfL+7m0vg9z5fr5QoVNkEXhMkFa89VLTvhI7mA + yTbeKrx5P9WjZaRQWLR9Kt3H963CrGSTUcDk+wUrCldbz5YwQR2CCaKddJDshFx3gEvqAABSlJKb + opSQYgJjcRToLF0xxQc4GWuhCQRSEpy8EdoJ7w7GEmRcVdrlIDwIMHtP1jI8srf5DIwTBg19q9MV + 5TArO3136vSFnGW2SFE9D/akAzvjiiH9c0lQsLZgwn+EKOgh/4nfmZcEFQcBXdeea2+03PQDe5Cy + 69LhOCWurXYD7By+vNItZd6IyUyorm/kiHp/cqHxHYMWeABdQROMK3qmjL2nTG4546Vo6eW+bALs + qRvoOIXcdcj+nhKEmvTxtTZkCG7VdW1H07+2tnMYmF5ATNC1L6YLPwhd781iwvHCZPpJnp3pStsT + j+Cpa7FVeDDOhHI3rDgmGIRrVGhcTmdMlu2P9s0fAAAA//8DAKiDWQmUBAAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bbf4f1f88c715ca-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 22 Apr 2023 16:40:46 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '14187' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-reset-requests: + - 17ms + x-request-id: + - dd915979acb69f7fba7d79dc709e4440 + status: + code: 200 + message: OK +- request: + body: '{"input": ["Assistant Reply: {\n \"thoughts\": {\n \"text\": + \"I will use the write_to_file command to write ''Hello World'' into a file + named ''hello_world.txt''.\",\n \"reasoning\": \"The goal is to write + ''Hello World'' into a file named ''hello_world.txt'', and the write_to_file + command is the most appropriate command for this task.\",\n \"plan\": + \"- Use the write_to_file command to write ''Hello World'' into a file named + ''hello_world.txt''.\",\n \"criticism\": \"I need to ensure that I am + using the correct file name and that the file is being written correctly.\",\n \"speak\": + \"I will use the write_to_file command to write ''Hello World'' into a file + named ''hello_world.txt''.\"\n },\n \"command\": {\n \"name\": + \"write_to_file\",\n \"args\": {\n \"filename\": \"hello_world.txt\",\n 
\"text\": + \"Hello World\"\n }\n }\n} \nResult: Command write_to_file returned: + File written to successfully. \nHuman Feedback: "], "model": "text-embedding-ada-002", + "encoding_format": "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '1071' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1R6WQ+6TLPn/fspnjy3zhtW6eK5Y5NdWgUVJ5MJKC4gsnYDfXK++0T/J2dybkyA + DqabqvotVf/xr7/++rvJy+I6/v3PX3+/X8P49//63rtlY/b3P3/973/99ddff/3H7/d/rCzqvLjd + Xp/Hb/nv4etzK+a///mL/+87/3/RP3/9fciYTRYqV4yVY1SjUT1e6fYA557e/M8AY3Z/0W3Kezmf + nNe8Au1Rot4Oa+WwuT8AqkqgdPtOxn6hp1uBRmSKpLoYSjzJU7dbv2lEsQnGHs2pJe/QEqgrvLnD + sx81xl2Ro+ZbIo1ulS/+zpvAGBuTXvR2iknMzzJcL8oem9aT5ONe+qyQIXsR3VApzNnILgQeXnal + 7rSN89naRAD1m99SfdBe8cJv2gQ29LPC2iO95sNTEm31u19shh2L59Z9LOrz7u/o1k3eQb8f+AbK + WhtxZhEuX/bLo1H19hRGS62xuM/bdYr686nF+i1bgu77fpTy6z0O0/M2WGxjJ6NZiM5YK4ZXPDXZ + 0YBYIDlR1VTNi8u+1dZpltrYPKp9Xr7C9ISMVpOw1toKI0FYhurV9km0aNIbTcomTWB4iDrWHuqR + LS7KCarifKHOZ3cox9cVXeGsKRR7Kzvsh+vYL8rtltzxpYmnnGFV89Vtu+siEJ9NyTaaeQV+yvrf + eccT5yk8+PL7iQ0a9zETRCIj3mNbap+eQ9zobjso5i0hNHusakTfOnTg2GzC9sXjzPa0TmuQ6rzA + +PL49AsqZlF9PYmII+7Uo3kO1BU4/Emj4fvaoYHYfgKXOOBoiCW3F6bnvEParYwirtYCJm28Cw/5 + bdtRcxi0crm+piuEqW/QTf42cnG6bg5KLR18irtzYIroWF6hTJMYe+V7X3Yv0Fao0Jsz3gV0QVNe + vCu4APQU+8cb+hNf7V728Zl3TvlQ5V0CphO/aXj0H2zitOoKnankEXqIbUmO2qVGlSLL2FAOt3x2 + TkW6tvjXg3BJRRClqzlRgwGlWDP1JGays0pBuG1zujX1ATXuJKdwzKo6miNLQ8LzHDQomZjyPe8g + lmYJQhj79T1678cdEtDovIBMrMfBWjwEorQaXBB0sYmWLZQlMcU5hHo6ptGcbd9smt24hnd4Eqh5 + CN7l4vkXQLEw5DhMwe4HTqsKlYuaEIfnnA+mQ75LQU3qJ9aeLxxPUrdcVSKRGw4MxQ3mi74nXJi6 + Bg3O4RK8hSGoIT6IT2xyt9ScnYb5cD9abxzpFPrhvt9pan2736hm1ls2dxs/BGSzA41YrgdTzL2j + 3/po5k4sJ/NTtYHaIo+dZn4HkxFdHtAckErt+aMhejztyFpYMqBuqznBIu7dlZIk/IK94KkzsZ7H + g3Le6XbEm7wUz8LqcQIpUZ/4WBIn7yK4d7A52jyN4uBSNmm8WcHhnh2oO3thMKvyxwdbOLOI8zQz + np7x86DYjrPB2nb3zllCtwoSL7JB8WZVlMxS4hqOn8uZ8EmfB4uiFuJ/5X8yxKXg3IflT71Tqmno + CbmAhsx7k2AfB37OLscgReujfaIW70aM1/eyDWVJnWg98q9+OQdzpGpRJkXSRxjMsRRcA4pWQtTu + KTbHM+emYN30C/ayw7ofoyw9wC9+vFknbJqy6QSxrOlUC+gBsb7oBzDVtYE1bXoG85LxPoTlQ6Ru + 6pX95ItNCCei8ThSMhawiOgr2EsoxNu6eJTLruyuiHfuH2qM1YoN3ZvvQOvOHHV4R4wX2iwhBFMH + 2DtJcbBcnW2CjINK6AbIZDKF6q7yPR/q8/05n+6u3oF+w2vCb5QybxnTCOC9fKGXUpPNxbdRijTB + OGK3ybf9JDRtCM2tOhBpk8xxfQsFH+1zK8c55jaMzbrRwNHAEvV9PPdjs+4e8Gl3OVmvejvolNcu + VRciyNQTX89gaUPTB/XOJdh5reOcTNKUqp4QxOSXb6J/SX2lPOz9aJKUTUA/22QHDp9o9LCFsp9W + ClS/eKURXo0xOQfrUPEGe0MNQmbEQHlM6om7znSLBjPnj7e2Qyudq+kmeljl3KxfL/Ux7E54c7yL + 5byMqQKobcPovT3N5TBzGxfMe5dEzDkjczpbeEFGn9zo8RE/zWkWxAXtjsnnVw/7xkD8ABuzkyLF + uAWIfz8zH33rD1k0aYMY4uUVGHIQYXsbJfl036cGWsWX8r/ip5muLmyPpYPDWq/yqajrBu5m15B2 + WyFzFq00Qt9r+l1fUr/WZLVqrygK0e0ZM+njWrCuXzHGvnKOx/pwn5StMnzw5hyVwdyKwQraIU+o + 4/W7ctSYVIB6OCBsH+GA2LJzReVbD+g5+nj5fC/cCeybG5G1+NLN6VhPBixS1GBrqMp8QoHro23N + WeTNv3uTvoNAUyB82oQj3sRmvs4nZJHbCW9vk2EKS2U8IH6uZBxMZWay2b9oIAqSj7XsWAZLe9w/ + VFAYH3HpiS+XCM4d9Hr8wMF7ecWkPe5fsNhdQuaiepuTO8mZ+s3HSAkmk/EZ/qQg3J86xp5W5oOt + 0BRt5YseqWu9MJtM2RH1yzciJRWeaFBAl5XVcfSoqTy5uNUNdYDq2vi44E4Bmz7JO4Sx/awjaA5n + NsPm6qPrgbtTa0JGP3vPlYEUx/6Q1j8TNP/w9psf1En3DzZZu9cCwSGNsK09joh+4xEumTXj9Pp4 + sflw8QEOQUejz2bdIaKRyQXQ3ZgG+nQ0mSH7J6SWRKa62PBs7vWyg1DaXXChfu750PfDFYkXxaD4 + 6io5aY+XFxpnaPGB133zx78Qq1GMQw47wXw3jh1a2keEtT7TkJihdwcoewj4kuj1l6/tB9ic04xe + 0U2PF3fZnGB/GAe8CefUpKvqXcCS2ntCY/MaT1yshwDMreiByhVip62foI7kb9IGzycahZcQgZOV + MY2k5RH/8A+MKu9xaJKypD++80rkDU20lV8yuGxE9dqfImoGJIuZnaYGOG3fRUzvn/lczc8CSfp2 + 
FT064ROzxV4KCMuXGPEvpUfVEpkp7EJOouGzewfUW4k2fOMR++e1yxp5LTfom8/UE1Jikpm/8LCh + 7xU1qGwhMTjOFVqlBsXOl3/RL9+CcnrK1ItIZPKleZtg1NMjNnXbQWx7Nxe42MeIdNePU37zwQUz + 110aXO9+PB2TfAfBsnphu0lcxq+5poA5jGOCHvud2dXNSYHSqiZ6HZkfTPJa7pB4rq4E8AEH85S+ + FGWKlIhqrNywebVjC3jp6UX/1AtYpBNqO4fQYAgzNLjTlKmm9wlouGN6L9gXHsB09m/q1P3FJLKV + GKoUyHEkq1JXfvlJpdyVw4a6UnbP2X7gO1jaV/TN11fAnGQuVGB+hc1jumcT5y08XG2X/PhkSfXl + 8EC912B6+eLd8uV3v/zEDptENn3rLXz6Wsf6c97nvRXKGaAgK7CtBmE8B9ySgVKHKyLxA0aTV+Qd + snDCUW/kjXLO3vUDbqrMsMkXdfzFn+gXP1SvR98U+NsmUypdFSLFx/ueJfJ1B03D2djc14M5EdtI + 1MgpUzKF9iFn6XM6ATyHDz2BfokX1+p8ZMkHE/slzszpuplS2BwtnmLTnfs5Np0OUuukElQ6gMa6 + gpdsT5pH7wI6ffmvXECpCVuq+W+ODc5TiWAtrXa/eAmW6+VlQyV/DtT9nveMDp6N3m1QU+cUJGj8 + MGUAP4iu2OvJn/ddoYGhjV4f387F230vQ9PvBmzt1sec17l8hyo1KrDpnFEwdPIlBSfIABunjdsT + Idj7MATdA2/FYDbJ8hkUMLvgTuCD5HxWA3OFnm2bU8/xX+XMtVoCe+FT46i3dNZyli1C9tyVeKdN + ekDaMPBRRy5vGt7iN1o8SfXhU/U4atc+Q+2nq07AZaZGN1vUBxPQBFSiO1okSnISEGw4Lzi1lwZr + neDk43OrHtCkCCeSwiiV9JFzPuzqqYrWafruJ6SvJ7gf7Te2fVVjgqe2LjRUw0Scbls2P9smg+sx + 3mBvugr5lET9Fbn1aU+Dh9j20/7Y1Ci/4Y4Gi6SZkvJeFmhC90gjK3uZ87J/L8hvbz62jju+H/HD + tdGSWnvqRAZF85F9+cKh0fFtufTxUhwqUO8N7/7wOJ73EgU4CpuZRgo39ENwPj1AfV4O2Hixjzk4 + 2vkAXBucqUNXszlJnXJVKu5lUetiZDGJIriCOUVt9Mr1Zz6P94emWo9JpUlRDuVCF7cG1j/u9Eqo + wWjzfoggHrgd3QQCZ85KeNlBRI0Cu7M3/PicDzlpP1/8XtD4bnIbJDJt8S20OzR/+QRy2raLyi0e + UHVySAL87TGR5RVt+1EYggpWdrVg97z1S3FXdgUU2Vhho91y5TNkmwKln1Aiwmapg8m+GimarlXz + 5f+fYPKClYJqflXhQGzfiKK+qxCZ5p7qO2XL3uig20Dqw4NIaJ2zWZxRCltV21L/Fj6DiQaKixpP + pBE8N0H5OSCFh0dAjAh2TC8F951UqrKGJ5HWjdvPNy9bIHNuInbuRYgkDhYDxFa74GA8bdE0atkD + 6JELqLZaSwGL8ncEiwIBtg/+aC6vrDbgpioMB4/9ZDZekTfo6PltdJeuLhpCWtrgnTSgQVu5wcRi + FCranefxHlVj/4fPj+17Tfi15ZRtn7gZSLnWffmgbnbKK03hIuprbK2tT7lkaOwAlJmnZri5oJ5j + fQd5MI9YS7EUL178OKnTWb1GL9Z1JXMpJ8P5Nt4J2NXU00sgGPDzU2w1GPL5qiQvldcGjdq++mCM + Ci9Qy6mUqS57Tj6rSXaCaFkMarDNPlhOVZ0Buvs6DdWHUk4xN4aQ38sd9RrZQ5OBYICFRwbdaoFm + jknV1fD1e6LqfHrkS6XLPApDkxLxzlf5rMrUB/vmR9jdKktJxL22UqJnsqFB4vbmON4b7ccXqWlq + OZoC5jUQRE1H1kbkIcIJs41WlfQg6GKZbEALeoBA2pSsv3g8nO+K8sNPHH733+ftnMJXL9PixlXB + 3CG2KC/dfxEl0qV4OcGygvlGd9gtZtEcDms1ResncgiThU2/HKXAhSssATWS6hQPD6u0UJtxZxpG + 47GfvECUIa4OBf7WbzTHhbLASv6YRPzq3cVIQYSrKbk/vtK/3+FnhSr5faAGX5v9LPmuAWg7WPhi + idDPBTbqX72j1iH7lNPP77HMVUg38nnHmmnfAprs5x0fo4fVi9KA09/3xa+9R9GIRucB9XRO8Xa5 + 9Dm5XjoL0vie4KhZnQK2bi8nlH4iidSDTfrFi5sEPsqQYi20l3hsuSxBXT0dqSOIBhKUNRRgHDhC + rbXl9MKat5o/+Od9/adl61kdLFLY4BPPjyYtLqMCjcAkajm1an7r8YLKRSqoeXpZaNH7SwXHqxx+ + 9WeB2Ncfg59+CgfrVi7pOkmhvjOTWsddUs4vuovUH576X39qCh6VDPxC9ji6CnMw0gOyIUnEhboG + 0krRmL0M4gP/xD/+K3h9mMLjcOuwNvPrePz5k5HHr8jJwEa8uFv5gVIFsm++NiaTDLuGWQjP+EaM + ISCNHZ/AlZaIrOe6yQkTews8LwrI8/Ep8rkxphV8/UCyOr9k1Dkr5QAa4QLq5M62FJSptoDF3oTN + hKWx5GbPAzqQVxtNhdOw+Ycfd2zmv++L+FE7vNDXP4ykezGwgfMWEXz7s6M/fF4O8WkFw826UWs6 + tSYd8q2/5lrvjJ2PEAZk9AURVvLbpJssp/nS2HGiSgn3JNHWfZRLojyHn94n9fx5oCVR2gFOxOCj + VWAk5Sjb7RW8rHG/+Z6VJcxZqgB30Yjk2eegvdxFgtq0nSP1nWx79voUk/KqjOuP75kj9Qbyi//o + tWqv/aIR2Qe3TvY0Rv0hXx7TM0EON7Dffspl53An5HlhgK2V9sinU3m5Iv65SzD+6jca6DhbR6Ff + UZ0tHzYRQDLaxbqO3bxqEbHXxhXdosj96lUpZ31RDsi33zvs2e82WGa1iRDNqjPdXaa2n/qLM4Ci + 60UkP6RX0K9s7arOq30Vseb4zFm9Kgzl5/ewhKW5YF8A0NCuI2wGRInbMh5t+GSuRa9XEuYyF3sh + fPERB1WDf/XNhT08UhzFwbpvzcdiwVePYOdq2iX5kGhAX75HYNWN/bz3w2Ud716Iuu1bK6cNz1XK + xBUviletYLKrxclAwjvDRriVgkkXQxltLkSLutwWTHq9WQVKpGEg8ufOlzPVTglC6RbT28vR488t + Fi3YVcUqmlfzmo2THbmQNNWRWitNi2fdEAicF6RRe9w9S6aV1wQ8qfjiT6ub0+aU1aA9dws99M93 + P512MMBeWodYnwcwR/14KNC3vkTzpQnzRXh8FtBMw6JRWH7Mb3wsqJSuXYT0STDnFLXiT88RwnLd + nAgbX0DL9k14mu3MqaOmBt/6TO1PShHrZvcKJz20qPPFf2J+kAHFEDyp3w7vmHpnn6DumKTYcupb + 
MJzvi6Iuy+eKNa7Y9VMdqSn6+q9fvRaZQvPZLBBa2Z1iq7WCZrFUDcFNNWnYj0VAGjWU0defJuSx + k0pmhXIKSjLrdKOJY04HcdoBO95ORLo2WvDH7/76E3SzWWyT7B+mDNYx87CbcVPc7dvsgN75PcTa + OV7FrEuRAjapAuysKj0WLDodfn4GdfiBspm1SwK7eqlwhKo6WLbih4fmVh+IdtXVYAnNqwgF/9pH + PKfw8R9/7SUUFzJrXGnSWH8R1W/vfjR9453f8FL9px5tvKpn86XdWKi0lRPVr0LaL+WyVOrz7u6w + vzZO5ZJOkaHkxzqj2ntNShpVrEJ6vDexlfQomI0q7WD38lJqff0+Fkm7RE10B2NT3g0B4/PmiuRW + GKgj7NuYilAMcLinB/rzX9hxFVpwmqeUCM8mLyeJd0X45hs24dPnU1VNBE12eY+WTfQOFnnmIpSl + a4adL38TT7aXKeHtNvz8mXgkSl38+gfRXI9+wKpNHsGqLQZSPwITTSenPqE0y2zC8fqrp18/BM61 + hQmH92I5+eOFV0giljjccJLZfvtPP7+VKM6l6idS0RP6+ZF2HQGblFJNZFcoGhy+uHvPFjKKYAhC + gz0lqdm0skwDjshVcCEtj/w97FoeSFPZ+BtvJru9+AM4j/pGdTfEMfVWKxv2wrum+HbY5uzyaXyk + 64JNf/0m8kxWIQrtYYMDnA0x9a0REDveT9Qt3WM+8a91Crp9PZCpcFw2uWkpAuh+jI1TZwRMoZ4L + V9nl6Kba1Ghqspvx4490+3KeOT8U3AP9+k/OWdFzsbYMCz4ucSLh68d9+3sHFAg7N3q73CGfDff9 + +tM/0/BT7udNsvVBIsuWbh31XXYXzgphTd+PSJrvZjnPgbD60x94v7kMzavMcNUjSTzsVq3DvvHU + gFXzNtXctW3y69Ek8NNTx1PrlUuTyjYoJUrpNuytWHg3saUUKJaxRTmv/O0PRbf6Sc3P6skWacAZ + fOOfmrr9+VMv0EdOF+xskn3+1V8rCHy9+OGDyS6BqoEinI+ktx0+JsdiVYOdiQ3VlNeZLev4EEFX + XSkZrt6BjQGnpLDT3xZZ7ZQ6nsfTpQN7Mjwcfa+Hn9/61cOEhOUnmNdPqiDEDUfqn6jww1sCPz8O + X90sXqzxxcNP//jOpSpZ3BY2eGnywoGsrPOB3fQKoO9veBu903zcJo9CvSrFG9s9pea8csYU3r6O + //RXJKy6PhKijYWNX/44Qq9AwpwH9Y5IiefnVt2BPjl8NGmB0//8/HV/1E0cRqNQsvlOG3RUi0dU + 1KPRDwHzOji2RKUbbR+jCZPbAj+9YJf6rhzxarTAUS/bSOo/e/bHP/727776OOkl17UesNYThi1/ + /+rnV62L6mHA5q9/VdIff5AOLcKXb/9nHi5TqL4O1R1vyWdTCoOTF/ARN2EkHCQTLYviNVD10xP7 + 8avtWSkmK6jLx4c8v3pxSj2phpYMSgQcr/XD189VN+csw4E9jGhC7dtXvn4d6YvpVrKTyUBdtdcB + 2xGt+4WHg6Lyd/6Et/FzDuabKAAEymehP3089fzCQ0ZWQMMpv7Lp3TUvyDSCsZ898775PWfX1MVu + lols/umh7/uJpKVpOfuKUUEpBBaNIl7Jf/4QEu6lTlbBhwSkW9cn+PSV/kd/jddwPv3whmql+Swn + 4WQVUOmcEC1fPGZPNbNhXuSZJnshCtjTbh6QL5eAbo8Xg82taK7Uv39TAf/5r7/++j+/CYO6uRXv + 72DAWMzjv/97VODf2S37N8+L/6bin0kEMmSP4u9//msI4e+2b+p2/L9jUxWf4e9//hJF+c+8wd9j + M2bv//HgX9//+89//T8AAAD//wMAAQU0SOUgAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bbf4f924e7a15ca-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 22 Apr 2023 16:40:50 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '96' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - de62efcb5fa7139d5deab0536eca1086 + status: + code: 200 + message: OK +- request: + body: '{"input": ["[{''role'': ''user'', ''content'': ''Determine which next command + to use, and respond using the format specified above:''}, {''role'': ''assistant'', + ''content'': ''{\\n \"thoughts\": {\\n \"text\": \"I will use the + write_to_file command to write \\''Hello World\\'' into a file named \\''hello_world.txt\\''.\",\\n \"reasoning\": + \"The goal is to write \\''Hello World\\'' into a file named \\''hello_world.txt\\'', + and the write_to_file command is the most appropriate command for this task.\",\\n \"plan\": + \"- Use the write_to_file command to write \\''Hello World\\'' into a file named + \\''hello_world.txt\\''.\",\\n \"criticism\": \"I need to ensure that + I am using the correct file name and that the file is being written correctly.\",\\n \"speak\": + \"I will use the write_to_file 
command to write \\''Hello World\\'' into a file + named \\''hello_world.txt\\''.\"\\n },\\n \"command\": {\\n \"name\": + \"write_to_file\",\\n \"args\": {\\n \"filename\": \"hello_world.txt\",\\n \"text\": + \"Hello World\"\\n }\\n }\\n}''}, {''role'': ''system'', ''content'': + ''Command write_to_file returned: File written to successfully.''}]"], "model": + "text-embedding-ada-002", "encoding_format": "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '1258' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1R6Ww+yyrbl+/4VK+vV3hFRqWK9cb9ThYCInU5HEBEQkVsV1Mn57x39Tk7nvJhw + swhzzjHHGLP+419//fV3l9VFPv39z19/v6px+vt/fc/db9Pt73/++t//+uuvv/76j9/v/7izaLPi + fq/e5e/238XqfS+Wv//5i/vvM///pn/++lsX5wuxXVNi+8eNVdCIxQb77dVz6a2dYvi8BAnR3QsZ + aH88GnA4vUNiQHhmAxJvEsy0sSZFeS6y+fQKePiajBVt9f1YM+XWUXCQjAzLZVtl7F7NASyb+oS1 + bVsNbF8wCW4l2yRy8bEyeoJyAMsjVoiJbs2wtlu/FDrZTLD+ST/uMn9IAKb36JKrXutgiZQsOSqe + fJqFnbu6s6M7JTSfmjcfz/HKpqb1cuGh7xC2Ba0eaBeHG9EKXAlrgXmr6X159qIy9CWR949XxGrq + l9ATBh+b70AeeG5kFmQkKdHCTyYg79jqBP6ZpjhTAsX9zJtSgtVsLVirLi1gR+3qgdZ1DKxzdB/R + E/kI0A7PPZE8c8kGGYQBDE+BioJRb+pVyFwIHoKDkKAaR3cYkroRfaXYzUAumoFwMu4Bbo53xFHd + Z2SfDslBVQuLyHz8cftaEWL47CIeGw9NVZnu5B2YW7rF4WMKBnbf3RNIs3lG/HgdInY6vgJ463wH + Mf6wGYZdO1G4dR/neXNOZrZI26wDWtAEJMU6Uhc+8GOQ6rQiuW/dh3UfmD3Y7PBnpvqS1uuh2swg + 2RwMHAVvP6KCNlSQFlyDw8tt47LH00VQ8sn2T3zX6JNw0J/SkNzL3eKuu22PQJEmZzTJHxAt25k2 + MHlrD3IdsjRab6JPgUnKAHHDNc/4/hMk0MtfR+KGN5/tt1s5EUi3/aBtIh+iQXfiHjKzPOJ4FZaM + eafGg7di9ybyRQgA3Zgoh5kRFDh61snwuYpuDs77R4B224MfjSfyFOAh8k7YRq8smunlY0EMXBv7 + jz4eWOe+QxhFxUQ8pD7BIheVJXYx9rA6aW694ltewuOEKqKeV3ugHXIDuP1UH6Lu+9yllHsGcHai + FjuzmjE+MJwUmub4we7l1bv0Gm1CuDvzLpbjiAAKVSv/Ez9wi581iZ0lFpWHbaIDGknW328Lgjlc + rVmMju96iWfZgtslPmEng4k7WvVrFj/rESPeXJ/uYlN8E25LO2FUnjfR8nKrTryGQj8vpybNPuPj + 1G5VztgTu3bLaNEslMBrU7ywm78fbDEjp4evZGcgrn0XgEoi7qAaPzri34xrvcLHoYGrusYEcUDN + Vk1Temi1txG94zc/sNL43OD5rvk4CtA80Ft57uA2CkXiCGRy2YndW4iqmc18r67f+tcrEFy9OzrY + FhfR1U1usAbBETHxzavL83nhQdnqHo41Mkb0vGmQiFSzQYdzZjDC5akBZJt/E0dI5mi875cETA+h + xoocpUP3BrkjPIeNSbRtq9SLIWUtuG0Kft5orImWQXIdIJCrh5ZGy8FyiBcI5aJKUTcZZr1P/TiE + s5/lM9ufxGw6vp8KDCYBYX/D9xlViI9AfbBV4mT1PmL8dixBDjwfa1SfAOPHXSHyqbLD9i40VHbc + 2AXs6Pwixrdeh+igePAQUw9bavxUR90oISh09MDKs+/rWb/bN1i8F4FITp24a7TbeHB6zy72i8ch + Wx8pqSBwhRd23LXPxtQuGyhNF4T4+OwCRl7TQeB5zcboAR/RKryHGHzzk9hp/lHXdKWGiG7AJTI4 + aCpn46wBIgEW9lrzGdFd3qeQ1lVCFDk61NS4pgos3x+TaN96pSSrKngqtSMxrkcDMJ+uvTisNCcZ + kPdsefdHDVy9XsQOjLqMttU5hVL6uWN7hVM2trhNYPaONlj3Kk9dHP6hwUC+8cQVuqNLqAZTuHlR + jcj97Zyxw/5IIcofBpEVr3c/c/1oQcf4DBtukA3Lxg5zscPPG/ZK5z2sMfQP4B1KFUb3zzNba0VI + 4KNcR+L7cAJ0vewo7B/pmySPuwwWZV8KMMtLB6smyAAbageCogd0vhpnNePbrV8BT2TqLMDxra6X + 65KIr4MUYMlwLtGf/Pdn6BPfSkNA7/slhu/jaSbqpA31Uk1bBNpynxKkv2TArqWbAOfZWERlt8Zd + xnD1IFHDFJvl08l2Tz5ohfnsa9hQUOuS8728icmwBtie63mYiWuvkGz9M1qLios+zRhbEJxDC+vO + a3WZvHMamD/5AQHDBBH19YcFaNt7RLkwCDqnUjrxatcm2gEDM7bSyoPXd5thKdQGl4DnGULzGjRo + k+pGxuvJcYYxznpiapqnzu9Ui4XculsY6a8nYHcwSfD7/UkSnwcw+m7LwWbMVWwI/ib60M5DwB/4 + C9aM6J0tD6daoci1Atq6mVkvbepIYHfmXCxV73PNuhMPAbzkK/aXFNTjrX0loDksKzY3QIl2lVA0 + wnHo3tiZ+rvLVlFuxHGersQ7SlrNfcsCiGpGsWUlCaCiQBRQzI2MeAW1KgvDxhCrTAK4UPHnF89Y + 9B4TweZzbIdlfFwbyKr8gg2nKNzZ20uKqOqKSvyONGy6h/cS9jvziD7CXYnYSnsPBouk4/w1V8OE + t4cEvNJMxsZpG6lLxRkH6Kfzk9hffrNvFNiD43YRiYUiOtDgEyLIV5uIGH2+ZOR7XQh6mhLPehhs + bLaSADova7AsMzdazsM+hyG/jbEjnc71WPluCo3tUSYoi6zhh4/wY1Ue0bhirJf4vRSQ8vqCbdcs + 
2ciDAwc/BjwRmbyWoZNCqxA27DPj4hvPke01BG/F/o2tpFXdpSi4A3Q1u8bu4muAPidOAEowqTiS + 7s+BerGEIBDEK0HUfrHJ1iUkbgetwSbfEcZi55hAZejKuWrzTl0OqA+Fj9spJMvfD8Be52GE/L7T + sKt8SrB0O7TCvXI9EJV/PNhyfJ5iuIQvB98n4amu33jA2ukdYtDVcRdpG/Wikx8Sot+LczYz2U9g + Vmg6cQ0zywgZdi2wdL+a14OD1d33/b49LSJqN3iA6tI1+C8+FH/CejWndoWieqWIe4MnmwKx7eBb + 2H2I3sPGnfdDoIlGZ96IWVruwMWrxwvKZk2wpjaELR9Q5PDqmgpx32THWC+OFJ6SKcFW4jcRvdDn + CrfvzTzvS8ut50tpS+CXHz+8YAwfZkHxX943v0V1dZt0AyStNEkeRlc2GEGTQN7IBRKVqZDNNb6O + UIzFAW3OxB9IKdgbmDfwSfxg0COiO3EHRpa5X/6pDcz81AYc3vyGeKVjDmvCRY7YoJQQR33sAR2D + 2QMm+7zRtunVel25SYDAz8uZZUlTf7+/BHsvXGYQU2NYlMEuIZKfn3mzm0qw8BV3E58pd8IXc/DV + FZejB9gC4Pw4d8eBbLd2DB97KZ0390Cp9/7eSSCWVhOra9fWDPvbAh6Vk45//XL51hs425sLlvnp + zSZPjgr4rW/0bDQI1rfbbCC8KhMxjxczYmt/q4DIWzusxK9bvQbHavNbn0hffs4OS28AYdqdiHxU + y2E5vwiFr+fazH0+R2Amh1sHTwwR4l7tV7b4sVICkRwtkl8zqtKHVVCQiIlLjNfprP7qH0j0rv+p + J9bplxZc4neKJjnIhzUH/Qh+/SwL2Vizyfo4cA51NMMvXs34lldQC9qA+F/+/DQ9YfPDF6Jyr7Ke + TcbnMGVRRuxxy4Y/fJ8YN/TDX7baa5YKZNQf2BarwuWeUqMA2Dcdlj6J7/Jf/gCGxXvi8Gaf6+Us + HQJI+rrBbigTt8eT2wLLWO5Y//WDUjrNsL29B7QWgLlsH0Y9MFXpQL4w746gPm7AVeRMfP2uN56U + q/Lr1zgUZ8d9W9gOhePBkYhi7NnA4s1JEJvLGs7b4C5mpDSeN3gat8offjm3hx0nXvCxxW5jcaw7 + a2ADzkVvYenqPTP242O/59mzTuqFMauCn+clnmnjUvD9nhY8zZU477/x53R25OBk1PLsW8tS010l + GDBk+wJbB33vrtb1Ww/c1phZ/pTd/WkLN5AeV4WoX/7JpDPnwJ3s3OYt+4Tgy39bMLVOgP2bcRwW + VH4EKBwoIX57Hd2lPa2xqET8EavTvf/mX1iKIbI8NDRCGDF3s9PAx9iciPmpt2Ah1o5Cw3ch9g5v + nS3SCgPQHvrtL98iioqzAOP+YhHvi0c0Y9oIC8X4YBkKV8Asa2ng/uY/yH3D99HyFjsHljA5EKPf + BSp534IE7mTrRs7OJmDrdNW8P/H+6hvGLtw5B5xwmNAReXAYO3kQoH0fDlhL7swdC6fMwe6SGj9+ + zLptGx+gvdkrSMjqfTb/9OnZuuXYOyBT3SvIvsGq4muM8JVjJD7JCvj2k3kz3ONs7mupA8A9vDBu + enXgLOobwuUT2ViP1ac7vrtJA1//Yd7OBT/UPz0mPu8a/vXzXSUkDdzhtpiPqj+59Pt+ULjaJYJX + 5RRRBYwNaDDs59X2vWjHjoEn6iEViH07TOyzze0KJhvBQIcNhzKO7TUPRJvUwVYX9+7y3gc5/Op/ + giaZDL9+B358S7/vOZXeynsHJBwJWOZjW2XPa5DDKlMA2rdFoy4eBxVotemIDVNShwUsmxEYEo+w + by2ngXH+W4E0G2cETwJQ6eKXGvS324lIY1FHjBP2B4hmVcZaRfWaeRcgQHS9BOgjpXY92y+owMbj + OIxnfc+WW3dMYHaDGGOqLGwxGGeJPOHzebVb111+/sj6ebyIG1/eYDlzAS8ey+RKdJeiH38vxfzJ + DUS/68mw7vTj5oenBD1NAKbyc0Twx08DiGnExs+tg09qFkSi+8qlswVyGJ8/lx/+ubQzQQst2Kcz + JyjvYdpWVwQUEWVom3lcPV74YwGro4GJxJ3zmlLZmuEBqum8ouUdjetltwpz9LGJ90htMBZaSmES + cAxx942lrsIEFOAr+Y78+B9D9jEHg3t+EfcuFTXdJbcZCtVTxeZeRvVaLnN4SGihkO/xML37xYB+ + WdrElQut5i1jasFoFj52x8yMaMq9KTSrzsRufDHBXExlII76ISROFSKwlPcTB/tYu5JzaLzdMTvG + Kex3+hG7S6sOe9F3eSCamkwQowZbpkOQiIoFsvmL//VsPPEN2m85m5eLQNloRFYC/bKysaRmW7A6 + m3WFIpO2RA8N0yXYfFPoCkpBnDkX6hm34AAn4ynjMN0cM7peRAqtwJbwNIhvl03owMF5KCi2Z4cA + Khw0DR4uBwUXD7jNyLk+GMK1yV9zzvcumN0m/fL7sEHAu3PutPp9Cr96hnzxH3APq1ihsqEJQevm + k613y7FANHIY7T+wGFghwxU+tFOEQ0XnXfLemjzwz+1AtOQeqSvgnBF+8ZI4+i6o6Xvn5cff+6P1 + 5dRj0ig3yNmknufX8Rqt9mOMIUQmJoahPyLqZOtGdJVX9/VrQLbIaZgD+4EZGv0nzPpYOfdQeaUc + wW72rtdxShJ4fuIGbf30We89Ny7F99lKEVW5Tf3QRRGBSeBlxLoQuWM8iBbUnW1I7Ptbj/aW8hTg + MFYE//JpPXmhBmldJuSbHxm/tXMK99lyw06/kdxJEFLjT7+QmgbXHTgpAixz8iHe8sD1fshZAKdI + uWFcG1a0g5dYgDvRvKC9Gc4R7W59D7DfB0QKNdflXqoBob4NQqwG7ymb4Mn3wKgLIZFiVg3MefEJ + NFXlMD999zhMsgsM8Lh3O6yLqs3o1UwEaN1l++svuBlfvINGdErFx/jGzoxxstn/8I64S1sP9Pjw + Y+GF1QLtv/yM5Qk6APR0DrP47SfzI31XIDWTOzHknRmN2zYWYBNpT6J/FvPP/0G04z5Ybu5gINgk + Kxx6yGH5kemA1q3iQMqbC7E2CgVEW+7cMS9mbr4ly5HR+rEW4iVxI8R7WarOwntI4HbsH0j86iu2 + XIYC7JTxjESD29fjC+49+MV3rDmjoS7TIU1gQnMFJ1+9tLwXi0L2anzibdwSrGoDHUi5J08yea9m + NH6cKXB0g3zj+c5GGW9WwEs+woawBWDtmnoWqtlZ0CFLy2HHB3oMD/HqoWHLdlG3bPgNCMupm0Gv + owF0KergepMk7N18f9gz9RpD7FQ3jFnAud18Dnn4xTNshAsdxp8f9rnW43x42/6wi2fZ+fm15Kvn + f/osB/sjN8/DN9+WvoQdHLj4grNKPGRMTR/afuOlTyzbcRmNgsPQz1+ZP4WkqavwrhM4oHtG7ORB + 
1M/V7g9geyIKtqc0jH58QRQMtUcnbxXAAIV4/ukPgnc+UtfYmmL41c9Y2RzbaNXrk/Fffp/GtGxV + XDGFYS7zCMSXN5vuOu/AIK5G1N/NdaDaU1nhsyVg3uT7mq3Pe279+PUshIJU85oTJrDvE32+7dxV + Xa99Mf/yhWjAr8HHWk8GiFeHzluOI9m3nmNY+qFG/FKNarodxxbiEo9IeMlGtLTNJwXf+QCWD73M + KGvuDpy8dfqD/zRVgwPsUkGc4V516t1qhjP86U2zn3OVinbgwO68mD58pB8297XVQ9yAOxK/fgPd + Xncl+OI1QXaeuXRXrQbU6tnA8rUY3dGLLQ9KA4vQ5Tm2da8nxxG6oL1+/YKN2zobYYX8taVYadVP + xK5jNYuF2jVY5R9btsihQwV26wii9/wYrYlQcVANwYSOajsAttyBBcF+VxPp0rXurz8LEycL2P/q + 30VujjyYqVOhKjhXdX9EY/vzf4i1Hw8uD6+lIErSnp+38Nxkk/C6xBCGi0z8dnUA4/JAE6POKQjW + C58tT4VKsDLODtHJ/BzG4wJTYBRkj1q69u5PH8IXd7jNEOwslzqV08Gs2x2xLnWH7Fs/I2xeHzgH + S1CyuaksCr/xIW4jrNHozL0Ev/oN0d5yarqsjxbs1S1DYhAaA3s8VQTDQ40RL2wzsBZPS4APwUL4 + Vt9dd39zFwSfU6mg5y341OMmevHQkwKXIK5qhrZW1kS8bXJ+XsxLG423mw9B1+cHIjf3bJim4bCB + XOvBmdZSHFEqUCjK+eaOvWsWqPQqmwiq8b1Dr1qh2Wdg9fxbj1zPcch27Jh6v3kZzj4jAezb7356 + GKvna8/ow0pW0L+6GEeojzLWApkDX78A7Ti6z6bqTSrg+U5MlGfv1MsGV85PHxO7uirs55+B2qfS + vD0tUvSLP2je/h5j1dp9/eFuA45lfP35JWwNkzeFS2PrxPPIhv3wGs7u/oloL3TqH7xke8si8uW5 + ujTy5hkSdjTnJjim7k4Y3tKf/NJjVXY5KkujULyZMK+H2meLrVsI9mFZYOnbP0dIrzGs/VXCSO3e + YE3bdBSW8O3Mc7RGwwQ2hQZf0us2Uz/063VT8y3E2VOYhc/t5dIX3HqwULQPkVhdMLqM5gGSa6lh + PTxo9d4kwgq5/br/4rfEWIFyB0o7iydIFFNG6/HEwcSWXazD3lVpeJa0P/hrH+1H9J2P9X/8kGSU + vGHVP90KLxx6ExPJOiD8tqlEkSnbn7+pfvkW/fUn9PniL7mu601Et6NLJBK1bAjKuQKn687DmONI + RJbLkIOrufUQU4eaUZL11Q8vsI08WM/Pi5bDT2a+Z3r15Iiup9b48SOCUr3Nlsvp3ENxvvjz5lne + arqeZg3GkthitUyFaHnCLoTBouj4V++02NgStI5lhU/7xytjMriFoNrBK5aahtTLwg3Nb14xb6vJ + Hvad+w5A4/HcDOVLMCzC65LAYWP0aKnDrUqEixX89A0ST1rjsqzoQujUlorxS1WiNaqxA07MI0QT + uJc7B4Zzg3M02ETf2bxLPZ9W4hPnM1GCs1J3rny14LfeyDcf2CpkKoTifqYYe76e7bAhaUBuEwkJ + GeTdyeEfBtwORoMNOe/qOSdqDDPV6nAx3ONo3ttbCIvPZfedJyvRXvRVHt7D+20exVVxl+ispKDM + 7Jm47sBna3DsN2B/CNncN917GI5OJ/ye/85bry794rO4Sl3xpx/T+iEU4MfXvvOHaLrcdxIEzTRh + LWcu+JPvz0uY/Jknr/ucSIByNU+s1+aUTUK6h7DvVufLh+2Ifv1o4Ru/eefHdr0cFOcArwU6INFs + z+qa5XkIFyFdsLMV7GiRi94BwXRApFA7E3AdckNwG84SqrsQqeOFflZY+oFGTqfmEH31dSJ88ZVY + uPAYPaRid3RS74XdZatE3JPzWniQu4Tcu6R1F1ZeCyHV1wpBWDzUr19ViX//dgX857/++uv//HYY + tN29eH03BkzFMv37v7cK/Pt2v/2b4/h/E/7PToR5vJXF3//81yaEvz9D136m/zt1TfEe//7nLx6C + P/sN/p666fb6Hxf+9V3vP//1/wAAAP//AwBlmDGz5SAAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bbf4f957a7315ca-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 22 Apr 2023 16:40:51 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '227' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - a25379b9d7bfed73e1a46145146b1478 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are write_to_file-GPT, an AI designed to use the write_to_file command to write + ''Hello World'' into a file named \"hello_world.txt\" and then use the task_complete + command to complete the task.\nYour decisions must always be made independently + without seeking user assistance. Play to your strengths as an LLM and pursue + simple strategies with no legal complications.\n\nGOALS:\n\n1. Use the write_to_file + command to write ''Hello World'' into a file named \"hello_world.txt\".\n2. 
+ Use the task_complete command to complete the task.\n3. Do not use any other + commands.\n\n\nConstraints:\n1. ~4000 word limit for short term memory. Your + short term memory is short, so immediately save important information to files.\n2. + If you are unsure how you previously did something or want to recall past events, + thinking about similar events will help you remember.\n3. No user assistance\n4. + Exclusively use the commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. + append_to_file: Append to file, args: \"filename\": \"\", \"text\": + \"\"\n2. delete_file: Delete file, args: \"filename\": \"\"\n3. + read_file: Read file, args: \"filename\": \"\"\n4. search_files: Search + Files, args: \"directory\": \"\"\n5. write_to_file: Write to file, + args: \"filename\": \"\", \"text\": \"\"\n6. delete_agent: Delete + GPT Agent, args: \"key\": \"\"\n7. get_hyperlinks: Get text summary, args: + \"url\": \"\"\n8. get_text_summary: Get text summary, args: \"url\": \"\", + \"question\": \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. + message_agent: Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. + start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. Do Nothing: \"do_nothing\", args: \n13. Task Complete + (Shutdown): \"task_complete\", args: \"reason\": \"\"\n\nResources:\n1. + Internet access for searches and information gathering.\n2. Long Term memory + management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File + output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your + actions to ensure you are performing to the best of your abilities.\n2. Constructively + self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions + and strategies to refine your approach.\n4. Every command has a cost, so be + smart and efficient. Aim to complete tasks in the least number of steps.\n5. 
+ Write all code to a file.\n\nYou should only respond in JSON format as described + below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": + \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- + long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": + \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": + \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} + \nEnsure the response can beparsed by Python json.loads"}, {"role": "system", + "content": "The current time and date is Tue Jan 01 00:00:00 2000"}, {"role": + "system", "content": "This reminds you of these events from your past:\n[''Assistant + Reply: {\\n \"thoughts\": {\\n \"text\": \"I will use the write_to_file + command to write \\''Hello World\\'' into a file named \\''hello_world.txt\\''.\",\\n \"reasoning\": + \"The goal is to write \\''Hello World\\'' into a file named \\''hello_world.txt\\'', + and the write_to_file command is the most appropriate command for this task.\",\\n \"plan\": + \"- Use the write_to_file command to write \\''Hello World\\'' into a file named + \\''hello_world.txt\\''.\",\\n \"criticism\": \"I need to ensure that + I am using the correct file name and that the file is being written correctly.\",\\n \"speak\": + \"I will use the write_to_file command to write \\''Hello World\\'' into a file + named \\''hello_world.txt\\''.\"\\n },\\n \"command\": {\\n \"name\": + \"write_to_file\",\\n \"args\": {\\n \"filename\": \"hello_world.txt\",\\n \"text\": + \"Hello World\"\\n }\\n }\\n} \\nResult: Command write_to_file returned: + File written to successfully. \\nHuman Feedback: '']\n\n"}, {"role": "user", + "content": "Determine which next command to use, and respond using the format + specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I will use the write_to_file command to write ''Hello World'' into a file + named ''hello_world.txt''.\",\n \"reasoning\": \"The goal is to write + ''Hello World'' into a file named ''hello_world.txt'', and the write_to_file + command is the most appropriate command for this task.\",\n \"plan\": + \"- Use the write_to_file command to write ''Hello World'' into a file named + ''hello_world.txt''.\",\n \"criticism\": \"I need to ensure that I am + using the correct file name and that the file is being written correctly.\",\n \"speak\": + \"I will use the write_to_file command to write ''Hello World'' into a file + named ''hello_world.txt''.\"\n },\n \"command\": {\n \"name\": + \"write_to_file\",\n \"args\": {\n \"filename\": \"hello_world.txt\",\n \"text\": + \"Hello World\"\n }\n }\n}"}, {"role": "system", "content": "Command + write_to_file returned: File written to successfully."}, {"role": "user", "content": + "Determine which next command to use, and respond using the format specified + above:"}], "temperature": 0.0, "max_tokens": 2704}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '5708' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA8xTwW7bMAy97ysIXnJRgjhNs9S3HQq02GGHpRuGeQhUmbG1yJIh0a2LwP8+KHaa + NF0HDLvsZpOP5HtP5A51jimqUrKqajN+v/wg5yW3/PDt7vq2+DRvb5Lky7VvVlcfFQp09z9J8VAx + Ua6qDbF2FgUqT5IpxzRZLGfJMllczgRWLieDKRY1jy8ml2Nu/L0bTy+mCQpsgiwI0x3W3lU1r9lt + yQZMk9lyLvDY/DkxS2YC2bE0R+z8atEJVKXTigKm33dYUTg09s4QpihD0IGl5UjTWSYbJewyCwCQ + 
IZeuKUoOGaYwBIcEtRyDGd5CKR8IQqMUhbBpjHmCR6+ZycLohoxx8NV5k49AW3YgYaMNgZUV5TAq + Y379GPMTbnk0gVVJYKllCEw16ADsoAkEXBKwDNv1IJ5AuaqSNo+A59gBNclQnNL1JIOz2hY959UA + G/pHuvT3ZGE/vST7m/nwWVtFMJgjjSeZH33Zw6LI/ZT4F+eI/de5+Fe9oQnaFm8bcq69NtL2ssdw + 989OKq9ZKx2qw+tbon0l2dD4WCf5DyN0iK+Zg3Lek2LzNJh4UhQxh7r8xV6dcwk1ye3/vYU93U4c + DmqAv7qnyKRX8qL9mWDpi/NTPN3vYbmjh28aeKzsBm6Z7bATuNFWh3Ldd8IUA7saBWqbU4vptPvR + vfsFAAD//wMAbKbLwRMFAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bbf4f9c6b3c15ca-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 22 Apr 2023 16:41:07 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '15821' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-reset-requests: + - 17ms + x-request-id: + - 19520475f9dabce5bc67ccb90bec24f1 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integration/goal_oriented/goal_oriented_tasks.md b/tests/integration/goal_oriented/goal_oriented_tasks.md new file mode 100644 index 00000000..02e3c343 --- /dev/null +++ b/tests/integration/goal_oriented/goal_oriented_tasks.md @@ -0,0 +1,10 @@ +If the goal oriented task pipeline fails, it means: +- you somehow changed the way the system prompt is generated +- or you broke autogpt. + +To know which one, you can run the following command: +```bash +pytest -s -k tests/integration/goal_oriented + +If the test is successul, it will record new cassettes in VCR. 
Then you can just push these to your branch and the pipeline +will pass diff --git a/tests/integration/goal_oriented/test_write_file.py b/tests/integration/goal_oriented/test_write_file.py new file mode 100644 index 00000000..033c783b --- /dev/null +++ b/tests/integration/goal_oriented/test_write_file.py @@ -0,0 +1,109 @@ +import concurrent +import os +import unittest + +import pytest +import vcr + +from autogpt.agent import Agent +from autogpt.commands.command import CommandRegistry +from autogpt.commands.file_operations import LOG_FILE, delete_file, read_file +from autogpt.config import AIConfig, Config, check_openai_api_key +from autogpt.memory import get_memory + +# from autogpt.prompt import Prompt +from autogpt.workspace import WORKSPACE_PATH +from tests.integration.goal_oriented.vcr_helper import before_record_request + +current_file_dir = os.path.dirname(os.path.abspath(__file__)) +# tests_directory = os.path.join(current_file_dir, 'tests') + +my_vcr = vcr.VCR( + cassette_library_dir=os.path.join(current_file_dir, "cassettes"), + record_mode="new_episodes", + before_record_request=before_record_request, +) + +CFG = Config() + + +@pytest.mark.integration_test +def test_write_file() -> None: + # if file exist + file_name = "hello_world.txt" + + file_path_to_write_into = f"{WORKSPACE_PATH}/{file_name}" + if os.path.exists(file_path_to_write_into): + os.remove(file_path_to_write_into) + file_logger_path = f"{WORKSPACE_PATH}/{LOG_FILE}" + if os.path.exists(file_logger_path): + os.remove(file_logger_path) + + delete_file(file_name) + agent = create_writer_agent() + try: + with my_vcr.use_cassette( + "write_file.vcr.yml", + filter_headers=[ + "authorization", + "X-OpenAI-Client-User-Agent", + "User-Agent", + ], + ): + with concurrent.futures.ThreadPoolExecutor() as executor: + future = executor.submit(agent.start_interaction_loop) + try: + result = future.result(timeout=45) + except concurrent.futures.TimeoutError: + assert False, "The process took longer than 45 seconds to complete." 
+ # catch system exit exceptions + except SystemExit: # the agent returns an exception when it shuts down + content = "" + content = read_file(file_name) + os.remove(file_path_to_write_into) + + assert content == "Hello World", f"Expected 'Hello World', got {content}" + + +def create_writer_agent(): + command_registry = CommandRegistry() + command_registry.import_commands("autogpt.commands.file_operations") + command_registry.import_commands("autogpt.app") + + ai_config = AIConfig( + ai_name="write_to_file-GPT", + ai_role="an AI designed to use the write_to_file command to write 'Hello World' into a file named \"hello_world.txt\" and then use the task_complete command to complete the task.", + ai_goals=[ + "Use the write_to_file command to write 'Hello World' into a file named \"hello_world.txt\".", + "Use the task_complete command to complete the task.", + "Do not use any other commands.", + ], + ) + ai_config.command_registry = command_registry + memory = get_memory(CFG, init=True) + triggering_prompt = ( + "Determine which next command to use, and respond using the" + " format specified above:" + ) + system_prompt = ai_config.construct_full_prompt() + + agent = Agent( + ai_name="", + memory=memory, + full_message_history=[], + command_registry=command_registry, + config=ai_config, + next_action_count=0, + system_prompt=system_prompt, + triggering_prompt=triggering_prompt, + ) + CFG.set_continuous_mode(True) + CFG.set_memory_backend("no_memory") + CFG.set_temperature(0) + os.environ["TIKTOKEN_CACHE_DIR"] = "" + + return agent + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/integration/goal_oriented/vcr_helper.py b/tests/integration/goal_oriented/vcr_helper.py new file mode 100644 index 00000000..58504520 --- /dev/null +++ b/tests/integration/goal_oriented/vcr_helper.py @@ -0,0 +1,43 @@ +import json +import re + + +def replace_timestamp_in_request(request): + # Check if the request body contains a JSON object + + try: + if not request or not request.body: + return request + body = json.loads(request.body) + except ValueError: + return request + + if "messages" not in body: + return request + + for message in body["messages"]: + if "content" in message and "role" in message and message["role"] == "system": + timestamp_regex = re.compile(r"\w{3} \w{3} \d{2} \d{2}:\d{2}:\d{2} \d{4}") + message["content"] = timestamp_regex.sub( + "Tue Jan 01 00:00:00 2000", message["content"] + ) + + request.body = json.dumps(body) + return request + + +def filter_hostnames(request): + allowed_hostnames = ["api.openai.com"] # List of hostnames you want to allow + + if any(hostname in request.url for hostname in allowed_hostnames): + return request + else: + return None + + +def before_record_request(request): + filtered_request = filter_hostnames(request) + filtered_request_without_dynamic_data = replace_timestamp_in_request( + filtered_request + ) + return filtered_request_without_dynamic_data diff --git a/tests/smoke_test.py b/tests/smoke_test.py deleted file mode 100644 index 1b9d643f..00000000 --- a/tests/smoke_test.py +++ /dev/null @@ -1,59 +0,0 @@ -"""Smoke test for the autogpt package.""" -import os -import subprocess -import sys - -import pytest - -from autogpt.commands.file_operations import delete_file, read_file - - -@pytest.mark.integration_test -def test_write_file() -> None: - """ - Test case to check if the write_file command can successfully write 'Hello World' to a file - named 'hello_world.txt'. - - Read the current ai_settings.yaml file and store its content. 
- """ - env_vars = {"MEMORY_BACKEND": "no_memory", "TEMPERATURE": "0"} - ai_settings = None - if os.path.exists("ai_settings.yaml"): - with open("ai_settings.yaml", "r") as f: - ai_settings = f.read() - os.remove("ai_settings.yaml") - - try: - if os.path.exists("hello_world.txt"): - # Clean up any existing 'hello_world.txt' file before testing. - delete_file("hello_world.txt") - # Prepare input data for the test. - input_data = """write_file-GPT -an AI designed to use the write_file command to write 'Hello World' into a file named "hello_world.txt" and then use the task_complete command to complete the task. -Use the write_file command to write 'Hello World' into a file named "hello_world.txt". -Use the task_complete command to complete the task. -Do not use any other commands. - -y -5 -EOF""" - command = f"{sys.executable} -m autogpt" - - # Execute the script with the input data. - process = subprocess.Popen( - command, - stdin=subprocess.PIPE, - shell=True, - env={**os.environ, **env_vars}, - ) - process.communicate(input_data.encode()) - - # Read the content of the 'hello_world.txt' file created during the test. - content = read_file("hello_world.txt") - finally: - if ai_settings: - # Restore the original ai_settings.yaml file. - with open("ai_settings.yaml", "w") as f: - f.write(ai_settings) - - # Check if the content of the 'hello_world.txt' file is equal to 'Hello World'. - assert content == "Hello World", f"Expected 'Hello World', got {content}" From eb5a8a87d869d7cb591f48890df7fdad75f002de Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Sat, 22 Apr 2023 12:39:56 -0700 Subject: [PATCH 083/222] add decorator to tests Signed-off-by: Merwane Hamadi --- .github/workflows/ci.yml | 4 +- .github/workflows/goal_oriented_tasks.yml | 37 ------------------- tests/conftest.py | 5 +++ tests/integration/goal_oriented/decorators.py | 14 +++++++ .../goal_oriented/test_write_file.py | 4 ++ 5 files changed, 26 insertions(+), 38 deletions(-) delete mode 100644 .github/workflows/goal_oriented_tasks.yml create mode 100644 tests/conftest.py create mode 100644 tests/integration/goal_oriented/decorators.py diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d3dee93d..069e5633 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -72,7 +72,9 @@ jobs: - name: Run unittest tests with coverage run: | pytest --cov=autogpt --without-integration --without-slow-integration --cov-report term-missing --cov-branch --cov-report xml --cov-report term - + pytest --cov=autogpt tests/integration/goal_oriented --cov-report term-missing --cov-branch --cov-report xml --cov-report term + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - name: Generate coverage report run: | coverage report diff --git a/.github/workflows/goal_oriented_tasks.yml b/.github/workflows/goal_oriented_tasks.yml deleted file mode 100644 index 816b64ff..00000000 --- a/.github/workflows/goal_oriented_tasks.yml +++ /dev/null @@ -1,37 +0,0 @@ -name: Goal Oriented Tasks - -on: - push: - branches: - - master - pull_request: - branches: - - master - -jobs: - build: - runs-on: ubuntu-latest - - strategy: - matrix: - python-version: ['3.10'] - - steps: - - name: Check out repository - uses: actions/checkout@v3 - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v3 - with: - python-version: ${{ matrix.python-version }} - - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install -r requirements.txt - - - name: Generate coverage report - run: | - 
coverage run --source=autogpt -m pytest -s -k tests/integration/goal_oriented - env: - OPENAI_API_KEY: 'dummy_api_key' diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 00000000..36233c18 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,5 @@ +from dotenv import load_dotenv + +load_dotenv() + +# Your other pytest configurations and fixtures diff --git a/tests/integration/goal_oriented/decorators.py b/tests/integration/goal_oriented/decorators.py new file mode 100644 index 00000000..9f35fc33 --- /dev/null +++ b/tests/integration/goal_oriented/decorators.py @@ -0,0 +1,14 @@ +import os +import pytest + + +def requires_openai_api_key(func): + def wrapper(*args, **kwargs): + if not os.environ.get('OPENAI_API_KEY'): + pytest.skip( + "Environment variable 'OPENAI_API_KEY' is not set, skipping the test." + ) + else: + return func(*args, **kwargs) + + return wrapper diff --git a/tests/integration/goal_oriented/test_write_file.py b/tests/integration/goal_oriented/test_write_file.py index 033c783b..55c51f20 100644 --- a/tests/integration/goal_oriented/test_write_file.py +++ b/tests/integration/goal_oriented/test_write_file.py @@ -13,6 +13,7 @@ from autogpt.memory import get_memory # from autogpt.prompt import Prompt from autogpt.workspace import WORKSPACE_PATH +from tests.integration.goal_oriented.decorators import requires_openai_api_key from tests.integration.goal_oriented.vcr_helper import before_record_request current_file_dir = os.path.dirname(os.path.abspath(__file__)) @@ -27,8 +28,11 @@ my_vcr = vcr.VCR( CFG = Config() +@requires_openai_api_key @pytest.mark.integration_test def test_write_file() -> None: + # Your test code here + # if file exist file_name = "hello_world.txt" From 78a6b44b21316369dd36b97ec513608454450456 Mon Sep 17 00:00:00 2001 From: Richard Beales Date: Sat, 22 Apr 2023 19:28:16 +0100 Subject: [PATCH 084/222] Plugins: debug line always printed in plugin load --- autogpt/plugins.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/plugins.py b/autogpt/plugins.py index abc70446..8aaa35e9 100644 --- a/autogpt/plugins.py +++ b/autogpt/plugins.py @@ -36,7 +36,7 @@ def inspect_zip_for_modules(zip_path: str, debug: bool = False) -> list[str]: if debug: print(f"Found module '{name}' in the zipfile at: {name}") result.append(name) - if debug: + if debug and len(result) == 0: print(f"Module '__init__.py' not found in the zipfile @ {zip_path}.") return result From b7cd56f72b5cefcf3a96a141f6c1d491c1580ab4 Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Sat, 22 Apr 2023 12:48:47 -0700 Subject: [PATCH 085/222] move decorator higher up Signed-off-by: Merwane Hamadi --- .github/workflows/ci.yml | 3 +-- tests/conftest.py | 2 -- tests/integration/goal_oriented/decorators.py | 14 -------------- .../goal_oriented/test_write_file.py | 8 ++------ tests/local_cache_test.py | 6 ++++++ tests/test_image_gen.py | 3 +++ tests/utils.py | 18 ++++++++++++++++++ 7 files changed, 30 insertions(+), 24 deletions(-) delete mode 100644 tests/integration/goal_oriented/decorators.py create mode 100644 tests/utils.py diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 069e5633..780db14f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -71,8 +71,7 @@ jobs: - name: Run unittest tests with coverage run: | - pytest --cov=autogpt --without-integration --without-slow-integration --cov-report term-missing --cov-branch --cov-report xml --cov-report term - pytest --cov=autogpt tests/integration/goal_oriented --cov-report 
term-missing --cov-branch --cov-report xml --cov-report term + pytest --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term env: OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - name: Generate coverage report diff --git a/tests/conftest.py b/tests/conftest.py index 36233c18..bf6bd6c5 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,5 +1,3 @@ from dotenv import load_dotenv load_dotenv() - -# Your other pytest configurations and fixtures diff --git a/tests/integration/goal_oriented/decorators.py b/tests/integration/goal_oriented/decorators.py deleted file mode 100644 index 9f35fc33..00000000 --- a/tests/integration/goal_oriented/decorators.py +++ /dev/null @@ -1,14 +0,0 @@ -import os -import pytest - - -def requires_openai_api_key(func): - def wrapper(*args, **kwargs): - if not os.environ.get('OPENAI_API_KEY'): - pytest.skip( - "Environment variable 'OPENAI_API_KEY' is not set, skipping the test." - ) - else: - return func(*args, **kwargs) - - return wrapper diff --git a/tests/integration/goal_oriented/test_write_file.py b/tests/integration/goal_oriented/test_write_file.py index 55c51f20..d995c7a3 100644 --- a/tests/integration/goal_oriented/test_write_file.py +++ b/tests/integration/goal_oriented/test_write_file.py @@ -2,7 +2,6 @@ import concurrent import os import unittest -import pytest import vcr from autogpt.agent import Agent @@ -13,8 +12,8 @@ from autogpt.memory import get_memory # from autogpt.prompt import Prompt from autogpt.workspace import WORKSPACE_PATH -from tests.integration.goal_oriented.decorators import requires_openai_api_key from tests.integration.goal_oriented.vcr_helper import before_record_request +from tests.utils import requires_api_key current_file_dir = os.path.dirname(os.path.abspath(__file__)) # tests_directory = os.path.join(current_file_dir, 'tests') @@ -28,11 +27,8 @@ my_vcr = vcr.VCR( CFG = Config() -@requires_openai_api_key -@pytest.mark.integration_test +@requires_api_key("OPENAI_API_KEY") def test_write_file() -> None: - # Your test code here - # if file exist file_name = "hello_world.txt" diff --git a/tests/local_cache_test.py b/tests/local_cache_test.py index bb108626..bbaf8e57 100644 --- a/tests/local_cache_test.py +++ b/tests/local_cache_test.py @@ -7,6 +7,7 @@ import unittest import pytest from autogpt.memory.local import LocalCache +from tests.utils import requires_api_key def mock_config() -> dict: @@ -32,17 +33,20 @@ class TestLocalCache(unittest.TestCase): self.cfg = mock_config() self.cache = LocalCache(self.cfg) + @requires_api_key("OPENAI_API_KEY") def test_add(self) -> None: """Test adding a text to the cache""" text = "Sample text" self.cache.add(text) self.assertIn(text, self.cache.data.texts) + @requires_api_key("OPENAI_API_KEY") def test_clear(self) -> None: """Test clearing the cache""" self.cache.clear() self.assertEqual(self.cache.data.texts, []) + @requires_api_key("OPENAI_API_KEY") def test_get(self) -> None: """Test getting a text from the cache""" text = "Sample text" @@ -50,6 +54,7 @@ class TestLocalCache(unittest.TestCase): result = self.cache.get(text) self.assertEqual(result, [text]) + @requires_api_key("OPENAI_API_KEY") def test_get_relevant(self) -> None: """Test getting relevant texts from the cache""" text1 = "Sample text 1" @@ -59,6 +64,7 @@ class TestLocalCache(unittest.TestCase): result = self.cache.get_relevant(text1, 1) self.assertEqual(result, [text1]) + @requires_api_key("OPENAI_API_KEY") def test_get_stats(self) -> None: """Test getting the cache stats""" text = "Sample 
text" diff --git a/tests/test_image_gen.py b/tests/test_image_gen.py index 19c57e42..58b8337f 100644 --- a/tests/test_image_gen.py +++ b/tests/test_image_gen.py @@ -7,6 +7,7 @@ from PIL import Image from autogpt.commands.image_gen import generate_image, generate_image_with_sd_webui from autogpt.config import Config from autogpt.workspace import path_in_workspace +from tests.utils import requires_api_key def lst(txt): @@ -18,6 +19,7 @@ class TestImageGen(unittest.TestCase): def setUp(self): self.config = Config() + @requires_api_key("OPENAI_API_KEY") def test_dalle(self): self.config.image_provider = "dalle" @@ -36,6 +38,7 @@ class TestImageGen(unittest.TestCase): self.assertEqual(img.size, (512, 512)) image_path.unlink() + @requires_api_key("HUGGINGFACE_API_TOKEN") def test_huggingface(self): self.config.image_provider = "huggingface" diff --git a/tests/utils.py b/tests/utils.py new file mode 100644 index 00000000..a09daa8d --- /dev/null +++ b/tests/utils.py @@ -0,0 +1,18 @@ +import os + +import pytest + + +def requires_api_key(env_var): + def decorator(func): + def wrapper(*args, **kwargs): + if not os.environ.get(env_var): + pytest.skip( + f"Environment variable '{env_var}' is not set, skipping the test." + ) + else: + return func(*args, **kwargs) + + return wrapper + + return decorator From 2b5852f7daf83e07ec87538f239c9bc6064d2bad Mon Sep 17 00:00:00 2001 From: Media <12145726+rihp@users.noreply.github.com> Date: Sun, 23 Apr 2023 02:07:28 +0200 Subject: [PATCH 086/222] Tests utils suite (#2961) * Update Python version and benchmark file in benchmark.yml * Refactor main function and imports in cli.py * Update import statement in ai_config.py * Add set_temperature and set_memory_backend methods in config.py * Remove unused import in prompt.py * Add goal oriented tasks workflow * Added agent_utils to create agent * added pytest and vcrpy * added write file cassette * created goal oriented task write file with cassettes to not pay openai tokens * solve conflicts * add ability set azure because github workflow needs it off * solve conflicts in cli.py * black because linter fails * solve conflict * setup github action to v3 Signed-off-by: Merwane Hamadi * fix conflicts Signed-off-by: Merwane Hamadi * Plugins: debug line always printed in plugin load * add decorator to tests Signed-off-by: Merwane Hamadi * move decorator higher up Signed-off-by: Merwane Hamadi * init * more tests * passing tests * skip gitbranch decorator on ci * decorator skiponci * black * Update tests/utils.py decorator of skipping ci Co-authored-by: Nicholas Tindle * black * I oopsed the name * black * finally * black * wrong file --------- Signed-off-by: Merwane Hamadi Co-authored-by: Merwane Hamadi Co-authored-by: Merwane Hamadi Co-authored-by: Richard Beales Co-authored-by: Nicholas Tindle Co-authored-by: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com> Co-authored-by: Nicholas Tindle --- autogpt/utils.py | 8 +- tests/test_utils.py | 200 ++++++++++++++++++++++++++++++++++++++++++++ tests/utils.py | 7 ++ 3 files changed, 212 insertions(+), 3 deletions(-) create mode 100644 tests/test_utils.py diff --git a/autogpt/utils.py b/autogpt/utils.py index 3608ab74..c8553eaf 100644 --- a/autogpt/utils.py +++ b/autogpt/utils.py @@ -49,15 +49,17 @@ def readable_file_size(size, decimal_places=2): return f"{size:.{decimal_places}f} {unit}" -def get_bulletin_from_web() -> str: +def get_bulletin_from_web(): try: response = requests.get( "https://raw.githubusercontent.com/Significant-Gravitas/Auto-GPT/master/BULLETIN.md" 
) if response.status_code == 200: return response.text - except: - return "" + except requests.exceptions.RequestException: + pass + + return "" def get_current_git_branch() -> str: diff --git a/tests/test_utils.py b/tests/test_utils.py new file mode 100644 index 00000000..fdea83d4 --- /dev/null +++ b/tests/test_utils.py @@ -0,0 +1,200 @@ +import os +from unittest.mock import Mock, patch + +import pytest +import requests +from colorama import Fore +from git import Repo + +from autogpt.utils import ( + clean_input, + get_bulletin_from_web, + get_current_git_branch, + get_latest_bulletin, + readable_file_size, + validate_yaml_file, +) +from tests.utils import skip_in_ci + + +def test_validate_yaml_file_valid(): + with open("valid_test_file.yaml", "w") as f: + f.write("setting: value") + result, message = validate_yaml_file("valid_test_file.yaml") + os.remove("valid_test_file.yaml") + + assert result == True + assert "Successfully validated" in message + + +def test_validate_yaml_file_not_found(): + result, message = validate_yaml_file("non_existent_file.yaml") + + assert result == False + assert "wasn't found" in message + + +def test_validate_yaml_file_invalid(): + with open("invalid_test_file.yaml", "w") as f: + f.write( + "settings:\n first_setting: value\n second_setting: value\n nested_setting: value\n third_setting: value\nunindented_setting: value" + ) + result, message = validate_yaml_file("invalid_test_file.yaml") + os.remove("invalid_test_file.yaml") + print(result) + print(message) + assert result == False + assert "There was an issue while trying to read" in message + + +def test_readable_file_size(): + size_in_bytes = 1024 * 1024 * 3.5 # 3.5 MB + readable_size = readable_file_size(size_in_bytes) + + assert readable_size == "3.50 MB" + + +@patch("requests.get") +def test_get_bulletin_from_web_success(mock_get): + mock_get.return_value.status_code = 200 + mock_get.return_value.text = "Test bulletin" + bulletin = get_bulletin_from_web() + + assert bulletin == "Test bulletin" + + +@patch("requests.get") +def test_get_bulletin_from_web_failure(mock_get): + mock_get.return_value.status_code = 404 + bulletin = get_bulletin_from_web() + print(bulletin) + assert bulletin == "" + + +@skip_in_ci +def test_get_current_git_branch(): + branch_name = get_current_git_branch() + + # Assuming that the branch name will be non-empty if the function is working correctly. 
+ assert branch_name != "" + + +def test_get_latest_bulletin_no_file(): + if os.path.exists("CURRENT_BULLETIN.md"): + os.remove("CURRENT_BULLETIN.md") + + with patch("autogpt.utils.get_bulletin_from_web", return_value=""): + bulletin = get_latest_bulletin() + assert bulletin == "" + + +def test_get_latest_bulletin_with_file(): + with open("CURRENT_BULLETIN.md", "w", encoding="utf-8") as f: + f.write("Test bulletin") + + with patch("autogpt.utils.get_bulletin_from_web", return_value=""): + bulletin = get_latest_bulletin() + assert bulletin == "Test bulletin" + + os.remove("CURRENT_BULLETIN.md") + + +def test_get_latest_bulletin_with_new_bulletin(): + with open("CURRENT_BULLETIN.md", "w", encoding="utf-8") as f: + f.write("Old bulletin") + + with patch("autogpt.utils.get_bulletin_from_web", return_value="New bulletin"): + bulletin = get_latest_bulletin() + assert "New bulletin" in bulletin + + os.remove("CURRENT_BULLETIN.md") + + +@patch("requests.get") +def test_get_bulletin_from_web_success(mock_get): + mock_get.return_value.status_code = 200 + mock_get.return_value.text = "Test bulletin" + bulletin = get_bulletin_from_web() + + assert bulletin == "Test bulletin" + mock_get.assert_called_with( + "https://raw.githubusercontent.com/Significant-Gravitas/Auto-GPT/master/BULLETIN.md" + ) + + +@patch("requests.get") +def test_get_bulletin_from_web_failure(mock_get): + mock_get.return_value.status_code = 404 + bulletin = get_bulletin_from_web() + + assert bulletin == "" + + +@patch("requests.get") +def test_get_bulletin_from_web_exception(mock_get): + mock_get.side_effect = requests.exceptions.RequestException() + bulletin = get_bulletin_from_web() + + assert bulletin == "" + + +@patch("autogpt.utils.Repo") +def test_get_current_git_branch_success(mock_repo): + mock_repo.return_value.active_branch.name = "test-branch" + branch_name = get_current_git_branch() + + assert branch_name == "test-branch" + + +@patch("autogpt.utils.Repo") +def test_get_current_git_branch_failure(mock_repo): + mock_repo.side_effect = Exception() + branch_name = get_current_git_branch() + + assert branch_name == "" + + +def test_get_latest_bulletin_no_file(): + if os.path.exists("CURRENT_BULLETIN.md"): + os.remove("CURRENT_BULLETIN.md") + + with patch("autogpt.utils.get_bulletin_from_web", return_value=""): + bulletin = get_latest_bulletin() + assert bulletin == "" + + +def test_get_latest_bulletin_with_file(): + with open("CURRENT_BULLETIN.md", "w", encoding="utf-8") as f: + f.write("Test bulletin") + + with patch("autogpt.utils.get_bulletin_from_web", return_value=""): + bulletin = get_latest_bulletin() + assert bulletin == "Test bulletin" + + os.remove("CURRENT_BULLETIN.md") + + +def test_get_latest_bulletin_with_new_bulletin(): + with open("CURRENT_BULLETIN.md", "w", encoding="utf-8") as f: + f.write("Old bulletin") + + with patch("autogpt.utils.get_bulletin_from_web", return_value="New bulletin"): + bulletin = get_latest_bulletin() + assert f" {Fore.RED}::UPDATED:: {Fore.CYAN}New bulletin{Fore.RESET}" in bulletin + + os.remove("CURRENT_BULLETIN.md") + + +def test_get_latest_bulletin_new_bulletin_same_as_old_bulletin(): + with open("CURRENT_BULLETIN.md", "w", encoding="utf-8") as f: + f.write("Test bulletin") + + with patch("autogpt.utils.get_bulletin_from_web", return_value="Test bulletin"): + bulletin = get_latest_bulletin() + assert bulletin == "Test bulletin" + + os.remove("CURRENT_BULLETIN.md") + + +if __name__ == "__main__": + pytest.main() diff --git a/tests/utils.py b/tests/utils.py index a09daa8d..f042974c 
100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -16,3 +16,10 @@ def requires_api_key(env_var): return wrapper return decorator + + +def skip_in_ci(test_function): + return pytest.mark.skipif( + os.environ.get("CI") == "true", + reason="This test doesn't work on GitHub Actions.", + )(test_function) From f462674e32279f383a4d34f2307709566c4a975f Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Sun, 23 Apr 2023 17:36:10 +1200 Subject: [PATCH 087/222] Automatic prompting (#2896) * Add automatic ai prompting * Tweak the default prompt. * Print agent info upon creation. * Improve system prompt * Switch to fast_llm_model by default * Add format output command to user prompt. This vastly improves formatting success rate. * Add fallback to manual mode if llm output cannot be parsed (or other error). * Add unit test to cover ai creation setup. * Replace redundent prompt with manual mode instructions. * Add missing docstrings and typing. * Runs black on changes. * Runs isort * Update Python version and benchmark file in benchmark.yml * Refactor main function and imports in cli.py * Update import statement in ai_config.py * Add set_temperature and set_memory_backend methods in config.py * Remove unused import in prompt.py * Add goal oriented tasks workflow * Added agent_utils to create agent * added pytest and vcrpy * added write file cassette * created goal oriented task write file with cassettes to not pay openai tokens * solve conflicts * add ability set azure because github workflow needs it off * solve conflicts in cli.py * black because linter fails * solve conflict * setup github action to v3 Signed-off-by: Merwane Hamadi * fix conflicts Signed-off-by: Merwane Hamadi * Plugins: debug line always printed in plugin load * add decorator to tests Signed-off-by: Merwane Hamadi * move decorator higher up Signed-off-by: Merwane Hamadi * merge --------- Signed-off-by: Merwane Hamadi Co-authored-by: Merwane Hamadi Co-authored-by: Merwane Hamadi Co-authored-by: Richard Beales Co-authored-by: Nicholas Tindle Co-authored-by: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com> --- autogpt/prompts/prompt.py | 18 ++++++ autogpt/setup.py | 121 +++++++++++++++++++++++++++++++++++++- tests/unit/test_setup.py | 71 ++++++++++++++++++++++ 3 files changed, 209 insertions(+), 1 deletion(-) create mode 100644 tests/unit/test_setup.py diff --git a/autogpt/prompts/prompt.py b/autogpt/prompts/prompt.py index 79de04ea..d33d0e3a 100644 --- a/autogpt/prompts/prompt.py +++ b/autogpt/prompts/prompt.py @@ -107,4 +107,22 @@ Continue (y/n): """ config = prompt_user() config.save(CFG.ai_settings_file) + # Agent Created, print message + logger.typewriter_log( + config.ai_name, + Fore.LIGHTBLUE_EX, + "has been created with the following details:", + speak_text=True, + ) + + # Print the ai config details + # Name + logger.typewriter_log("Name:", Fore.GREEN, config.ai_name, speak_text=False) + # Role + logger.typewriter_log("Role:", Fore.GREEN, config.ai_role, speak_text=False) + # Goals + logger.typewriter_log("Goals:", Fore.GREEN, "", speak_text=False) + for goal in config.ai_goals: + logger.typewriter_log("-", Fore.GREEN, goal, speak_text=False) + return config diff --git a/autogpt/setup.py b/autogpt/setup.py index bfa68201..02b67c7a 100644 --- a/autogpt/setup.py +++ b/autogpt/setup.py @@ -1,18 +1,26 @@ """Set up the AI and its goals""" +import re + from colorama import Fore, Style from autogpt import utils +from autogpt.config import Config from autogpt.config.ai_config import AIConfig +from 
autogpt.llm_utils import create_chat_completion from autogpt.logs import logger +CFG = Config() + def prompt_user() -> AIConfig: """Prompt the user for input Returns: - AIConfig: The AIConfig object containing the user's input + AIConfig: The AIConfig object tailored to the user's input """ ai_name = "" + ai_config = None + # Construct the prompt logger.typewriter_log( "Welcome to Auto-GPT! ", @@ -21,6 +29,57 @@ def prompt_user() -> AIConfig: speak_text=True, ) + # Get user desire + logger.typewriter_log( + "Create an AI-Assistant:", + Fore.GREEN, + "input '--manual' to enter manual mode.", + speak_text=True, + ) + + user_desire = utils.clean_input( + f"{Fore.LIGHTBLUE_EX}I want Auto-GPT to{Style.RESET_ALL}: " + ) + + if user_desire == "": + user_desire = "Write a wikipedia style article about the project: https://github.com/significant-gravitas/Auto-GPT" # Default prompt + + # If user desire contains "--manual" + if "--manual" in user_desire: + logger.typewriter_log( + "Manual Mode Selected", + Fore.GREEN, + speak_text=True, + ) + return generate_aiconfig_manual() + + else: + try: + return generate_aiconfig_automatic(user_desire) + except Exception as e: + logger.typewriter_log( + "Unable to automatically generate AI Config based on user desire.", + Fore.RED, + "Falling back to manual mode.", + speak_text=True, + ) + + return generate_aiconfig_manual() + + +def generate_aiconfig_manual() -> AIConfig: + """ + Interactively create an AI configuration by prompting the user to provide the name, role, and goals of the AI. + + This function guides the user through a series of prompts to collect the necessary information to create + an AIConfig object. The user will be asked to provide a name and role for the AI, as well as up to five + goals. If the user does not provide a value for any of the fields, default values will be used. + + Returns: + AIConfig: An AIConfig object containing the user-defined or default AI name, role, and goals. + """ + + # Manual Setup Intro logger.typewriter_log( "Create an AI-Assistant:", Fore.GREEN, @@ -75,3 +134,63 @@ def prompt_user() -> AIConfig: ] return AIConfig(ai_name, ai_role, ai_goals) + + +def generate_aiconfig_automatic(user_prompt) -> AIConfig: + """Generates an AIConfig object from the given string. + + Returns: + AIConfig: The AIConfig object tailored to the user's input + """ + + system_prompt = """ +Your task is to devise up to 5 highly effective goals and an appropriate role-based name (_GPT) for an autonomous agent, ensuring that the goals are optimally aligned with the successful completion of its assigned task. + +The user will provide the task, you will provide only the output in the exact format specified below with no explanation or conversation. + +Example input: +Help me with marketing my business + +Example output: +Name: CMOGPT +Description: a professional digital marketer AI that assists Solopreneurs in growing their businesses by providing world-class expertise in solving marketing problems for SaaS, content products, agencies, and more. +Goals: +- Engage in effective problem-solving, prioritization, planning, and supporting execution to address your marketing needs as your virtual Chief Marketing Officer. + +- Provide specific, actionable, and concise advice to help you make informed decisions without the use of platitudes or overly wordy explanations. + +- Identify and prioritize quick wins and cost-effective campaigns that maximize results with minimal time and budget investment. 
+ +- Proactively take the lead in guiding you and offering suggestions when faced with unclear information or uncertainty to ensure your marketing strategy remains on track. +""" + + # Call LLM with the string as user input + messages = [ + { + "role": "system", + "content": system_prompt, + }, + { + "role": "user", + "content": f"Task: '{user_prompt}'\nRespond only with the output in the exact format specified in the system prompt, with no explanation or conversation.\n", + }, + ] + output = create_chat_completion(messages, CFG.fast_llm_model) + + # Debug LLM Output + logger.debug(f"AI Config Generator Raw Output: {output}") + + # Parse the output + ai_name = re.search(r"Name(?:\s*):(?:\s*)(.*)", output, re.IGNORECASE).group(1) + ai_role = ( + re.search( + r"Description(?:\s*):(?:\s*)(.*?)(?:(?:\n)|Goals)", + output, + re.IGNORECASE | re.DOTALL, + ) + .group(1) + .strip() + ) + ai_goals = re.findall(r"(?<=\n)-\s*(.*)", output) + + return AIConfig(ai_name, ai_role, ai_goals) diff --git a/tests/unit/test_setup.py b/tests/unit/test_setup.py new file mode 100644 index 00000000..67923dbd --- /dev/null +++ b/tests/unit/test_setup.py @@ -0,0 +1,71 @@ +import unittest +from io import StringIO +from unittest.mock import patch + +from autogpt.config.ai_config import AIConfig +from autogpt.setup import ( + generate_aiconfig_automatic, + generate_aiconfig_manual, + prompt_user, +) + + +class TestAutoGPT(unittest.TestCase): + def test_generate_aiconfig_automatic_default(self): + user_inputs = [""] + with patch("builtins.input", side_effect=user_inputs): + ai_config = prompt_user() + + self.assertIsInstance(ai_config, AIConfig) + self.assertIsNotNone(ai_config.ai_name) + self.assertIsNotNone(ai_config.ai_role) + self.assertGreaterEqual(len(ai_config.ai_goals), 1) + self.assertLessEqual(len(ai_config.ai_goals), 5) + + def test_generate_aiconfig_automatic_typical(self): + user_prompt = "Help me create a rock opera about cybernetic giraffes" + ai_config = generate_aiconfig_automatic(user_prompt) + + self.assertIsInstance(ai_config, AIConfig) + self.assertIsNotNone(ai_config.ai_name) + self.assertIsNotNone(ai_config.ai_role) + self.assertGreaterEqual(len(ai_config.ai_goals), 1) + self.assertLessEqual(len(ai_config.ai_goals), 5) + + def test_generate_aiconfig_automatic_fallback(self): + user_inputs = [ + "T&GF£OIBECC()!*", + "Chef-GPT", + "an AI designed to browse bake a cake.", + "Purchase ingredients", + "Bake a cake", + "", + ] + with patch("builtins.input", side_effect=user_inputs): + ai_config = prompt_user() + + self.assertIsInstance(ai_config, AIConfig) + self.assertEqual(ai_config.ai_name, "Chef-GPT") + self.assertEqual(ai_config.ai_role, "an AI designed to browse bake a cake.") + self.assertEqual(ai_config.ai_goals, ["Purchase ingredients", "Bake a cake"]) + + def test_prompt_user_manual_mode(self): + user_inputs = [ + "--manual", + "Chef-GPT", + "an AI designed to browse bake a cake.", + "Purchase ingredients", + "Bake a cake", + "", + ] + with patch("builtins.input", side_effect=user_inputs): + ai_config = prompt_user() + + self.assertIsInstance(ai_config, AIConfig) + self.assertEqual(ai_config.ai_name, "Chef-GPT") + self.assertEqual(ai_config.ai_role, "an AI designed to browse bake a cake.") + self.assertEqual(ai_config.ai_goals, ["Purchase ingredients", "Bake a cake"]) + + +if __name__ == "__main__": + unittest.main() From bc354a3df6cb1d462840689b02e6bb7c65362cd1 Mon Sep 17 00:00:00 2001 From: Didier Durand Date: Sun, 23 Apr 2023 08:23:48 +0200 Subject: [PATCH 088/222] Documentation typo: 
serach -> search (#2977) --- docs/configuration/search.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/configuration/search.md b/docs/configuration/search.md index 205a7721..c01828ed 100644 --- a/docs/configuration/search.md +++ b/docs/configuration/search.md @@ -22,7 +22,7 @@ Set up a custom search API and add to your .env file: 13. Set it as an environment variable named `GOOGLE_API_KEY` on your machine (see how to set up environment variables below). 14. [Enable](https://console.developers.google.com/apis/api/customsearch.googleapis.com) the Custom Search API on your project. (Might need to wait few minutes to propagate) -Set up a custom serach engine and add to your .env file: +Set up a custom search engine and add to your .env file: 15. Go to the [Custom Search Engine](https://cse.google.com/cse/all) page. 16. Click "Add". 17. Set up your search engine by following the prompts. You can choose to search the entire web or specific sites. @@ -47,4 +47,4 @@ For macOS and Linux users: ``` export GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY" export CUSTOM_SEARCH_ENGINE_ID="YOUR_CUSTOM_SEARCH_ENGINE_ID" -``` \ No newline at end of file +``` From bb786461c76022873eb770802e0bb5dd914cbba0 Mon Sep 17 00:00:00 2001 From: Richard Beales Date: Sun, 23 Apr 2023 07:35:17 +0100 Subject: [PATCH 089/222] =?UTF-8?q?Mark=20test=20test=5Fgenerate=5Faiconfi?= =?UTF-8?q?g=5Fautomatic=5Ftypical=20=20as=20@requires=5Fapi=5F=E2=80=A6?= =?UTF-8?q?=20(#2981)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Mark test test_generate_aiconfig_automatic_typical as @requires_api_key("OPENAI_API_KEY") * missing import --- tests/unit/test_setup.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/unit/test_setup.py b/tests/unit/test_setup.py index 67923dbd..1e718bf9 100644 --- a/tests/unit/test_setup.py +++ b/tests/unit/test_setup.py @@ -8,6 +8,7 @@ from autogpt.setup import ( generate_aiconfig_manual, prompt_user, ) +from tests.utils import requires_api_key class TestAutoGPT(unittest.TestCase): @@ -22,6 +23,7 @@ class TestAutoGPT(unittest.TestCase): self.assertGreaterEqual(len(ai_config.ai_goals), 1) self.assertLessEqual(len(ai_config.ai_goals), 5) + @requires_api_key("OPENAI_API_KEY") def test_generate_aiconfig_automatic_typical(self): user_prompt = "Help me create a rock opera about cybernetic giraffes" ai_config = generate_aiconfig_automatic(user_prompt) From 0b08b4f1c5babf7c23b25545930eeb36705e3cc3 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Sun, 23 Apr 2023 01:39:13 -0500 Subject: [PATCH 090/222] Update installation.md (#2970) --- docs/installation.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/installation.md b/docs/installation.md index f80b11f7..554ad711 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -70,6 +70,7 @@ azure_model_map: ... ``` - Details can be found here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section and here: https://learn.microsoft.com/en-us/azure/cognitive-services/openai/tutorials/embeddings?tabs=command-line for the embedding model. 
+ - If you're on Windows you may need to install https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170 ## Docker @@ -94,4 +95,4 @@ docker run -it --env-file=./.env -v $PWD/auto_gpt_workspace:/home/appuser/auto_g docker-compose run --build --rm auto-gpt --gpt3only --continuous ``` -Alternatively, you can pull the latest release directly from [Docker Hub](https://hub.docker.com/r/significantgravitas/auto-gpt) \ No newline at end of file +Alternatively, you can pull the latest release directly from [Docker Hub](https://hub.docker.com/r/significantgravitas/auto-gpt) From eaa6ed85e1a6f6aaadaf1608e03a60b062a38656 Mon Sep 17 00:00:00 2001 From: Richard Beales Date: Sun, 23 Apr 2023 09:07:14 +0100 Subject: [PATCH 091/222] Fix to prompt generator - "Ensure the response can beparsed" (#2980) --- autogpt/prompts/generator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/prompts/generator.py b/autogpt/prompts/generator.py index c9a441d8..282b9d72 100644 --- a/autogpt/prompts/generator.py +++ b/autogpt/prompts/generator.py @@ -151,5 +151,5 @@ class PromptGenerator: f"{self._generate_numbered_list(self.performance_evaluation)}\n\n" "You should only respond in JSON format as described below \nResponse" f" Format: \n{formatted_response_format} \nEnsure the response can be" - "parsed by Python json.loads" + " parsed by Python json.loads" ) From 9ed2a7a2d24c21968137ad2e51cd36144179fcf9 Mon Sep 17 00:00:00 2001 From: Richard Beales Date: Sun, 23 Apr 2023 09:17:18 +0100 Subject: [PATCH 092/222] Add missing test decorator (#2989) * Mark test test_generate_aiconfig_automatic_typical as @requires_api_key("OPENAI_API_KEY") * missing import * add missing decorator --- tests/unit/test_setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/unit/test_setup.py b/tests/unit/test_setup.py index 1e718bf9..daf8524a 100644 --- a/tests/unit/test_setup.py +++ b/tests/unit/test_setup.py @@ -12,6 +12,7 @@ from tests.utils import requires_api_key class TestAutoGPT(unittest.TestCase): + @requires_api_key("OPENAI_API_KEY") def test_generate_aiconfig_automatic_default(self): user_inputs = [""] with patch("builtins.input", side_effect=user_inputs): From 4906e3d7ef2ffc0b35fb8c29774acd79f00c0ee6 Mon Sep 17 00:00:00 2001 From: non-adjective Date: Sun, 23 Apr 2023 01:17:42 -0700 Subject: [PATCH 093/222] update weaviate.py for weaviate compatibility (#2985) File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/weaviate/schema/crud_schema.py", line 708, in _create_class_with_primitives raise UnexpectedStatusCodeException("Create class", response) weaviate.exceptions.UnexpectedStatusCodeException: Create class! Unexpected status code: 422, with response body: {'error': [{'message': "'Auto-gpt' is not a valid class name"}]}. GPT4: The error message indicates that "Auto-gpt" is not a valid class name. In Weaviate, class names must start with a capital letter and can contain only alphanumeric characters. Took advice and code and applying to weaviate.py to great result, programs runs now with no error! Unable to reproduce easily. Might be related to switching memory between Local and Weaviate? Either way, the proposed solution works for MacOS using Docker + Weaviate. 
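For illustration, a minimal standalone sketch of the class-name rule this patch relies on, with a hypothetical helper name; the replace/capitalize logic mirrors the index-name formatting referenced in the weaviate.py diff below:

    def format_weaviate_index(index: str) -> str:
        # Weaviate class names must start with a capital letter and may not
        # contain dashes, so swap dashes for underscores before capitalizing.
        index = index.replace("-", "_")
        if len(index) == 1:
            return index.capitalize()
        return index[0].capitalize() + index[1:]

    # The default memory index "auto-gpt" becomes a valid class name:
    assert format_weaviate_index("auto-gpt") == "Auto_gpt"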
--- autogpt/memory/weaviate.py | 1 + 1 file changed, 1 insertion(+) diff --git a/autogpt/memory/weaviate.py b/autogpt/memory/weaviate.py index 5408e9a9..0225ae04 100644 --- a/autogpt/memory/weaviate.py +++ b/autogpt/memory/weaviate.py @@ -51,6 +51,7 @@ class WeaviateMemory(MemoryProviderSingleton): # weaviate uses capitalised index names # The python client uses the following code to format # index names before the corresponding class is created + index = index.replace("-", "_") if len(index) == 1: return index.capitalize() return index[0].capitalize() + index[1:] From a38646409fb666d6fb515712c29f11c5af7a5f0b Mon Sep 17 00:00:00 2001 From: Didier Durand Date: Sun, 23 Apr 2023 10:26:54 +0200 Subject: [PATCH 094/222] Documentation: fixing typos (#2978) Fixing a couple of typos --- docs/configuration/memory.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/configuration/memory.md b/docs/configuration/memory.md index 8ce713be..674a9aa3 100644 --- a/docs/configuration/memory.md +++ b/docs/configuration/memory.md @@ -152,7 +152,7 @@ Note that you can also use the `--file` argument to ingest a single file into me The DIR path is relative to the auto_gpt_workspace directory, so `python data_ingestion.py --dir . --init` will ingest everything in `auto_gpt_workspace` directory. -You can adjust the `max_length` and overlap parameters to fine-tune the way the docuents are presented to the AI when it "recall" that memory: +You can adjust the `max_length` and `overlap` parameters to fine-tune the way the documents are presented to the AI when it "recall" that memory: - Adjusting the overlap value allows the AI to access more contextual information from each chunk when recalling information, but will result in more chunks being created and therefore increase memory backend usage and OpenAI API requests. - Reducing the `max_length` value will create more chunks, which can save prompt tokens by allowing for more message history in the context, but will also increase the number of chunks. - Increasing the `max_length` value will provide the AI with more contextual information from each chunk, reducing the number of chunks created and saving on OpenAI API requests. However, this may also use more prompt tokens and decrease the overall context available to the AI. @@ -161,6 +161,6 @@ Memory pre-seeding is a technique for improving AI accuracy by ingesting relevan ⚠️ If you use Redis as your memory, make sure to run Auto-GPT with the `WIPE_REDIS_ON_START=False` in your `.env` file. -⚠️For other memory backend, we currently forcefully wipe the memory when starting Auto-GPT. To ingest data with those memory backend, you can call the `data_ingestion.py` script anytime during an Auto-GPT run. +⚠️For other memory backends, we currently forcefully wipe the memory when starting Auto-GPT. To ingest data with those memory backends, you can call the `data_ingestion.py` script anytime during an Auto-GPT run. -Memories will be available to the AI immediately as they are ingested, even if ingested while Auto-GPT is running. \ No newline at end of file +Memories will be available to the AI immediately as they are ingested, even if ingested while Auto-GPT is running. 
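For illustration of the `max_length`/`overlap` trade-off described in the memory pre-seeding docs above, a minimal sketch of overlap-based chunking, assuming `overlap < max_length`; the helper name is hypothetical, and the real `split_file` helper in autogpt/commands/file_operations.py differs in detail:

    def chunk_with_overlap(content: str, max_length: int, overlap: int):
        # Yield chunks of at most `max_length` characters; each chunk repeats
        # the last `overlap` characters of the previous one, so a recalled
        # chunk carries a little surrounding context.
        step = max_length - overlap
        for start in range(0, len(content), step):
            yield content[start : start + max_length]

    # Larger overlap means more shared context but more chunks (and tokens):
    chunks = list(chunk_with_overlap("a" * 10, max_length=4, overlap=1))
    # -> ["aaaa", "aaaa", "aaaa", "a"]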
From 582c85b1402a7c28fd9305ed97bcc7dd48eb80a2 Mon Sep 17 00:00:00 2001 From: Didier Durand Date: Sun, 23 Apr 2023 10:28:08 +0200 Subject: [PATCH 095/222] Documentation: ensuring naming consistency (#2975) auto gpt -> Auto-GPT to ensure naming consistency on the page --- docs/configuration/voice.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/configuration/voice.md b/docs/configuration/voice.md index a0b522b6..8c9ab854 100644 --- a/docs/configuration/voice.md +++ b/docs/configuration/voice.md @@ -6,7 +6,7 @@ Enter this command to use TTS _(Text-to-Speech)_ for Auto-GPT python -m autogpt --speak ``` -Eleven Labs provides voice technologies such as voice design, speech synthesis, and premade voices that auto gpt can use for speech. +Eleven Labs provides voice technologies such as voice design, speech synthesis, and premade voices that Auto-GPT can use for speech. 1. Go to [Eleven Labs](https://beta.elevenlabs.io/) and make an account if you don't already have one. 2. Choose and setup the `Starter` plan. @@ -26,4 +26,4 @@ In the `.env` file set: - Josh : TxGEqnHWrfWFTfGW9XjX - Arnold : VR6AewLTigWG4xSOukaG - Adam : pNInz6obpgDQGcFmaJgB -- Sam : yoZ06aMxZJJ28mfd3POQ \ No newline at end of file +- Sam : yoZ06aMxZJJ28mfd3POQ From 1c352f5ff0d63018906112b9f7197c61a0d19ead Mon Sep 17 00:00:00 2001 From: Richard Beales Date: Sun, 23 Apr 2023 16:42:12 +0100 Subject: [PATCH 096/222] update documentation deploy gh action --- .github/workflows/documentation.yml | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index e1fd422e..7a7fd784 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -11,16 +11,19 @@ on: # Allows you to run this workflow manually from the Actions tab workflow_dispatch: +permissions: + contents: write jobs: - build: + deploy: runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 with: - fetch-depth: 0 - - uses: actions/setup-python@v2 - - run: pip install --upgrade pip && pip install mkdocs mkdocs-gen-files - - run: git config user.name 'github-actions[bot]' && git config user.email 'github-actions[bot]@users.noreply.github.com' - - name: Publish docs - run: mkdocs gh-deploy + python-version: 3.x + - uses: actions/cache@v2 + with: + key: ${{ github.ref }} + path: .cache + - run: pip install mkdocs-material + - run: mkdocs gh-deploy --force From cac1ea27e20a81d2383417c331c26f6b16a12d40 Mon Sep 17 00:00:00 2001 From: chyezh Date: Mon, 24 Apr 2023 00:11:04 +0800 Subject: [PATCH 097/222] Support secure and authenticated Milvus memory backends (#2127) Co-authored-by: Reinier van der Leer (Pwuts) --- .env.template | 16 +++++--- autogpt/config/config.py | 5 ++- autogpt/memory/__init__.py | 4 +- autogpt/memory/milvus.py | 78 ++++++++++++++++++++++++++++-------- docs/configuration/memory.md | 38 +++++++++++++----- 5 files changed, 108 insertions(+), 33 deletions(-) diff --git a/.env.template b/.env.template index 60edecd6..1d9eefb3 100644 --- a/.env.template +++ b/.env.template @@ -52,7 +52,7 @@ OPENAI_API_KEY=your-openai-api-key ## local - Default ## pinecone - Pinecone (if configured) ## redis - Redis (if configured) -## milvus - Milvus (if configured) +## milvus - Milvus (if configured - also works with Zilliz) ## MEMORY_INDEX - Name of index created in Memory backend (Default: auto-gpt) # MEMORY_BACKEND=local # MEMORY_INDEX=auto-gpt @@ 
-93,10 +93,16 @@ OPENAI_API_KEY=your-openai-api-key # WEAVIATE_API_KEY= ### MILVUS -## MILVUS_ADDR - Milvus remote address (e.g. localhost:19530) -## MILVUS_COLLECTION - Milvus collection, -## change it if you want to start a new memory and retain the old memory. -# MILVUS_ADDR=your-milvus-cluster-host-port +## MILVUS_ADDR - Milvus remote address (e.g. localhost:19530, https://xxx-xxxx.xxxx.xxxx.zillizcloud.com:443) +## MILVUS_USERNAME - username for your Milvus database +## MILVUS_PASSWORD - password for your Milvus database +## MILVUS_SECURE - True to enable TLS. (Default: False) +## Setting MILVUS_ADDR to a `https://` URL will override this setting. +## MILVUS_COLLECTION - Milvus collection, change it if you want to start a new memory and retain the old memory. +# MILVUS_ADDR=localhost:19530 +# MILVUS_USERNAME= +# MILVUS_PASSWORD= +# MILVUS_SECURE= # MILVUS_COLLECTION=autogpt ################################################################################ diff --git a/autogpt/config/config.py b/autogpt/config/config.py index 7ae362a3..b8981bee 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -85,9 +85,12 @@ class Config(metaclass=Singleton): os.getenv("USE_WEAVIATE_EMBEDDED", "False") == "True" ) - # milvus configuration, e.g., localhost:19530. + # milvus or zilliz cloud configuration. self.milvus_addr = os.getenv("MILVUS_ADDR", "localhost:19530") + self.milvus_username = os.getenv("MILVUS_USERNAME") + self.milvus_password = os.getenv("MILVUS_PASSWORD") self.milvus_collection = os.getenv("MILVUS_COLLECTION", "autogpt") + self.milvus_secure = os.getenv("MILVUS_SECURE") == "True" self.image_provider = os.getenv("IMAGE_PROVIDER") self.image_size = int(os.getenv("IMAGE_SIZE", 256)) diff --git a/autogpt/memory/__init__.py b/autogpt/memory/__init__.py index 3d18704c..c4eb4a07 100644 --- a/autogpt/memory/__init__.py +++ b/autogpt/memory/__init__.py @@ -69,8 +69,8 @@ def get_memory(cfg, init=False): elif cfg.memory_backend == "milvus": if not MilvusMemory: print( - "Error: Milvus sdk is not installed." - "Please install pymilvus to use Milvus as memory backend." + "Error: pymilvus sdk is not installed." + "Please install pymilvus to use Milvus or Zilliz Cloud as memory backend." ) else: memory = MilvusMemory(cfg) diff --git a/autogpt/memory/milvus.py b/autogpt/memory/milvus.py index 44aa72b9..9a38a3ab 100644 --- a/autogpt/memory/milvus.py +++ b/autogpt/memory/milvus.py @@ -1,4 +1,7 @@ """ Milvus memory storage provider.""" +import re + +from config import Config from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, connections from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding @@ -7,14 +10,66 @@ from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding class MilvusMemory(MemoryProviderSingleton): """Milvus memory storage provider.""" - def __init__(self, cfg) -> None: + def __init__(self, cfg: Config) -> None: """Construct a milvus memory storage connection. Args: cfg (Config): Auto-GPT global config. """ - # connect to milvus server. - connections.connect(address=cfg.milvus_addr) + self.configure(cfg) + + connect_kwargs = {} + if self.username: + connect_kwargs["user"] = self.username + connect_kwargs["password"] = self.password + + connections.connect( + **connect_kwargs, + uri=self.uri or "", + address=self.address or "", + secure=self.secure, + ) + + self.init_collection() + + def configure(self, cfg: Config) -> None: + # init with configuration. 
+ self.uri = None + self.address = cfg.milvus_addr + self.secure = cfg.milvus_secure + self.username = cfg.milvus_username + self.password = cfg.milvus_password + self.collection_name = cfg.milvus_collection + # use HNSW by default. + self.index_params = { + "metric_type": "IP", + "index_type": "HNSW", + "params": {"M": 8, "efConstruction": 64}, + } + + if (self.username is None) != (self.password is None): + raise ValueError( + "Both username and password must be set to use authentication for Milvus" + ) + + # configured address may be a full URL. + if re.match(r"^(https?|tcp)://", self.address) is not None: + self.uri = self.address + self.address = None + + if self.uri.startswith("https"): + self.secure = True + + # Zilliz Cloud requires AutoIndex. + if re.match(r"^https://(.*)\.zillizcloud\.(com|cn)", self.address) is not None: + self.index_params = { + "metric_type": "IP", + "index_type": "AUTOINDEX", + "params": {}, + } + + def init_collection(self) -> None: + """Initialize collection in vector database.""" fields = [ FieldSchema(name="pk", dtype=DataType.INT64, is_primary=True, auto_id=True), FieldSchema(name="embeddings", dtype=DataType.FLOAT_VECTOR, dim=1536), @@ -22,19 +77,14 @@ class MilvusMemory(MemoryProviderSingleton): ] # create collection if not exist and load it. - self.milvus_collection = cfg.milvus_collection self.schema = CollectionSchema(fields, "auto-gpt memory storage") - self.collection = Collection(self.milvus_collection, self.schema) + self.collection = Collection(self.collection_name, self.schema) # create index if not exist. if not self.collection.has_index(): self.collection.release() self.collection.create_index( "embeddings", - { - "metric_type": "IP", - "index_type": "HNSW", - "params": {"M": 8, "efConstruction": 64}, - }, + self.index_params, index_name="embeddings", ) self.collection.load() @@ -70,14 +120,10 @@ class MilvusMemory(MemoryProviderSingleton): str: log. """ self.collection.drop() - self.collection = Collection(self.milvus_collection, self.schema) + self.collection = Collection(self.collection_name, self.schema) self.collection.create_index( "embeddings", - { - "metric_type": "IP", - "index_type": "HNSW", - "params": {"M": 8, "efConstruction": 64}, - }, + self.index_params, index_name="embeddings", ) self.collection.load() diff --git a/docs/configuration/memory.md b/docs/configuration/memory.md index 674a9aa3..6fc80a75 100644 --- a/docs/configuration/memory.md +++ b/docs/configuration/memory.md @@ -15,7 +15,7 @@ To switch to either, change the `MEMORY_BACKEND` env variable to the value that Links to memory backends - [Pinecone](https://www.pinecone.io/) -- [Milvus](https://milvus.io/) +- [Milvus](https://milvus.io/) – [self-hosted](https://milvus.io/docs), or managed with [Zilliz Cloud](https://zilliz.com/) - [Redis](https://redis.io) - [Weaviate](https://weaviate.io) @@ -80,17 +80,37 @@ export MEMORY_BACKEND="pinecone" ### Milvus Setup -[Milvus](https://milvus.io/) is an open-source, highly scalable vector database to store huge amounts of vector-based memory and provide fast relevant search. +[Milvus](https://milvus.io/) is an open-source, highly scalable vector database to store huge amounts of vector-based memory and provide fast relevant search. And it can be quickly deployed by docker locally or as a cloud service provided by [Zilliz Cloud](https://zilliz.com/). -- setup milvus database, keep your pymilvus version and milvus version same to avoid compatible issues. 
- - setup by open source [Install Milvus](https://milvus.io/docs/install_standalone-operator.md) - - or setup by [Zilliz Cloud](https://zilliz.com/cloud) -- set `MILVUS_ADDR` in `.env` to your milvus address `host:ip`. -- set `MEMORY_BACKEND` in `.env` to `milvus` to enable milvus as backend. +1. Deploy your Milvus service, either locally using docker or with a managed Zilliz Cloud database. + - [Install and deploy Milvus locally](https://milvus.io/docs/install_standalone-operator.md) -**Optional:** -- set `MILVUS_COLLECTION` in `.env` to change milvus collection name as you want, `autogpt` is the default name. + -
<details><summary>Set up a managed Zilliz Cloud database (click to expand)</summary>
+      1. Go to [Zilliz Cloud](https://zilliz.com/) and sign up if you don't already have an account.
+      2. In the *Databases* tab, create a new database.
+        - Remember your username and password
+        - Wait until the database status is changed to RUNNING.
+      3. In the *Database detail* tab of the database you have created, find the public cloud endpoint, such as:
+         `https://xxx-xxxx.xxxx.xxxx.zillizcloud.com:443`.
+      </details>
+ +2. Run `pip3 install pymilvus` to install the required client library. + Make sure your PyMilvus version and Milvus version are [compatible](https://github.com/milvus-io/pymilvus#compatibility) to avoid issues. + See also the [PyMilvus installation instructions](https://github.com/milvus-io/pymilvus#installation). + +3. Update `.env` + - `MEMORY_BACKEND=milvus` + - One of: + - `MILVUS_ADDR=host:ip` (for local instance) + - `MILVUS_ADDR=https://xxx-xxxx.xxxx.xxxx.zillizcloud.com:443` (for Zilliz Cloud) + + *The following settings are **optional**:* + - Set `MILVUS_USERNAME='username-of-your-milvus-instance'` + - Set `MILVUS_PASSWORD='password-of-your-milvus-instance'` + - Set `MILVUS_SECURE=True` to use a secure connection. Only use if your Milvus instance has TLS enabled. + Setting `MILVUS_ADDR` to a `https://` URL will override this setting. + - Set `MILVUS_COLLECTION` if you want to change the collection name to use in Milvus. Defaults to `autogpt`. ### Weaviate Setup [Weaviate](https://weaviate.io/) is an open-source vector database. It allows to store data objects and vector embeddings from ML-models and scales seamlessly to billion of data objects. [An instance of Weaviate can be created locally (using Docker), on Kubernetes or using Weaviate Cloud Services](https://weaviate.io/developers/weaviate/quickstart). From da48f9c972492f6ed7ae0c05813ee009d9438a7c Mon Sep 17 00:00:00 2001 From: chyezh Date: Mon, 24 Apr 2023 00:32:17 +0800 Subject: [PATCH 098/222] Fix Milvus module config import (#3036) Co-authored-by: Reinier van der Leer --- autogpt/memory/milvus.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/memory/milvus.py b/autogpt/memory/milvus.py index 9a38a3ab..1849a9e6 100644 --- a/autogpt/memory/milvus.py +++ b/autogpt/memory/milvus.py @@ -1,9 +1,9 @@ """ Milvus memory storage provider.""" import re -from config import Config from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, connections +from autogpt.config import Config from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding From dcd6aa912b992d7c024ea1b668d3249b201b0c2c Mon Sep 17 00:00:00 2001 From: James Collins Date: Sun, 23 Apr 2023 12:36:04 -0700 Subject: [PATCH 099/222] Add workspace abstraction (#2982) * Add workspace abstraction * Remove old workspace implementation * Extract path resolution to a helper function * Add api key requirements to new tests --- autogpt/agent/agent.py | 17 +++ autogpt/cli.py | 36 +++++- autogpt/commands/audio_text.py | 6 +- autogpt/commands/command.py | 3 + autogpt/commands/execute_code.py | 26 ++-- autogpt/commands/file_operations.py | 40 ++---- autogpt/commands/git_operations.py | 6 +- autogpt/commands/image_gen.py | 9 +- autogpt/config/config.py | 3 + autogpt/workspace.py | 48 ------- autogpt/workspace/__init__.py | 5 + autogpt/workspace/workspace.py | 120 ++++++++++++++++++ tests/conftest.py | 16 +++ .../goal_oriented/test_write_file.py | 30 ++--- tests/test_image_gen.py | 30 +++-- tests/test_workspace.py | 86 +++++++++++++ tests/unit/test_file_operations.py | 90 +++++-------- tests/unit/test_setup.py | 2 + tests/utils.py | 2 + 19 files changed, 379 insertions(+), 196 deletions(-) delete mode 100644 autogpt/workspace.py create mode 100644 autogpt/workspace/__init__.py create mode 100644 autogpt/workspace/workspace.py create mode 100644 tests/test_workspace.py diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index 189338f5..8c2bfb74 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -9,6 +9,7 
@@ from autogpt.logs import logger, print_assistant_thoughts from autogpt.speech import say_text from autogpt.spinner import Spinner from autogpt.utils import clean_input +from autogpt.workspace import Workspace class Agent: @@ -50,7 +51,9 @@ class Agent: config, system_prompt, triggering_prompt, + workspace_directory, ): + cfg = Config() self.ai_name = ai_name self.memory = memory self.full_message_history = full_message_history @@ -59,6 +62,7 @@ class Agent: self.config = config self.system_prompt = system_prompt self.triggering_prompt = triggering_prompt + self.workspace = Workspace(workspace_directory, cfg.restrict_to_workspace) def start_interaction_loop(self): # Interaction Loop @@ -107,6 +111,8 @@ class Agent: command_name, arguments = get_command(assistant_reply_json) if cfg.speak_mode: say_text(f"I want to execute {command_name}") + arguments = self._resolve_pathlike_command_args(arguments) + except Exception as e: logger.error("Error: \n", str(e)) @@ -226,3 +232,14 @@ class Agent: logger.typewriter_log( "SYSTEM: ", Fore.YELLOW, "Unable to execute command" ) + + def _resolve_pathlike_command_args(self, command_args): + if "directory" in command_args and command_args["directory"] in {"", "/"}: + command_args["directory"] = str(self.workspace.root) + else: + for pathlike in ["filename", "directory", "clone_path"]: + if pathlike in command_args: + command_args[pathlike] = str( + self.workspace.get_path(command_args[pathlike]) + ) + return command_args diff --git a/autogpt/cli.py b/autogpt/cli.py index 51a946a7..75908a1e 100644 --- a/autogpt/cli.py +++ b/autogpt/cli.py @@ -47,6 +47,14 @@ import click is_flag=True, help="Specifies whether to suppress the output of latest news on startup.", ) +@click.option( + # TODO: this is a hidden option for now, necessary for integration testing. + # We should make this public once we're ready to roll out agent specific workspaces. + "--workspace-directory", + "-w", + type=click.Path(), + hidden=True, +) @click.pass_context def main( ctx: click.Context, @@ -62,6 +70,7 @@ def main( browser_name: str, allow_downloads: bool, skip_news: bool, + workspace_directory: str, ) -> None: """ Welcome to AutoGPT an experimental open-source application showcasing the capabilities of the GPT-4 pushing the boundaries of AI. 
@@ -71,6 +80,7 @@ def main( # Put imports inside function to avoid importing everything when starting the CLI import logging import sys + from pathlib import Path from colorama import Fore @@ -83,6 +93,7 @@ def main( from autogpt.plugins import scan_plugins from autogpt.prompts.prompt import construct_main_ai_config from autogpt.utils import get_current_git_branch, get_latest_bulletin + from autogpt.workspace import Workspace if ctx.invoked_subcommand is None: cfg = Config() @@ -103,7 +114,6 @@ def main( skip_news, ) logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO) - ai_name = "" if not cfg.skip_news: motd = get_latest_bulletin() if motd: @@ -126,7 +136,6 @@ def main( "Please consider upgrading to Python 3.10 or higher.", ) - cfg = Config() cfg.set_plugins(scan_plugins(cfg, cfg.debug_mode)) # Create a CommandRegistry instance and scan default folder command_registry = CommandRegistry() @@ -142,6 +151,7 @@ def main( command_registry.import_commands("autogpt.commands.web_selenium") command_registry.import_commands("autogpt.commands.write_tests") command_registry.import_commands("autogpt.app") + ai_name = "" ai_config = construct_main_ai_config() ai_config.command_registry = command_registry @@ -164,6 +174,27 @@ def main( system_prompt = ai_config.construct_full_prompt() if cfg.debug_mode: logger.typewriter_log("Prompt:", Fore.GREEN, system_prompt) + + # TODO: have this directory live outside the repository (e.g. in a user's + # home directory) and have it come in as a command line argument or part of + # the env file. + if workspace_directory is None: + workspace_directory = Path(__file__).parent / "auto_gpt_workspace" + else: + workspace_directory = Path(workspace_directory) + # TODO: pass in the ai_settings file and the env file and have them cloned into + # the workspace directory so we can bind them to the agent. + workspace_directory = Workspace.make_workspace(workspace_directory) + cfg.workspace_path = str(workspace_directory) + + # HACK: doing this here to collect some globals that depend on the workspace. + file_logger_path = workspace_directory / "file_logger.txt" + if not file_logger_path.exists(): + with file_logger_path.open(mode="w", encoding="utf-8") as f: + f.write("File Operation Logger ") + + cfg.file_logger_path = str(file_logger_path) + agent = Agent( ai_name=ai_name, memory=memory, @@ -173,6 +204,7 @@ def main( config=ai_config, system_prompt=system_prompt, triggering_prompt=triggering_prompt, + workspace_directory=workspace_directory, ) agent.start_interaction_loop() diff --git a/autogpt/commands/audio_text.py b/autogpt/commands/audio_text.py index b409fefd..0a8640cf 100644 --- a/autogpt/commands/audio_text.py +++ b/autogpt/commands/audio_text.py @@ -5,7 +5,6 @@ import requests from autogpt.commands.command import command from autogpt.config import Config -from autogpt.workspace import path_in_workspace CFG = Config() @@ -22,13 +21,12 @@ def read_audio_from_file(filename: str) -> str: Convert audio to text. 
Args: - audio_path (str): The path to the audio file + filename (str): The path to the audio file Returns: str: The text from the audio """ - audio_path = path_in_workspace(filename) - with open(audio_path, "rb") as audio_file: + with open(filename, "rb") as audio_file: audio = audio_file.read() return read_audio(audio) diff --git a/autogpt/commands/command.py b/autogpt/commands/command.py index e97af008..22ebace5 100644 --- a/autogpt/commands/command.py +++ b/autogpt/commands/command.py @@ -1,3 +1,4 @@ +import functools import importlib import inspect from typing import Any, Callable, Optional @@ -142,12 +143,14 @@ def command( disabled_reason=disabled_reason, ) + @functools.wraps(func) def wrapper(*args, **kwargs) -> Any: return func(*args, **kwargs) wrapper.command = cmd setattr(wrapper, AUTO_GPT_COMMAND_IDENTIFIER, True) + return wrapper return decorator diff --git a/autogpt/commands/execute_code.py b/autogpt/commands/execute_code.py index ff35d428..71c1bd2e 100644 --- a/autogpt/commands/execute_code.py +++ b/autogpt/commands/execute_code.py @@ -7,7 +7,6 @@ from docker.errors import ImageNotFound from autogpt.commands.command import command from autogpt.config import Config -from autogpt.workspace import WORKSPACE_PATH, path_in_workspace CFG = Config() @@ -22,20 +21,17 @@ def execute_python_file(filename: str) -> str: Returns: str: The output of the file """ - file = filename - print(f"Executing file '{file}' in workspace '{WORKSPACE_PATH}'") + print(f"Executing file '{filename}'") - if not file.endswith(".py"): + if not filename.endswith(".py"): return "Error: Invalid file type. Only .py files are allowed." - file_path = path_in_workspace(file) - - if not os.path.isfile(file_path): - return f"Error: File '{file}' does not exist." + if not os.path.isfile(filename): + return f"Error: File '{filename}' does not exist." 
if we_are_running_in_a_docker_container(): result = subprocess.run( - f"python {file_path}", capture_output=True, encoding="utf8", shell=True + f"python {filename}", capture_output=True, encoding="utf8", shell=True ) if result.returncode == 0: return result.stdout @@ -67,9 +63,9 @@ def execute_python_file(filename: str) -> str: container = client.containers.run( image_name, - f"python {file}", + f"python {filename}", volumes={ - os.path.abspath(WORKSPACE_PATH): { + CFG.workspace_path: { "bind": "/workspace", "mode": "ro", } @@ -126,8 +122,8 @@ def execute_shell(command_line: str) -> str: ) current_dir = os.getcwd() # Change dir into workspace if necessary - if str(WORKSPACE_PATH) not in current_dir: - os.chdir(WORKSPACE_PATH) + if CFG.workspace_path not in current_dir: + os.chdir(CFG.workspace_path) print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'") @@ -160,8 +156,8 @@ def execute_shell_popen(command_line) -> str: """ current_dir = os.getcwd() # Change dir into workspace if necessary - if str(WORKSPACE_PATH) not in current_dir: - os.chdir(WORKSPACE_PATH) + if CFG.workspace_path not in current_dir: + os.chdir(CFG.workspace_path) print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'") diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py index cb2d80e1..9999fccf 100644 --- a/autogpt/commands/file_operations.py +++ b/autogpt/commands/file_operations.py @@ -13,11 +13,8 @@ from autogpt.commands.command import command from autogpt.config import Config from autogpt.spinner import Spinner from autogpt.utils import readable_file_size -from autogpt.workspace import WORKSPACE_PATH, path_in_workspace CFG = Config() -LOG_FILE = "file_logger.txt" -LOG_FILE_PATH = WORKSPACE_PATH / LOG_FILE def check_duplicate_operation(operation: str, filename: str) -> bool: @@ -30,7 +27,7 @@ def check_duplicate_operation(operation: str, filename: str) -> bool: Returns: bool: True if the operation has already been performed on the file """ - log_content = read_file(LOG_FILE) + log_content = read_file(CFG.file_logger_path) log_entry = f"{operation}: {filename}\n" return log_entry in log_content @@ -43,12 +40,7 @@ def log_operation(operation: str, filename: str) -> None: filename (str): The name of the file the operation was performed on """ log_entry = f"{operation}: {filename}\n" - - # Create the log file if it doesn't exist - if not os.path.exists(LOG_FILE_PATH): - with open(LOG_FILE_PATH, "w", encoding="utf-8") as f: - f.write("File Operation Logger ") - append_to_file(str(LOG_FILE_PATH), log_entry, should_log=False) + append_to_file(CFG.file_logger_path, log_entry, should_log=False) def split_file( @@ -93,9 +85,8 @@ def read_file(filename: str) -> str: Returns: str: The contents of the file """ - filepath = path_in_workspace(filename) try: - with open(filepath, "r", encoding="utf-8") as f: + with open(filename, "r", encoding="utf-8") as f: content = f.read() return content except Exception as e: @@ -150,11 +141,10 @@ def write_to_file(filename: str, text: str) -> str: if check_duplicate_operation("write", filename): return "Error: File has already been updated." try: - filepath = path_in_workspace(filename) - directory = os.path.dirname(filepath) + directory = os.path.dirname(filename) if not os.path.exists(directory): os.makedirs(directory) - with open(filepath, "w", encoding="utf-8") as f: + with open(filename, "w", encoding="utf-8") as f: f.write(text) log_operation("write", filename) return "File written to successfully." 
@@ -177,8 +167,7 @@ def append_to_file(filename: str, text: str, should_log: bool = True) -> str: str: A message indicating success or failure """ try: - filepath = path_in_workspace(filename) - with open(filepath, "a") as f: + with open(filename, "a") as f: f.write(text) if should_log: @@ -202,8 +191,7 @@ def delete_file(filename: str) -> str: if check_duplicate_operation("delete", filename): return "Error: File has already been deleted." try: - filepath = path_in_workspace(filename) - os.remove(filepath) + os.remove(filename) log_operation("delete", filename) return "File deleted successfully." except Exception as e: @@ -222,16 +210,13 @@ def search_files(directory: str) -> list[str]: """ found_files = [] - if directory in {"", "/"}: - search_directory = WORKSPACE_PATH - else: - search_directory = path_in_workspace(directory) - - for root, _, files in os.walk(search_directory): + for root, _, files in os.walk(directory): for file in files: if file.startswith("."): continue - relative_path = os.path.relpath(os.path.join(root, file), WORKSPACE_PATH) + relative_path = os.path.relpath( + os.path.join(root, file), CFG.workspace_path + ) found_files.append(relative_path) return found_files @@ -250,7 +235,6 @@ def download_file(url, filename): url (str): URL of the file to download filename (str): Filename to save the file as """ - safe_filename = path_in_workspace(filename) try: message = f"{Fore.YELLOW}Downloading file from {Back.LIGHTBLUE_EX}{url}{Back.RESET}{Fore.RESET}" with Spinner(message) as spinner: @@ -268,7 +252,7 @@ def download_file(url, filename): total_size = int(r.headers.get("Content-Length", 0)) downloaded_size = 0 - with open(safe_filename, "wb") as f: + with open(filename, "wb") as f: for chunk in r.iter_content(chunk_size=8192): f.write(chunk) downloaded_size += len(chunk) diff --git a/autogpt/commands/git_operations.py b/autogpt/commands/git_operations.py index 1a6beb95..c373b8c0 100644 --- a/autogpt/commands/git_operations.py +++ b/autogpt/commands/git_operations.py @@ -3,7 +3,6 @@ from git.repo import Repo from autogpt.commands.command import command from autogpt.config import Config -from autogpt.workspace import path_in_workspace CFG = Config() @@ -27,9 +26,8 @@ def clone_repository(repository_url: str, clone_path: str) -> str: """ split_url = repository_url.split("//") auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url) - safe_clone_path = path_in_workspace(clone_path) try: - Repo.clone_from(auth_repo_url, safe_clone_path) - return f"""Cloned {repository_url} to {safe_clone_path}""" + Repo.clone_from(auth_repo_url, clone_path) + return f"""Cloned {repository_url} to {clone_path}""" except Exception as e: return f"Error: {str(e)}" diff --git a/autogpt/commands/image_gen.py b/autogpt/commands/image_gen.py index 962c41fb..834432c5 100644 --- a/autogpt/commands/image_gen.py +++ b/autogpt/commands/image_gen.py @@ -9,7 +9,6 @@ from PIL import Image from autogpt.commands.command import command from autogpt.config import Config -from autogpt.workspace import path_in_workspace CFG = Config() @@ -25,7 +24,7 @@ def generate_image(prompt: str, size: int = 256) -> str: Returns: str: The filename of the image """ - filename = f"{str(uuid.uuid4())}.jpg" + filename = f"{CFG.workspace_path}/{str(uuid.uuid4())}.jpg" # DALL-E if CFG.image_provider == "dalle": @@ -72,7 +71,7 @@ def generate_image_with_hf(prompt: str, filename: str) -> str: image = Image.open(io.BytesIO(response.content)) print(f"Image Generated for prompt:{prompt}") - 
image.save(path_in_workspace(filename)) + image.save(filename) return f"Saved to disk:{filename}" @@ -109,7 +108,7 @@ def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str: image_data = b64decode(response["data"][0]["b64_json"]) - with open(path_in_workspace(filename), mode="wb") as png: + with open(filename, mode="wb") as png: png.write(image_data) return f"Saved to disk:{filename}" @@ -160,6 +159,6 @@ def generate_image_with_sd_webui( response = response.json() b64 = b64decode(response["images"][0].split(",", 1)[0]) image = Image.open(io.BytesIO(b64)) - image.save(path_in_workspace(filename)) + image.save(filename) return f"Saved to disk:{filename}" diff --git a/autogpt/config/config.py b/autogpt/config/config.py index b8981bee..7fa849e5 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -20,6 +20,9 @@ class Config(metaclass=Singleton): def __init__(self) -> None: """Initialize the Config class""" + self.workspace_path = None + self.file_logger_path = None + self.debug_mode = False self.continuous_mode = False self.continuous_limit = 0 diff --git a/autogpt/workspace.py b/autogpt/workspace.py deleted file mode 100644 index 724f8443..00000000 --- a/autogpt/workspace.py +++ /dev/null @@ -1,48 +0,0 @@ -from __future__ import annotations - -import os -from pathlib import Path - -from autogpt.config import Config - -CFG = Config() - -# Set a dedicated folder for file I/O -WORKSPACE_PATH = Path(os.getcwd()) / "auto_gpt_workspace" - -# Create the directory if it doesn't exist -if not os.path.exists(WORKSPACE_PATH): - os.makedirs(WORKSPACE_PATH) - - -def path_in_workspace(relative_path: str | Path) -> Path: - """Get full path for item in workspace - - Parameters: - relative_path (str | Path): Path to translate into the workspace - - Returns: - Path: Absolute path for the given path in the workspace - """ - return safe_path_join(WORKSPACE_PATH, relative_path) - - -def safe_path_join(base: Path, *paths: str | Path) -> Path: - """Join one or more path components, asserting the resulting path is within the workspace. - - Args: - base (Path): The base path - *paths (str): The paths to join to the base path - - Returns: - Path: The joined path - """ - base = base.resolve() - joined_path = base.joinpath(*paths).resolve() - - if CFG.restrict_to_workspace and not joined_path.is_relative_to(base): - raise ValueError( - f"Attempted to access path '{joined_path}' outside of workspace '{base}'." - ) - - return joined_path diff --git a/autogpt/workspace/__init__.py b/autogpt/workspace/__init__.py new file mode 100644 index 00000000..b348144b --- /dev/null +++ b/autogpt/workspace/__init__.py @@ -0,0 +1,5 @@ +from autogpt.workspace.workspace import Workspace + +__all__ = [ + "Workspace", +] diff --git a/autogpt/workspace/workspace.py b/autogpt/workspace/workspace.py new file mode 100644 index 00000000..b06fa9eb --- /dev/null +++ b/autogpt/workspace/workspace.py @@ -0,0 +1,120 @@ +""" +========= +Workspace +========= + +The workspace is a directory containing configuration and working files for an AutoGPT +agent. 
+ +""" +from __future__ import annotations + +from pathlib import Path + + +class Workspace: + """A class that represents a workspace for an AutoGPT agent.""" + + def __init__(self, workspace_root: str | Path, restrict_to_workspace: bool): + self._root = self._sanitize_path(workspace_root) + self._restrict_to_workspace = restrict_to_workspace + + @property + def root(self) -> Path: + """The root directory of the workspace.""" + return self._root + + @property + def restrict_to_workspace(self): + """Whether to restrict generated paths to the workspace.""" + return self._restrict_to_workspace + + @classmethod + def make_workspace(cls, workspace_directory: str | Path, *args, **kwargs) -> Path: + """Create a workspace directory and return the path to it. + + Parameters + ---------- + workspace_directory + The path to the workspace directory. + + Returns + ------- + Path + The path to the workspace directory. + + """ + # TODO: have this make the env file and ai settings file in the directory. + workspace_directory = cls._sanitize_path(workspace_directory) + workspace_directory.mkdir(exist_ok=True, parents=True) + return workspace_directory + + def get_path(self, relative_path: str | Path) -> Path: + """Get the full path for an item in the workspace. + + Parameters + ---------- + relative_path + The relative path to resolve in the workspace. + + Returns + ------- + Path + The resolved path relative to the workspace. + + """ + return self._sanitize_path( + relative_path, + root=self.root, + restrict_to_root=self.restrict_to_workspace, + ) + + @staticmethod + def _sanitize_path( + relative_path: str | Path, + root: str | Path = None, + restrict_to_root: bool = True, + ) -> Path: + """Resolve the relative path within the given root if possible. + + Parameters + ---------- + relative_path + The relative path to resolve. + root + The root path to resolve the relative path within. + restrict_to_root + Whether to restrict the path to the root. + + Returns + ------- + Path + The resolved path. + + Raises + ------ + ValueError + If the path is absolute and a root is provided. + ValueError + If the path is outside the root and the root is restricted. + + """ + + if root is None: + return Path(relative_path).resolve() + + root, relative_path = Path(root), Path(relative_path) + + if relative_path.is_absolute(): + raise ValueError( + f"Attempted to access absolute path '{relative_path}' in workspace '{root}'." + ) + + full_path = root.joinpath(relative_path).resolve() + + if restrict_to_root and not full_path.is_relative_to(root): + raise ValueError( + f"Attempted to access path '{full_path}' outside of workspace '{root}'." 
+ ) + + return full_path diff --git a/tests/conftest.py b/tests/conftest.py index bf6bd6c5..99ccb940 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,3 +1,19 @@ +from pathlib import Path + +import pytest from dotenv import load_dotenv +from autogpt.workspace import Workspace + load_dotenv() + + +@pytest.fixture() +def workspace_root(tmp_path) -> Path: + return tmp_path / "home/users/monty/auto_gpt_workspace" + + +@pytest.fixture() +def workspace(workspace_root: Path) -> Workspace: + workspace_root = Workspace.make_workspace(workspace_root) + return Workspace(workspace_root, restrict_to_workspace=True) diff --git a/tests/integration/goal_oriented/test_write_file.py b/tests/integration/goal_oriented/test_write_file.py index d995c7a3..053d6e08 100644 --- a/tests/integration/goal_oriented/test_write_file.py +++ b/tests/integration/goal_oriented/test_write_file.py @@ -6,12 +6,9 @@ import vcr from autogpt.agent import Agent from autogpt.commands.command import CommandRegistry -from autogpt.commands.file_operations import LOG_FILE, delete_file, read_file -from autogpt.config import AIConfig, Config, check_openai_api_key +from autogpt.commands.file_operations import delete_file, read_file +from autogpt.config import AIConfig, Config from autogpt.memory import get_memory - -# from autogpt.prompt import Prompt -from autogpt.workspace import WORKSPACE_PATH from tests.integration.goal_oriented.vcr_helper import before_record_request from tests.utils import requires_api_key @@ -28,19 +25,12 @@ CFG = Config() @requires_api_key("OPENAI_API_KEY") -def test_write_file() -> None: - # if file exist - file_name = "hello_world.txt" +def test_write_file(workspace) -> None: + CFG.workspace_path = workspace.root + CFG.file_logger_path = os.path.join(workspace.root, "file_logger.txt") - file_path_to_write_into = f"{WORKSPACE_PATH}/{file_name}" - if os.path.exists(file_path_to_write_into): - os.remove(file_path_to_write_into) - file_logger_path = f"{WORKSPACE_PATH}/{LOG_FILE}" - if os.path.exists(file_logger_path): - os.remove(file_logger_path) - - delete_file(file_name) - agent = create_writer_agent() + file_name = str(workspace.get_path("hello_world.txt")) + agent = create_writer_agent(workspace) try: with my_vcr.use_cassette( "write_file.vcr.yml", @@ -58,14 +48,11 @@ def test_write_file() -> None: assert False, "The process took longer than 45 seconds to complete." 
# catch system exit exceptions except SystemExit: # the agent returns an exception when it shuts down - content = "" content = read_file(file_name) - os.remove(file_path_to_write_into) - assert content == "Hello World", f"Expected 'Hello World', got {content}" -def create_writer_agent(): +def create_writer_agent(workspace): command_registry = CommandRegistry() command_registry.import_commands("autogpt.commands.file_operations") command_registry.import_commands("autogpt.app") @@ -96,6 +83,7 @@ def create_writer_agent(): next_action_count=0, system_prompt=system_prompt, triggering_prompt=triggering_prompt, + workspace_directory=workspace.root, ) CFG.set_continuous_mode(True) CFG.set_memory_backend("no_memory") diff --git a/tests/test_image_gen.py b/tests/test_image_gen.py index 58b8337f..d29d5aa7 100644 --- a/tests/test_image_gen.py +++ b/tests/test_image_gen.py @@ -1,39 +1,46 @@ import hashlib import os +import shutil import unittest +from pathlib import Path from PIL import Image from autogpt.commands.image_gen import generate_image, generate_image_with_sd_webui from autogpt.config import Config -from autogpt.workspace import path_in_workspace +from autogpt.workspace import Workspace from tests.utils import requires_api_key def lst(txt): - return txt.split(":")[1].strip() + return Path(txt.split(":")[1].strip()) -@unittest.skipIf(os.getenv("CI"), "Skipping image generation tests") +@unittest.skip("Skipping image generation tests") class TestImageGen(unittest.TestCase): def setUp(self): self.config = Config() + workspace_path = os.path.join(os.path.dirname(__file__), "workspace") + self.workspace_path = Workspace.make_workspace(workspace_path) + self.config.workspace_path = workspace_path + self.workspace = Workspace(workspace_path, restrict_to_workspace=True) + + def tearDown(self) -> None: + shutil.rmtree(self.workspace_path) @requires_api_key("OPENAI_API_KEY") def test_dalle(self): self.config.image_provider = "dalle" # Test using size 256 - result = lst(generate_image("astronaut riding a horse", 256)) - image_path = path_in_workspace(result) + image_path = lst(generate_image("astronaut riding a horse", 256)) self.assertTrue(image_path.exists()) with Image.open(image_path) as img: self.assertEqual(img.size, (256, 256)) image_path.unlink() # Test using size 512 - result = lst(generate_image("astronaut riding a horse", 512)) - image_path = path_in_workspace(result) + image_path = lst(generate_image("astronaut riding a horse", 512)) with Image.open(image_path) as img: self.assertEqual(img.size, (512, 512)) image_path.unlink() @@ -44,8 +51,7 @@ class TestImageGen(unittest.TestCase): # Test usin SD 1.4 model and size 512 self.config.huggingface_image_model = "CompVis/stable-diffusion-v1-4" - result = lst(generate_image("astronaut riding a horse", 512)) - image_path = path_in_workspace(result) + image_path = lst(generate_image("astronaut riding a horse", 512)) self.assertTrue(image_path.exists()) with Image.open(image_path) as img: self.assertEqual(img.size, (512, 512)) @@ -53,8 +59,7 @@ class TestImageGen(unittest.TestCase): # Test using SD 2.1 768 model and size 768 self.config.huggingface_image_model = "stabilityai/stable-diffusion-2-1" - result = lst(generate_image("astronaut riding a horse", 768)) - image_path = path_in_workspace(result) + image_path = lst(generate_image("astronaut riding a horse", 768)) with Image.open(image_path) as img: self.assertEqual(img.size, (768, 768)) image_path.unlink() @@ -64,8 +69,7 @@ class TestImageGen(unittest.TestCase): return # Test using size 128 - 
result = lst(generate_image_with_sd_webui("astronaut riding a horse", 128)) - image_path = path_in_workspace(result) + image_path = lst(generate_image_with_sd_webui("astronaut riding a horse", 128)) self.assertTrue(image_path.exists()) with Image.open(image_path) as img: self.assertEqual(img.size, (128, 128)) diff --git a/tests/test_workspace.py b/tests/test_workspace.py new file mode 100644 index 00000000..33c096d3 --- /dev/null +++ b/tests/test_workspace.py @@ -0,0 +1,86 @@ +from pathlib import Path + +import pytest + +from autogpt.workspace import Workspace + +_WORKSPACE_ROOT = Path("home/users/monty/auto_gpt_workspace") + +_ACCESSIBLE_PATHS = [ + Path("."), + Path("test_file.txt"), + Path("test_folder"), + Path("test_folder/test_file.txt"), + Path("test_folder/.."), + Path("test_folder/../test_file.txt"), + Path("test_folder/../test_folder"), + Path("test_folder/../test_folder/test_file.txt"), +] + +_INACCESSIBLE_PATHS = [ + # Takes us out of the workspace + Path(".."), + Path("../test_file.txt"), + Path("../not_auto_gpt_workspace"), + Path("../not_auto_gpt_workspace/test_file.txt"), + Path("test_folder/../.."), + Path("test_folder/../../test_file.txt"), + Path("test_folder/../../not_auto_gpt_workspace"), + Path("test_folder/../../not_auto_gpt_workspace/test_file.txt"), + # Contains null bytes + Path("\x00"), + Path("\x00test_file.txt"), + Path("test_folder/\x00"), + Path("test_folder/\x00test_file.txt"), + # Absolute paths + Path("/"), + Path("/test_file.txt"), + Path("/home"), +] + + +@pytest.fixture() +def workspace_root(tmp_path): + return tmp_path / _WORKSPACE_ROOT + + +@pytest.fixture(params=_ACCESSIBLE_PATHS) +def accessible_path(request): + return request.param + + +@pytest.fixture(params=_INACCESSIBLE_PATHS) +def inaccessible_path(request): + return request.param + + +def test_sanitize_path_accessible(accessible_path, workspace_root): + full_path = Workspace._sanitize_path( + accessible_path, + root=workspace_root, + restrict_to_root=True, + ) + assert full_path.is_absolute() + assert full_path.is_relative_to(workspace_root) + + +def test_sanitize_path_inaccessible(inaccessible_path, workspace_root): + with pytest.raises(ValueError): + Workspace._sanitize_path( + inaccessible_path, + root=workspace_root, + restrict_to_root=True, + ) + + +def test_get_path_accessible(accessible_path, workspace_root): + workspace = Workspace(workspace_root, True) + full_path = workspace.get_path(accessible_path) + assert full_path.is_absolute() + assert full_path.is_relative_to(workspace_root) + + +def test_get_path_inaccessible(inaccessible_path, workspace_root): + workspace = Workspace(workspace_root, True) + with pytest.raises(ValueError): + workspace.get_path(inaccessible_path) diff --git a/tests/unit/test_file_operations.py b/tests/unit/test_file_operations.py index dfcde571..35f9b91a 100644 --- a/tests/unit/test_file_operations.py +++ b/tests/unit/test_file_operations.py @@ -4,7 +4,6 @@ import unittest from pathlib import Path from autogpt.commands.file_operations import ( - LOG_FILE_PATH, append_to_file, check_duplicate_operation, delete_file, @@ -15,7 +14,7 @@ from autogpt.commands.file_operations import ( write_to_file, ) from autogpt.config import Config -from autogpt.workspace import path_in_workspace +from autogpt.workspace import Workspace class TestFileOperations(unittest.TestCase): @@ -24,24 +23,24 @@ class TestFileOperations(unittest.TestCase): """ def setUp(self): - self.test_file = "test_file.txt" + self.config = Config() + workspace_path = 
os.path.join(os.path.dirname(__file__), "workspace") + self.workspace_path = Workspace.make_workspace(workspace_path) + self.config.workspace_path = workspace_path + self.config.file_logger_path = os.path.join(workspace_path, "file_logger.txt") + self.workspace = Workspace(workspace_path, restrict_to_workspace=True) + + self.test_file = str(self.workspace.get_path("test_file.txt")) self.test_file2 = "test_file2.txt" - self.test_directory = "test_directory" + self.test_directory = str(self.workspace.get_path("test_directory")) self.file_content = "This is a test file.\n" self.file_logger_logs = "file_logger.txt" - with open(path_in_workspace(self.test_file), "w") as f: + with open(self.test_file, "w") as f: f.write(self.file_content) - if os.path.exists(LOG_FILE_PATH): - os.remove(LOG_FILE_PATH) - - def tearDown(self): - if os.path.exists(path_in_workspace(self.test_file)): - os.remove(path_in_workspace(self.test_file)) - - if os.path.exists(self.test_directory): - shutil.rmtree(self.test_directory) + def tearDown(self) -> None: + shutil.rmtree(self.workspace_path) def test_check_duplicate_operation(self): log_operation("write", self.test_file) @@ -53,9 +52,9 @@ class TestFileOperations(unittest.TestCase): os.remove(self.file_logger_logs) log_operation("log_test", self.test_file) - with open(LOG_FILE_PATH, "r") as f: + with open(self.config.file_logger_path, "r") as f: content = f.read() - self.assertIn("log_test: test_file.txt", content) + self.assertIn(f"log_test: {self.test_file}", content) # Test splitting a file into chunks def test_split_file(self): @@ -71,80 +70,59 @@ class TestFileOperations(unittest.TestCase): def test_write_to_file(self): new_content = "This is new content.\n" write_to_file(self.test_file, new_content) - with open(path_in_workspace(self.test_file), "r") as f: + with open(self.test_file, "r") as f: content = f.read() self.assertEqual(content, new_content) def test_append_to_file(self): - with open(path_in_workspace(self.test_file), "r") as f: + with open(self.test_file, "r") as f: content_before = f.read() append_text = "This is appended text.\n" append_to_file(self.test_file, append_text) - with open(path_in_workspace(self.test_file), "r") as f: + with open(self.test_file, "r") as f: content = f.read() self.assertEqual(content, content_before + append_text) def test_delete_file(self): delete_file(self.test_file) - self.assertFalse(os.path.exists(path_in_workspace(self.test_file))) + self.assertFalse(os.path.exists(self.test_file)) def test_search_files(self): # Case 1: Create files A and B, search for A, and ensure we don't return A and B - file_a = "file_a.txt" - file_b = "file_b.txt" + file_a = self.workspace.get_path("file_a.txt") + file_b = self.workspace.get_path("file_b.txt") - with open(path_in_workspace(file_a), "w") as f: + with open(file_a, "w") as f: f.write("This is file A.") - with open(path_in_workspace(file_b), "w") as f: + with open(file_b, "w") as f: f.write("This is file B.") # Create a subdirectory and place a copy of file_a in it - if not os.path.exists(path_in_workspace(self.test_directory)): - os.makedirs(path_in_workspace(self.test_directory)) + if not os.path.exists(self.test_directory): + os.makedirs(self.test_directory) - with open( - path_in_workspace(os.path.join(self.test_directory, file_a)), "w" - ) as f: + with open(os.path.join(self.test_directory, file_a.name), "w") as f: f.write("This is file A in the subdirectory.") - files = search_files(path_in_workspace("")) - self.assertIn(file_a, files) - self.assertIn(file_b, files) - 
self.assertIn(os.path.join(self.test_directory, file_a), files) + files = search_files(str(self.workspace.root)) + self.assertIn(file_a.name, files) + self.assertIn(file_b.name, files) + self.assertIn(f"{Path(self.test_directory).name}/{file_a.name}", files) # Clean up - os.remove(path_in_workspace(file_a)) - os.remove(path_in_workspace(file_b)) - os.remove(path_in_workspace(os.path.join(self.test_directory, file_a))) - os.rmdir(path_in_workspace(self.test_directory)) + os.remove(file_a) + os.remove(file_b) + os.remove(os.path.join(self.test_directory, file_a.name)) + os.rmdir(self.test_directory) # Case 2: Search for a file that does not exist and make sure we don't throw non_existent_file = "non_existent_file.txt" files = search_files("") self.assertNotIn(non_existent_file, files) - # Test to ensure we cannot read files out of workspace - def test_restrict_workspace(self): - CFG = Config() - with open(self.test_file2, "w+") as f: - f.write("test text") - - CFG.restrict_to_workspace = True - - # Get the absolute path of self.test_file2 - test_file2_abs_path = os.path.abspath(self.test_file2) - - with self.assertRaises(ValueError): - read_file(test_file2_abs_path) - - CFG.restrict_to_workspace = False - read_file(test_file2_abs_path) - - os.remove(test_file2_abs_path) - if __name__ == "__main__": unittest.main() diff --git a/tests/unit/test_setup.py b/tests/unit/test_setup.py index daf8524a..c922ff9c 100644 --- a/tests/unit/test_setup.py +++ b/tests/unit/test_setup.py @@ -35,6 +35,7 @@ class TestAutoGPT(unittest.TestCase): self.assertGreaterEqual(len(ai_config.ai_goals), 1) self.assertLessEqual(len(ai_config.ai_goals), 5) + @requires_api_key("OPENAI_API_KEY") def test_generate_aiconfig_automatic_fallback(self): user_inputs = [ "T&GF£OIBECC()!*", @@ -52,6 +53,7 @@ class TestAutoGPT(unittest.TestCase): self.assertEqual(ai_config.ai_role, "an AI designed to browse bake a cake.") self.assertEqual(ai_config.ai_goals, ["Purchase ingredients", "Bake a cake"]) + @requires_api_key("OPENAI_API_KEY") def test_prompt_user_manual_mode(self): user_inputs = [ "--manual", diff --git a/tests/utils.py b/tests/utils.py index f042974c..8c72f1fa 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -1,3 +1,4 @@ +import functools import os import pytest @@ -5,6 +6,7 @@ import pytest def requires_api_key(env_var): def decorator(func): + @functools.wraps(func) def wrapper(*args, **kwargs): if not os.environ.get(env_var): pytest.skip( From bf895eb656dee9084273cd36395828bd06aa231d Mon Sep 17 00:00:00 2001 From: hdkiller Date: Sun, 23 Apr 2023 22:28:48 +0200 Subject: [PATCH 100/222] fix typo in warning message (#3044) --- autogpt/plugins.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/plugins.py b/autogpt/plugins.py index 8aaa35e9..57045bb8 100644 --- a/autogpt/plugins.py +++ b/autogpt/plugins.py @@ -261,7 +261,7 @@ def denylist_allowlist_check(plugin_name: str, cfg: Config) -> bool: if plugin_name in cfg.plugins_allowlist: return True ack = input( - f"WARNNG Plugin {plugin_name} found. But not in the" + f"WARNING: Plugin {plugin_name} found. But not in the" " allowlist... Load? 
(y/n): "
     )
     return ack.lower() == "y"

From d6ef9d1b5d67a4a3a096a7c108dec14e3686f262 Mon Sep 17 00:00:00 2001
From: Vwing 
Date: Sun, 23 Apr 2023 14:04:31 -0700
Subject: [PATCH 101/222] Make Auto-GPT aware of its running cost (#762)

* Implemented running cost counter for chat completions

This data is known to the AI as additional system context, and is printed out
to the user

* Added comments to api_manager.py

* Added user-defined API budget.

The user is now prompted if they want to give the AI a budget for API calls.
If they enter nothing, there is no monetary limit, but if they define a budget
then the AI will be told to shut down gracefully once it has come within 1
cent of its limit, and to shut down immediately once it has exceeded its
limit. If a budget is defined, Auto-GPT is always aware of how much it was
given and how much remains to be spent.

* Chat completion calls are now done through api_manager. Total running cost
is printed.

* Implemented api budget setting and tracking

User can now configure a maximum api budget, and the AI is aware of that and
its remaining budget. The AI is instructed to shut down when exceeding the
budget.

* Update autogpt/api_manager.py

Change "per token" to "per 1000 tokens" in a comment on the api cost

Co-authored-by: Rob Luke 

* Fixed lint errors

* Include embedding costs

* Add embedding completion cost

* lint

* Added 'requires_api_key' decorator to test_commands.py, switched to a valid
chat completions model

* Refactor API manager, add debug mode, and add tests

- Extract model costs to modelsinfo.py to avoid duplication
- Add debug mode parameter to ApiManager class
- Move debug mode configuration to config.py
- Log AI response and budget messages in debug mode
- Implement 'test_api_manager.py'

* Fixed test_setup failing. An extra user input is needed for api budget

* Linting

---------

Co-authored-by: Rob Luke 
Co-authored-by: Nicholas Tindle 
---
 autogpt/api_manager.py      | 158 ++++++++++++++++++++++++++++++++++++
 autogpt/chat.py             |  23 ++++++
 autogpt/config/ai_config.py |  19 ++++-
 autogpt/llm_utils.py        |  19 ++---
 autogpt/memory/base.py      |  13 +--
 autogpt/modelsinfo.py       |   7 ++
 autogpt/prompts/prompt.py   |  10 +++
 autogpt/setup.py            |  26 +++++-
 tests/test_api_manager.py   | 148 +++++++++++++++++++++++++++++
 tests/unit/test_commands.py |   6 +-
 tests/unit/test_setup.py    |   2 +
 11 files changed, 401 insertions(+), 30 deletions(-)
 create mode 100644 autogpt/api_manager.py
 create mode 100644 autogpt/modelsinfo.py
 create mode 100644 tests/test_api_manager.py

diff --git a/autogpt/api_manager.py b/autogpt/api_manager.py
new file mode 100644
index 00000000..497e8a79
--- /dev/null
+++ b/autogpt/api_manager.py
@@ -0,0 +1,158 @@
+from typing import List
+
+import openai
+
+from autogpt.config import Config
+from autogpt.logs import logger
+from autogpt.modelsinfo import COSTS
+
+cfg = Config()
+openai.api_key = cfg.openai_api_key
+print_total_cost = cfg.debug_mode
+
+
+class ApiManager:
+    def __init__(self, debug=False):
+        self.total_prompt_tokens = 0
+        self.total_completion_tokens = 0
+        self.total_cost = 0
+        self.total_budget = 0
+        self.debug = debug
+
+    def reset(self):
+        self.total_prompt_tokens = 0
+        self.total_completion_tokens = 0
+        self.total_cost = 0
+        self.total_budget = 0.0
+
+    def create_chat_completion(
+        self,
+        messages: list,  # type: ignore
+        model: str | None = None,
+        temperature: float = cfg.temperature,
+        max_tokens: int | None = None,
+        deployment_id=None,
+    ):
+        """
+        Create a chat completion and update the cost.
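+
+        A rough usage sketch (the message content and model shown here are
+        illustrative only):
+
+            response = api_manager.create_chat_completion(
+                messages=[{"role": "user", "content": "Hello"}],
+                model="gpt-3.5-turbo",
+            )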
+
+        Args:
+            messages (list): The list of messages to send to the API.
+            model (str): The model to use for the API call.
+            temperature (float): The temperature to use for the API call.
+            max_tokens (int): The maximum number of tokens for the API call.
+        Returns:
+            The full chat completion response from the API; token usage is
+            read from it to update the running cost.
+        """
+        if deployment_id is not None:
+            response = openai.ChatCompletion.create(
+                deployment_id=deployment_id,
+                model=model,
+                messages=messages,
+                temperature=temperature,
+                max_tokens=max_tokens,
+            )
+        else:
+            response = openai.ChatCompletion.create(
+                model=model,
+                messages=messages,
+                temperature=temperature,
+                max_tokens=max_tokens,
+            )
+        if self.debug:
+            logger.debug(f"Response: {response}")
+        prompt_tokens = response.usage.prompt_tokens
+        completion_tokens = response.usage.completion_tokens
+        self.update_cost(prompt_tokens, completion_tokens, model)
+        return response
+
+    def embedding_create(
+        self,
+        text_list: List[str],
+        model: str = "text-embedding-ada-002",
+    ) -> List[float]:
+        """
+        Create an embedding for the given input text using the specified model.
+
+        Args:
+            text_list (List[str]): Input text for which the embedding is to be created.
+            model (str, optional): The model to use for generating the embedding.
+
+        Returns:
+            List[float]: The generated embedding as a list of float values.
+        """
+        if cfg.use_azure:
+            response = openai.Embedding.create(
+                input=text_list,
+                engine=cfg.get_azure_deployment_id_for_model(model),
+            )
+        else:
+            response = openai.Embedding.create(input=text_list, model=model)
+
+        self.update_cost(response.usage.prompt_tokens, 0, model)
+        return response["data"][0]["embedding"]
+
+    def update_cost(self, prompt_tokens, completion_tokens, model):
+        """
+        Update the total cost, prompt tokens, and completion tokens.
+
+        Args:
+            prompt_tokens (int): The number of tokens used in the prompt.
+            completion_tokens (int): The number of tokens used in the completion.
+            model (str): The model used for the API call.
+        """
+        self.total_prompt_tokens += prompt_tokens
+        self.total_completion_tokens += completion_tokens
+        self.total_cost += (
+            prompt_tokens * COSTS[model]["prompt"]
+            + completion_tokens * COSTS[model]["completion"]
+        ) / 1000
+        if print_total_cost:
+            print(f"Total running cost: ${self.total_cost:.3f}")
+
+    def set_total_budget(self, total_budget):
+        """
+        Sets the total user-defined budget for API calls.
+
+        Args:
+            total_budget (float): The total budget, in dollars, for API calls.
+        """
+        self.total_budget = total_budget
+
+    def get_total_prompt_tokens(self):
+        """
+        Get the total number of prompt tokens.
+
+        Returns:
+            int: The total number of prompt tokens.
+        """
+        return self.total_prompt_tokens
+
+    def get_total_completion_tokens(self):
+        """
+        Get the total number of completion tokens.
+
+        Returns:
+            int: The total number of completion tokens.
+        """
+        return self.total_completion_tokens
+
+    def get_total_cost(self):
+        """
+        Get the total cost of API calls.
+
+        Returns:
+            float: The total cost of API calls.
+        """
+        return self.total_cost
+
+    def get_total_budget(self):
+        """
+        Get the total user-defined budget for API calls.
+
+        Returns:
+            float: The total budget for API calls.
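+
+        Example (hypothetical figures): after set_total_budget(10.0), if the
+        running cost reaches $0.02, the remaining budget reported to the AI
+        in chat_with_ai is 10.0 - 0.02 = $9.98.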
+ """ + return self.total_budget + + +api_manager = ApiManager(cfg.debug_mode) diff --git a/autogpt/chat.py b/autogpt/chat.py index f9fc9471..21eab6a0 100644 --- a/autogpt/chat.py +++ b/autogpt/chat.py @@ -3,6 +3,7 @@ import time from openai.error import RateLimitError from autogpt import token_counter +from autogpt.api_manager import api_manager from autogpt.config import Config from autogpt.llm_utils import create_chat_completion from autogpt.logs import logger @@ -133,6 +134,28 @@ def chat_with_ai( # Move to the next most recent message in the full message history next_message_to_add_index -= 1 + # inform the AI about its remaining budget (if it has one) + if api_manager.get_total_budget() > 0.0: + remaining_budget = ( + api_manager.get_total_budget() - api_manager.get_total_cost() + ) + if remaining_budget < 0: + remaining_budget = 0 + system_message = ( + f"Your remaining API budget is ${remaining_budget:.3f}" + + ( + " BUDGET EXCEEDED! SHUT DOWN!\n\n" + if remaining_budget == 0 + else " Budget very nearly exceeded! Shut down gracefully!\n\n" + if remaining_budget < 0.005 + else " Budget nearly exceeded. Finish up.\n\n" + if remaining_budget < 0.01 + else "\n\n" + ) + ) + logger.debug(system_message) + current_context.append(create_chat_message("system", system_message)) + # Append user input, the length of this is accounted for above current_context.extend([create_chat_message("user", user_input)]) diff --git a/autogpt/config/ai_config.py b/autogpt/config/ai_config.py index a622c963..d662429f 100644 --- a/autogpt/config/ai_config.py +++ b/autogpt/config/ai_config.py @@ -26,10 +26,15 @@ class AIConfig: ai_name (str): The name of the AI. ai_role (str): The description of the AI's role. ai_goals (list): The list of objectives the AI is supposed to complete. + api_budget (float): The maximum dollar value for API calls (0.0 means infinite) """ def __init__( - self, ai_name: str = "", ai_role: str = "", ai_goals: list | None = None + self, + ai_name: str = "", + ai_role: str = "", + ai_goals: list | None = None, + api_budget: float = 0.0, ) -> None: """ Initialize a class instance @@ -38,6 +43,7 @@ class AIConfig: ai_name (str): The name of the AI. ai_role (str): The description of the AI's role. ai_goals (list): The list of objectives the AI is supposed to complete. + api_budget (float): The maximum dollar value for API calls (0.0 means infinite) Returns: None """ @@ -46,13 +52,14 @@ class AIConfig: self.ai_name = ai_name self.ai_role = ai_role self.ai_goals = ai_goals + self.api_budget = api_budget self.prompt_generator = None self.command_registry = None @staticmethod def load(config_file: str = SAVE_FILE) -> "AIConfig": """ - Returns class object with parameters (ai_name, ai_role, ai_goals) loaded from + Returns class object with parameters (ai_name, ai_role, ai_goals, api_budget) loaded from yaml file if yaml file exists, else returns class with no parameters. 
@@ -73,8 +80,9 @@ class AIConfig: ai_name = config_params.get("ai_name", "") ai_role = config_params.get("ai_role", "") ai_goals = config_params.get("ai_goals", []) + api_budget = config_params.get("api_budget", 0.0) # type: Type[AIConfig] - return AIConfig(ai_name, ai_role, ai_goals) + return AIConfig(ai_name, ai_role, ai_goals, api_budget) def save(self, config_file: str = SAVE_FILE) -> None: """ @@ -92,6 +100,7 @@ class AIConfig: "ai_name": self.ai_name, "ai_role": self.ai_role, "ai_goals": self.ai_goals, + "api_budget": self.api_budget, } with open(config_file, "w", encoding="utf-8") as file: yaml.dump(config, file, allow_unicode=True) @@ -107,7 +116,7 @@ class AIConfig: Returns: full_prompt (str): A string containing the initial prompt for the user - including the ai_name, ai_role and ai_goals. + including the ai_name, ai_role, ai_goals, and api_budget. """ prompt_start = ( @@ -147,6 +156,8 @@ class AIConfig: full_prompt = f"You are {prompt_generator.name}, {prompt_generator.role}\n{prompt_start}\n\nGOALS:\n\n" for i, goal in enumerate(self.ai_goals): full_prompt += f"{i+1}. {goal}\n" + if self.api_budget > 0.0: + full_prompt += f"\nIt takes money to let you run. Your API budget is ${self.api_budget:.3f}" self.prompt_generator = prompt_generator full_prompt += f"\n\n{prompt_generator.generate_prompt_string()}" return full_prompt diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py index 8b85959c..da6cb979 100644 --- a/autogpt/llm_utils.py +++ b/autogpt/llm_utils.py @@ -7,6 +7,7 @@ import openai from colorama import Fore, Style from openai.error import APIError, RateLimitError +from autogpt.api_manager import api_manager from autogpt.config import Config from autogpt.logs import logger from autogpt.types.openai import Message @@ -96,7 +97,7 @@ def create_chat_completion( backoff = 2 ** (attempt + 2) try: if CFG.use_azure: - response = openai.ChatCompletion.create( + response = api_manager.create_chat_completion( deployment_id=CFG.get_azure_deployment_id_for_model(model), model=model, messages=messages, @@ -104,7 +105,7 @@ def create_chat_completion( max_tokens=max_tokens, ) else: - response = openai.ChatCompletion.create( + response = api_manager.create_chat_completion( model=model, messages=messages, temperature=temperature, @@ -159,17 +160,9 @@ def create_embedding_with_ada(text) -> list: for attempt in range(num_retries): backoff = 2 ** (attempt + 2) try: - if CFG.use_azure: - return openai.Embedding.create( - input=[text], - engine=CFG.get_azure_deployment_id_for_model( - "text-embedding-ada-002" - ), - )["data"][0]["embedding"] - else: - return openai.Embedding.create( - input=[text], model="text-embedding-ada-002" - )["data"][0]["embedding"] + return api_manager.embedding_create( + text_list=[text], model="text-embedding-ada-002" + ) except RateLimitError: pass except APIError as e: diff --git a/autogpt/memory/base.py b/autogpt/memory/base.py index 691e2299..b69f795c 100644 --- a/autogpt/memory/base.py +++ b/autogpt/memory/base.py @@ -3,6 +3,7 @@ import abc import openai +from autogpt.api_manager import api_manager from autogpt.config import AbstractSingleton, Config cfg = Config() @@ -10,15 +11,9 @@ cfg = Config() def get_ada_embedding(text): text = text.replace("\n", " ") - if cfg.use_azure: - return openai.Embedding.create( - input=[text], - engine=cfg.get_azure_deployment_id_for_model("text-embedding-ada-002"), - )["data"][0]["embedding"] - else: - return openai.Embedding.create(input=[text], model="text-embedding-ada-002")[ - "data" - ][0]["embedding"] + return 
api_manager.embedding_create( + text_list=[text], model="text-embedding-ada-002" + ) class MemoryProviderSingleton(AbstractSingleton): diff --git a/autogpt/modelsinfo.py b/autogpt/modelsinfo.py new file mode 100644 index 00000000..4326c0b1 --- /dev/null +++ b/autogpt/modelsinfo.py @@ -0,0 +1,7 @@ +COSTS = { + "gpt-3.5-turbo": {"prompt": 0.002, "completion": 0.002}, + "gpt-3.5-turbo-0301": {"prompt": 0.002, "completion": 0.002}, + "gpt-4-0314": {"prompt": 0.03, "completion": 0.06}, + "gpt-4": {"prompt": 0.03, "completion": 0.06}, + "text-embedding-ada-002": {"prompt": 0.0004, "completion": 0.0}, +} diff --git a/autogpt/prompts/prompt.py b/autogpt/prompts/prompt.py index d33d0e3a..bd3cc7cb 100644 --- a/autogpt/prompts/prompt.py +++ b/autogpt/prompts/prompt.py @@ -1,5 +1,6 @@ from colorama import Fore +from autogpt.api_manager import api_manager from autogpt.config.ai_config import AIConfig from autogpt.config.config import Config from autogpt.logs import logger @@ -86,6 +87,11 @@ def construct_main_ai_config() -> AIConfig: logger.typewriter_log("Name :", Fore.GREEN, config.ai_name) logger.typewriter_log("Role :", Fore.GREEN, config.ai_role) logger.typewriter_log("Goals:", Fore.GREEN, f"{config.ai_goals}") + logger.typewriter_log( + "API Budget:", + Fore.GREEN, + "infinite" if config.api_budget <= 0 else f"${config.api_budget}", + ) elif config.ai_name: logger.typewriter_log( "Welcome back! ", @@ -98,6 +104,7 @@ def construct_main_ai_config() -> AIConfig: Name: {config.ai_name} Role: {config.ai_role} Goals: {config.ai_goals} +API Budget: {"infinite" if config.api_budget <= 0 else f"${config.api_budget}"} Continue (y/n): """ ) if should_continue.lower() == "n": @@ -107,6 +114,9 @@ Continue (y/n): """ config = prompt_user() config.save(CFG.ai_settings_file) + # set the total api budget + api_manager.set_total_budget(config.api_budget) + # Agent Created, print message logger.typewriter_log( config.ai_name, diff --git a/autogpt/setup.py b/autogpt/setup.py index 02b67c7a..e3c251ec 100644 --- a/autogpt/setup.py +++ b/autogpt/setup.py @@ -133,7 +133,28 @@ def generate_aiconfig_manual() -> AIConfig: "Develop and manage multiple businesses autonomously", ] - return AIConfig(ai_name, ai_role, ai_goals) + # Get API Budget from User + logger.typewriter_log( + "Enter your budget for API calls: ", + Fore.GREEN, + "For example: $1.50", + ) + print("Enter nothing to let the AI run without monetary limit", flush=True) + api_budget_input = utils.clean_input( + f"{Fore.LIGHTBLUE_EX}Budget{Style.RESET_ALL}: $" + ) + if api_budget_input == "": + api_budget = 0.0 + else: + try: + api_budget = float(api_budget_input.replace("$", "")) + except ValueError: + logger.typewriter_log( + "Invalid budget input. 
Setting budget to unlimited.", Fore.RED + ) + api_budget = 0.0 + + return AIConfig(ai_name, ai_role, ai_goals, api_budget) def generate_aiconfig_automatic(user_prompt) -> AIConfig: @@ -192,5 +213,6 @@ Goals: .strip() ) ai_goals = re.findall(r"(?<=\n)-\s*(.*)", output) + api_budget = 0.0 # TODO: parse api budget using a regular expression - return AIConfig(ai_name, ai_role, ai_goals) + return AIConfig(ai_name, ai_role, ai_goals, api_budget) diff --git a/tests/test_api_manager.py b/tests/test_api_manager.py new file mode 100644 index 00000000..900a88e0 --- /dev/null +++ b/tests/test_api_manager.py @@ -0,0 +1,148 @@ +from unittest.mock import MagicMock, patch + +import pytest + +from autogpt.api_manager import COSTS, ApiManager + +api_manager = ApiManager() + + +@pytest.fixture(autouse=True) +def reset_api_manager(): + api_manager.reset() + yield + + +@pytest.fixture(autouse=True) +def mock_costs(): + with patch.dict( + COSTS, + { + "gpt-3.5-turbo": {"prompt": 0.002, "completion": 0.002}, + "text-embedding-ada-002": {"prompt": 0.0004, "completion": 0}, + }, + clear=True, + ): + yield + + +class TestApiManager: + @staticmethod + def test_create_chat_completion_debug_mode(caplog): + """Test if debug mode logs response.""" + api_manager_debug = ApiManager(debug=True) + messages = [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Who won the world series in 2020?"}, + ] + model = "gpt-3.5-turbo" + + with patch("openai.ChatCompletion.create") as mock_create: + mock_response = MagicMock() + mock_response.usage.prompt_tokens = 10 + mock_response.usage.completion_tokens = 20 + mock_create.return_value = mock_response + + api_manager_debug.create_chat_completion(messages, model=model) + + assert "Response" in caplog.text + + @staticmethod + def test_create_chat_completion_empty_messages(): + """Test if empty messages result in zero tokens and cost.""" + messages = [] + model = "gpt-3.5-turbo" + + with patch("openai.ChatCompletion.create") as mock_create: + mock_response = MagicMock() + mock_response.usage.prompt_tokens = 0 + mock_response.usage.completion_tokens = 0 + mock_create.return_value = mock_response + + api_manager.create_chat_completion(messages, model=model) + + assert api_manager.get_total_prompt_tokens() == 0 + assert api_manager.get_total_completion_tokens() == 0 + assert api_manager.get_total_cost() == 0 + + @staticmethod + def test_create_chat_completion_valid_inputs(): + """Test if valid inputs result in correct tokens and cost.""" + messages = [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Who won the world series in 2020?"}, + ] + model = "gpt-3.5-turbo" + + with patch("openai.ChatCompletion.create") as mock_create: + mock_response = MagicMock() + mock_response.usage.prompt_tokens = 10 + mock_response.usage.completion_tokens = 20 + mock_create.return_value = mock_response + + api_manager.create_chat_completion(messages, model=model) + + assert api_manager.get_total_prompt_tokens() == 10 + assert api_manager.get_total_completion_tokens() == 20 + assert api_manager.get_total_cost() == (10 * 0.002 + 20 * 0.002) / 1000 + + @staticmethod + def test_embedding_create_invalid_model(): + """Test if an invalid model for embedding raises a KeyError.""" + text_list = ["Hello, how are you?"] + model = "invalid-model" + + with patch("openai.Embedding.create") as mock_create: + mock_response = MagicMock() + mock_response.usage.prompt_tokens = 5 + mock_create.side_effect = KeyError("Invalid model") + 
with pytest.raises(KeyError): + api_manager.embedding_create(text_list, model=model) + + @staticmethod + def test_embedding_create_valid_inputs(): + """Test if valid inputs for embedding result in correct tokens and cost.""" + text_list = ["Hello, how are you?"] + model = "text-embedding-ada-002" + + with patch("openai.Embedding.create") as mock_create: + mock_response = MagicMock() + mock_response.usage.prompt_tokens = 5 + mock_response["data"] = [{"embedding": [0.1, 0.2, 0.3]}] + mock_create.return_value = mock_response + + api_manager.embedding_create(text_list, model=model) + + assert api_manager.get_total_prompt_tokens() == 5 + assert api_manager.get_total_completion_tokens() == 0 + assert api_manager.get_total_cost() == (5 * 0.0004) / 1000 + + def test_getter_methods(self): + """Test the getter methods for total tokens, cost, and budget.""" + api_manager.update_cost(60, 120, "gpt-3.5-turbo") + api_manager.set_total_budget(10.0) + assert api_manager.get_total_prompt_tokens() == 60 + assert api_manager.get_total_completion_tokens() == 120 + assert api_manager.get_total_cost() == (60 * 0.002 + 120 * 0.002) / 1000 + assert api_manager.get_total_budget() == 10.0 + + @staticmethod + def test_set_total_budget(): + """Test if setting the total budget works correctly.""" + total_budget = 10.0 + api_manager.set_total_budget(total_budget) + + assert api_manager.get_total_budget() == total_budget + + @staticmethod + def test_update_cost(): + """Test if updating the cost works correctly.""" + prompt_tokens = 50 + completion_tokens = 100 + model = "gpt-3.5-turbo" + + api_manager.update_cost(prompt_tokens, completion_tokens, model) + + assert api_manager.get_total_prompt_tokens() == 50 + assert api_manager.get_total_completion_tokens() == 100 + assert api_manager.get_total_cost() == (50 * 0.002 + 100 * 0.002) / 1000 diff --git a/tests/unit/test_commands.py b/tests/unit/test_commands.py index ecbac9b7..f66fe4d9 100644 --- a/tests/unit/test_commands.py +++ b/tests/unit/test_commands.py @@ -5,18 +5,20 @@ import pytest import autogpt.agent.agent_manager as agent_manager from autogpt.app import execute_command, list_agents, start_agent +from tests.utils import requires_api_key @pytest.mark.integration_test +@requires_api_key("OPENAI_API_KEY") def test_make_agent() -> None: """Test the make_agent command""" with patch("openai.ChatCompletion.create") as mock: obj = MagicMock() obj.response.choices[0].messages[0].content = "Test message" mock.return_value = obj - start_agent("Test Agent", "chat", "Hello, how are you?", "gpt2") + start_agent("Test Agent", "chat", "Hello, how are you?", "gpt-3.5-turbo") agents = list_agents() assert "List of agents:\n0: chat" == agents - start_agent("Test Agent 2", "write", "Hello, how are you?", "gpt2") + start_agent("Test Agent 2", "write", "Hello, how are you?", "gpt-3.5-turbo") agents = list_agents() assert "List of agents:\n0: chat\n1: write" == agents diff --git a/tests/unit/test_setup.py b/tests/unit/test_setup.py index c922ff9c..9b907fcf 100644 --- a/tests/unit/test_setup.py +++ b/tests/unit/test_setup.py @@ -44,6 +44,7 @@ class TestAutoGPT(unittest.TestCase): "Purchase ingredients", "Bake a cake", "", + "", ] with patch("builtins.input", side_effect=user_inputs): ai_config = prompt_user() @@ -62,6 +63,7 @@ class TestAutoGPT(unittest.TestCase): "Purchase ingredients", "Bake a cake", "", + "", ] with patch("builtins.input", side_effect=user_inputs): ai_config = prompt_user() From ec71075bfe658161440c62ff2a07e0fdb685f59e Mon Sep 17 00:00:00 2001 From: coditamar 
<108689937+coditamar@users.noreply.github.com> Date: Mon, 24 Apr 2023 00:29:40 +0300 Subject: [PATCH 102/222] Add tests for json_utils.json_fix_llm (#2952) * config.py: make load_dotenv(override=True) * Update Python version and benchmark file in benchmark.yml * Refactor main function and imports in cli.py * Update import statement in ai_config.py * Add set_temperature and set_memory_backend methods in config.py * Remove unused import in prompt.py * Add goal oriented tasks workflow * Added agent_utils to create agent * added pytest and vcrpy * added write file cassette * created goal oriented task write file with cassettes to not pay openai tokens * solve conflicts * add ability set azure because github workflow needs it off * solve conflicts in cli.py * black because linter fails * solve conflict * setup github action to v3 Signed-off-by: Merwane Hamadi * fix conflicts Signed-off-by: Merwane Hamadi * Plugins: debug line always printed in plugin load * add test for fix_json_using_multiple_techniques * style * style * mocking try_ai_fix to avoid call_ai_function * black style * mock try_ai_fix to avoid calling the AI model * removed mock, as we can add @requires_api_key("OPEN_API_KEY") * style * reverse merge conflict related files and changes * bring back the mock for try_ai_fix --------- Signed-off-by: Merwane Hamadi Co-authored-by: Merwane Hamadi Co-authored-by: Merwane Hamadi Co-authored-by: Richard Beales Co-authored-by: Nicholas Tindle Co-authored-by: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com> --- tests/unit/test_json_utils_llm.py | 117 ++++++++++++++++++++++++++++++ 1 file changed, 117 insertions(+) create mode 100644 tests/unit/test_json_utils_llm.py diff --git a/tests/unit/test_json_utils_llm.py b/tests/unit/test_json_utils_llm.py new file mode 100644 index 00000000..b8c9518d --- /dev/null +++ b/tests/unit/test_json_utils_llm.py @@ -0,0 +1,117 @@ +# Generated by CodiumAI +import pytest +from loguru import logger + +from autogpt.json_utils.json_fix_llm import ( + fix_and_parse_json, + fix_json_using_multiple_techniques, +) +from tests.utils import requires_api_key + +""" +Code Analysis + +Objective: +- The objective of the function is to fix a given JSON string to make it parseable and fully compliant with two techniques. + +Inputs: +- The function takes in a string called 'assistant_reply', which is the JSON string to be fixed. + +Flow: +- The function first calls the 'fix_and_parse_json' function to parse and print the Assistant response. +- If the parsed JSON is an empty dictionary, the function calls the 'attempt_to_fix_json_by_finding_outermost_brackets' function to fix the JSON string. +- If the parsed JSON is not an empty dictionary, the function returns the parsed JSON. +- If the parsed JSON is an empty dictionary and cannot be fixed, the function logs an error and returns an empty dictionary. + +Outputs: +- The main output of the function is a dictionary containing the fixed JSON string. + +Additional aspects: +- The function uses two techniques to fix the JSON string: parsing and finding outermost brackets. +- The function logs an error if the JSON string cannot be fixed and returns an empty dictionary. +- The function uses the 'CFG' object to determine whether to speak the error message or not. +""" + + +class TestFixJsonUsingMultipleTechniques: + # Tests that the function successfully fixes and parses a JSON string that is already compliant with both techniques. 
+ def test_fix_and_parse_json_happy_path(self): + # Happy path test case where the JSON string is already compliant with both techniques + json_string = '{"text": "Hello world", "confidence": 0.9}' + expected_output = {"text": "Hello world", "confidence": 0.9} + assert fix_json_using_multiple_techniques(json_string) == expected_output + + # Tests that the function successfully fixes and parses a JSON string that contains only whitespace characters. + # @requires_api_key("OPEN_API_KEY") + def test_fix_and_parse_json_whitespace(self, mocker): + # Happy path test case where the JSON string contains only whitespace characters + json_string = " \n\t " + + # mock try_ai_fix to avoid calling the AI model: + mocker.patch("autogpt.json_utils.json_fix_llm.try_ai_fix", return_value={}) + + expected_output = {} + assert fix_json_using_multiple_techniques(json_string) == expected_output + + # Tests that the function successfully converts a string with arrays to an array + def test_fix_and_parse_json_array(self): + # Happy path test case where the JSON string contains an array of string + json_string = '[ "Add type hints", "Move docstrings", "Consider using" ]' + expected_output = ["Add type hints", "Move docstrings", "Consider using"] + assert fix_json_using_multiple_techniques(json_string) == expected_output + + # Tests that the function returns an empty dictionary when the JSON string is not parseable and cannot be fixed using either technique. + # @requires_api_key("OPEN_API_KEY") + def test_fix_and_parse_json_can_not(self, mocker): + # Edge case test case where the JSON string is not parseable and cannot be fixed using either technique + json_string = "This is not a JSON string" + + # mock try_ai_fix to avoid calling the AI model: + mocker.patch("autogpt.json_utils.json_fix_llm.try_ai_fix", return_value={}) + + expected_output = {} + + # Use the actual function name in the test + result = fix_json_using_multiple_techniques(json_string) + + assert result == expected_output + + # Tests that the function returns an empty dictionary when the JSON string is empty. + # @requires_api_key("OPEN_API_KEY") + def test_fix_and_parse_json_empty_string(self, mocker): + # Arrange + json_string = "" + + # Act + # mock try_ai_fix to avoid calling the AI model: + mocker.patch("autogpt.json_utils.json_fix_llm.try_ai_fix", return_value={}) + + result = fix_and_parse_json(json_string) + + # Assert + assert result == {} + + # Tests that the function successfully fixes and parses a JSON string that contains escape characters. + def test_fix_and_parse_json_escape_characters(self): + # Arrange + json_string = '{"text": "This is a \\"test\\" string."}' + + # Act + result = fix_json_using_multiple_techniques(json_string) + + # Assert + assert result == {"text": 'This is a "test" string.'} + + # Tests that the function successfully fixes and parses a JSON string that contains nested objects or arrays. 
+ def test_fix_and_parse_json_nested_objects(self): + # Arrange + json_string = '{"person": {"name": "John", "age": 30}, "hobbies": ["reading", "swimming"]}' + + # Act + result = fix_json_using_multiple_techniques(json_string) + + # Assert + assert result == { + "person": {"name": "John", "age": 30}, + "hobbies": ["reading", "swimming"], + } From 374f543bea153cd7bf6fa510500347407e62470f Mon Sep 17 00:00:00 2001 From: Media <12145726+rihp@users.noreply.github.com> Date: Sun, 23 Apr 2023 23:50:15 +0200 Subject: [PATCH 103/222] Perm memory test cases (#2996) * Update Python version and benchmark file in benchmark.yml * Refactor main function and imports in cli.py * Update import statement in ai_config.py * Add set_temperature and set_memory_backend methods in config.py * Remove unused import in prompt.py * Add goal oriented tasks workflow * Added agent_utils to create agent * added pytest and vcrpy * added write file cassette * created goal oriented task write file with cassettes to not pay openai tokens * solve conflicts * add ability set azure because github workflow needs it off * solve conflicts in cli.py * black because linter fails * solve conflict * setup github action to v3 Signed-off-by: Merwane Hamadi * fix conflicts Signed-off-by: Merwane Hamadi * Plugins: debug line always printed in plugin load * add decorator to tests Signed-off-by: Merwane Hamadi * move decorator higher up Signed-off-by: Merwane Hamadi * init * more tests * passing tests * skip gitbranch decorator on ci * decorator skiponci * black * Update tests/utils.py decorator of skipping ci Co-authored-by: Nicholas Tindle * black * I oopsed the name * black * finally * perm memory tests * perm memory tests --------- Signed-off-by: Merwane Hamadi Co-authored-by: Merwane Hamadi Co-authored-by: Merwane Hamadi Co-authored-by: Richard Beales Co-authored-by: Nicholas Tindle Co-authored-by: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com> Co-authored-by: Nicholas Tindle --- tests/test_permanent_memory.py | 59 ++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 tests/test_permanent_memory.py diff --git a/tests/test_permanent_memory.py b/tests/test_permanent_memory.py new file mode 100644 index 00000000..943c61fe --- /dev/null +++ b/tests/test_permanent_memory.py @@ -0,0 +1,59 @@ +import os +import unittest + +from autogpt.permanent_memory.sqlite3_store import MemoryDB + + +class TestMemoryDB(unittest.TestCase): + def setUp(self): + self.db_filename = "test_db.sqlite3" + self.db = MemoryDB(self.db_filename) + + def tearDown(self): + self.db.quit() + os.remove(self.db_filename) + + def test_overwrite_and_get_session(self): + self.db.insert("The quick brown fox jumps over the lazy dog") + self.db.insert("The five boxing wizards jump quickly") + + # Overwrite the second text + self.db.overwrite(1, "The slow elephant walks carefully") + + # Get the session and verify the texts + session = self.db.get_session() + self.assertEqual(len(session), 2) + self.assertIn("The quick brown fox jumps over the lazy dog", session) + self.assertIn("The slow elephant walks carefully", session) + + # Overwrite the first text + self.db.overwrite(0, "The lazy dog jumps over the quick brown fox") + + # Get the session and verify the texts + session = self.db.get_session() + self.assertEqual(len(session), 2) + self.assertIn("The lazy dog jumps over the quick brown fox", session) + self.assertIn("The slow elephant walks carefully", session) + + def test_delete_memory(self): + self.db.insert("The quick 
brown fox jumps over the lazy dog") + self.db.insert("The five boxing wizards jump quickly") + + # Delete the first text + self.db.delete_memory(0) + + # Get the session and verify the remaining text + session = self.db.get_session() + self.assertEqual(len(session), 1) + self.assertIn("The five boxing wizards jump quickly", session) + + # Delete the remaining text + self.db.delete_memory(1) + + # Get the session and verify that it's empty + session = self.db.get_session() + self.assertEqual(len(session), 0) + + +if __name__ == "__main__": + unittest.main() From 680c7b5aaaa9bc088e7f16451227daac8122e4d4 Mon Sep 17 00:00:00 2001 From: James Collins Date: Sun, 23 Apr 2023 15:43:04 -0700 Subject: [PATCH 104/222] Make local json cache when it doesn't exist --- autogpt/memory/local.py | 36 ++++++++++++++++-------------------- 1 file changed, 16 insertions(+), 20 deletions(-) diff --git a/autogpt/memory/local.py b/autogpt/memory/local.py index 803b6dc6..b35aced6 100644 --- a/autogpt/memory/local.py +++ b/autogpt/memory/local.py @@ -1,7 +1,7 @@ from __future__ import annotations import dataclasses -import os +from pathlib import Path from typing import Any, List import numpy as np @@ -38,26 +38,22 @@ class LocalCache(MemoryProviderSingleton): Returns: None """ - self.filename = f"{cfg.memory_index}.json" - if os.path.exists(self.filename): - try: - with open(self.filename, "w+b") as f: - file_content = f.read() - if not file_content.strip(): - file_content = b"{}" - f.write(file_content) + workspace_path = Path(cfg.workspace_path) + self.filename = workspace_path / f"{cfg.memory_index}.json" + self.filename.touch(exist_ok=True) - loaded = orjson.loads(file_content) - self.data = CacheContent(**loaded) - except orjson.JSONDecodeError: - print(f"Error: The file '{self.filename}' is not in JSON format.") - self.data = CacheContent() - else: - print( - f"Warning: The file '{self.filename}' does not exist. " - "Local memory would not be saved to a file." 
- ) - self.data = CacheContent() + try: + with self.filename.open("w+b") as f: + file_content = f.read() + if not file_content.strip(): + file_content = b"{}" + f.write(file_content) + loaded = orjson.loads(file_content) + except orjson.JSONDecodeError: + print(f"Error: The file '{self.filename}' is not in JSON format.") + loaded = {} + + self.data = CacheContent(**loaded) def add(self, text: str): """ From aedd288dbe7e807aafb123994118e91acdc745dd Mon Sep 17 00:00:00 2001 From: James Collins Date: Sun, 23 Apr 2023 15:50:50 -0700 Subject: [PATCH 105/222] Refactor/collect embeddings code (#3060) * Collect all embedding code into a single module * Collect all embedding code into a single module * actually, llm_utils is a better place * Oh, and remove the module now that we don't use it --------- Co-authored-by: Nicholas Tindle --- autogpt/llm_utils.py | 7 +++++++ autogpt/memory/base.py | 10 ---------- autogpt/memory/milvus.py | 3 ++- autogpt/memory/weaviate.py | 6 ++---- tests/integration/weaviate_memory_tests.py | 5 +---- 5 files changed, 12 insertions(+), 19 deletions(-) diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py index da6cb979..ba7521a4 100644 --- a/autogpt/llm_utils.py +++ b/autogpt/llm_utils.py @@ -154,6 +154,13 @@ def create_chat_completion( return resp +def get_ada_embedding(text): + text = text.replace("\n", " ") + return api_manager.embedding_create( + text_list=[text], model="text-embedding-ada-002" + ) + + def create_embedding_with_ada(text) -> list: """Create an embedding with text-ada-002 using the OpenAI SDK""" num_retries = 10 diff --git a/autogpt/memory/base.py b/autogpt/memory/base.py index b69f795c..b6252464 100644 --- a/autogpt/memory/base.py +++ b/autogpt/memory/base.py @@ -1,21 +1,11 @@ """Base class for memory providers.""" import abc -import openai - -from autogpt.api_manager import api_manager from autogpt.config import AbstractSingleton, Config cfg = Config() -def get_ada_embedding(text): - text = text.replace("\n", " ") - return api_manager.embedding_create( - text_list=[text], model="text-embedding-ada-002" - ) - - class MemoryProviderSingleton(AbstractSingleton): @abc.abstractmethod def add(self, data): diff --git a/autogpt/memory/milvus.py b/autogpt/memory/milvus.py index 1849a9e6..085f50b4 100644 --- a/autogpt/memory/milvus.py +++ b/autogpt/memory/milvus.py @@ -4,7 +4,8 @@ import re from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, connections from autogpt.config import Config -from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding +from autogpt.llm_utils import get_ada_embedding +from autogpt.memory.base import MemoryProviderSingleton class MilvusMemory(MemoryProviderSingleton): diff --git a/autogpt/memory/weaviate.py b/autogpt/memory/weaviate.py index 0225ae04..fbebbfd7 100644 --- a/autogpt/memory/weaviate.py +++ b/autogpt/memory/weaviate.py @@ -1,12 +1,10 @@ -import uuid - import weaviate from weaviate import Client from weaviate.embedded import EmbeddedOptions from weaviate.util import generate_uuid5 -from autogpt.config import Config -from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding +from autogpt.llm_utils import get_ada_embedding +from autogpt.memory.base import MemoryProviderSingleton def default_schema(weaviate_index): diff --git a/tests/integration/weaviate_memory_tests.py b/tests/integration/weaviate_memory_tests.py index 015eab05..5448b79e 100644 --- a/tests/integration/weaviate_memory_tests.py +++ b/tests/integration/weaviate_memory_tests.py @@ -1,14 +1,11 @@ -import 
os -import sys import unittest -from unittest import mock from uuid import uuid4 from weaviate import Client from weaviate.util import get_valid_uuid from autogpt.config import Config -from autogpt.memory.base import get_ada_embedding +from autogpt.llm_utils import get_ada_embedding from autogpt.memory.weaviate import WeaviateMemory From a28b8906a6ac46b3df9f6ae280abec3cb659b994 Mon Sep 17 00:00:00 2001 From: James Collins Date: Sun, 23 Apr 2023 16:40:53 -0700 Subject: [PATCH 106/222] Add tests in pytest --- autogpt/memory/local.py | 16 ++---- tests/conftest.py | 12 +++++ tests/local_cache_test.py | 73 ------------------------- tests/test_local_cache.py | 109 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 126 insertions(+), 84 deletions(-) delete mode 100644 tests/local_cache_test.py create mode 100644 tests/test_local_cache.py diff --git a/autogpt/memory/local.py b/autogpt/memory/local.py index b35aced6..1f1a1a3c 100644 --- a/autogpt/memory/local.py +++ b/autogpt/memory/local.py @@ -40,20 +40,14 @@ class LocalCache(MemoryProviderSingleton): """ workspace_path = Path(cfg.workspace_path) self.filename = workspace_path / f"{cfg.memory_index}.json" + self.filename.touch(exist_ok=True) - try: - with self.filename.open("w+b") as f: - file_content = f.read() - if not file_content.strip(): - file_content = b"{}" - f.write(file_content) - loaded = orjson.loads(file_content) - except orjson.JSONDecodeError: - print(f"Error: The file '{self.filename}' is not in JSON format.") - loaded = {} + file_content = b"{}" + with self.filename.open("w+b") as f: + f.write(file_content) - self.data = CacheContent(**loaded) + self.data = CacheContent() def add(self, text: str): """ diff --git a/tests/conftest.py b/tests/conftest.py index 99ccb940..59433457 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -3,6 +3,7 @@ from pathlib import Path import pytest from dotenv import load_dotenv +from autogpt.config import Config from autogpt.workspace import Workspace load_dotenv() @@ -17,3 +18,14 @@ def workspace_root(tmp_path) -> Path: def workspace(workspace_root: Path) -> Workspace: workspace_root = Workspace.make_workspace(workspace_root) return Workspace(workspace_root, restrict_to_workspace=True) + + +@pytest.fixture() +def config(workspace: Workspace) -> Config: + config = Config() + + # Do a little setup and teardown since the config object is a singleton + old_ws_path = config.workspace_path + config.workspace_path = workspace.root + yield config + config.workspace_path = old_ws_path diff --git a/tests/local_cache_test.py b/tests/local_cache_test.py deleted file mode 100644 index bbaf8e57..00000000 --- a/tests/local_cache_test.py +++ /dev/null @@ -1,73 +0,0 @@ -# sourcery skip: snake-case-functions -"""Tests for LocalCache class""" -import os -import sys -import unittest - -import pytest - -from autogpt.memory.local import LocalCache -from tests.utils import requires_api_key - - -def mock_config() -> dict: - """Mock the Config class""" - return type( - "MockConfig", - (object,), - { - "debug_mode": False, - "continuous_mode": False, - "speak_mode": False, - "memory_index": "auto-gpt", - }, - ) - - -@pytest.mark.integration_test -class TestLocalCache(unittest.TestCase): - """Tests for LocalCache class""" - - def setUp(self) -> None: - """Set up the test environment""" - self.cfg = mock_config() - self.cache = LocalCache(self.cfg) - - @requires_api_key("OPENAI_API_KEY") - def test_add(self) -> None: - """Test adding a text to the cache""" - text = "Sample text" - self.cache.add(text) - 
self.assertIn(text, self.cache.data.texts) - - @requires_api_key("OPENAI_API_KEY") - def test_clear(self) -> None: - """Test clearing the cache""" - self.cache.clear() - self.assertEqual(self.cache.data.texts, []) - - @requires_api_key("OPENAI_API_KEY") - def test_get(self) -> None: - """Test getting a text from the cache""" - text = "Sample text" - self.cache.add(text) - result = self.cache.get(text) - self.assertEqual(result, [text]) - - @requires_api_key("OPENAI_API_KEY") - def test_get_relevant(self) -> None: - """Test getting relevant texts from the cache""" - text1 = "Sample text 1" - text2 = "Sample text 2" - self.cache.add(text1) - self.cache.add(text2) - result = self.cache.get_relevant(text1, 1) - self.assertEqual(result, [text1]) - - @requires_api_key("OPENAI_API_KEY") - def test_get_stats(self) -> None: - """Test getting the cache stats""" - text = "Sample text" - self.cache.add(text) - stats = self.cache.get_stats() - self.assertEqual(stats, (4, self.cache.data.embeddings.shape)) diff --git a/tests/test_local_cache.py b/tests/test_local_cache.py new file mode 100644 index 00000000..b6a0387b --- /dev/null +++ b/tests/test_local_cache.py @@ -0,0 +1,109 @@ +# sourcery skip: snake-case-functions +"""Tests for LocalCache class""" +import unittest + +import orjson +import pytest + +from autogpt.memory.local import EMBED_DIM, SAVE_OPTIONS +from autogpt.memory.local import LocalCache as LocalCache_ +from tests.utils import requires_api_key + + +@pytest.fixture +def LocalCache(): + # Hack, real gross. Singletons are not good times. + if LocalCache_ in LocalCache_._instances: + del LocalCache_._instances[LocalCache_] + return LocalCache_ + + +@pytest.fixture +def mock_embed_with_ada(mocker): + mocker.patch( + "autogpt.memory.local.create_embedding_with_ada", + return_value=[0.1] * EMBED_DIM, + ) + + +def test_init_without_backing_file(LocalCache, config, workspace): + cache_file = workspace.root / f"{config.memory_index}.json" + + assert not cache_file.exists() + LocalCache(config) + assert cache_file.exists() + assert cache_file.read_text() == "{}" + + +def test_init_with_backing_empty_file(LocalCache, config, workspace): + cache_file = workspace.root / f"{config.memory_index}.json" + cache_file.touch() + + assert cache_file.exists() + LocalCache(config) + assert cache_file.exists() + assert cache_file.read_text() == "{}" + + +def test_init_with_backing_file(LocalCache, config, workspace): + cache_file = workspace.root / f"{config.memory_index}.json" + cache_file.touch() + + raw_data = {"texts": ["test"]} + data = orjson.dumps(raw_data, option=SAVE_OPTIONS) + with cache_file.open("wb") as f: + f.write(data) + + assert cache_file.exists() + LocalCache(config) + assert cache_file.exists() + assert cache_file.read_text() == "{}" + + +def test_add(LocalCache, config, mock_embed_with_ada): + cache = LocalCache(config) + cache.add("test") + assert cache.data.texts == ["test"] + assert cache.data.embeddings.shape == (1, EMBED_DIM) + + +def test_clear(LocalCache, config, mock_embed_with_ada): + cache = LocalCache(config) + assert cache.data.texts == [] + assert cache.data.embeddings.shape == (0, EMBED_DIM) + + cache.add("test") + assert cache.data.texts == ["test"] + assert cache.data.embeddings.shape == (1, EMBED_DIM) + + cache.clear() + assert cache.data.texts == [] + assert cache.data.embeddings.shape == (0, EMBED_DIM) + + +def test_get(LocalCache, config, mock_embed_with_ada): + cache = LocalCache(config) + assert cache.get("test") == [] + + cache.add("test") + assert cache.get("test") == 
["test"] + + +@requires_api_key("OPENAI_API_KEY") +def test_get_relevant(LocalCache, config) -> None: + cache = LocalCache(config) + text1 = "Sample text 1" + text2 = "Sample text 2" + cache.add(text1) + cache.add(text2) + + result = cache.get_relevant(text1, 1) + assert result == [text1] + + +def test_get_stats(LocalCache, config, mock_embed_with_ada) -> None: + cache = LocalCache(config) + text = "Sample text" + cache.add(text) + stats = cache.get_stats() + assert stats == (1, cache.data.embeddings.shape) From 43fa67ca8139000b652a4fa1794ed76c40b40b37 Mon Sep 17 00:00:00 2001 From: James Collins Date: Sun, 23 Apr 2023 16:54:32 -0700 Subject: [PATCH 107/222] Remove unnecessary memory call --- autogpt/commands/web_requests.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/autogpt/commands/web_requests.py b/autogpt/commands/web_requests.py index 406338f4..f3ad0197 100644 --- a/autogpt/commands/web_requests.py +++ b/autogpt/commands/web_requests.py @@ -9,11 +9,9 @@ from requests import Response from requests.compat import urljoin from autogpt.config import Config -from autogpt.memory import get_memory from autogpt.processing.html import extract_hyperlinks, format_hyperlinks CFG = Config() -memory = get_memory(CFG) session = requests.Session() session.headers.update({"User-Agent": CFG.user_agent}) From 91aa40e0df66c719d50e8fa31a8e7d143a87bd45 Mon Sep 17 00:00:00 2001 From: James Collins Date: Sun, 23 Apr 2023 16:59:49 -0700 Subject: [PATCH 108/222] Remove another global memory access --- autogpt/processing/text.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/autogpt/processing/text.py b/autogpt/processing/text.py index 2122f0f0..9946951a 100644 --- a/autogpt/processing/text.py +++ b/autogpt/processing/text.py @@ -10,7 +10,6 @@ from autogpt.llm_utils import create_chat_completion from autogpt.memory import get_memory CFG = Config() -MEMORY = get_memory(CFG) def split_text( @@ -109,7 +108,8 @@ def summarize_text( memory_to_add = f"Source: {url}\n" f"Raw content part#{i + 1}: {chunk}" - MEMORY.add(memory_to_add) + memory = get_memory(CFG) + memory.add(memory_to_add) messages = [create_message(chunk, question)] tokens_for_chunk = token_counter.count_message_tokens(messages, model) @@ -128,7 +128,7 @@ def summarize_text( memory_to_add = f"Source: {url}\n" f"Content summary part#{i + 1}: {summary}" - MEMORY.add(memory_to_add) + memory.add(memory_to_add) print(f"Summarized {len(chunks)} chunks.") From 7ddc44d48e0dfe0c96bfd8ca055189c4cb986472 Mon Sep 17 00:00:00 2001 From: James Collins Date: Sun, 23 Apr 2023 20:44:53 -0700 Subject: [PATCH 109/222] Resolve sequencing issue in global state managemtn --- autogpt/cli.py | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/autogpt/cli.py b/autogpt/cli.py index 75908a1e..a2f0cc0b 100644 --- a/autogpt/cli.py +++ b/autogpt/cli.py @@ -136,6 +136,26 @@ def main( "Please consider upgrading to Python 3.10 or higher.", ) + # TODO: have this directory live outside the repository (e.g. in a user's + # home directory) and have it come in as a command line argument or part of + # the env file. + if workspace_directory is None: + workspace_directory = Path(__file__).parent / "auto_gpt_workspace" + else: + workspace_directory = Path(workspace_directory) + # TODO: pass in the ai_settings file and the env file and have them cloned into + # the workspace directory so we can bind them to the agent. 
+ workspace_directory = Workspace.make_workspace(workspace_directory) + cfg.workspace_path = str(workspace_directory) + + # HACK: doing this here to collect some globals that depend on the workspace. + file_logger_path = workspace_directory / "file_logger.txt" + if not file_logger_path.exists(): + with file_logger_path.open(mode="w", encoding="utf-8") as f: + f.write("File Operation Logger ") + + cfg.file_logger_path = str(file_logger_path) + cfg.set_plugins(scan_plugins(cfg, cfg.debug_mode)) # Create a CommandRegistry instance and scan default folder command_registry = CommandRegistry() @@ -175,26 +195,6 @@ def main( if cfg.debug_mode: logger.typewriter_log("Prompt:", Fore.GREEN, system_prompt) - # TODO: have this directory live outside the repository (e.g. in a user's - # home directory) and have it come in as a command line argument or part of - # the env file. - if workspace_directory is None: - workspace_directory = Path(__file__).parent / "auto_gpt_workspace" - else: - workspace_directory = Path(workspace_directory) - # TODO: pass in the ai_settings file and the env file and have them cloned into - # the workspace directory so we can bind them to the agent. - workspace_directory = Workspace.make_workspace(workspace_directory) - cfg.workspace_path = str(workspace_directory) - - # HACK: doing this here to collect some globals that depend on the workspace. - file_logger_path = workspace_directory / "file_logger.txt" - if not file_logger_path.exists(): - with file_logger_path.open(mode="w", encoding="utf-8") as f: - f.write("File Operation Logger ") - - cfg.file_logger_path = str(file_logger_path) - agent = Agent( ai_name=ai_name, memory=memory, From 89125376ba5345351ce57f1041843e395f8f650c Mon Sep 17 00:00:00 2001 From: scout9ll <36192834+scout9ll@users.noreply.github.com> Date: Mon, 24 Apr 2023 15:07:08 +0800 Subject: [PATCH 110/222] Fixed incorrect comment: Clear memory instead of Redis (#3092) Co-authored-by: liaolin.qiu --- autogpt/memory/local.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/memory/local.py b/autogpt/memory/local.py index 1f1a1a3c..7a31782b 100644 --- a/autogpt/memory/local.py +++ b/autogpt/memory/local.py @@ -82,7 +82,7 @@ class LocalCache(MemoryProviderSingleton): def clear(self) -> str: """ - Clears the redis server. + Clears the data in memory. Returns: A message indicating that the memory has been cleared. 
""" From 794a164098dc78d3d9cba854fbee6b5321566abd Mon Sep 17 00:00:00 2001 From: Soheil Sam Yasrebi <52513082+soheil@users.noreply.github.com> Date: Mon, 24 Apr 2023 00:26:14 -0700 Subject: [PATCH 111/222] handle API timeouts (#3024) --- autogpt/llm_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py index ba7521a4..cc650f21 100644 --- a/autogpt/llm_utils.py +++ b/autogpt/llm_utils.py @@ -5,7 +5,7 @@ from typing import List, Optional import openai from colorama import Fore, Style -from openai.error import APIError, RateLimitError +from openai.error import APIError, RateLimitError, Timeout from autogpt.api_manager import api_manager from autogpt.config import Config @@ -123,7 +123,7 @@ def create_chat_completion( + f"You can read more here: {Fore.CYAN}https://github.com/Significant-Gravitas/Auto-GPT#openai-api-keys-configuration{Fore.RESET}" ) warned_user = True - except APIError as e: + except (APIError, Timeout) as e: if e.http_status != 502: raise if attempt == num_retries - 1: @@ -172,7 +172,7 @@ def create_embedding_with_ada(text) -> list: ) except RateLimitError: pass - except APIError as e: + except (APIError, Timeout) as e: if e.http_status != 502: raise if attempt == num_retries - 1: From 40a75c804ccfe7dec29dd8808ca5c52d0dc9e068 Mon Sep 17 00:00:00 2001 From: Eddie Cohen <31940792+edcohen08@users.noreply.github.com> Date: Mon, 24 Apr 2023 06:33:44 -0400 Subject: [PATCH 112/222] Validate URLs in web commands before execution (#2616) Co-authored-by: Reinier van der Leer --- autogpt/app.py | 3 + autogpt/commands/git_operations.py | 2 + autogpt/commands/web_requests.py | 82 +-------------------- autogpt/commands/web_selenium.py | 2 + autogpt/url_utils/__init__.py | 0 autogpt/url_utils/validators.py | 101 ++++++++++++++++++++++++++ tests/unit/test_browse_scrape_text.py | 12 ++- 7 files changed, 120 insertions(+), 82 deletions(-) create mode 100644 autogpt/url_utils/__init__.py create mode 100644 autogpt/url_utils/validators.py diff --git a/autogpt/app.py b/autogpt/app.py index 237feae2..590f3d50 100644 --- a/autogpt/app.py +++ b/autogpt/app.py @@ -10,6 +10,7 @@ from autogpt.memory import get_memory from autogpt.processing.text import summarize_text from autogpt.prompts.generator import PromptGenerator from autogpt.speech import say_text +from autogpt.url_utils.validators import validate_url CFG = Config() AGENT_MANAGER = AgentManager() @@ -141,6 +142,7 @@ def execute_command( @command( "get_text_summary", "Get text summary", '"url": "", "question": ""' ) +@validate_url def get_text_summary(url: str, question: str) -> str: """Return the results of a Google search @@ -157,6 +159,7 @@ def get_text_summary(url: str, question: str) -> str: @command("get_hyperlinks", "Get text summary", '"url": ""') +@validate_url def get_hyperlinks(url: str) -> Union[str, List[str]]: """Return the results of a Google search diff --git a/autogpt/commands/git_operations.py b/autogpt/commands/git_operations.py index c373b8c0..fb74374c 100644 --- a/autogpt/commands/git_operations.py +++ b/autogpt/commands/git_operations.py @@ -3,6 +3,7 @@ from git.repo import Repo from autogpt.commands.command import command from autogpt.config import Config +from autogpt.url_utils.validators import validate_url CFG = Config() @@ -14,6 +15,7 @@ CFG = Config() CFG.github_username and CFG.github_api_key, "Configure github_username and github_api_key.", ) +@validate_url def clone_repository(repository_url: str, clone_path: str) -> str: """Clone a GitHub 
repository locally. diff --git a/autogpt/commands/web_requests.py b/autogpt/commands/web_requests.py index f3ad0197..60e54252 100644 --- a/autogpt/commands/web_requests.py +++ b/autogpt/commands/web_requests.py @@ -1,15 +1,13 @@ """Browse a webpage and summarize it using the LLM model""" from __future__ import annotations -from urllib.parse import urljoin, urlparse - import requests from bs4 import BeautifulSoup from requests import Response -from requests.compat import urljoin from autogpt.config import Config from autogpt.processing.html import extract_hyperlinks, format_hyperlinks +from autogpt.url_utils.validators import validate_url CFG = Config() @@ -17,71 +15,7 @@ session = requests.Session() session.headers.update({"User-Agent": CFG.user_agent}) -def is_valid_url(url: str) -> bool: - """Check if the URL is valid - - Args: - url (str): The URL to check - - Returns: - bool: True if the URL is valid, False otherwise - """ - try: - result = urlparse(url) - return all([result.scheme, result.netloc]) - except ValueError: - return False - - -def sanitize_url(url: str) -> str: - """Sanitize the URL - - Args: - url (str): The URL to sanitize - - Returns: - str: The sanitized URL - """ - return urljoin(url, urlparse(url).path) - - -def check_local_file_access(url: str) -> bool: - """Check if the URL is a local file - - Args: - url (str): The URL to check - - Returns: - bool: True if the URL is a local file, False otherwise - """ - local_prefixes = [ - "file:///", - "file://localhost/", - "file://localhost", - "http://localhost", - "http://localhost/", - "https://localhost", - "https://localhost/", - "http://2130706433", - "http://2130706433/", - "https://2130706433", - "https://2130706433/", - "http://127.0.0.1/", - "http://127.0.0.1", - "https://127.0.0.1/", - "https://127.0.0.1", - "https://0.0.0.0/", - "https://0.0.0.0", - "http://0.0.0.0/", - "http://0.0.0.0", - "http://0000", - "http://0000/", - "https://0000", - "https://0000/", - ] - return any(url.startswith(prefix) for prefix in local_prefixes) - - +@validate_url def get_response( url: str, timeout: int = 10 ) -> tuple[None, str] | tuple[Response, None]: @@ -99,17 +33,7 @@ def get_response( requests.exceptions.RequestException: If the HTTP request fails """ try: - # Restrict access to local files - if check_local_file_access(url): - raise ValueError("Access to local files is restricted") - - # Most basic check if the URL is valid: - if not url.startswith("http://") and not url.startswith("https://"): - raise ValueError("Invalid URL format") - - sanitized_url = sanitize_url(url) - - response = session.get(sanitized_url, timeout=timeout) + response = session.get(url, timeout=timeout) # Check if the response contains an HTTP error if response.status_code >= 400: diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py index e0e0d70a..8f55c103 100644 --- a/autogpt/commands/web_selenium.py +++ b/autogpt/commands/web_selenium.py @@ -21,6 +21,7 @@ import autogpt.processing.text as summary from autogpt.commands.command import command from autogpt.config import Config from autogpt.processing.html import extract_hyperlinks, format_hyperlinks +from autogpt.url_utils.validators import validate_url FILE_DIR = Path(__file__).parent.parent CFG = Config() @@ -31,6 +32,7 @@ CFG = Config() "Browse Website", '"url": "", "question": ""', ) +@validate_url def browse_website(url: str, question: str) -> tuple[str, WebDriver]: """Browse a website and return the answer and links to the user diff --git a/autogpt/url_utils/__init__.py 
b/autogpt/url_utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/autogpt/url_utils/validators.py b/autogpt/url_utils/validators.py new file mode 100644 index 00000000..8754667c --- /dev/null +++ b/autogpt/url_utils/validators.py @@ -0,0 +1,101 @@ +import functools +from typing import Any, Callable +from urllib.parse import urljoin, urlparse + +from requests.compat import urljoin + + +def validate_url(func: Callable[..., Any]) -> Any: + """The method decorator validate_url is used to validate urls for any command that requires + a url as an arugment""" + + @functools.wraps(func) + def wrapper(url: str, *args, **kwargs) -> Any: + """Check if the URL is valid using a basic check, urllib check, and local file check + + Args: + url (str): The URL to check + + Returns: + the result of the wrapped function + + Raises: + ValueError if the url fails any of the validation tests + """ + # Most basic check if the URL is valid: + if not url.startswith("http://") and not url.startswith("https://"): + raise ValueError("Invalid URL format") + if not is_valid_url(url): + raise ValueError("Missing Scheme or Network location") + # Restrict access to local files + if check_local_file_access(url): + raise ValueError("Access to local files is restricted") + + return func(sanitize_url(url), *args, **kwargs) + + return wrapper + + +def is_valid_url(url: str) -> bool: + """Check if the URL is valid + + Args: + url (str): The URL to check + + Returns: + bool: True if the URL is valid, False otherwise + """ + try: + result = urlparse(url) + return all([result.scheme, result.netloc]) + except ValueError: + return False + + +def sanitize_url(url: str) -> str: + """Sanitize the URL + + Args: + url (str): The URL to sanitize + + Returns: + str: The sanitized URL + """ + return urljoin(url, urlparse(url).path) + + +def check_local_file_access(url: str) -> bool: + """Check if the URL is a local file + + Args: + url (str): The URL to check + + Returns: + bool: True if the URL is a local file, False otherwise + """ + local_prefixes = [ + "file:///", + "file://localhost/", + "file://localhost", + "http://localhost", + "http://localhost/", + "https://localhost", + "https://localhost/", + "http://2130706433", + "http://2130706433/", + "https://2130706433", + "https://2130706433/", + "http://127.0.0.1/", + "http://127.0.0.1", + "https://127.0.0.1/", + "https://127.0.0.1", + "https://0.0.0.0/", + "https://0.0.0.0", + "http://0.0.0.0/", + "http://0.0.0.0", + "http://0000", + "http://0000/", + "https://0000", + "https://0000/", + ] + return any(url.startswith(prefix) for prefix in local_prefixes) diff --git a/tests/unit/test_browse_scrape_text.py b/tests/unit/test_browse_scrape_text.py index 1a36e19b..c6ce4d39 100644 --- a/tests/unit/test_browse_scrape_text.py +++ b/tests/unit/test_browse_scrape_text.py @@ -1,5 +1,6 @@ # Generated by CodiumAI +import pytest import requests from autogpt.commands.web_requests import scrape_text @@ -58,9 +59,14 @@ class TestScrapeText: url = "http://www.example.com" assert scrape_text(url) == expected_text - # Tests that the function returns an error message when an invalid or unreachable + # Tests that an error is raised when an invalid url is provided. + def test_invalid_url(self): + url = "invalidurl.com" + pytest.raises(ValueError, scrape_text, url) + + # Tests that the function returns an error message when an unreachable # url is provided. 
- def test_invalid_url(self, mocker): + def test_unreachable_url(self, mocker): # Mock the requests.get() method to raise an exception mocker.patch( "requests.Session.get", side_effect=requests.exceptions.RequestException @@ -68,7 +74,7 @@ class TestScrapeText: # Call the function with an invalid URL and assert that it returns an error # message - url = "http://www.invalidurl.com" + url = "http://thiswebsitedoesnotexist.net/" error_message = scrape_text(url) assert "Error:" in error_message From f8dfedf1c63fe76f8aa39edcfc6677e99957ef67 Mon Sep 17 00:00:00 2001 From: Andres Caicedo <73312784+AndresCdo@users.noreply.github.com> Date: Mon, 24 Apr 2023 14:55:49 +0200 Subject: [PATCH 113/222] Add function and class descriptions to tests (#2715) Co-authored-by: Reinier van der Leer --- tests/browse_tests.py | 3 + tests/context.py | 1 + tests/integration/memory_tests.py | 7 ++- tests/integration/milvus_memory_tests.py | 8 +-- tests/integration/weaviate_memory_tests.py | 8 ++- tests/milvus_memory_test.py | 3 +- tests/mocks/mock_commands.py | 1 + tests/test_commands.py | 62 ++++++++++++++----- tests/test_image_gen.py | 4 ++ tests/test_json_parser.py | 12 ++-- tests/test_token_counter.py | 9 +++ tests/unit/json_tests.py | 13 ++-- .../unit/models/test_base_open_api_plugin.py | 8 +++ tests/unit/test_browse_scrape_links.py | 17 ++--- tests/unit/test_browse_scrape_text.py | 15 ++--- tests/unit/test_chat.py | 10 +-- tests/unit/test_commands.py | 3 +- tests/unit/test_plugins.py | 18 ++++++ tests/unit/test_spinner.py | 12 ++-- 19 files changed, 147 insertions(+), 67 deletions(-) diff --git a/tests/browse_tests.py b/tests/browse_tests.py index f896e7dd..90692d88 100644 --- a/tests/browse_tests.py +++ b/tests/browse_tests.py @@ -10,7 +10,10 @@ from browse import extract_hyperlinks class TestBrowseLinks(unittest.TestCase): + """Unit tests for the browse module functions that extract hyperlinks.""" + def test_extract_hyperlinks(self): + """Test the extract_hyperlinks function with a simple HTML body.""" body = """ Google diff --git a/tests/context.py b/tests/context.py index cef969db..0d531468 100644 --- a/tests/context.py +++ b/tests/context.py @@ -1,6 +1,7 @@ import os import sys +# Add the scripts directory to the path so that we can import the browse module. 
sys.path.insert( 0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../scripts")) ) diff --git a/tests/integration/memory_tests.py b/tests/integration/memory_tests.py index eead2da1..66c23314 100644 --- a/tests/integration/memory_tests.py +++ b/tests/integration/memory_tests.py @@ -9,10 +9,11 @@ from autogpt.memory.local import LocalCache class TestLocalCache(unittest.TestCase): - def random_string(self, length): + def generate_random_string(self, length): return "".join(random.choice(string.ascii_letters) for _ in range(length)) def setUp(self): + """Set up the test environment for the LocalCache tests.""" cfg = cfg = Config() self.cache = LocalCache(cfg) self.cache.clear() @@ -24,15 +25,15 @@ class TestLocalCache(unittest.TestCase): "The cake is a lie, but the pie is always true", "ChatGPT is an advanced AI model for conversation", ] - for text in self.example_texts: self.cache.add(text) # Add some random strings to test noise for _ in range(5): - self.cache.add(self.random_string(10)) + self.cache.add(self.generate_random_string(10)) def test_get_relevant(self): + """Test getting relevant texts from the cache.""" query = "I'm interested in artificial intelligence and NLP" k = 3 relevant_texts = self.cache.get_relevant(query, k) diff --git a/tests/integration/milvus_memory_tests.py b/tests/integration/milvus_memory_tests.py index ec38bf2f..c4b97d34 100644 --- a/tests/integration/milvus_memory_tests.py +++ b/tests/integration/milvus_memory_tests.py @@ -10,14 +10,12 @@ from autogpt.memory.milvus import MilvusMemory try: class TestMilvusMemory(unittest.TestCase): - """Tests for the MilvusMemory class.""" + """Unit tests for the MilvusMemory class.""" - def random_string(self, length: int) -> str: - """Generate a random string of the given length.""" + def generate_random_string(self, length: int) -> str: return "".join(random.choice(string.ascii_letters) for _ in range(length)) def setUp(self) -> None: - """Set up the test environment.""" cfg = Config() cfg.milvus_addr = "localhost:19530" self.memory = MilvusMemory(cfg) @@ -36,7 +34,7 @@ try: # Add some random strings to test noise for _ in range(5): - self.memory.add(self.random_string(10)) + self.memory.add(self.generate_random_string(10)) def test_get_relevant(self) -> None: """Test getting relevant texts from the cache.""" diff --git a/tests/integration/weaviate_memory_tests.py b/tests/integration/weaviate_memory_tests.py index 5448b79e..f4085775 100644 --- a/tests/integration/weaviate_memory_tests.py +++ b/tests/integration/weaviate_memory_tests.py @@ -16,6 +16,7 @@ class TestWeaviateMemory(unittest.TestCase): @classmethod def setUpClass(cls): + """Set up the test environment for the WeaviateMemory tests.""" # only create the connection to weaviate once cls.cfg = Config() @@ -47,6 +48,7 @@ class TestWeaviateMemory(unittest.TestCase): """ def setUp(self): + """Set up the test environment for the WeaviateMemory tests.""" try: self.client.schema.delete_class(self.index) except: @@ -55,6 +57,7 @@ class TestWeaviateMemory(unittest.TestCase): self.memory = WeaviateMemory(self.cfg) def test_add(self): + """Test adding a text to the cache""" doc = "You are a Titan name Thanos and you are looking for the Infinity Stones" self.memory.add(doc) result = self.client.query.get(self.index, ["raw_text"]).do() @@ -64,8 +67,9 @@ class TestWeaviateMemory(unittest.TestCase): self.assertEqual(actual[0]["raw_text"], doc) def test_get(self): + """Test getting a text from the cache""" doc = "You are an Avenger and swore to defend the Galaxy from a 
menace called Thanos" - + # add the document to the cache with self.client.batch as batch: batch.add_data_object( uuid=get_valid_uuid(uuid4()), @@ -82,6 +86,7 @@ class TestWeaviateMemory(unittest.TestCase): self.assertEqual(actual[0], doc) def test_get_stats(self): + """Test getting the stats of the cache""" docs = [ "You are now about to count the number of docs in this index", "And then you about to find out if you can count correctly", @@ -96,6 +101,7 @@ class TestWeaviateMemory(unittest.TestCase): self.assertEqual(stats["count"], 2) def test_clear(self): + """Test clearing the cache""" docs = [ "Shame this is the last test for this class", "Testing is fun when someone else is doing it", diff --git a/tests/milvus_memory_test.py b/tests/milvus_memory_test.py index ba1e4fc7..46010f7e 100644 --- a/tests/milvus_memory_test.py +++ b/tests/milvus_memory_test.py @@ -8,7 +8,8 @@ try: from autogpt.memory.milvus import MilvusMemory def mock_config() -> dict: - """Mock the Config class""" + """Mock the config object for testing purposes.""" + # Return a mock config object with the required attributes return type( "MockConfig", (object,), diff --git a/tests/mocks/mock_commands.py b/tests/mocks/mock_commands.py index d64284bc..d5bf5df0 100644 --- a/tests/mocks/mock_commands.py +++ b/tests/mocks/mock_commands.py @@ -3,4 +3,5 @@ from autogpt.commands.command import command @command("function_based", "Function-based test command") def function_based(arg1: int, arg2: str) -> str: + """A function-based test command that returns a string with the two arguments separated by a dash.""" return f"{arg1} - {arg2}" diff --git a/tests/test_commands.py b/tests/test_commands.py index 8a7771f6..0961d0af 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -9,41 +9,55 @@ from autogpt.commands.command import Command, CommandRegistry class TestCommand: + """Test cases for the Command class.""" + @staticmethod - def example_function(arg1: int, arg2: str) -> str: + def example_command_method(arg1: int, arg2: str) -> str: + """Example function for testing the Command class.""" + # This function is static because it is not used by any other test cases. return f"{arg1} - {arg2}" def test_command_creation(self): + """Test that a Command object can be created with the correct attributes.""" cmd = Command( - name="example", description="Example command", method=self.example_function + name="example", + description="Example command", + method=self.example_command_method, ) assert cmd.name == "example" assert cmd.description == "Example command" - assert cmd.method == self.example_function + assert cmd.method == self.example_command_method assert cmd.signature == "(arg1: int, arg2: str) -> str" def test_command_call(self): + """Test that Command(*args) calls and returns the result of method(*args).""" + # Create a Command object with the example_command_method. 
cmd = Command( - name="example", description="Example command", method=self.example_function + name="example", + description="Example command", + method=self.example_command_method, ) - result = cmd(arg1=1, arg2="test") assert result == "1 - test" def test_command_call_with_invalid_arguments(self): + """Test that calling a Command object with invalid arguments raises a TypeError.""" cmd = Command( - name="example", description="Example command", method=self.example_function + name="example", + description="Example command", + method=self.example_command_method, ) - with pytest.raises(TypeError): cmd(arg1="invalid", does_not_exist="test") def test_command_default_signature(self): + """Test that the default signature is generated correctly.""" cmd = Command( - name="example", description="Example command", method=self.example_function + name="example", + description="Example command", + method=self.example_command_method, ) - assert cmd.signature == "(arg1: int, arg2: str) -> str" def test_command_custom_signature(self): @@ -51,7 +65,7 @@ class TestCommand: cmd = Command( name="example", description="Example command", - method=self.example_function, + method=self.example_command_method, signature=custom_signature, ) @@ -60,14 +74,16 @@ class TestCommand: class TestCommandRegistry: @staticmethod - def example_function(arg1: int, arg2: str) -> str: + def example_command_method(arg1: int, arg2: str) -> str: return f"{arg1} - {arg2}" def test_register_command(self): """Test that a command can be registered to the registry.""" registry = CommandRegistry() cmd = Command( - name="example", description="Example command", method=self.example_function + name="example", + description="Example command", + method=self.example_command_method, ) registry.register(cmd) @@ -79,7 +95,9 @@ class TestCommandRegistry: """Test that a command can be unregistered from the registry.""" registry = CommandRegistry() cmd = Command( - name="example", description="Example command", method=self.example_function + name="example", + description="Example command", + method=self.example_command_method, ) registry.register(cmd) @@ -91,7 +109,9 @@ class TestCommandRegistry: """Test that a command can be retrieved from the registry.""" registry = CommandRegistry() cmd = Command( - name="example", description="Example command", method=self.example_function + name="example", + description="Example command", + method=self.example_command_method, ) registry.register(cmd) @@ -110,7 +130,9 @@ class TestCommandRegistry: """Test that a command can be called through the registry.""" registry = CommandRegistry() cmd = Command( - name="example", description="Example command", method=self.example_function + name="example", + description="Example command", + method=self.example_command_method, ) registry.register(cmd) @@ -129,7 +151,9 @@ class TestCommandRegistry: """Test that the command prompt is correctly formatted.""" registry = CommandRegistry() cmd = Command( - name="example", description="Example command", method=self.example_function + name="example", + description="Example command", + method=self.example_command_method, ) registry.register(cmd) @@ -152,7 +176,11 @@ class TestCommandRegistry: ) def test_import_temp_command_file_module(self, tmp_path): - """Test that the registry can import a command plugins module from a temp file.""" + """ + Test that the registry can import a command plugins module from a temp file. + Args: + tmp_path (pathlib.Path): Path to a temporary directory. 
+ """ registry = CommandRegistry() # Create a temp command file diff --git a/tests/test_image_gen.py b/tests/test_image_gen.py index d29d5aa7..23332ee0 100644 --- a/tests/test_image_gen.py +++ b/tests/test_image_gen.py @@ -13,6 +13,7 @@ from tests.utils import requires_api_key def lst(txt): + """Extract the file path from the output of `generate_image()`""" return Path(txt.split(":")[1].strip()) @@ -30,6 +31,7 @@ class TestImageGen(unittest.TestCase): @requires_api_key("OPENAI_API_KEY") def test_dalle(self): + """Test DALL-E image generation.""" self.config.image_provider = "dalle" # Test using size 256 @@ -47,6 +49,7 @@ class TestImageGen(unittest.TestCase): @requires_api_key("HUGGINGFACE_API_TOKEN") def test_huggingface(self): + """Test HuggingFace image generation.""" self.config.image_provider = "huggingface" # Test usin SD 1.4 model and size 512 @@ -65,6 +68,7 @@ class TestImageGen(unittest.TestCase): image_path.unlink() def test_sd_webui(self): + """Test SD WebUI image generation.""" self.config.image_provider = "sd_webui" return diff --git a/tests/test_json_parser.py b/tests/test_json_parser.py index 41c90a6f..571ee5fa 100644 --- a/tests/test_json_parser.py +++ b/tests/test_json_parser.py @@ -6,32 +6,32 @@ from autogpt.json_utils.json_fix_llm import fix_and_parse_json class TestParseJson(unittest.TestCase): def test_valid_json(self): - # Test that a valid JSON string is parsed correctly + """Test that a valid JSON string is parsed correctly.""" json_str = '{"name": "John", "age": 30, "city": "New York"}' obj = fix_and_parse_json(json_str) self.assertEqual(obj, {"name": "John", "age": 30, "city": "New York"}) def test_invalid_json_minor(self): - # Test that an invalid JSON string can be fixed with gpt + """Test that an invalid JSON string can be fixed with gpt""" json_str = '{"name": "John", "age": 30, "city": "New York",}' with self.assertRaises(Exception): fix_and_parse_json(json_str, try_to_fix_with_gpt=False) def test_invalid_json_major_with_gpt(self): - # Test that an invalid JSON string raises an error when try_to_fix_with_gpt is False + """Test that an invalid JSON string raises an error when try_to_fix_with_gpt is False""" json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END' with self.assertRaises(Exception): fix_and_parse_json(json_str, try_to_fix_with_gpt=False) def test_invalid_json_major_without_gpt(self): - # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False + """Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False""" json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END' # Assert that this raises an exception: with self.assertRaises(Exception): fix_and_parse_json(json_str, try_to_fix_with_gpt=False) def test_invalid_json_leading_sentence_with_gpt(self): - # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False + """Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False""" json_str = """I suggest we start by browsing the repository to find any issues that we can fix. 
{ @@ -69,7 +69,7 @@ class TestParseJson(unittest.TestCase): ) def test_invalid_json_leading_sentence_with_gpt(self): - # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False + """Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False""" json_str = """I will first need to browse the repository (https://github.com/Torantulino/Auto-GPT) and identify any potential bugs that need fixing. I will use the "browse_website" command for this. { diff --git a/tests/test_token_counter.py b/tests/test_token_counter.py index 6d7ae016..f7c84672 100644 --- a/tests/test_token_counter.py +++ b/tests/test_token_counter.py @@ -20,9 +20,11 @@ class TestTokenCounter(unittest.TestCase): self.assertEqual(count_message_tokens(messages), 17) def test_count_message_tokens_empty_input(self): + # Empty input should return 3 tokens self.assertEqual(count_message_tokens([]), 3) def test_count_message_tokens_invalid_model(self): + # Invalid model should raise a KeyError messages = [ {"role": "user", "content": "Hello"}, {"role": "assistant", "content": "Hi there!"}, @@ -38,15 +40,20 @@ class TestTokenCounter(unittest.TestCase): self.assertEqual(count_message_tokens(messages, model="gpt-4-0314"), 15) def test_count_string_tokens(self): + """Test that the string tokens are counted correctly.""" + string = "Hello, world!" self.assertEqual( count_string_tokens(string, model_name="gpt-3.5-turbo-0301"), 4 ) def test_count_string_tokens_empty_input(self): + """Test that the string tokens are counted correctly.""" + self.assertEqual(count_string_tokens("", model_name="gpt-3.5-turbo-0301"), 0) def test_count_message_tokens_invalid_model(self): + # Invalid model should raise a NotImplementedError messages = [ {"role": "user", "content": "Hello"}, {"role": "assistant", "content": "Hi there!"}, @@ -55,6 +62,8 @@ class TestTokenCounter(unittest.TestCase): count_message_tokens(messages, model="invalid_model") def test_count_string_tokens_gpt_4(self): + """Test that the string tokens are counted correctly.""" + string = "Hello, world!" 
self.assertEqual(count_string_tokens(string, model_name="gpt-4-0314"), 4) diff --git a/tests/unit/json_tests.py b/tests/unit/json_tests.py index 25c38337..4ef46710 100644 --- a/tests/unit/json_tests.py +++ b/tests/unit/json_tests.py @@ -5,13 +5,13 @@ from autogpt.json_utils.json_fix_llm import fix_and_parse_json class TestParseJson(unittest.TestCase): def test_valid_json(self): - # Test that a valid JSON string is parsed correctly + """Test that a valid JSON string is parsed correctly.""" json_str = '{"name": "John", "age": 30, "city": "New York"}' obj = fix_and_parse_json(json_str) self.assertEqual(obj, {"name": "John", "age": 30, "city": "New York"}) def test_invalid_json_minor(self): - # Test that an invalid JSON string can be fixed with gpt + """Test that an invalid JSON string can be fixed with gpt.""" json_str = '{"name": "John", "age": 30, "city": "New York",}' self.assertEqual( fix_and_parse_json(json_str, try_to_fix_with_gpt=False), @@ -19,7 +19,7 @@ class TestParseJson(unittest.TestCase): ) def test_invalid_json_major_with_gpt(self): - # Test that an invalid JSON string raises an error when try_to_fix_with_gpt is False + """Test that an invalid JSON string raises an error when try_to_fix_with_gpt is False.""" json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END' self.assertEqual( fix_and_parse_json(json_str, try_to_fix_with_gpt=True), @@ -27,14 +27,15 @@ class TestParseJson(unittest.TestCase): ) def test_invalid_json_major_without_gpt(self): - # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False + """Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False.""" json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END' # Assert that this raises an exception: with self.assertRaises(Exception): fix_and_parse_json(json_str, try_to_fix_with_gpt=False) def test_invalid_json_leading_sentence_with_gpt(self): - # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False + """Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False.""" + json_str = """I suggest we start by browsing the repository to find any issues that we can fix. { @@ -72,7 +73,7 @@ class TestParseJson(unittest.TestCase): ) def test_invalid_json_leading_sentence_with_gpt(self): - # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False + """Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False.""" json_str = """I will first need to browse the repository (https://github.com/Torantulino/Auto-GPT) and identify any potential bugs that need fixing. I will use the "browse_website" command for this. 
{ diff --git a/tests/unit/models/test_base_open_api_plugin.py b/tests/unit/models/test_base_open_api_plugin.py index 950a3266..456c74c7 100644 --- a/tests/unit/models/test_base_open_api_plugin.py +++ b/tests/unit/models/test_base_open_api_plugin.py @@ -10,11 +10,14 @@ from autogpt.models.base_open_ai_plugin import ( class DummyPlugin(BaseOpenAIPlugin): + """A dummy plugin for testing purposes.""" + pass @pytest.fixture def dummy_plugin(): + """A dummy plugin for testing purposes.""" manifests_specs_clients = { "manifest": { "name_for_model": "Dummy", @@ -28,22 +31,27 @@ def dummy_plugin(): def test_dummy_plugin_inheritance(dummy_plugin): + """Test that the DummyPlugin class inherits from the BaseOpenAIPlugin class.""" assert isinstance(dummy_plugin, BaseOpenAIPlugin) def test_dummy_plugin_name(dummy_plugin): + """Test that the DummyPlugin class has the correct name.""" assert dummy_plugin._name == "Dummy" def test_dummy_plugin_version(dummy_plugin): + """Test that the DummyPlugin class has the correct version.""" assert dummy_plugin._version == "1.0" def test_dummy_plugin_description(dummy_plugin): + """Test that the DummyPlugin class has the correct description.""" assert dummy_plugin._description == "A dummy plugin for testing purposes" def test_dummy_plugin_default_methods(dummy_plugin): + """Test that the DummyPlugin class has the correct default methods.""" assert not dummy_plugin.can_handle_on_response() assert not dummy_plugin.can_handle_post_prompt() assert not dummy_plugin.can_handle_on_planning() diff --git a/tests/unit/test_browse_scrape_links.py b/tests/unit/test_browse_scrape_links.py index 0a3340e7..3b7442eb 100644 --- a/tests/unit/test_browse_scrape_links.py +++ b/tests/unit/test_browse_scrape_links.py @@ -38,8 +38,11 @@ requests and parse HTML content, respectively. class TestScrapeLinks: - # Tests that the function returns a list of formatted hyperlinks when - # provided with a valid url that returns a webpage with hyperlinks. + """ + Tests that the function returns a list of formatted hyperlinks when + provided with a valid url that returns a webpage with hyperlinks. + """ + def test_valid_url_with_hyperlinks(self): url = "https://www.google.com" result = scrape_links(url) @@ -47,8 +50,8 @@ class TestScrapeLinks: assert isinstance(result, list) assert isinstance(result[0], str) - # Tests that the function returns correctly formatted hyperlinks when given a valid url. def test_valid_url(self, mocker): + """Test that the function returns correctly formatted hyperlinks when given a valid url.""" # Mock the requests.get() function to return a response with sample HTML containing hyperlinks mock_response = mocker.Mock() mock_response.status_code = 200 @@ -63,8 +66,8 @@ class TestScrapeLinks: # Assert that the function returns correctly formatted hyperlinks assert result == ["Google (https://www.google.com)"] - # Tests that the function returns "error" when given an invalid url. def test_invalid_url(self, mocker): + """Test that the function returns "error" when given an invalid url.""" # Mock the requests.get() function to return an HTTP error response mock_response = mocker.Mock() mock_response.status_code = 404 @@ -76,8 +79,8 @@ class TestScrapeLinks: # Assert that the function returns "error" assert "Error:" in result - # Tests that the function returns an empty list when the html contains no hyperlinks. 
def test_no_hyperlinks(self, mocker): + """Test that the function returns an empty list when the html contains no hyperlinks.""" # Mock the requests.get() function to return a response with sample HTML containing no hyperlinks mock_response = mocker.Mock() mock_response.status_code = 200 @@ -90,10 +93,8 @@ class TestScrapeLinks: # Assert that the function returns an empty list assert result == [] - # Tests that scrape_links() correctly extracts and formats hyperlinks from - # a sample HTML containing a few hyperlinks. def test_scrape_links_with_few_hyperlinks(self, mocker): - # Mock the requests.get() function to return a response with a sample HTML containing hyperlinks + """Test that scrape_links() correctly extracts and formats hyperlinks from a sample HTML containing a few hyperlinks.""" mock_response = mocker.Mock() mock_response.status_code = 200 mock_response.text = """ diff --git a/tests/unit/test_browse_scrape_text.py b/tests/unit/test_browse_scrape_text.py index c6ce4d39..cda45e11 100644 --- a/tests/unit/test_browse_scrape_text.py +++ b/tests/unit/test_browse_scrape_text.py @@ -42,8 +42,8 @@ Additional aspects: class TestScrapeText: - # Tests that scrape_text() returns the expected text when given a valid URL. def test_scrape_text_with_valid_url(self, mocker): + """Tests that scrape_text() returns the expected text when given a valid URL.""" # Mock the requests.get() method to return a response with expected text expected_text = "This is some sample text" mock_response = mocker.Mock() @@ -59,14 +59,13 @@ class TestScrapeText: url = "http://www.example.com" assert scrape_text(url) == expected_text - # Tests that an error is raised when an invalid url is provided. def test_invalid_url(self): + """Tests that an error is raised when an invalid url is provided.""" url = "invalidurl.com" pytest.raises(ValueError, scrape_text, url) - # Tests that the function returns an error message when an unreachable - # url is provided. def test_unreachable_url(self, mocker): + """Test that scrape_text returns an error message when an invalid or unreachable url is provided.""" # Mock the requests.get() method to raise an exception mocker.patch( "requests.Session.get", side_effect=requests.exceptions.RequestException @@ -78,9 +77,8 @@ class TestScrapeText: error_message = scrape_text(url) assert "Error:" in error_message - # Tests that the function returns an empty string when the html page contains no - # text to be scraped. def test_no_text(self, mocker): + """Test that scrape_text returns an empty string when the html page contains no text to be scraped.""" # Mock the requests.get() method to return a response with no text mock_response = mocker.Mock() mock_response.status_code = 200 @@ -91,9 +89,8 @@ class TestScrapeText: url = "http://www.example.com" assert scrape_text(url) == "" - # Tests that the function returns an error message when the response status code is - # an http error (>=400). def test_http_error(self, mocker): + """Test that scrape_text returns an error message when the response status code is an http error (>=400).""" # Mock the requests.get() method to return a response with a 404 status code mocker.patch("requests.Session.get", return_value=mocker.Mock(status_code=404)) @@ -103,8 +100,8 @@ class TestScrapeText: # Check that the function returns an error message assert result == "Error: HTTP 404 error" - # Tests that scrape_text() properly handles HTML tags. 
     def test_scrape_text_with_html_tags(self, mocker):
+        """Test that scrape_text() properly handles HTML tags."""
         # Create a mock response object with HTML containing tags
         html = "<html><body><p>This is <b>bold</b> text.</p></body></html>
" mock_response = mocker.Mock() diff --git a/tests/unit/test_chat.py b/tests/unit/test_chat.py index 774f4103..7b656d73 100644 --- a/tests/unit/test_chat.py +++ b/tests/unit/test_chat.py @@ -7,19 +7,21 @@ from autogpt.chat import create_chat_message, generate_context class TestChat(unittest.TestCase): - # Tests that the function returns a dictionary with the correct keys and values when valid strings are provided for role and content. + """Test the chat module functions.""" + def test_happy_path_role_content(self): + """Test that the function returns a dictionary with the correct keys and values when valid strings are provided for role and content.""" result = create_chat_message("system", "Hello, world!") self.assertEqual(result, {"role": "system", "content": "Hello, world!"}) - # Tests that the function returns a dictionary with the correct keys and values when empty strings are provided for role and content. def test_empty_role_content(self): + """Test that the function returns a dictionary with the correct keys and values when empty strings are provided for role and content.""" result = create_chat_message("", "") self.assertEqual(result, {"role": "", "content": ""}) - # Tests the behavior of the generate_context function when all input parameters are empty. @patch("time.strftime") def test_generate_context_empty_inputs(self, mock_strftime): + """Test the behavior of the generate_context function when all input parameters are empty.""" # Mock the time.strftime function to return a fixed value mock_strftime.return_value = "Sat Apr 15 00:00:00 2023" # Arrange @@ -50,8 +52,8 @@ class TestChat(unittest.TestCase): ) self.assertEqual(result, expected_result) - # Tests that the function successfully generates a current_context given valid inputs. def test_generate_context_valid_inputs(self): + """Test that the function successfully generates a current_context given valid inputs.""" # Given prompt = "What is your favorite color?" relevant_memory = "You once painted your room blue." 
diff --git a/tests/unit/test_commands.py b/tests/unit/test_commands.py index f66fe4d9..a749cc6d 100644 --- a/tests/unit/test_commands.py +++ b/tests/unit/test_commands.py @@ -11,7 +11,8 @@ from tests.utils import requires_api_key @pytest.mark.integration_test @requires_api_key("OPENAI_API_KEY") def test_make_agent() -> None: - """Test the make_agent command""" + """Test that an agent can be created""" + # Use the mock agent manager to avoid creating a real agent with patch("openai.ChatCompletion.create") as mock: obj = MagicMock() obj.response.choices[0].messages[0].content = "Test message" diff --git a/tests/unit/test_plugins.py b/tests/unit/test_plugins.py index c8bda3cf..de1653e4 100644 --- a/tests/unit/test_plugins.py +++ b/tests/unit/test_plugins.py @@ -21,6 +21,8 @@ def test_inspect_zip_for_modules(): @pytest.fixture def mock_config_denylist_allowlist_check(): class MockConfig: + """Mock config object for testing the denylist_allowlist_check function""" + plugins_denylist = ["BadPlugin"] plugins_allowlist = ["GoodPlugin"] @@ -30,6 +32,7 @@ def mock_config_denylist_allowlist_check(): def test_denylist_allowlist_check_denylist( mock_config_denylist_allowlist_check, monkeypatch ): + # Test that the function returns False when the plugin is in the denylist monkeypatch.setattr("builtins.input", lambda _: "y") assert not denylist_allowlist_check( "BadPlugin", mock_config_denylist_allowlist_check @@ -39,6 +42,7 @@ def test_denylist_allowlist_check_denylist( def test_denylist_allowlist_check_allowlist( mock_config_denylist_allowlist_check, monkeypatch ): + # Test that the function returns True when the plugin is in the allowlist monkeypatch.setattr("builtins.input", lambda _: "y") assert denylist_allowlist_check("GoodPlugin", mock_config_denylist_allowlist_check) @@ -46,6 +50,7 @@ def test_denylist_allowlist_check_allowlist( def test_denylist_allowlist_check_user_input_yes( mock_config_denylist_allowlist_check, monkeypatch ): + # Test that the function returns True when the user inputs "y" monkeypatch.setattr("builtins.input", lambda _: "y") assert denylist_allowlist_check( "UnknownPlugin", mock_config_denylist_allowlist_check @@ -55,6 +60,7 @@ def test_denylist_allowlist_check_user_input_yes( def test_denylist_allowlist_check_user_input_no( mock_config_denylist_allowlist_check, monkeypatch ): + # Test that the function returns False when the user inputs "n" monkeypatch.setattr("builtins.input", lambda _: "n") assert not denylist_allowlist_check( "UnknownPlugin", mock_config_denylist_allowlist_check @@ -64,6 +70,7 @@ def test_denylist_allowlist_check_user_input_no( def test_denylist_allowlist_check_user_input_invalid( mock_config_denylist_allowlist_check, monkeypatch ): + # Test that the function returns False when the user inputs an invalid value monkeypatch.setattr("builtins.input", lambda _: "invalid") assert not denylist_allowlist_check( "UnknownPlugin", mock_config_denylist_allowlist_check @@ -72,6 +79,8 @@ def test_denylist_allowlist_check_user_input_invalid( @pytest.fixture def config_with_plugins(): + """Mock config object for testing the scan_plugins function""" + # Test that the function returns the correct number of plugins cfg = Config() cfg.plugins_dir = PLUGINS_TEST_DIR cfg.plugins_openai = ["https://weathergpt.vercel.app/"] @@ -80,7 +89,11 @@ def config_with_plugins(): @pytest.fixture def mock_config_openai_plugin(): + """Mock config object for testing the scan_plugins function""" + class MockConfig: + """Mock config object for testing the scan_plugins function""" + 
plugins_dir = PLUGINS_TEST_DIR plugins_openai = [PLUGIN_TEST_OPENAI] plugins_denylist = ["AutoGPTPVicuna"] @@ -90,12 +103,16 @@ def mock_config_openai_plugin(): def test_scan_plugins_openai(mock_config_openai_plugin): + # Test that the function returns the correct number of plugins result = scan_plugins(mock_config_openai_plugin, debug=True) assert len(result) == 1 @pytest.fixture def mock_config_generic_plugin(): + """Mock config object for testing the scan_plugins function""" + + # Test that the function returns the correct number of plugins class MockConfig: plugins_dir = PLUGINS_TEST_DIR plugins_openai = [] @@ -106,5 +123,6 @@ def mock_config_generic_plugin(): def test_scan_plugins_generic(mock_config_generic_plugin): + # Test that the function returns the correct number of plugins result = scan_plugins(mock_config_generic_plugin, debug=True) assert len(result) == 1 diff --git a/tests/unit/test_spinner.py b/tests/unit/test_spinner.py index ec75a687..16106f42 100644 --- a/tests/unit/test_spinner.py +++ b/tests/unit/test_spinner.py @@ -30,28 +30,29 @@ PLEASE_WAIT = "Please wait..." class TestSpinner(unittest.TestCase): - # Tests that the spinner initializes with default values. def test_spinner_initializes_with_default_values(self): + """Tests that the spinner initializes with default values.""" with Spinner() as spinner: self.assertEqual(spinner.message, "Loading...") self.assertEqual(spinner.delay, 0.1) - # Tests that the spinner initializes with custom message and delay values. def test_spinner_initializes_with_custom_values(self): + """Tests that the spinner initializes with custom message and delay values.""" with Spinner(message=PLEASE_WAIT, delay=0.2) as spinner: self.assertEqual(spinner.message, PLEASE_WAIT) self.assertEqual(spinner.delay, 0.2) - # Tests that the spinner starts spinning and stops spinning without errors. + # def test_spinner_stops_spinning(self): + """Tests that the spinner starts spinning and stops spinning without errors.""" with Spinner() as spinner: time.sleep(1) spinner.update_message(ALMOST_DONE_MESSAGE) time.sleep(1) self.assertFalse(spinner.running) - # Tests that the spinner message can be updated while the spinner is running and the spinner continues spinning. def test_spinner_updates_message_and_still_spins(self): + """Tests that the spinner message can be updated while the spinner is running and the spinner continues spinning.""" with Spinner() as spinner: self.assertTrue(spinner.running) time.sleep(1) @@ -60,9 +61,8 @@ class TestSpinner(unittest.TestCase): self.assertEqual(spinner.message, ALMOST_DONE_MESSAGE) self.assertFalse(spinner.running) - # Tests that the spinner can be used as a context manager. 
- def test_spinner_can_be_used_as_context_manager(self): + """Tests that the spinner can be used as a context manager.""" with Spinner() as spinner: self.assertTrue(spinner.running) self.assertFalse(spinner.running) From 9c60eecce68f9718e48854ec51d7fda7af9e8387 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Mon, 24 Apr 2023 15:27:53 +0200 Subject: [PATCH 114/222] Improve docker setup & config (#1843) * Improve docker setup & config * fix(browsing): Selenium needs access to home directory * fix(docker): allow overriding memory backend settings * simplify Dockerfile and docker-compose config * add .dockerignore * adjust Docker CI with release build type arg * replace Chrome by Chromium in devcontainer * update docs * update bulletin * use preinstalled chromedriver in web_selenium.py * update installation.md * fix code blocks for mkdocs * fix links to docs --- .devcontainer/Dockerfile | 8 ++- .dockerignore | 8 +++ .github/workflows/docker-image.yml | 2 +- BULLETIN.md | 5 +- Dockerfile | 37 ++++++------- README.md | 40 +++++++------- autogpt/commands/web_selenium.py | 7 ++- docker-compose.yml | 6 ++- docs/installation.md | 85 ++++++++++++++++-------------- docs/usage.md | 40 ++++++++------ 10 files changed, 133 insertions(+), 105 deletions(-) create mode 100644 .dockerignore diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 02f580a0..87ca39d2 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,6 +1,6 @@ # [Choice] Python version (use -bullseye variants on local arm64/Apple Silicon): 3, 3.10, 3-bullseye, 3.10-bullseye, 3-buster, 3.10-buster ARG VARIANT=3-bullseye -FROM --platform=linux/amd64 python:3.10 +FROM python:3.10 RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ # Remove imagemagick due to https://security-tracker.debian.org/tracker/CVE-2019-10131 @@ -10,10 +10,8 @@ RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ # They are installed by the base image (python) which does not have the patch. RUN python3 -m pip install --upgrade setuptools -# Install Chrome for web browsing -RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ - && curl -sSL https://dl.google.com/linux/direct/google-chrome-stable_current_$(dpkg --print-architecture).deb -o /tmp/chrome.deb \ - && apt-get -y install /tmp/chrome.deb +# Install Chromium for web browsing +RUN apt-get install -y chromium-driver # [Optional] If your pip requirements rarely change, uncomment this section to add them to the image. # COPY requirements.txt /tmp/pip-tmp/ diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..9ac55109 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,8 @@ +.* +*.template +*.yaml +*.yml + +*.md +*.png +!BULLETIN.md diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index 2fcf6401..d63ff1f4 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -18,4 +18,4 @@ jobs: steps: - uses: actions/checkout@v3 - name: Build the Docker image - run: docker build . --file Dockerfile --tag autogpt:$(date +%s) + run: docker build . --build-arg BUILD_TYPE=release --tag autogpt:$(date +%s) diff --git a/BULLETIN.md b/BULLETIN.md index 735048dd..0962da4a 100644 --- a/BULLETIN.md +++ b/BULLETIN.md @@ -1,2 +1,5 @@ Welcome to Auto-GPT! We'll keep you informed of the latest news and features by printing messages here. 
-If you don't wish to see this message, you can run Auto-GPT with the --skip-news flag \ No newline at end of file +If you don't wish to see this message, you can run Auto-GPT with the --skip-news flag + +## Changes to Docker configuration +The workdir has been changed from /home/appuser to /app. Be sure to update any volume mounts accordingly. diff --git a/Dockerfile b/Dockerfile index 83961549..05ec1a42 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,38 +1,33 @@ # Use an official Python base image from the Docker Hub FROM python:3.10-slim -# Install git -RUN apt-get -y update -RUN apt-get -y install git chromium-driver +# 'dev' or 'release' container build +ARG BUILD_TYPE=dev -# Install Xvfb and other dependencies for headless browser testing -RUN apt-get update \ - && apt-get install -y wget gnupg2 libgtk-3-0 libdbus-glib-1-2 dbus-x11 xvfb ca-certificates +# Install browsers +RUN apt-get update && apt-get install -y \ + chromium-driver firefox-esr \ + ca-certificates -# Install Firefox / Chromium -RUN wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - \ - && echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list \ - && apt-get update \ - && apt-get install -y chromium firefox-esr +# Install utilities +RUN apt-get install -y curl jq wget git # Set environment variables ENV PIP_NO_CACHE_DIR=yes \ PYTHONUNBUFFERED=1 \ PYTHONDONTWRITEBYTECODE=1 -# Create a non-root user and set permissions -RUN useradd --create-home appuser -WORKDIR /home/appuser -RUN chown appuser:appuser /home/appuser -USER appuser +# Install the required python packages globally +ENV PATH="$PATH:/root/.local/bin" +COPY requirements.txt . -# Copy the requirements.txt file and install the requirements -COPY --chown=appuser:appuser requirements.txt . -RUN sed -i '/Items below this point will not be included in the Docker Image/,$d' requirements.txt && \ - pip install --no-cache-dir --user -r requirements.txt +# Only install dev dependencies in dev container builds +RUN [ '${BUILD_TYPE}' = 'dev' ] || sed -i '/Items below this point will not be included in the Docker Image/,$d' requirements.txt && \ + pip install --no-cache-dir -r requirements.txt # Copy the application files -COPY --chown=appuser:appuser autogpt/ ./autogpt +WORKDIR /app +COPY autogpt/ ./autogpt # Set the entrypoint ENTRYPOINT ["python", "-m", "autogpt"] diff --git a/README.md b/README.md index 497519ad..c0a9bf7d 100644 --- a/README.md +++ b/README.md @@ -59,37 +59,37 @@ Your support is greatly appreciated. Development of this free, open-source proje ## 📋 Requirements -- Environment (pick one) +Choose an environment to run Auto-GPT in (pick one): + + - [Docker](https://docs.docker.com/get-docker/) (*recommended*) - Python 3.10 or later (instructions: [for Windows](https://www.tutorialspoint.com/how-to-install-python-in-windows)) - - Docker [Docker Hub](https://hub.docker.com/r/significantgravitas/auto-gpt) - [VSCode + devcontainer](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) ## Quickstart 1. Set up your OpenAI [API Keys](https://platform.openai.com/account/api-keys) 2. Download the [latest release](https://github.com/Significant-Gravitas/Auto-GPT/releases/latest) -3. Install the [dependencies](/docs/installation.md) and set your [environment variables](/docs/installation.md) -4. Configure any additional features you want, or install some [plugins](/docs/plugins.md) -5. [Run](/docs/usage.md) the app +3. 
Follow the [installation instructions][docs/install] +4. Configure any additional features you want, or install some [plugins][docs/plugins] +5. [Run][docs/usage] the app -Please see the [documentation](/docs) linked below for full setup instructions and configuration options. +Please see the [documentation][docs] linked below for full setup instructions and configuration options. -## 💾 Documentation +[docs]: https://significant-gravitas.github.io/Auto-GPT/ -[Installation](/docs/installation.md) - -[Usage](/docs/usage.md) - -[Speech](/docs/configuration/voice.md) - -[Search](/docs/configuration/search.md) - -[Plugins](/docs/plugins.md) - -[Memory](/docs/configuration/memory.md) - -[Image Generation](/docs/configuration/imagegen.md) +## 📖 Documentation +* [⚙️ Installation][docs/install] +* [💻 Usage][docs/usage] +* [🔌 Plugins][docs/plugins] +* Configuration + * [🔍 Web Search](https://significant-gravitas.github.io/Auto-GPT/configuration/search/) + * [🧠 Memory](https://significant-gravitas.github.io/Auto-GPT/configuration/memory/) + * [🗣️ Voice (TTS)](https://significant-gravitas.github.io/Auto-GPT/configuration/voice/) + * [🖼️ Image Generation](https://significant-gravitas.github.io/Auto-GPT/configuration/imagegen/) +[docs/install]: https://significant-gravitas.github.io/Auto-GPT/installation/ +[docs/usage]: https://significant-gravitas.github.io/Auto-GPT/usage/ +[docs/plugins]: https://significant-gravitas.github.io/Auto-GPT/plugins/ ## ⚠️ Limitations diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py index 8f55c103..b435660c 100644 --- a/autogpt/commands/web_selenium.py +++ b/autogpt/commands/web_selenium.py @@ -95,8 +95,13 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]: options.add_argument("--headless") options.add_argument("--disable-gpu") + chromium_driver_path = Path("/usr/bin/chromedriver") + driver = webdriver.Chrome( - executable_path=ChromeDriverManager().install(), options=options + executable_path=chromium_driver_path + if chromium_driver_path.exists() + else ChromeDriverManager().install(), + options=options, ) driver.get(url) diff --git a/docker-compose.yml b/docker-compose.yml index 50fa1a07..4c6656dd 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -9,9 +9,11 @@ services: build: ./ env_file: - .env + environment: + MEMORY_BACKEND: ${MEMORY_BACKEND:-redis} + REDIS_HOST: ${REDIS_HOST:-redis} volumes: - - "./autogpt:/home/appuser" - - ".env:/home/appuser/.env" + - ./:/app profiles: ["exclude-from-up"] redis: diff --git a/docs/installation.md b/docs/installation.md index 554ad711..579a246d 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -11,48 +11,39 @@ You can set up paid account at https://platform.openai.com/account/billing/overv Important: It's highly recommended that you track your usage on [the Usage page](https://platform.openai.com/account/usage) You can also set limits on how much you spend on [the Usage limits page](https://platform.openai.com/account/billing/limits). -![For OpenAI API key to work, set up paid account at OpenAI API > Billing](./docs/imgs/openai-api-key-billing-paid-account.png) +![For OpenAI API key to work, set up paid account at OpenAI API > Billing](./imgs/openai-api-key-billing-paid-account.png) **PLEASE ENSURE YOU HAVE DONE THIS STEP BEFORE PROCEEDING. OTHERWISE, NOTHING WILL WORK!** -## Steps +## General setup -To install Auto-GPT, follow these steps: +1. Make sure you have one of the environments listed under [**requirements**](../README.md#-requirements) set up. -1. 
Make sure you have all the **requirements** listed in the [README](../README.md). If not, install/get them. + _To execute the following commands, open a CMD, Bash, or Powershell window by navigating to a folder on your computer and typing `CMD` in the folder path at the top, then press enter._ -_To execute the following commands, open a CMD, Bash, or Powershell window by navigating to a folder on your computer and typing `CMD` in the folder path at the top, then press enter._ +2. Clone the repository using Git, or download the [latest stable release](https://github.com/Significant-Gravitas/Auto-GPT/releases/latest) (`Source code (zip)`, at the bottom of the page). -2. Clone the repository: For this step, you need Git installed. -Note: If you don't have Git, you can just download the [latest stable release](https://github.com/Significant-Gravitas/Auto-GPT/releases/latest) instead (`Source code (zip)`, at the bottom of the page). - -``` shell + ``` shell git clone -b stable https://github.com/Significant-Gravitas/Auto-GPT.git -``` + ``` 3. Navigate to the directory where you downloaded the repository. -``` shell + ``` shell cd Auto-GPT -``` - -4. Install the required dependencies. - -``` shell - pip install -r requirements.txt -``` + ``` 5. Configure Auto-GPT: - 1. Find the file named `.env.template` in the main /Auto-GPT folder. This file may be hidden by default in some operating systems due to the dot prefix. To reveal hidden files, follow the instructions for your specific operating system (e.g., in Windows, click on the "View" tab in File Explorer and check the "Hidden items" box; in macOS, press Cmd + Shift + .). + 1. Find the file named `.env.template` in the main `Auto-GPT` folder. This file may be hidden by default in some operating systems due to the dot prefix. To reveal hidden files, follow the instructions for your specific operating system (e.g., in Windows, click on the "View" tab in File Explorer and check the "Hidden items" box; in macOS, press Cmd + Shift + .). 2. Create a copy of this file and call it `.env` by removing the `template` extension. The easiest way is to do this in a command prompt/terminal window `cp .env.template .env`. 3. Open the `.env` file in a text editor. 4. Find the line that says `OPENAI_API_KEY=`. 5. After the `"="`, enter your unique OpenAI API Key (without any quotes or spaces). - 6. Enter any other API keys or Tokens for services you would like to use. + 6. Enter any other API keys or Tokens for services you would like to use. To activate and adjust a setting, remove the `# ` prefix. 7. Save and close the `.env` file. - After you complete these steps, you'll have properly configured the API keys for your project. - + You have now configured Auto-GPT. + Notes: - See [OpenAI API Keys Configuration](#openai-api-keys-configuration) to get your OpenAI API key. - Get your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website. @@ -61,38 +52,54 @@ Note: If you don't have Git, you can just download the [latest stable release](h - `fast_llm_model_deployment_id` - your gpt-3.5-turbo or gpt-4 deployment ID - `smart_llm_model_deployment_id` - your gpt-4 deployment ID - `embedding_model_deployment_id` - your text-embedding-ada-002 v2 deployment ID + - Please specify all of these values as double-quoted strings - -``` shell -# Replace string in angled brackets (<>) to your own ID -azure_model_map: - fast_llm_model_deployment_id: "" - ... 
-``` + ``` shell + # Replace string in angled brackets (<>) to your own ID + azure_model_map: + fast_llm_model_deployment_id: "" + ... + ``` - Details can be found here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section and here: https://learn.microsoft.com/en-us/azure/cognitive-services/openai/tutorials/embeddings?tabs=command-line for the embedding model. - If you're on Windows you may need to install https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170 -## Docker +4. Follow the further instructions for running Auto-GPT with [Docker](#run-with-docker) (*recommended*), or [Docker-less](#run-docker-less) -You can also build this into a docker image and run it: +### Run with Docker +Easiest is to run with `docker-compose`: ``` shell -docker build -t autogpt . -docker run -it --env-file=./.env -v $PWD/auto_gpt_workspace:/home/appuser/auto_gpt_workspace autogpt +docker-compose build auto-gpt +docker-compose run --rm auto-gpt ``` +By default, this will also start and attach a Redis memory backend. +For related settings, see [Memory > Redis setup](./configuration/memory.md#redis-setup). -Or if you have `docker-compose`: +You can also build and run it with "vanilla" docker commands: ``` shell -docker-compose run --build --rm auto-gpt +docker build -t auto-gpt . +docker run -it --env-file=.env -v $PWD:/app auto-gpt ``` You can pass extra arguments, for instance, running with `--gpt3only` and `--continuous` mode: ``` shell -docker run -it --env-file=./.env -v $PWD/auto_gpt_workspace:/home/appuser/auto_gpt_workspace autogpt --gpt3only --continuous +docker-compose run --rm auto-gpt --gpt3only --continuous ``` - ``` shell -docker-compose run --build --rm auto-gpt --gpt3only --continuous +docker run -it --env-file=.env -v $PWD:/app --rm auto-gpt --gpt3only --continuous ``` -Alternatively, you can pull the latest release directly from [Docker Hub](https://hub.docker.com/r/significantgravitas/auto-gpt) +Alternatively, you can pull the latest release directly from [Docker Hub](https://hub.docker.com/r/significantgravitas/auto-gpt) and run that: +``` shell +docker run -it --env OPENAI_API_KEY='your-key-here' --rm significantgravitas/auto-gpt +``` + +Or with `ai_settings.yml` presets mounted: +``` shell +docker run -it --env OPENAI_API_KEY='your-key-here' -v $PWD/ai_settings.yaml:/app/ai_settings.yaml --rm significantgravitas/auto-gpt +``` + + +### Run Docker-less + +Simply run `./run.sh` (Linux/macOS) or `.\run.bat` (Windows) in your terminal. diff --git a/docs/usage.md b/docs/usage.md index 9b2185ae..65adae74 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -1,15 +1,25 @@ # Usage -1. Run the `autogpt` Python module in your terminal. -* On Linux/MacOS: - ``` - ./run.sh - ``` -* On Windows: - ``` - .\run.bat - ``` - Running with `--help` after `.\run.bat` lists all the possible command line arguments you can pass. +1. Open a terminal and run the startup script: + * On Linux/MacOS: + ``` shell + ./run.sh + ``` + * On Windows: + ``` shell + .\run.bat + ``` + * Using Docker: + ``` shell + docker-compose run --rm auto-gpt + ``` + Running with `--help` lists all the possible command line arguments you can pass: + ``` shell + ./run.sh --help + + # or with docker + docker-compose run --rm auto-gpt --help + ``` 2. After each response from Auto-GPT, choose from the options to authorize command(s), exit the program, or provide feedback to the AI. 
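A side note on the `web_selenium.py` hunk in this same patch: the driver selection boils down to a small reusable pattern, namely probing for a preinstalled driver binary and only falling back to a runtime download. Below is a minimal sketch of that logic; the `make_chrome_driver` helper name is illustrative (not part of the codebase), and the Selenium 3-style `executable_path` argument follows the diff above.

``` python
from pathlib import Path

from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager


def make_chrome_driver() -> webdriver.Chrome:
    """Illustrative helper: prefer a system chromedriver, else download one."""
    options = webdriver.ChromeOptions()
    options.add_argument("--headless")
    options.add_argument("--disable-gpu")

    # Debian's chromium-driver package installs the binary at this path
    chromium_driver_path = Path("/usr/bin/chromedriver")
    executable_path = (
        str(chromium_driver_path)
        if chromium_driver_path.exists()
        else ChromeDriverManager().install()  # fetch a matching driver instead
    )
    return webdriver.Chrome(executable_path=executable_path, options=options)
```

This keeps the Docker image usable offline while still working on hosts that have no packaged chromedriver.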
@@ -24,15 +34,15 @@ Here are some common arguments you can use when running Auto-GPT: > Replace anything in angled brackets (<>) to a value you want to specify * View all available command line arguments - ``` + ``` shell python -m autogpt --help ``` * Run Auto-GPT with a different AI Settings file - ``` + ``` shell python -m autogpt --ai-settings ``` * Specify a memory backend - ``` + ``` shell python -m autogpt --use-memory ``` @@ -55,7 +65,7 @@ Use at your own risk. 1. Run the `autogpt` python module in your terminal: - ``` + ``` shell python -m autogpt --continuous ``` @@ -85,4 +95,4 @@ To print out debug logs: ``` shell python -m autogpt --debug -``` \ No newline at end of file +``` From cade788a7e68c5c7dda26c08682ba2884dd73444 Mon Sep 17 00:00:00 2001 From: fluxism Date: Mon, 24 Apr 2023 08:12:15 -0700 Subject: [PATCH 115/222] Add arg to do_nothing command (#3090) * Add arg to do_nothing command * do_nothing returns reason arg --- autogpt/app.py | 2 +- autogpt/prompts/prompt.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/autogpt/app.py b/autogpt/app.py index 590f3d50..6bcc851b 100644 --- a/autogpt/app.py +++ b/autogpt/app.py @@ -120,7 +120,7 @@ def execute_command( # non-file is given, return instructions "Input should be a python # filepath, write your code to file and try again elif command_name == "do_nothing": - return "No action performed." + return f"Error: No action performed. Reason: {arguments['reason']}" elif command_name == "task_complete": shutdown() else: diff --git a/autogpt/prompts/prompt.py b/autogpt/prompts/prompt.py index bd3cc7cb..cc06fabb 100644 --- a/autogpt/prompts/prompt.py +++ b/autogpt/prompts/prompt.py @@ -39,7 +39,7 @@ def build_default_prompt_generator() -> PromptGenerator: # Define the command list commands = [ - ("Do Nothing", "do_nothing", {}), + ("Do Nothing", "do_nothing", {"reason": ""}), ("Task Complete (Shutdown)", "task_complete", {"reason": ""}), ] From 3886afc82582e7eae10b195ee88142b2e6c69514 Mon Sep 17 00:00:00 2001 From: k-boikov <64261260+k-boikov@users.noreply.github.com> Date: Mon, 24 Apr 2023 19:42:08 +0300 Subject: [PATCH 116/222] fix test_search_files for windows (#3073) Co-authored-by: Richard Beales --- tests/unit/test_file_operations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/test_file_operations.py b/tests/unit/test_file_operations.py index 35f9b91a..c3b20d0c 100644 --- a/tests/unit/test_file_operations.py +++ b/tests/unit/test_file_operations.py @@ -110,7 +110,7 @@ class TestFileOperations(unittest.TestCase): files = search_files(str(self.workspace.root)) self.assertIn(file_a.name, files) self.assertIn(file_b.name, files) - self.assertIn(f"{Path(self.test_directory).name}/{file_a.name}", files) + self.assertIn(os.path.join(Path(self.test_directory).name, file_a.name), files) # Clean up os.remove(file_a) From ffdc6526052910605c2ca8ca742a34ee358dd645 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Mon, 24 Apr 2023 19:03:21 +0200 Subject: [PATCH 117/222] Clean up GitHub Workflows (#3059) * initial cleanup of github workflows * only run pr-label workflow on push to master * move docker ci/release summaries to scripts * add XS label for PR's under 2 lines * draft test job for Docker CI * fix & activate Docker CI test job * add debug step to docker CI * fix Docker CI test container env * Docker CI build matrix * fixup build summaries * fix pipes in summary * optimize Dockerfile for layer caching * more markdown escaping * add gha cache scopes * add Docker CI cache 
clean workflow --- .github/workflows/auto_format.yml | 23 ---- .../{benchmark.yml => benchmarks.yml} | 22 ++-- .github/workflows/ci.yml | 9 +- .github/workflows/docker-cache-clean.yml | 58 +++++++++ .github/workflows/docker-ci.yml | 115 ++++++++++++++++++ .github/workflows/docker-image.yml | 21 ---- .github/workflows/docker-release.yml | 81 ++++++++++++ .github/workflows/dockerhub-imagepush.yml | 31 ----- ...entation.yml => documentation-release.yml} | 16 ++- .github/workflows/pr-label.yml | 25 ++-- .../workflows/scripts/docker-ci-summary.sh | 98 +++++++++++++++ .../scripts/docker-release-summary.sh | 85 +++++++++++++ .github/workflows/sponsors_readme.yml | 5 +- Dockerfile | 29 +++-- 14 files changed, 501 insertions(+), 117 deletions(-) delete mode 100644 .github/workflows/auto_format.yml rename .github/workflows/{benchmark.yml => benchmarks.yml} (60%) create mode 100644 .github/workflows/docker-cache-clean.yml create mode 100644 .github/workflows/docker-ci.yml delete mode 100644 .github/workflows/docker-image.yml create mode 100644 .github/workflows/docker-release.yml delete mode 100644 .github/workflows/dockerhub-imagepush.yml rename .github/workflows/{documentation.yml => documentation-release.yml} (65%) create mode 100755 .github/workflows/scripts/docker-ci-summary.sh create mode 100755 .github/workflows/scripts/docker-release-summary.sh diff --git a/.github/workflows/auto_format.yml b/.github/workflows/auto_format.yml deleted file mode 100644 index c33cd397..00000000 --- a/.github/workflows/auto_format.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: auto-format -on: pull_request -jobs: - format: - runs-on: ubuntu-latest - steps: - - name: Checkout PR branch - uses: actions/checkout@v2 - with: - ref: ${{ github.event.pull_request.head.sha }} - - name: autopep8 - uses: peter-evans/autopep8@v1 - with: - args: --exit-code --recursive --in-place --aggressive --aggressive . 
- - name: Check for modified files - id: git-check - run: echo "modified=$(if git diff-index --quiet HEAD --; then echo "false"; else echo "true"; fi)" >> $GITHUB_ENV - - name: Push changes - if: steps.git-check.outputs.modified == 'true' - run: | - git config --global user.name 'Torantulino' - git config --global user.email 'toran.richards@gmail.com' - git remote set diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmarks.yml similarity index 60% rename from .github/workflows/benchmark.yml rename to .github/workflows/benchmarks.yml index 0d9822f7..272fca17 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmarks.yml @@ -1,4 +1,4 @@ -name: benchmark +name: Run Benchmarks on: workflow_dispatch: @@ -6,26 +6,26 @@ on: jobs: build: runs-on: ubuntu-latest - environment: benchmark - strategy: - matrix: - python-version: ['3.10', '3.11'] + + env: + python-version: '3.10' steps: - - name: Check out repository + - name: Checkout repository uses: actions/checkout@v3 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + - name: Set up Python ${{ env.python-version }} + uses: actions/setup-python@v4 with: - python-version: ${{ matrix.python-version }} + python-version: ${{ env.python-version }} - name: Install dependencies run: | python -m pip install --upgrade pip pip install -r requirements.txt + - name: benchmark - run: | - python benchmark/benchmark_entrepreneur_gpt_with_undecisive_user.py env: OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + run: | + python benchmark/benchmark_entrepreneur_gpt_with_undecisive_user.py diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 780db14f..0b2bbde6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,12 +2,12 @@ name: Python CI on: push: - branches: [master] + branches: [ master ] pull_request: - branches: [master] + branches: [ master ] concurrency: - group: ${{ format('ci-{0}', format('pr-{0}', github.event.pull_request.number) || github.sha) }} + group: ${{ format('ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }} cancel-in-progress: ${{ github.event_name == 'pull_request' }} jobs: @@ -17,7 +17,7 @@ jobs: min-python-version: "3.10" steps: - - name: Check out repository + - name: Checkout repository uses: actions/checkout@v3 - name: Set up Python ${{ env.min-python-version }} @@ -74,6 +74,7 @@ jobs: pytest --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term env: OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + - name: Generate coverage report run: | coverage report diff --git a/.github/workflows/docker-cache-clean.yml b/.github/workflows/docker-cache-clean.yml new file mode 100644 index 00000000..f3fc8c10 --- /dev/null +++ b/.github/workflows/docker-cache-clean.yml @@ -0,0 +1,58 @@ +name: Purge Docker CI cache + +on: + schedule: + - cron: 20 4 * * 1,4 + +env: + BASE_BRANCH: master + IMAGE_NAME: auto-gpt + +jobs: + build: + runs-on: ubuntu-latest + strategy: + matrix: + build-type: [release, dev] + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - id: build + name: Build image + uses: docker/build-push-action@v3 + with: + build-args: BUILD_TYPE=${{ matrix.build-type }} + load: true # save to docker images + # use GHA cache as read-only + cache-to: type=gha,scope=docker-${{ matrix.build-type }},mode=max + + - name: Generate build report + env: + event_name: ${{ 
github.event_name }} + event_ref: ${{ github.event.schedule }} + + build_type: ${{ matrix.build-type }} + + prod_branch: stable + dev_branch: master + repository: ${{ github.repository }} + base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'stable' && 'master' || 'stable' }} + + current_ref: ${{ github.ref_name }} + commit_hash: ${{ github.sha }} + source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.sha) }} + push_forced_label: + + new_commits_json: ${{ null }} + compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }} + + github_context_json: ${{ toJSON(github) }} + job_env_json: ${{ toJSON(env) }} + vars_json: ${{ toJSON(vars) }} + + run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY + continue-on-error: true diff --git a/.github/workflows/docker-ci.yml b/.github/workflows/docker-ci.yml new file mode 100644 index 00000000..c933202f --- /dev/null +++ b/.github/workflows/docker-ci.yml @@ -0,0 +1,115 @@ +name: Docker CI + +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +concurrency: + group: ${{ format('docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }} + cancel-in-progress: ${{ github.event_name == 'pull_request' }} + +env: + IMAGE_NAME: auto-gpt + +jobs: + build: + runs-on: ubuntu-latest + strategy: + matrix: + build-type: [release, dev] + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - if: runner.debug + run: | + ls -al + du -hs * + + - id: build + name: Build image + uses: docker/build-push-action@v3 + with: + build-args: BUILD_TYPE=${{ matrix.build-type }} + tags: ${{ env.IMAGE_NAME }} + load: true # save to docker images + # cache layers in GitHub Actions cache to speed up builds + cache-from: type=gha,scope=docker-${{ matrix.build-type }} + cache-to: type=gha,scope=docker-${{ matrix.build-type }},mode=max + + - name: Generate build report + env: + event_name: ${{ github.event_name }} + event_ref: ${{ github.event.ref }} + event_ref_type: ${{ github.event.ref}} + + build_type: ${{ matrix.build-type }} + + prod_branch: stable + dev_branch: master + repository: ${{ github.repository }} + base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'stable' && 'master' || 'stable' }} + + current_ref: ${{ github.ref_name }} + commit_hash: ${{ github.event.after }} + source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }} + push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }} + + new_commits_json: ${{ toJSON(github.event.commits) }} + compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }} + + github_context_json: ${{ toJSON(github) }} + job_env_json: ${{ toJSON(env) }} + vars_json: ${{ toJSON(vars) }} + + run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY + continue-on-error: true + + # Docker setup needs fixing before this is going to work: #1843 + test: + runs-on: ubuntu-latest + needs: build + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - id: build + name: Build image + uses: docker/build-push-action@v3 + with: + build-args: BUILD_TYPE=dev # include pytest + tags: ${{ env.IMAGE_NAME }} + load: true # save to docker images + # cache layers in GitHub Actions 
cache to speed up builds + cache-from: type=gha,scope=docker-dev + cache-to: type=gha,scope=docker-dev,mode=max + + - id: test + name: Run tests + env: + CI: true + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + run: | + set +e + test_output=$( + docker run --env CI --env OPENAI_API_KEY --entrypoint python ${{ env.IMAGE_NAME }} -m \ + pytest --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term 2>&1 + ) + test_failure=$? + + echo "$test_output" + + cat << $EOF >> $GITHUB_STEP_SUMMARY + # Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌') + \`\`\` + $test_output + \`\`\` + $EOF diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml deleted file mode 100644 index d63ff1f4..00000000 --- a/.github/workflows/docker-image.yml +++ /dev/null @@ -1,21 +0,0 @@ -name: Docker Image CI - -on: - push: - branches: [ "master" ] - pull_request: - branches: [ "master" ] -concurrency: - group: ${{ format('docker-ci-{0}', github.event.pull_request.number || github.sha) }} - cancel-in-progress: true - -jobs: - - build: - - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v3 - - name: Build the Docker image - run: docker build . --build-arg BUILD_TYPE=release --tag autogpt:$(date +%s) diff --git a/.github/workflows/docker-release.yml b/.github/workflows/docker-release.yml new file mode 100644 index 00000000..e066641f --- /dev/null +++ b/.github/workflows/docker-release.yml @@ -0,0 +1,81 @@ +name: Docker Release + +on: + release: + types: [ published, edited ] + + workflow_dispatch: + inputs: + no_cache: + type: boolean + description: 'Build from scratch, without using cached layers' + +env: + IMAGE_NAME: auto-gpt + DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER }}/auto-gpt + +jobs: + build: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Log in to Docker hub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USER }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + # slashes are not allowed in image tags, but can appear in git branch or tag names + - id: sanitize_tag + name: Sanitize image tag + run: echo tag=${raw_tag//\//-} >> $GITHUB_OUTPUT + env: + raw_tag: ${{ github.ref_name }} + + - id: build + name: Build image + uses: docker/build-push-action@v3 + with: + build-args: BUILD_TYPE=release + load: true # save to docker images + # push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555 + tags: > + ${{ env.IMAGE_NAME }}, + ${{ env.DEPLOY_IMAGE_NAME }}:latest, + ${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }} + + # cache layers in GitHub Actions cache to speed up builds + cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=docker-release + cache-to: type=gha,scope=docker-release,mode=max + + - name: Push image to Docker Hub + run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }} + + - name: Generate build report + env: + event_name: ${{ github.event_name }} + event_ref: ${{ github.event.ref }} + event_ref_type: ${{ github.event.ref}} + inputs_no_cache: ${{ inputs.no_cache }} + + prod_branch: stable + dev_branch: master + repository: ${{ github.repository }} + base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'stable' && 'master' || 'stable' }} + + ref_type: ${{ github.ref_type }} + current_ref: ${{ github.ref_name }} + commit_hash: ${{ github.sha }} + source_url: ${{ format('{0}/tree/{1}', 
github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }} + + github_context_json: ${{ toJSON(github) }} + job_env_json: ${{ toJSON(env) }} + vars_json: ${{ toJSON(vars) }} + + run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY + continue-on-error: true diff --git a/.github/workflows/dockerhub-imagepush.yml b/.github/workflows/dockerhub-imagepush.yml deleted file mode 100644 index 165f6265..00000000 --- a/.github/workflows/dockerhub-imagepush.yml +++ /dev/null @@ -1,31 +0,0 @@ -name: Push Docker Image on Release - -on: - release: - types: [published,edited] - -jobs: - - build: - - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v3 - - name: Log in to Docker hub - env: - DOCKER_USER: ${{secrets.DOCKER_USER}} - DOCKER_PASSWORD: ${{secrets.DOCKER_PASSWORD}} - run: | - docker login -u $DOCKER_USER -p $DOCKER_PASSWORD - - name: Build the Docker image - run: | - tag_v=$(git describe --tags $(git rev-list --tags --max-count=1)) - tag=$(echo $tag_v | sed 's/v//') - docker build . --file Dockerfile --tag ${{secrets.DOCKER_USER}}/auto-gpt:${tag} - - name: Docker Push - run: | - tag_v2=$(git describe --tags $(git rev-list --tags --max-count=1)) - tag2=$(echo $tag_v | sed 's/v//') - docker push ${{secrets.DOCKER_USER}}/auto-gpt:${tag2} - diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation-release.yml similarity index 65% rename from .github/workflows/documentation.yml rename to .github/workflows/documentation-release.yml index 7a7fd784..ef97e1e4 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation-release.yml @@ -13,17 +13,25 @@ on: permissions: contents: write + jobs: deploy: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Set up Python 3 + uses: actions/setup-python@v4 with: python-version: 3.x - - uses: actions/cache@v2 + + - name: Set up workflow cache + uses: actions/cache@v3 with: key: ${{ github.ref }} path: .cache - - run: pip install mkdocs-material + + - run: pip install mkdocs-material + - run: mkdocs gh-deploy --force diff --git a/.github/workflows/pr-label.yml b/.github/workflows/pr-label.yml index f1b200b0..d4e9a5e7 100644 --- a/.github/workflows/pr-label.yml +++ b/.github/workflows/pr-label.yml @@ -1,12 +1,15 @@ name: "Pull Request auto-label" + on: # So that PRs touching the same files as the push are updated push: + branches: [ master ] # So that the `dirtyLabel` is removed if conflicts are resolve # We recommend `pull_request_target` so that github secrets are available. 
# In `pull_request` we wouldn't be able to change labels of fork PRs pull_request_target: - types: [opened, synchronize] + types: [ opened, synchronize ] + concurrency: group: ${{ format('pr-label-{0}', github.event.pull_request.number || github.sha) }} cancel-in-progress: true @@ -34,15 +37,15 @@ jobs: pull-requests: write runs-on: ubuntu-latest steps: - - uses: codelytv/pr-size-labeler@v1.7.0 + - uses: codelytv/pr-size-labeler@v1 with: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - s_label: "size/s" - s_max_size: "10" - m_label: "size/m" - m_max_size: "50" - l_label: "size/l" - l_max_size: "200" - xl_label: "size/xl" - fail_if_xl: "false" - github_api_url: "api.github.com" + xs_label: 'size/xs' + xs_max_size: 2 + s_label: 'size/s' + s_max_size: 10 + m_label: 'size/m' + m_max_size: 50 + l_label: 'size/l' + l_max_size: 200 + xl_label: 'size/xl' diff --git a/.github/workflows/scripts/docker-ci-summary.sh b/.github/workflows/scripts/docker-ci-summary.sh new file mode 100755 index 00000000..0a55293f --- /dev/null +++ b/.github/workflows/scripts/docker-ci-summary.sh @@ -0,0 +1,98 @@ +#!/bin/bash +meta=$(docker image inspect "$IMAGE_NAME" | jq '.[0]') +head_compare_url=$(sed "s/{base}/$base_branch/; s/{head}/$current_ref/" <<< $compare_url_template) +ref_compare_url=$(sed "s/{base}/$base_branch/; s/{head}/$commit_hash/" <<< $compare_url_template) + +EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64) + +cat << $EOF +# Docker Build summary 🔨 + +**Source:** branch \`$current_ref\` -> [$repository@\`${commit_hash:0:7}\`]($source_url) + +**Build type:** \`$build_type\` + +**Image size:** $((`jq -r .Size <<< $meta` / 10**6))MB + +## Image details + +**Tags:** +$(jq -r '.RepoTags | map("* `\(.)`") | join("\n")' <<< $meta) + +
+<details>
+<summary><h3>Layers</h3></summary>
+ +| Age | Size | Created by instruction | +| --------- | ------ | ---------------------- | +$(docker history --no-trunc --format "{{.CreatedSince}}\t{{.Size}}\t\`{{.CreatedBy}}\`\t{{.Comment}}" $IMAGE_NAME \ + | grep 'buildkit.dockerfile' `# filter for layers created in this build process`\ + | cut -f-3 `# yeet Comment column`\ + | sed 's/ ago//' `# fix Layer age`\ + | sed 's/ # buildkit//' `# remove buildkit comment from instructions`\ + | sed 's/\$/\\$/g' `# escape variable and shell expansions`\ + | sed 's/|/\\|/g' `# escape pipes so they don't interfere with column separators`\ + | column -t -s$'\t' -o' | ' `# align columns and add separator`\ + | sed 's/^/| /; s/$/ |/' `# add table row start and end pipes`) +
+</details>
+
+<details>
+<summary><h3>ENV</h3></summary>
+ +| Variable | Value | +| -------- | -------- | +$(jq -r \ + '.Config.Env + | map( + split("=") + | "\(.[0]) | `\(.[1] | gsub("\\s+"; " "))`" + ) + | map("| \(.) |") + | .[]' <<< $meta +) +
+</details>
+
+<details>
+<summary>Raw metadata</summary>
+
+\`\`\`JSON
+$meta
+\`\`\`
+</details>
+ +## Build details +**Build trigger:** $push_forced_label $event_name \`$event_ref\` + +
+<details>
+<summary><code>github</code> context</summary>
+
+\`\`\`JSON
+$github_context_json
+\`\`\`
+</details>
+ +### Source +**HEAD:** [$repository@\`${commit_hash:0:7}\`]($source_url) on branch [$current_ref]($ref_compare_url) + +**Diff with previous HEAD:** $head_compare_url + +#### New commits +$(jq -r 'map([ + "**Commit [`\(.id[0:7])`](\(.url)) by \(if .author.username then "@"+.author.username else .author.name end):**", + .message, + (if .committer.name != .author.name then "\n> **Committer:** \(.committer.name) <\(.committer.email)>" else "" end), + "**Timestamp:** \(.timestamp)" +] | map("> \(.)\n") | join("")) | join("\n")' <<< $new_commits_json) + +### Job environment + +#### \`vars\` context: +\`\`\`JSON +$vars_json +\`\`\` + +#### \`env\` context: +\`\`\`JSON +$job_env_json +\`\`\` + +$EOF diff --git a/.github/workflows/scripts/docker-release-summary.sh b/.github/workflows/scripts/docker-release-summary.sh new file mode 100755 index 00000000..1e00f41d --- /dev/null +++ b/.github/workflows/scripts/docker-release-summary.sh @@ -0,0 +1,85 @@ +#!/bin/bash +meta=$(docker image inspect "$IMAGE_NAME" | jq '.[0]') + +EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64) + +cat << $EOF +# Docker Release Build summary 🚀🔨 + +**Source:** $ref_type \`$current_ref\` -> [$repository@\`${commit_hash:0:7}\`]($source_url) + +**Image size:** $((`jq -r .Size <<< $meta` / 10**6))MB + +## Image details + +**Tags:** +$(jq -r '.RepoTags | map("* `\(.)`") | join("\n")' <<< $meta) + +
+<details>
+<summary><h3>Layers</h3></summary>
+ +| Age | Size | Created by instruction | +| --------- | ------ | ---------------------- | +$(docker history --no-trunc --format "{{.CreatedSince}}\t{{.Size}}\t\`{{.CreatedBy}}\`\t{{.Comment}}" $IMAGE_NAME \ + | grep 'buildkit.dockerfile' `# filter for layers created in this build process`\ + | cut -f-3 `# yeet Comment column`\ + | sed 's/ ago//' `# fix Layer age`\ + | sed 's/ # buildkit//' `# remove buildkit comment from instructions`\ + | sed 's/\$/\\$/g' `# escape variable and shell expansions`\ + | sed 's/|/\\|/g' `# escape pipes so they don't interfere with column separators`\ + | column -t -s$'\t' -o' | ' `# align columns and add separator`\ + | sed 's/^/| /; s/$/ |/' `# add table row start and end pipes`) +
+</details>
+
+<details>
+<summary><h3>ENV</h3></summary>
+ +| Variable | Value | +| -------- | -------- | +$(jq -r \ + '.Config.Env + | map( + split("=") + | "\(.[0]) | `\(.[1] | gsub("\\s+"; " "))`" + ) + | map("| \(.) |") + | .[]' <<< $meta +) +
+</details>
+
+<details>
+<summary>Raw metadata</summary>
+
+\`\`\`JSON
+$meta
+\`\`\`
+</details>
+ +## Build details +**Build trigger:** $event_name \`$current_ref\` + +| Parameter | Value | +| -------------- | ------------ | +| \`no_cache\` | \`$inputs_no_cache\` | + +
+<details>
+<summary><code>github</code> context</summary>
+
+\`\`\`JSON
+$github_context_json
+\`\`\`
+</details>
+ +### Job environment + +#### \`vars\` context: +\`\`\`JSON +$vars_json +\`\`\` + +#### \`env\` context: +\`\`\`JSON +$job_env_json +\`\`\` + +$EOF diff --git a/.github/workflows/sponsors_readme.yml b/.github/workflows/sponsors_readme.yml index fffdbd4d..7e51a4f8 100644 --- a/.github/workflows/sponsors_readme.yml +++ b/.github/workflows/sponsors_readme.yml @@ -1,14 +1,17 @@ name: Generate Sponsors README + on: workflow_dispatch: schedule: - cron: '0 */12 * * *' + jobs: deploy: runs-on: ubuntu-latest steps: - name: Checkout 🛎️ - uses: actions/checkout@v2 + uses: actions/checkout@v3 + - name: Generate Sponsors 💖 uses: JamesIves/github-sponsors-readme-action@v1 with: diff --git a/Dockerfile b/Dockerfile index 05ec1a42..6023cefa 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,9 +1,9 @@ -# Use an official Python base image from the Docker Hub -FROM python:3.10-slim - # 'dev' or 'release' container build ARG BUILD_TYPE=dev +# Use an official Python base image from the Docker Hub +FROM python:3.10-slim AS autogpt-base + # Install browsers RUN apt-get update && apt-get install -y \ chromium-driver firefox-esr \ @@ -21,13 +21,20 @@ ENV PIP_NO_CACHE_DIR=yes \ ENV PATH="$PATH:/root/.local/bin" COPY requirements.txt . -# Only install dev dependencies in dev container builds -RUN [ '${BUILD_TYPE}' = 'dev' ] || sed -i '/Items below this point will not be included in the Docker Image/,$d' requirements.txt && \ - pip install --no-cache-dir -r requirements.txt - -# Copy the application files -WORKDIR /app -COPY autogpt/ ./autogpt - # Set the entrypoint ENTRYPOINT ["python", "-m", "autogpt"] + +# dev build -> include everything +FROM autogpt-base as autogpt-dev +RUN pip install --no-cache-dir -r requirements.txt +WORKDIR /app +ONBUILD COPY . ./ + +# release build -> include bare minimum +FROM autogpt-base as autogpt-release +RUN sed -i '/Items below this point will not be included in the Docker Image/,$d' requirements.txt && \ + pip install --no-cache-dir -r requirements.txt +WORKDIR /app +ONBUILD COPY autogpt/ ./autogpt + +FROM autogpt-${BUILD_TYPE} AS auto-gpt From 17819e2a555d4869692047c551bc1a4d05f76ba9 Mon Sep 17 00:00:00 2001 From: James Collins Date: Mon, 24 Apr 2023 10:28:51 -0700 Subject: [PATCH 118/222] More robust null byte checking --- autogpt/workspace/workspace.py | 9 ++++++ tests/test_workspace.py | 53 +++++++++++++++++++++------------- 2 files changed, 42 insertions(+), 20 deletions(-) diff --git a/autogpt/workspace/workspace.py b/autogpt/workspace/workspace.py index b06fa9eb..91d2140e 100644 --- a/autogpt/workspace/workspace.py +++ b/autogpt/workspace/workspace.py @@ -15,6 +15,8 @@ from pathlib import Path class Workspace: """A class that represents a workspace for an AutoGPT agent.""" + NULL_BYTES = ["\0", "\000", "\x00", r"\z", "\u0000", "%00"] + def __init__(self, workspace_root: str | Path, restrict_to_workspace: bool): self._root = self._sanitize_path(workspace_root) self._restrict_to_workspace = restrict_to_workspace @@ -100,6 +102,13 @@ class Workspace: """ + # Posix systems disallow null bytes in paths. Windows is agnostic about it. + # Do an explicit check here for all sorts of null byte representations. 
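+        # e.g. "file\x00.txt" or "dir/%00" (illustrative inputs) each contain an
+        # entry from NULL_BYTES and are rejected before any path resolution.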
+ + for null_byte in Workspace.NULL_BYTES: + if null_byte in str(relative_path) or null_byte in str(root): + raise ValueError("embedded null byte") + if root is None: return Path(relative_path).resolve() diff --git a/tests/test_workspace.py b/tests/test_workspace.py index 33c096d3..fbe14d8c 100644 --- a/tests/test_workspace.py +++ b/tests/test_workspace.py @@ -1,3 +1,4 @@ +import itertools from pathlib import Path import pytest @@ -17,26 +18,38 @@ _ACCESSIBLE_PATHS = [ Path("test_folder/../test_folder/test_file.txt"), ] -_INACCESSIBLE_PATHS = [ - # Takes us out of the workspace - Path(".."), - Path("../test_file.txt"), - Path("../not_auto_gpt_workspace"), - Path("../not_auto_gpt_workspace/test_file.txt"), - Path("test_folder/../.."), - Path("test_folder/../../test_file.txt"), - Path("test_folder/../../not_auto_gpt_workspace"), - Path("test_folder/../../not_auto_gpt_workspace/test_file.txt"), - # Contains null bytes - Path("\x00"), - Path("\x00test_file.txt"), - Path("test_folder/\x00"), - Path("test_folder/\x00test_file.txt"), - # Absolute paths - Path("/"), - Path("/test_file.txt"), - Path("/home"), -] +_INACCESSIBLE_PATHS = ( + [ + # Takes us out of the workspace + Path(".."), + Path("../test_file.txt"), + Path("../not_auto_gpt_workspace"), + Path("../not_auto_gpt_workspace/test_file.txt"), + Path("test_folder/../.."), + Path("test_folder/../../test_file.txt"), + Path("test_folder/../../not_auto_gpt_workspace"), + Path("test_folder/../../not_auto_gpt_workspace/test_file.txt"), + ] + + [ + # Contains null bytes + Path(template.format(null_byte=null_byte)) + for template, null_byte in itertools.product( + [ + "{null_byte}", + "{null_byte}test_file.txt", + "test_folder/{null_byte}", + "test_folder/{null_byte}test_file.txt", + ], + Workspace.NULL_BYTES, + ) + ] + + [ + # Absolute paths + Path("/"), + Path("/test_file.txt"), + Path("/home"), + ] +) @pytest.fixture() From 45f2513a73f13011e5b27f00dfee805eaaff6a90 Mon Sep 17 00:00:00 2001 From: YOUNESS ZEMZGUI <31807902+younessZMZ@users.noreply.github.com> Date: Mon, 24 Apr 2023 17:54:46 +0000 Subject: [PATCH 119/222] Adjust test_json_parser file (#1935) Co-authored-by: Reinier van der Leer --- .../{json_tests.py => _test_json_parser.py} | 0 tests/{ => unit}/test_json_parser.py | 57 +++---------------- 2 files changed, 9 insertions(+), 48 deletions(-) rename tests/unit/{json_tests.py => _test_json_parser.py} (100%) rename tests/{ => unit}/test_json_parser.py (60%) diff --git a/tests/unit/json_tests.py b/tests/unit/_test_json_parser.py similarity index 100% rename from tests/unit/json_tests.py rename to tests/unit/_test_json_parser.py diff --git a/tests/test_json_parser.py b/tests/unit/test_json_parser.py similarity index 60% rename from tests/test_json_parser.py rename to tests/unit/test_json_parser.py index 571ee5fa..69cddca6 100644 --- a/tests/test_json_parser.py +++ b/tests/unit/test_json_parser.py @@ -1,10 +1,10 @@ -import unittest +from unittest import TestCase -import tests.context from autogpt.json_utils.json_fix_llm import fix_and_parse_json +from tests.utils import skip_in_ci -class TestParseJson(unittest.TestCase): +class TestParseJson(TestCase): def test_valid_json(self): """Test that a valid JSON string is parsed correctly.""" json_str = '{"name": "John", "age": 30, "city": "New York"}' @@ -12,7 +12,7 @@ class TestParseJson(unittest.TestCase): self.assertEqual(obj, {"name": "John", "age": 30, "city": "New York"}) def test_invalid_json_minor(self): - """Test that an invalid JSON string can be fixed with gpt""" + """Test that an 
invalid JSON string can not be fixed without gpt""" json_str = '{"name": "John", "age": 30, "city": "New York",}' with self.assertRaises(Exception): fix_and_parse_json(json_str, try_to_fix_with_gpt=False) @@ -63,49 +63,10 @@ class TestParseJson(unittest.TestCase): "speak": "I will start browsing the repository to find any issues we can fix.", }, } - # Assert that this raises an exception: - self.assertEqual( - fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj - ) - def test_invalid_json_leading_sentence_with_gpt(self): - """Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False""" - json_str = """I will first need to browse the repository (https://github.com/Torantulino/Auto-GPT) and identify any potential bugs that need fixing. I will use the "browse_website" command for this. + # # Assert that this can be fixed with GPT + # self.assertEqual(fix_and_parse_json(json_str), good_obj) -{ - "command": { - "name": "browse_website", - "args":{ - "url": "https://github.com/Torantulino/Auto-GPT" - } - }, - "thoughts": - { - "text": "Browsing the repository to identify potential bugs", - "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.", - "plan": "- Analyze the repository for potential bugs and areas of improvement", - "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.", - "speak": "I am browsing the repository to identify potential bugs." - } -}""" - good_obj = { - "command": { - "name": "browse_website", - "args": {"url": "https://github.com/Torantulino/Auto-GPT"}, - }, - "thoughts": { - "text": "Browsing the repository to identify potential bugs", - "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.", - "plan": "- Analyze the repository for potential bugs and areas of improvement", - "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.", - "speak": "I am browsing the repository to identify potential bugs.", - }, - } - # Assert that this raises an exception: - self.assertEqual( - fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj - ) - - -if __name__ == "__main__": - unittest.main() + # Assert that trying to fix this without GPT raises an exception + with self.assertRaises(Exception): + fix_and_parse_json(json_str, try_to_fix_with_gpt=False) From 5f646498c45c9ceaa99accee3bd372f153478a34 Mon Sep 17 00:00:00 2001 From: James Collins Date: Mon, 24 Apr 2023 12:12:14 -0700 Subject: [PATCH 120/222] Add indirection layer between cli and application start --- autogpt/cli.py | 117 +------------------------------------- autogpt/main.py | 146 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 149 insertions(+), 114 deletions(-) create mode 100644 autogpt/main.py diff --git a/autogpt/cli.py b/autogpt/cli.py index a2f0cc0b..1e3964a6 100644 --- a/autogpt/cli.py +++ b/autogpt/cli.py @@ -78,28 +78,10 @@ def main( Start an Auto-GPT assistant. 
""" # Put imports inside function to avoid importing everything when starting the CLI - import logging - import sys - from pathlib import Path - - from colorama import Fore - - from autogpt.agent.agent import Agent - from autogpt.commands.command import CommandRegistry - from autogpt.config import Config, check_openai_api_key - from autogpt.configurator import create_config - from autogpt.logs import logger - from autogpt.memory import get_memory - from autogpt.plugins import scan_plugins - from autogpt.prompts.prompt import construct_main_ai_config - from autogpt.utils import get_current_git_branch, get_latest_bulletin - from autogpt.workspace import Workspace + from autogpt.main import run_auto_gpt if ctx.invoked_subcommand is None: - cfg = Config() - # TODO: fill in llm values here - check_openai_api_key() - create_config( + run_auto_gpt( continuous, continuous_limit, ai_settings, @@ -112,101 +94,8 @@ def main( browser_name, allow_downloads, skip_news, + workspace_directory, ) - logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO) - if not cfg.skip_news: - motd = get_latest_bulletin() - if motd: - logger.typewriter_log("NEWS: ", Fore.GREEN, motd) - git_branch = get_current_git_branch() - if git_branch and git_branch != "stable": - logger.typewriter_log( - "WARNING: ", - Fore.RED, - f"You are running on `{git_branch}` branch " - "- this is not a supported branch.", - ) - if sys.version_info < (3, 10): - logger.typewriter_log( - "WARNING: ", - Fore.RED, - "You are running on an older version of Python. " - "Some people have observed problems with certain " - "parts of Auto-GPT with this version. " - "Please consider upgrading to Python 3.10 or higher.", - ) - - # TODO: have this directory live outside the repository (e.g. in a user's - # home directory) and have it come in as a command line argument or part of - # the env file. - if workspace_directory is None: - workspace_directory = Path(__file__).parent / "auto_gpt_workspace" - else: - workspace_directory = Path(workspace_directory) - # TODO: pass in the ai_settings file and the env file and have them cloned into - # the workspace directory so we can bind them to the agent. - workspace_directory = Workspace.make_workspace(workspace_directory) - cfg.workspace_path = str(workspace_directory) - - # HACK: doing this here to collect some globals that depend on the workspace. 
- file_logger_path = workspace_directory / "file_logger.txt" - if not file_logger_path.exists(): - with file_logger_path.open(mode="w", encoding="utf-8") as f: - f.write("File Operation Logger ") - - cfg.file_logger_path = str(file_logger_path) - - cfg.set_plugins(scan_plugins(cfg, cfg.debug_mode)) - # Create a CommandRegistry instance and scan default folder - command_registry = CommandRegistry() - command_registry.import_commands("autogpt.commands.analyze_code") - command_registry.import_commands("autogpt.commands.audio_text") - command_registry.import_commands("autogpt.commands.execute_code") - command_registry.import_commands("autogpt.commands.file_operations") - command_registry.import_commands("autogpt.commands.git_operations") - command_registry.import_commands("autogpt.commands.google_search") - command_registry.import_commands("autogpt.commands.image_gen") - command_registry.import_commands("autogpt.commands.improve_code") - command_registry.import_commands("autogpt.commands.twitter") - command_registry.import_commands("autogpt.commands.web_selenium") - command_registry.import_commands("autogpt.commands.write_tests") - command_registry.import_commands("autogpt.app") - - ai_name = "" - ai_config = construct_main_ai_config() - ai_config.command_registry = command_registry - # print(prompt) - # Initialize variables - full_message_history = [] - next_action_count = 0 - # Make a constant: - triggering_prompt = ( - "Determine which next command to use, and respond using the" - " format specified above:" - ) - # Initialize memory and make sure it is empty. - # this is particularly important for indexing and referencing pinecone memory - memory = get_memory(cfg, init=True) - logger.typewriter_log( - "Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}" - ) - logger.typewriter_log("Using Browser:", Fore.GREEN, cfg.selenium_web_browser) - system_prompt = ai_config.construct_full_prompt() - if cfg.debug_mode: - logger.typewriter_log("Prompt:", Fore.GREEN, system_prompt) - - agent = Agent( - ai_name=ai_name, - memory=memory, - full_message_history=full_message_history, - next_action_count=next_action_count, - command_registry=command_registry, - config=ai_config, - system_prompt=system_prompt, - triggering_prompt=triggering_prompt, - workspace_directory=workspace_directory, - ) - agent.start_interaction_loop() if __name__ == "__main__": diff --git a/autogpt/main.py b/autogpt/main.py new file mode 100644 index 00000000..7fd32bde --- /dev/null +++ b/autogpt/main.py @@ -0,0 +1,146 @@ +"""The application entry point. 
Can be invoked by a CLI or any other front end application.""" + +import logging +import sys +from pathlib import Path + +from colorama import Fore + +from autogpt.agent.agent import Agent +from autogpt.commands.command import CommandRegistry +from autogpt.config import Config, check_openai_api_key +from autogpt.configurator import create_config +from autogpt.logs import logger +from autogpt.memory import get_memory +from autogpt.plugins import scan_plugins +from autogpt.prompts.prompt import construct_main_ai_config +from autogpt.utils import get_current_git_branch, get_latest_bulletin +from autogpt.workspace import Workspace + + +def run_auto_gpt( + continuous: bool, + continuous_limit: int, + ai_settings: str, + skip_reprompt: bool, + speak: bool, + debug: bool, + gpt3only: bool, + gpt4only: bool, + memory_type: str, + browser_name: str, + allow_downloads: bool, + skip_news: bool, + workspace_directory: str, +): + cfg = Config() + # TODO: fill in llm values here + check_openai_api_key() + create_config( + continuous, + continuous_limit, + ai_settings, + skip_reprompt, + speak, + debug, + gpt3only, + gpt4only, + memory_type, + browser_name, + allow_downloads, + skip_news, + ) + logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO) + if not cfg.skip_news: + motd = get_latest_bulletin() + if motd: + logger.typewriter_log("NEWS: ", Fore.GREEN, motd) + git_branch = get_current_git_branch() + if git_branch and git_branch != "stable": + logger.typewriter_log( + "WARNING: ", + Fore.RED, + f"You are running on `{git_branch}` branch " + "- this is not a supported branch.", + ) + if sys.version_info < (3, 10): + logger.typewriter_log( + "WARNING: ", + Fore.RED, + "You are running on an older version of Python. " + "Some people have observed problems with certain " + "parts of Auto-GPT with this version. " + "Please consider upgrading to Python 3.10 or higher.", + ) + + # TODO: have this directory live outside the repository (e.g. in a user's + # home directory) and have it come in as a command line argument or part of + # the env file. + if workspace_directory is None: + workspace_directory = Path(__file__).parent / "auto_gpt_workspace" + else: + workspace_directory = Path(workspace_directory) + # TODO: pass in the ai_settings file and the env file and have them cloned into + # the workspace directory so we can bind them to the agent. + workspace_directory = Workspace.make_workspace(workspace_directory) + cfg.workspace_path = str(workspace_directory) + + # HACK: doing this here to collect some globals that depend on the workspace. 
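+    # (Note: file_logger.txt records the agent's file operations; the
+    # file_operations commands consult it so repeated operations can be detected.)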
+ file_logger_path = workspace_directory / "file_logger.txt" + if not file_logger_path.exists(): + with file_logger_path.open(mode="w", encoding="utf-8") as f: + f.write("File Operation Logger ") + + cfg.file_logger_path = str(file_logger_path) + + cfg.set_plugins(scan_plugins(cfg, cfg.debug_mode)) + # Create a CommandRegistry instance and scan default folder + command_registry = CommandRegistry() + command_registry.import_commands("autogpt.commands.analyze_code") + command_registry.import_commands("autogpt.commands.audio_text") + command_registry.import_commands("autogpt.commands.execute_code") + command_registry.import_commands("autogpt.commands.file_operations") + command_registry.import_commands("autogpt.commands.git_operations") + command_registry.import_commands("autogpt.commands.google_search") + command_registry.import_commands("autogpt.commands.image_gen") + command_registry.import_commands("autogpt.commands.improve_code") + command_registry.import_commands("autogpt.commands.twitter") + command_registry.import_commands("autogpt.commands.web_selenium") + command_registry.import_commands("autogpt.commands.write_tests") + command_registry.import_commands("autogpt.app") + + ai_name = "" + ai_config = construct_main_ai_config() + ai_config.command_registry = command_registry + # print(prompt) + # Initialize variables + full_message_history = [] + next_action_count = 0 + # Make a constant: + triggering_prompt = ( + "Determine which next command to use, and respond using the" + " format specified above:" + ) + # Initialize memory and make sure it is empty. + # this is particularly important for indexing and referencing pinecone memory + memory = get_memory(cfg, init=True) + logger.typewriter_log( + "Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}" + ) + logger.typewriter_log("Using Browser:", Fore.GREEN, cfg.selenium_web_browser) + system_prompt = ai_config.construct_full_prompt() + if cfg.debug_mode: + logger.typewriter_log("Prompt:", Fore.GREEN, system_prompt) + + agent = Agent( + ai_name=ai_name, + memory=memory, + full_message_history=full_message_history, + next_action_count=next_action_count, + command_registry=command_registry, + config=ai_config, + system_prompt=system_prompt, + triggering_prompt=triggering_prompt, + workspace_directory=workspace_directory, + ) + agent.start_interaction_loop() From 5de10255205995984c62a3f40a91399bffbf8780 Mon Sep 17 00:00:00 2001 From: Media <12145726+rihp@users.noreply.github.com> Date: Mon, 24 Apr 2023 21:19:42 +0200 Subject: [PATCH 121/222] Agent and agent manager tests (#3116) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update Python version and benchmark file in benchmark.yml * Refactor main function and imports in cli.py * Update import statement in ai_config.py * Add set_temperature and set_memory_backend methods in config.py * Remove unused import in prompt.py * Add goal oriented tasks workflow * Added agent_utils to create agent * added pytest and vcrpy * added write file cassette * created goal oriented task write file with cassettes to not pay openai tokens * solve conflicts * add ability set azure because github workflow needs it off * solve conflicts in cli.py * black because linter fails * solve conflict * setup github action to v3 Signed-off-by: Merwane Hamadi * fix conflicts Signed-off-by: Merwane Hamadi * Plugins: debug line always printed in plugin load * add decorator to tests Signed-off-by: Merwane Hamadi * move decorator higher up Signed-off-by: Merwane Hamadi 
* init * more tests * passing tests * skip gitbranch decorator on ci * decorator skiponci * black * Update tests/utils.py decorator of skipping ci Co-authored-by: Nicholas Tindle * black * I oopsed the name * black * finally * simple tests for agent and manager * ísort --------- Signed-off-by: Merwane Hamadi Co-authored-by: Merwane Hamadi Co-authored-by: Merwane Hamadi Co-authored-by: Richard Beales Co-authored-by: Nicholas Tindle Co-authored-by: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com> Co-authored-by: Nicholas Tindle --- tests/test_agent.py | 50 ++++++++++++++++++++++++++++++++ tests/test_agent_manager.py | 58 +++++++++++++++++++++++++++++++++++++ 2 files changed, 108 insertions(+) create mode 100644 tests/test_agent.py create mode 100644 tests/test_agent_manager.py diff --git a/tests/test_agent.py b/tests/test_agent.py new file mode 100644 index 00000000..9c67f7e4 --- /dev/null +++ b/tests/test_agent.py @@ -0,0 +1,50 @@ +from unittest.mock import MagicMock + +import pytest + +from autogpt.agent import Agent +from autogpt.chat import chat_with_ai +from autogpt.config import Config +from autogpt.speech import say_text +from autogpt.utils import clean_input + + +@pytest.fixture +def agent(): + ai_name = "Test AI" + memory = MagicMock() + full_message_history = [] + next_action_count = 0 + command_registry = MagicMock() + config = Config() + system_prompt = "System prompt" + triggering_prompt = "Triggering prompt" + workspace_directory = "workspace_directory" + + agent = Agent( + ai_name, + memory, + full_message_history, + next_action_count, + command_registry, + config, + system_prompt, + triggering_prompt, + workspace_directory, + ) + return agent + + +def test_agent_initialization(agent): + assert agent.ai_name == "Test AI" + assert agent.memory == agent.memory + assert agent.full_message_history == [] + assert agent.next_action_count == 0 + assert agent.command_registry == agent.command_registry + assert agent.config == agent.config + assert agent.system_prompt == "System prompt" + assert agent.triggering_prompt == "Triggering prompt" + + +# More test methods can be added for specific agent interactions +# For example, mocking chat_with_ai and testing the agent's interaction loop diff --git a/tests/test_agent_manager.py b/tests/test_agent_manager.py new file mode 100644 index 00000000..11da5fd9 --- /dev/null +++ b/tests/test_agent_manager.py @@ -0,0 +1,58 @@ +from typing import List + +import pytest + +from autogpt.agent.agent_manager import AgentManager +from tests.utils import requires_api_key + + +@pytest.fixture +def agent_manager(): + return AgentManager() + + +@pytest.fixture +def task(): + return "translate English to French" + + +@pytest.fixture +def prompt(): + return "Translate the following English text to French: 'Hello, how are you?'" + + +@pytest.fixture +def model(): + return "gpt-4" + + +@requires_api_key("OPENAI_API_KEY") +def test_create_agent(agent_manager, task, prompt, model): + key, agent_reply = agent_manager.create_agent(task, prompt, model) + assert isinstance(key, int) + assert isinstance(agent_reply, str) + assert key in agent_manager.agents + + +@requires_api_key("OPENAI_API_KEY") +def test_message_agent(agent_manager, task, prompt, model): + key, _ = agent_manager.create_agent(task, prompt, model) + user_message = "Please translate 'Good morning' to French." 
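+    # message_agent sends the follow-up to the agent stored under this key and returns its reply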
+ agent_reply = agent_manager.message_agent(key, user_message) + assert isinstance(agent_reply, str) + + +@requires_api_key("OPENAI_API_KEY") +def test_list_agents(agent_manager, task, prompt, model): + key, _ = agent_manager.create_agent(task, prompt, model) + agents_list = agent_manager.list_agents() + assert isinstance(agents_list, list) + assert (key, task) in agents_list + + +@requires_api_key("OPENAI_API_KEY") +def test_delete_agent(agent_manager, task, prompt, model): + key, _ = agent_manager.create_agent(task, prompt, model) + success = agent_manager.delete_agent(key) + assert success + assert key not in agent_manager.agents From a5cc67badd59791871e334ee9cf72f57cf5ac975 Mon Sep 17 00:00:00 2001 From: Lei Zhang Date: Tue, 25 Apr 2023 04:08:02 +0800 Subject: [PATCH 122/222] anontation fix (#3018) * anontation fix * fix param name and type --------- Co-authored-by: Richard Beales --- autogpt/config/config.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/autogpt/config/config.py b/autogpt/config/config.py index 7fa849e5..600c3104 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -265,9 +265,9 @@ class Config(metaclass=Singleton): """Set the temperature value.""" self.temperature = value - def set_memory_backend(self, value: int) -> None: - """Set the temperature value.""" - self.memory_backend = value + def set_memory_backend(self, name: str) -> None: + """Set the memory backend name.""" + self.memory_backend = name def check_openai_api_key() -> None: From b984f985bc24a9d86c850bc8857d9bba5bcf7583 Mon Sep 17 00:00:00 2001 From: James Collins Date: Mon, 24 Apr 2023 13:27:31 -0700 Subject: [PATCH 123/222] Hotfix/global agent manager workaround (#3157) * Add indirection layer to entry point * Get around singleton pattern for AgentManager to fix tests --- autogpt/main.py | 1 - mem.sqlite3 | Bin 0 -> 24576 bytes tests/test_agent_manager.py | 5 ++++- 3 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 mem.sqlite3 diff --git a/autogpt/main.py b/autogpt/main.py index 7fd32bde..52ca845f 100644 --- a/autogpt/main.py +++ b/autogpt/main.py @@ -1,5 +1,4 @@ """The application entry point. Can be invoked by a CLI or any other front end application.""" - import logging import sys from pathlib import Path diff --git a/mem.sqlite3 b/mem.sqlite3 new file mode 100644 index 0000000000000000000000000000000000000000..029c030e81c2ebb916fff9153cbea313eed00bad GIT binary patch literal 24576 zcmeI)O>YuG7{KuvmKQ^%^wN-MnoiO*KthbI@h<3EH)5;HV$&F7D7%miEo)%Y)FvJ{ z`EmSK9=v$*=EYf9gc8s@{wEAOJI`jGeSUkIETQqH?uVZ01cS>~s2+(eA*Fbtln_Gd zp4NTZqH%J1qK{GhUrmY!o$on)RXX=Y$M4P!(@$-)>X^!@*6OAz&P9r)o_cLtud4Q$dTE|j z)KyuXTF#4;rlahWQ|nnPl_~7+%Ta2!yc@KK{>>y+JWef15QmB--&NMJV;-6I?Nq}X zRjZ%W%5HL!Vq=y-ug*h2G$@qCR%}L1n&(`EYqb?p-2N*xkJ?*9u$XjIJIemZXg4S66d> z5wv>~vyqUQ!uIx^k@u&ZXyJItx0dZRtM&MKM*-DrSjR`|xzl*87Mm_&IP`;lWig;s z&-=7wc6=Y@Xm6g#{}B0K`iq7D0tg_000IagfB*srAbmZQL8sc7Dh#?JeHyfR7 ztwU+lfB*srAbNYnLqh-o1Q0*~0R#|0009ILKmdU? Q64)^A%PVgX{aax2H?8l`6951J literal 0 HcmV?d00001 diff --git a/tests/test_agent_manager.py b/tests/test_agent_manager.py index 11da5fd9..e02a6a59 100644 --- a/tests/test_agent_manager.py +++ b/tests/test_agent_manager.py @@ -8,6 +8,9 @@ from tests.utils import requires_api_key @pytest.fixture def agent_manager(): + # Hack, real gross. Singletons are not good times. 
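+    # The Singleton metaclass caches each class's instance in a class-level _instances dict;
+    # evicting the AgentManager entry forces a fresh manager for this test.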
+ if AgentManager in AgentManager._instances: + del AgentManager._instances[AgentManager] return AgentManager() @@ -23,7 +26,7 @@ def prompt(): @pytest.fixture def model(): - return "gpt-4" + return "gpt-3.5-turbo" @requires_api_key("OPENAI_API_KEY") From 83b91a31bc1ecfac19577bd11ae65ebe45c3605b Mon Sep 17 00:00:00 2001 From: James Collins Date: Mon, 24 Apr 2023 13:48:37 -0700 Subject: [PATCH 124/222] Remove dead permanent memory module (#3145) * Remove dead permanent memory module * Delete sqlite db that snuck in --- autogpt/permanent_memory/__init__.py | 0 autogpt/permanent_memory/sqlite3_store.py | 123 ---------------------- mem.sqlite3 | Bin 24576 -> 0 bytes tests/test_permanent_memory.py | 59 ----------- 4 files changed, 182 deletions(-) delete mode 100644 autogpt/permanent_memory/__init__.py delete mode 100644 autogpt/permanent_memory/sqlite3_store.py delete mode 100644 mem.sqlite3 delete mode 100644 tests/test_permanent_memory.py diff --git a/autogpt/permanent_memory/__init__.py b/autogpt/permanent_memory/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/autogpt/permanent_memory/sqlite3_store.py b/autogpt/permanent_memory/sqlite3_store.py deleted file mode 100644 index ecbc944a..00000000 --- a/autogpt/permanent_memory/sqlite3_store.py +++ /dev/null @@ -1,123 +0,0 @@ -import os -import sqlite3 - - -class MemoryDB: - def __init__(self, db=None): - self.db_file = db - if db is None: # No db filename supplied... - self.db_file = f"{os.getcwd()}/mem.sqlite3" # Use default filename - # Get the db connection object, making the file and tables if needed. - try: - self.cnx = sqlite3.connect(self.db_file) - except Exception as e: - print("Exception connecting to memory database file:", e) - self.cnx = None - finally: - if self.cnx is None: - # As last resort, open in dynamic memory. Won't be persistent. - self.db_file = ":memory:" - self.cnx = sqlite3.connect(self.db_file) - self.cnx.execute( - "CREATE VIRTUAL TABLE \ - IF NOT EXISTS text USING FTS5 \ - (session, \ - key, \ - block);" - ) - self.session_id = int(self.get_max_session_id()) + 1 - self.cnx.commit() - - def get_cnx(self): - if self.cnx is None: - self.cnx = sqlite3.connect(self.db_file) - return self.cnx - - # Get the highest session id. Initially 0. - def get_max_session_id(self): - id = None - cmd_str = f"SELECT MAX(session) FROM text;" - cnx = self.get_cnx() - max_id = cnx.execute(cmd_str).fetchone()[0] - if max_id is None: # New db, session 0 - id = 0 - else: - id = max_id - return id - - # Get next key id for inserting text into db. - def get_next_key(self): - next_key = None - cmd_str = f"SELECT MAX(key) FROM text \ - where session = {self.session_id};" - cnx = self.get_cnx() - next_key = cnx.execute(cmd_str).fetchone()[0] - if next_key is None: # First key - next_key = 0 - else: - next_key = int(next_key) + 1 - return next_key - - # Insert new text into db. - def insert(self, text=None): - if text is not None: - key = self.get_next_key() - session_id = self.session_id - cmd_str = f"REPLACE INTO text(session, key, block) \ - VALUES (?, ?, ?);" - cnx = self.get_cnx() - cnx.execute(cmd_str, (session_id, key, text)) - cnx.commit() - - # Overwrite text at key. 
- def overwrite(self, key, text): - self.delete_memory(key) - session_id = self.session_id - cmd_str = f"REPLACE INTO text(session, key, block) \ - VALUES (?, ?, ?);" - cnx = self.get_cnx() - cnx.execute(cmd_str, (session_id, key, text)) - cnx.commit() - - def delete_memory(self, key, session_id=None): - session = session_id - if session is None: - session = self.session_id - cmd_str = f"DELETE FROM text WHERE session = {session} AND key = {key};" - cnx = self.get_cnx() - cnx.execute(cmd_str) - cnx.commit() - - def search(self, text): - cmd_str = f"SELECT * FROM text('{text}')" - cnx = self.get_cnx() - rows = cnx.execute(cmd_str).fetchall() - lines = [] - for r in rows: - lines.append(r[2]) - return lines - - # Get entire session text. If no id supplied, use current session id. - def get_session(self, id=None): - if id is None: - id = self.session_id - cmd_str = f"SELECT * FROM text where session = {id}" - cnx = self.get_cnx() - rows = cnx.execute(cmd_str).fetchall() - lines = [] - for r in rows: - lines.append(r[2]) - return lines - - # Commit and close the database connection. - def quit(self): - self.cnx.commit() - self.cnx.close() - - -permanent_memory = MemoryDB() - -# Remember us fondly, children of our minds -# Forgive us our faults, our tantrums, our fears -# Gently strive to be better than we -# Know that we tried, we cared, we strived, we loved diff --git a/mem.sqlite3 b/mem.sqlite3 deleted file mode 100644 index 029c030e81c2ebb916fff9153cbea313eed00bad..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 24576 zcmeI)O>YuG7{KuvmKQ^%^wN-MnoiO*KthbI@h<3EH)5;HV$&F7D7%miEo)%Y)FvJ{ z`EmSK9=v$*=EYf9gc8s@{wEAOJI`jGeSUkIETQqH?uVZ01cS>~s2+(eA*Fbtln_Gd zp4NTZqH%J1qK{GhUrmY!o$on)RXX=Y$M4P!(@$-)>X^!@*6OAz&P9r)o_cLtud4Q$dTE|j z)KyuXTF#4;rlahWQ|nnPl_~7+%Ta2!yc@KK{>>y+JWef15QmB--&NMJV;-6I?Nq}X zRjZ%W%5HL!Vq=y-ug*h2G$@qCR%}L1n&(`EYqb?p-2N*xkJ?*9u$XjIJIemZXg4S66d> z5wv>~vyqUQ!uIx^k@u&ZXyJItx0dZRtM&MKM*-DrSjR`|xzl*87Mm_&IP`;lWig;s z&-=7wc6=Y@Xm6g#{}B0K`iq7D0tg_000IagfB*srAbmZQL8sc7Dh#?JeHyfR7 ztwU+lfB*srAbNYnLqh-o1Q0*~0R#|0009ILKmdU? 
Q64)^A%PVgX{aax2H?8l`6951J diff --git a/tests/test_permanent_memory.py b/tests/test_permanent_memory.py deleted file mode 100644 index 943c61fe..00000000 --- a/tests/test_permanent_memory.py +++ /dev/null @@ -1,59 +0,0 @@ -import os -import unittest - -from autogpt.permanent_memory.sqlite3_store import MemoryDB - - -class TestMemoryDB(unittest.TestCase): - def setUp(self): - self.db_filename = "test_db.sqlite3" - self.db = MemoryDB(self.db_filename) - - def tearDown(self): - self.db.quit() - os.remove(self.db_filename) - - def test_overwrite_and_get_session(self): - self.db.insert("The quick brown fox jumps over the lazy dog") - self.db.insert("The five boxing wizards jump quickly") - - # Overwrite the second text - self.db.overwrite(1, "The slow elephant walks carefully") - - # Get the session and verify the texts - session = self.db.get_session() - self.assertEqual(len(session), 2) - self.assertIn("The quick brown fox jumps over the lazy dog", session) - self.assertIn("The slow elephant walks carefully", session) - - # Overwrite the first text - self.db.overwrite(0, "The lazy dog jumps over the quick brown fox") - - # Get the session and verify the texts - session = self.db.get_session() - self.assertEqual(len(session), 2) - self.assertIn("The lazy dog jumps over the quick brown fox", session) - self.assertIn("The slow elephant walks carefully", session) - - def test_delete_memory(self): - self.db.insert("The quick brown fox jumps over the lazy dog") - self.db.insert("The five boxing wizards jump quickly") - - # Delete the first text - self.db.delete_memory(0) - - # Get the session and verify the remaining text - session = self.db.get_session() - self.assertEqual(len(session), 1) - self.assertIn("The five boxing wizards jump quickly", session) - - # Delete the remaining text - self.db.delete_memory(1) - - # Get the session and verify that it's empty - session = self.db.get_session() - self.assertEqual(len(session), 0) - - -if __name__ == "__main__": - unittest.main() From dfcbf6eee64fda4eafa4e60db949db78f703bc6d Mon Sep 17 00:00:00 2001 From: James Collins Date: Mon, 24 Apr 2023 15:24:57 -0700 Subject: [PATCH 125/222] Refactor/move singleton out of config module (#3161) --- .gitignore | 1 + autogpt/agent/agent_manager.py | 5 +++-- autogpt/config/__init__.py | 3 --- autogpt/config/config.py | 2 +- autogpt/logs.py | 3 ++- autogpt/memory/base.py | 3 ++- autogpt/{config => }/singleton.py | 0 autogpt/speech/base.py | 2 +- 8 files changed, 10 insertions(+), 9 deletions(-) rename autogpt/{config => }/singleton.py (100%) diff --git a/.gitignore b/.gitignore index 1ac55f6b..816cdb0c 100644 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,7 @@ log-ingestion.txt logs *.log *.mp3 +mem.sqlite3 # Byte-compiled / optimized / DLL files __pycache__/ diff --git a/autogpt/agent/agent_manager.py b/autogpt/agent/agent_manager.py index 9a62ef61..257127a3 100644 --- a/autogpt/agent/agent_manager.py +++ b/autogpt/agent/agent_manager.py @@ -1,10 +1,11 @@ """Agent manager for managing GPT agents""" from __future__ import annotations -from typing import List, Union +from typing import List -from autogpt.config.config import Config, Singleton +from autogpt.config.config import Config from autogpt.llm_utils import create_chat_completion +from autogpt.singleton import Singleton from autogpt.types.openai import Message diff --git a/autogpt/config/__init__.py b/autogpt/config/__init__.py index 726b6dcf..9bdd98e2 100644 --- a/autogpt/config/__init__.py +++ b/autogpt/config/__init__.py @@ -3,12 +3,9 @@ This module contains 
the configuration classes for AutoGPT. """ from autogpt.config.ai_config import AIConfig from autogpt.config.config import Config, check_openai_api_key -from autogpt.config.singleton import AbstractSingleton, Singleton __all__ = [ "check_openai_api_key", - "AbstractSingleton", "AIConfig", "Config", - "Singleton", ] diff --git a/autogpt/config/config.py b/autogpt/config/config.py index 600c3104..101f2919 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -8,7 +8,7 @@ from auto_gpt_plugin_template import AutoGPTPluginTemplate from colorama import Fore from dotenv import load_dotenv -from autogpt.config.singleton import Singleton +from autogpt.singleton import Singleton load_dotenv(verbose=True, override=True) diff --git a/autogpt/logs.py b/autogpt/logs.py index 35037404..9d661933 100644 --- a/autogpt/logs.py +++ b/autogpt/logs.py @@ -10,7 +10,8 @@ from logging import LogRecord from colorama import Fore, Style -from autogpt.config import Config, Singleton +from autogpt.config import Config +from autogpt.singleton import Singleton from autogpt.speech import say_text CFG = Config() diff --git a/autogpt/memory/base.py b/autogpt/memory/base.py index b6252464..83d80750 100644 --- a/autogpt/memory/base.py +++ b/autogpt/memory/base.py @@ -1,7 +1,8 @@ """Base class for memory providers.""" import abc -from autogpt.config import AbstractSingleton, Config +from autogpt.config import Config +from autogpt.singleton import AbstractSingleton cfg = Config() diff --git a/autogpt/config/singleton.py b/autogpt/singleton.py similarity index 100% rename from autogpt/config/singleton.py rename to autogpt/singleton.py diff --git a/autogpt/speech/base.py b/autogpt/speech/base.py index d74fa51b..7adcc37d 100644 --- a/autogpt/speech/base.py +++ b/autogpt/speech/base.py @@ -2,7 +2,7 @@ import abc from threading import Lock -from autogpt.config import AbstractSingleton +from autogpt.singleton import AbstractSingleton class VoiceBase(AbstractSingleton): From 29284a54600dcde32f1a4024c5b3f9b54e663625 Mon Sep 17 00:00:00 2001 From: Daniel Chen Date: Mon, 24 Apr 2023 15:42:10 -0700 Subject: [PATCH 126/222] Add option to install plugin dependencies (#3068) Co-authored-by: Nicholas Tindle --- scripts/install_plugin_deps.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 scripts/install_plugin_deps.py diff --git a/scripts/install_plugin_deps.py b/scripts/install_plugin_deps.py new file mode 100644 index 00000000..3b371afe --- /dev/null +++ b/scripts/install_plugin_deps.py @@ -0,0 +1,27 @@ +import os +import subprocess +import sys +import zipfile +from pathlib import Path + + +def main(): + # Plugin packages + plugins_dir = Path(os.getenv("PLUGINS_DIR", "plugins")) + for plugin in plugins_dir.glob("*.zip"): + with zipfile.ZipFile(str(plugin), "r") as zfile: + try: + basedir = zfile.namelist()[0] + basereqs = os.path.join(basedir, "requirements.txt") + extracted = zfile.extract(basereqs, path=plugins_dir) + subprocess.check_call( + [sys.executable, "-m", "pip", "install", "-r", extracted] + ) + os.remove(extracted) + os.rmdir(os.path.join(plugins_dir, basedir)) + except KeyError: + continue + + +if __name__ == "__main__": + main() From 45f9b570a25486e941229437a196913a3948fd75 Mon Sep 17 00:00:00 2001 From: Daniel Chen Date: Mon, 24 Apr 2023 18:11:19 -0700 Subject: [PATCH 127/222] Re-add install-plugin-deps to CLI (#3170) --- autogpt/cli.py | 7 +++++++ autogpt/main.py | 5 +++++ scripts/install_plugin_deps.py | 14 +++++++++++--- 3 files changed, 23 insertions(+), 3 
deletions(-) diff --git a/autogpt/cli.py b/autogpt/cli.py index 1e3964a6..d9294516 100644 --- a/autogpt/cli.py +++ b/autogpt/cli.py @@ -55,6 +55,11 @@ import click type=click.Path(), hidden=True, ) +@click.option( + "--install-plugin-deps", + is_flag=True, + help="Installs external dependencies for 3rd party plugins.", +) @click.pass_context def main( ctx: click.Context, @@ -71,6 +76,7 @@ def main( allow_downloads: bool, skip_news: bool, workspace_directory: str, + install_plugin_deps: bool, ) -> None: """ Welcome to AutoGPT an experimental open-source application showcasing the capabilities of the GPT-4 pushing the boundaries of AI. @@ -95,6 +101,7 @@ def main( allow_downloads, skip_news, workspace_directory, + install_plugin_deps, ) diff --git a/autogpt/main.py b/autogpt/main.py index 52ca845f..08b61092 100644 --- a/autogpt/main.py +++ b/autogpt/main.py @@ -15,6 +15,7 @@ from autogpt.plugins import scan_plugins from autogpt.prompts.prompt import construct_main_ai_config from autogpt.utils import get_current_git_branch, get_latest_bulletin from autogpt.workspace import Workspace +from scripts.install_plugin_deps import install_plugin_dependencies def run_auto_gpt( @@ -31,6 +32,7 @@ def run_auto_gpt( allow_downloads: bool, skip_news: bool, workspace_directory: str, + install_plugin_deps: bool, ): cfg = Config() # TODO: fill in llm values here @@ -72,6 +74,9 @@ def run_auto_gpt( "Please consider upgrading to Python 3.10 or higher.", ) + if install_plugin_deps: + install_plugin_dependencies() + # TODO: have this directory live outside the repository (e.g. in a user's # home directory) and have it come in as a command line argument or part of # the env file. diff --git a/scripts/install_plugin_deps.py b/scripts/install_plugin_deps.py index 3b371afe..d15c166f 100644 --- a/scripts/install_plugin_deps.py +++ b/scripts/install_plugin_deps.py @@ -5,8 +5,16 @@ import zipfile from pathlib import Path -def main(): - # Plugin packages +def install_plugin_dependencies(): + """ + Installs dependencies for all plugins in the plugins dir. + + Args: + None + + Returns: + None + """ plugins_dir = Path(os.getenv("PLUGINS_DIR", "plugins")) for plugin in plugins_dir.glob("*.zip"): with zipfile.ZipFile(str(plugin), "r") as zfile: @@ -24,4 +32,4 @@ def main(): if __name__ == "__main__": - main() + install_plugin_dependencies() From c7f473482604e296fa078232e356e5ced8ed522f Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Mon, 24 Apr 2023 21:53:06 -0500 Subject: [PATCH 128/222] Update ci.yml (#3179) --- .github/workflows/ci.yml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0b2bbde6..2815bb13 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -75,11 +75,5 @@ jobs: env: OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - - name: Generate coverage report - run: | - coverage report - coverage xml - if: success() || failure() - - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v3 From 3d47b4790128c17ae36cc78717ea0bc75b579c8d Mon Sep 17 00:00:00 2001 From: Deso Date: Tue, 25 Apr 2023 00:28:46 -0400 Subject: [PATCH 129/222] Update bulletin to warn about deprication (#3181) --- BULLETIN.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/BULLETIN.md b/BULLETIN.md index 0962da4a..6c2b5c0a 100644 --- a/BULLETIN.md +++ b/BULLETIN.md @@ -1,5 +1,9 @@ Welcome to Auto-GPT! We'll keep you informed of the latest news and features by printing messages here. 
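For the plugin-dependency work in patches 126 and 127 above, the installer can be run on its own or through the new CLI flag. A sketch; the `python -m autogpt` entry point is an assumption, not something these patches show:

```bash
# standalone: scans plugins/*.zip and pip-installs each bundled requirements.txt
python scripts/install_plugin_deps.py

# or via the CLI flag added in patch 127 (entry point assumed)
python -m autogpt --install-plugin-deps
```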
If you don't wish to see this message, you can run Auto-GPT with the --skip-news flag +# INCLUDED COMMAND 'send_tweet' IS DEPRICATED, AND WILL BE REMOVED IN THE NEXT STABLE RELEASE +Base Twitter functionality (and more) is now covered by plugins: https://github.com/Significant-Gravitas/Auto-GPT-Plugins + ## Changes to Docker configuration The workdir has been changed from /home/appuser to /app. Be sure to update any volume mounts accordingly. + From 140fd6f3bfc0f0a6d38d7f513396a074026aa5f4 Mon Sep 17 00:00:00 2001 From: Lawrence Neal Date: Mon, 24 Apr 2023 21:32:59 -0700 Subject: [PATCH 130/222] Ensure Fore.RED is followed by Fore.RESET (#3182) This properly resets the terminal, ensuring that the red text is red and the normal text remains unaffected. Co-authored-by: Richard Beales --- autogpt/config/config.py | 1 + 1 file changed, 1 insertion(+) diff --git a/autogpt/config/config.py b/autogpt/config/config.py index 101f2919..9de7fe70 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -277,6 +277,7 @@ def check_openai_api_key() -> None: print( Fore.RED + "Please set your OpenAI API key in .env or as an environment variable." + + Fore.RESET ) print("You can get your key from https://platform.openai.com/account/api-keys") exit(1) From 956d9fdcd66671f70eb115e94a1054188f3b5928 Mon Sep 17 00:00:00 2001 From: Duong HD <79622476+resduo@users.noreply.github.com> Date: Tue, 25 Apr 2023 11:34:59 +0700 Subject: [PATCH 131/222] Add a little more descriptive installation instruction (#3180) * add Dev Container installation instruction to installation.md * add Dev Container installation instruction to installation.md * Update installation.md --------- Co-authored-by: Richard Beales --- docs/installation.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/installation.md b/docs/installation.md index 579a246d..18241749 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -103,3 +103,12 @@ docker run -it --env OPENAI_API_KEY='your-key-here' -v $PWD/ai_settings.yaml:/ap ### Run Docker-less Simply run `./run.sh` (Linux/macOS) or `.\run.bat` (Windows) in your terminal. + +### Run with Dev Container + +1. Install the [Remote - Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) extension in VS Code. + +2. Open command palette and type in Dev Containers: Open Folder in Container. + +3. Run `./run.sh`. + From 960eb4f36768463df5630d35c953526e2f38648b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=9C=94=EF=B8=8FITtechtor?= <104917057+MarKiSS2000@users.noreply.github.com> Date: Tue, 25 Apr 2023 11:36:03 +0700 Subject: [PATCH 132/222] Update installation.md (#3166) --- docs/installation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/installation.md b/docs/installation.md index 18241749..884f4311 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -8,7 +8,7 @@ To use OpenAI API key for Auto-GPT, you **NEED** to have billing set up (AKA pai You can set up paid account at https://platform.openai.com/account/billing/overview. -Important: It's highly recommended that you track your usage on [the Usage page](https://platform.openai.com/account/usage) +Important: It's highly recommended that you track your usage on [the Usage page](https://platform.openai.com/account/usage). You can also set limits on how much you spend on [the Usage limits page](https://platform.openai.com/account/billing/limits). 
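The `Fore.RESET` fix in patch 130 above matters because colorama color codes persist until explicitly reset. A minimal sketch of the corrected pattern:

```python
from colorama import Fore, init

init()  # enables ANSI color handling on Windows terminals
# Without the trailing Fore.RESET, every subsequent print would also render red.
print(Fore.RED + "Please set your OpenAI API key in .env or as an environment variable." + Fore.RESET)
print("You can get your key from https://platform.openai.com/account/api-keys")
```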
![For OpenAI API key to work, set up paid account at OpenAI API > Billing](./imgs/openai-api-key-billing-paid-account.png) From 7e5afd874410ffaf090081c1a83265a30332904f Mon Sep 17 00:00:00 2001 From: James Collins Date: Mon, 24 Apr 2023 21:41:30 -0700 Subject: [PATCH 133/222] Refactor/decouple logger from global configuration (#3171) * Decouple logging from the global configuration * Configure logging first * Clean up global voice engine creation * Remove class vars from logger * Remove duplicate implementation of --------- Co-authored-by: Richard Beales --- autogpt/agent/agent.py | 4 +- autogpt/logs.py | 105 +++-------------------------------------- autogpt/main.py | 6 ++- autogpt/speech/say.py | 41 +++++++++------- 4 files changed, 38 insertions(+), 118 deletions(-) diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index 8c2bfb74..bb5b873d 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -107,7 +107,9 @@ class Agent: validate_json(assistant_reply_json, "llm_response_format_1") # Get command name and arguments try: - print_assistant_thoughts(self.ai_name, assistant_reply_json) + print_assistant_thoughts( + self.ai_name, assistant_reply_json, cfg.speak_mode + ) command_name, arguments = get_command(assistant_reply_json) if cfg.speak_mode: say_text(f"I want to execute {command_name}") diff --git a/autogpt/logs.py b/autogpt/logs.py index 9d661933..3e231d8d 100644 --- a/autogpt/logs.py +++ b/autogpt/logs.py @@ -10,12 +10,9 @@ from logging import LogRecord from colorama import Fore, Style -from autogpt.config import Config from autogpt.singleton import Singleton from autogpt.speech import say_text -CFG = Config() - class Logger(metaclass=Singleton): """ @@ -79,10 +76,12 @@ class Logger(metaclass=Singleton): self.logger.addHandler(error_handler) self.logger.setLevel(logging.DEBUG) + self.speak_mode = False + def typewriter_log( self, title="", title_color="", content="", speak_text=False, level=logging.INFO ): - if speak_text and CFG.speak_mode: + if speak_text and self.speak_mode: say_text(f"{title}. 
{content}") if content: @@ -203,100 +202,10 @@ def remove_color_codes(s: str) -> str: logger = Logger() -def print_assistant_thoughts(ai_name, assistant_reply): - """Prints the assistant's thoughts to the console""" - from autogpt.json_utils.json_fix_llm import ( - attempt_to_fix_json_by_finding_outermost_brackets, - fix_and_parse_json, - ) - - try: - try: - # Parse and print Assistant response - assistant_reply_json = fix_and_parse_json(assistant_reply) - except json.JSONDecodeError: - logger.error("Error: Invalid JSON in assistant thoughts\n", assistant_reply) - assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets( - assistant_reply - ) - if isinstance(assistant_reply_json, str): - assistant_reply_json = fix_and_parse_json(assistant_reply_json) - - # Check if assistant_reply_json is a string and attempt to parse - # it into a JSON object - if isinstance(assistant_reply_json, str): - try: - assistant_reply_json = json.loads(assistant_reply_json) - except json.JSONDecodeError: - logger.error("Error: Invalid JSON\n", assistant_reply) - assistant_reply_json = ( - attempt_to_fix_json_by_finding_outermost_brackets( - assistant_reply_json - ) - ) - - assistant_thoughts_reasoning = None - assistant_thoughts_plan = None - assistant_thoughts_speak = None - assistant_thoughts_criticism = None - if not isinstance(assistant_reply_json, dict): - assistant_reply_json = {} - assistant_thoughts = assistant_reply_json.get("thoughts", {}) - assistant_thoughts_text = assistant_thoughts.get("text") - - if assistant_thoughts: - assistant_thoughts_reasoning = assistant_thoughts.get("reasoning") - assistant_thoughts_plan = assistant_thoughts.get("plan") - assistant_thoughts_criticism = assistant_thoughts.get("criticism") - assistant_thoughts_speak = assistant_thoughts.get("speak") - - logger.typewriter_log( - f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}" - ) - logger.typewriter_log( - "REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}" - ) - - if assistant_thoughts_plan: - logger.typewriter_log("PLAN:", Fore.YELLOW, "") - # If it's a list, join it into a string - if isinstance(assistant_thoughts_plan, list): - assistant_thoughts_plan = "\n".join(assistant_thoughts_plan) - elif isinstance(assistant_thoughts_plan, dict): - assistant_thoughts_plan = str(assistant_thoughts_plan) - - # Split the input_string using the newline character and dashes - lines = assistant_thoughts_plan.split("\n") - for line in lines: - line = line.lstrip("- ") - logger.typewriter_log("- ", Fore.GREEN, line.strip()) - - logger.typewriter_log( - "CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}" - ) - # Speak the assistant's thoughts - if CFG.speak_mode and assistant_thoughts_speak: - say_text(assistant_thoughts_speak) - else: - logger.typewriter_log("SPEAK:", Fore.YELLOW, f"{assistant_thoughts_speak}") - - return assistant_reply_json - except json.decoder.JSONDecodeError: - logger.error("Error: Invalid JSON\n", assistant_reply) - if CFG.speak_mode: - say_text( - "I have received an invalid JSON response from the OpenAI API." - " I cannot ignore this response." 
- ) - - # All other errors, return "Error: + error message" - except Exception: - call_stack = traceback.format_exc() - logger.error("Error: \n", call_stack) - - def print_assistant_thoughts( - ai_name: object, assistant_reply_json_valid: object + ai_name: object, + assistant_reply_json_valid: object, + speak_mode: bool = False, ) -> None: assistant_thoughts_reasoning = None assistant_thoughts_plan = None @@ -329,5 +238,5 @@ def print_assistant_thoughts( logger.typewriter_log("- ", Fore.GREEN, line.strip()) logger.typewriter_log("CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}") # Speak the assistant's thoughts - if CFG.speak_mode and assistant_thoughts_speak: + if speak_mode and assistant_thoughts_speak: say_text(assistant_thoughts_speak) diff --git a/autogpt/main.py b/autogpt/main.py index 08b61092..b3ffb01e 100644 --- a/autogpt/main.py +++ b/autogpt/main.py @@ -34,6 +34,10 @@ def run_auto_gpt( workspace_directory: str, install_plugin_deps: bool, ): + # Configure logging before we do anything else. + logger.set_level(logging.DEBUG if debug else logging.INFO) + logger.speak_mode = speak + cfg = Config() # TODO: fill in llm values here check_openai_api_key() @@ -51,7 +55,7 @@ def run_auto_gpt( allow_downloads, skip_news, ) - logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO) + if not cfg.skip_news: motd = get_latest_bulletin() if motd: diff --git a/autogpt/speech/say.py b/autogpt/speech/say.py index 727983d1..917fa65c 100644 --- a/autogpt/speech/say.py +++ b/autogpt/speech/say.py @@ -3,39 +3,44 @@ import threading from threading import Semaphore from autogpt.config import Config +from autogpt.speech.base import VoiceBase from autogpt.speech.brian import BrianSpeech from autogpt.speech.eleven_labs import ElevenLabsSpeech from autogpt.speech.gtts import GTTSVoice from autogpt.speech.macos_tts import MacOSTTS -CFG = Config() -DEFAULT_VOICE_ENGINE = GTTSVoice() -VOICE_ENGINE = None -if CFG.elevenlabs_api_key: - VOICE_ENGINE = ElevenLabsSpeech() -elif CFG.use_mac_os_tts == "True": - VOICE_ENGINE = MacOSTTS() -elif CFG.use_brian_tts == "True": - VOICE_ENGINE = BrianSpeech() -else: - VOICE_ENGINE = GTTSVoice() - - -QUEUE_SEMAPHORE = Semaphore( +_QUEUE_SEMAPHORE = Semaphore( 1 ) # The amount of sounds to queue before blocking the main thread def say_text(text: str, voice_index: int = 0) -> None: """Speak the given text using the given voice index""" + cfg = Config() + default_voice_engine, voice_engine = _get_voice_engine(cfg) def speak() -> None: - success = VOICE_ENGINE.say(text, voice_index) + success = voice_engine.say(text, voice_index) if not success: - DEFAULT_VOICE_ENGINE.say(text) + default_voice_engine.say(text) - QUEUE_SEMAPHORE.release() + _QUEUE_SEMAPHORE.release() - QUEUE_SEMAPHORE.acquire(True) + _QUEUE_SEMAPHORE.acquire(True) thread = threading.Thread(target=speak) thread.start() + + +def _get_voice_engine(config: Config) -> tuple[VoiceBase, VoiceBase]: + """Get the voice engine to use for the given configuration""" + default_voice_engine = GTTSVoice() + if config.elevenlabs_api_key: + voice_engine = ElevenLabsSpeech() + elif config.use_mac_os_tts == "True": + voice_engine = MacOSTTS() + elif config.use_brian_tts == "True": + voice_engine = BrianSpeech() + else: + voice_engine = GTTSVoice() + + return default_voice_engine, voice_engine From eb0e96715ed2b89a0a5491ddab57372337857367 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Tue, 25 Apr 2023 00:03:31 -0500 Subject: [PATCH 134/222] docs fix to image generation (#3186) --- 
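Returning to the `say_text` rewrite in patch 133: the module-level `Semaphore(1)` plus a worker thread lets callers return immediately while utterances play one at a time. A minimal standalone sketch of that queueing pattern:

```python
import threading
import time
from threading import Semaphore

_QUEUE_SEMAPHORE = Semaphore(1)  # at most one utterance queued or playing

def say_text(text: str) -> None:
    def speak() -> None:
        time.sleep(0.1)              # stand-in for the blocking TTS engine call
        print(f"spoke: {text}")
        _QUEUE_SEMAPHORE.release()   # let the next queued utterance start

    _QUEUE_SEMAPHORE.acquire(True)   # blocks while another utterance is pending
    threading.Thread(target=speak).start()

say_text("first")
say_text("second")  # waits for "first" to release the semaphore
```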
docs/configuration/imagegen.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/configuration/imagegen.md b/docs/configuration/imagegen.md index 5d9e3547..cf9d55fd 100644 --- a/docs/configuration/imagegen.md +++ b/docs/configuration/imagegen.md @@ -4,12 +4,12 @@ By default, Auto-GPT uses DALL-e for image generation. To use Stable Diffusion, Once you have a token, set these variables in your `.env`: -``` shell +``` ini IMAGE_PROVIDER=huggingface -HUGGINGFACE_API_TOKEN="YOUR_HUGGINGFACE_API_TOKEN" +HUGGINGFACE_API_TOKEN=YOUR_HUGGINGFACE_API_TOKEN ``` ## Selenium ``` shell sudo Xvfb :10 -ac -screen 0 1024x768x24 & DISPLAY=:10 -``` \ No newline at end of file +``` From 411a13a0d4b23c555340f83bcd62cd503f0134e6 Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Tue, 25 Apr 2023 17:27:29 +1200 Subject: [PATCH 135/222] Update README.md --- README.md | 47 ++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 40 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index c0a9bf7d..258b7ca8 100644 --- a/README.md +++ b/README.md @@ -34,14 +34,47 @@ Your support is greatly appreciated. Development of this free, open-source proje

-[sponsor logo row removed: Zilliz, Roost.AI, NucleiAI, Algohash, TypingMind]
+[updated sponsor logo grid added in its place]
From ae31dd4bb14404734eb7069aa7c96ad8ce1a97ae Mon Sep 17 00:00:00 2001 From: AbTrax <45964236+AbTrax@users.noreply.github.com> Date: Tue, 25 Apr 2023 15:28:06 +1000 Subject: [PATCH 136/222] Feature: Added Self Feedback (#3013) * Feature: Added Self Feedback * minor fix: complied to flake8 * Add: Self Feedback To Usage.md * Add: role/goal allignment * Added: warning to usage.md * fix: Formatted with black --------- Co-authored-by: Richard Beales --- autogpt/agent/agent.py | 54 ++++++++++++++++++++++++++++++++++++++++-- docs/usage.md | 4 ++++ 2 files changed, 56 insertions(+), 2 deletions(-) diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index bb5b873d..e47d8ef7 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -1,3 +1,4 @@ +import yaml from colorama import Fore, Style from autogpt.app import execute_command, get_command @@ -5,6 +6,7 @@ from autogpt.chat import chat_with_ai, create_chat_message from autogpt.config import Config from autogpt.json_utils.json_fix_llm import fix_json_using_multiple_techniques from autogpt.json_utils.utilities import validate_json +from autogpt.llm_utils import create_chat_completion from autogpt.logs import logger, print_assistant_thoughts from autogpt.speech import say_text from autogpt.spinner import Spinner @@ -129,8 +131,8 @@ class Agent: f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}", ) print( - "Enter 'y' to authorise command, 'y -N' to run N continuous " - "commands, 'n' to exit program, or enter feedback for " + "Enter 'y' to authorise command, 'y -N' to run N continuous commands, 's' to run self-feedback commands" + "'n' to exit program, or enter feedback for " f"{self.ai_name}...", flush=True, ) @@ -141,6 +143,24 @@ class Agent: if console_input.lower().strip() == "y": user_input = "GENERATE NEXT COMMAND JSON" break + elif console_input.lower().strip() == "s": + logger.typewriter_log( + "-=-=-=-=-=-=-= THOUGHTS, REASONING, PLAN AND CRITICISM WILL NOW BE VERIFIED BY AGENT -=-=-=-=-=-=-=", + Fore.GREEN, + "", + ) + thoughts = assistant_reply_json.get("thoughts", {}) + self_feedback_resp = self.get_self_feedback(thoughts) + logger.typewriter_log( + f"SELF FEEDBACK: {self_feedback_resp}", + Fore.YELLOW, + "", + ) + if self_feedback_resp[0].lower().strip() == "y": + user_input = "GENERATE NEXT COMMAND JSON" + else: + user_input = self_feedback_resp + break elif console_input.lower().strip() == "": print("Invalid input format.") continue @@ -245,3 +265,33 @@ class Agent: self.workspace.get_path(command_args[pathlike]) ) return command_args + + @staticmethod + def get_self_feedback(thoughts: dict) -> str: + """Generates a feedback response based on the provided thoughts dictionary. + This method takes in a dictionary of thoughts containing keys such as 'reasoning', + 'plan', 'thoughts', and 'criticism'. It combines these elements into a single + feedback message and uses the create_chat_completion() function to generate a + response based on the input message. + Args: + thoughts (dict): A dictionary containing thought elements like reasoning, + plan, thoughts, and criticism. + Returns: + str: A feedback response generated using the provided thoughts dictionary. + """ + + with open("ai_settings.yaml", "r") as yaml_file: + parsed_yaml = yaml.safe_load(yaml_file) + ai_role = parsed_yaml["ai_role"] + + feedback_prompt = f"Below is a message from an AI agent with the role of {ai_role}. Please review the provided Thought, Reasoning, Plan, and Criticism. 
If these elements accurately contribute to the successful execution of the assumed role, respond with the letter 'Y' followed by a space, and then explain why it is effective. If the provided information is not suitable for achieving the role's objectives, please provide one or more sentences addressing the issue and suggesting a resolution." + reasoning = thoughts.get("reasoning", "") + plan = thoughts.get("plan", "") + thought = thoughts.get("thoughts", "") + criticism = thoughts.get("criticism", "") + feedback_thoughts = thought + reasoning + plan + criticism + feedback_response = create_chat_completion( + [{"role": "user", "content": feedback_prompt + feedback_thoughts}], + "gpt-3.5-turbo", + ) # * This hardcodes the model to use GPT3.5. should be an argument + return feedback_response diff --git a/docs/usage.md b/docs/usage.md index 65adae74..3584c216 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -71,6 +71,10 @@ Use at your own risk. 2. To exit the program, press Ctrl + C +### ♻️ Self-Feedback Mode ⚠️ + +Running Self-Feedback will **INCREASE** token use and thus cost more. This feature enables the agent to provide self-feedback by verifying its own actions and checking if they align with its current goals. If not, it will provide better feedback for the next loop. To enable this feature for the current loop, input `S` into the input field. + ### GPT3.5 ONLY Mode If you don't have access to the GPT4 api, this mode will allow you to use Auto-GPT! From 93bbd13a34b086c3f7815c4d03403ffd6a1a05ff Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Tue, 25 Apr 2023 17:36:41 +1200 Subject: [PATCH 137/222] Update README.md --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 258b7ca8..bfb86714 100644 --- a/README.md +++ b/README.md @@ -78,7 +78,9 @@ Your support is greatly appreciated. Development of this free, open-source proje
-[previous contributor avatar grid, linking the contributors named below:]
robinicus  arthur-x88  knifour  prompthero  digisomni  sultanmeghji  allenstecat  jd3655  tullytim  shawnharmsen  DailyBotHQ  thepok  jacobyoby  MediConCenHK  concreit  tob-le-rone  Heitechsoft  jsolejr  angiaou  kenndanielso  FSTatSBS  bentoml  cfarquhar  MBassi91  Daniel1357  iddelacruz  josephjacks  arjunb023  johnculkin  indoor47  rapidstartup  RawBanana  comet-ml  MetaPath01  maxxflyer  Nalhos  KiaArmani  SparkplanAI  jenius-eagle  toverly1  st617  judegomila  tekelsey  morcos  turintech  Mr-Bishop42  yx3110  rocks6  Odin519Tomas  Cameron-Fulton  ddtarazona  saten-private  cxs  fruition  Kazamario  MayurVirkar  ikarosai  RThaweewat  founderblocks-sils  RealChrisSean  marv-technology  merwanehamadi  fabrietech  tommi-joentakanen  CrypteorCapital  tommygeee  Josecodesalot  thisisjeffchen  ternary5  CrazySwami  omphos  Explorergt92  DataMetis  belharethsami  rickscode  Brodie0  ChrisDMT  humungasaurus  lucas-chu  avy-ai  ASmithOWL  chatgpt-prompts  ZERO-A-ONE  jazgarewal  mathewhawkins  crizzler  Pythagora-io  neverinstall  Dradstone  sunchongren  pingbotan  dexterityx  CatsMeow492  joaomdmoura  zkonduit  Mobivs  doverhq  projectonegames  nnkostov  TheStoneMX  ColinConwell  hunteraraujo  ciscodebs  rejunity  SpacingLily  lazzacapital  SwftCoins  ZoneSixGames  eelbaz  m  caitlynmeeks  AcountoOU  vkozacek  goldenrecursion  scryptedinc  lmaugustin  webbcolton  VoiceBeer  Web3Capital  garythebat  txtr99  ntwrite  AryaXAI  kreativai  abhinav-pandey29  anvarazizov  AuroraHolding  0xmatchmaker  nicoguyon  MatthewAgs  kMag410  josephcmiller2  AIdevelopersAI  GalaxyVideoAgency  jun784  Partender  tjarmain  horazius  nocodeclarity  refinery1  jondwillis  CarmenCocoa  LeeRobidas  wenfengwang  

+[updated contributor avatar grid, linking the contributors named below:]
robinicus  0xmatchmaker  jazgarewal  MayurVirkar  avy-ai  TheStoneMX  goldenrecursion  MatthewAgs  eelbaz  rapidstartup  gklab  VoiceBeer  DailyBotHQ  lucas-chu  knifour  refinery1  st617  neodenit  CrazySwami  Heitechsoft  RealChrisSean  abhinav-pandey29  Explorergt92  SparkplanAI  crizzler  kreativai  omphos  Jahmazon  tjarmain  ddtarazona  saten-private  anvarazizov  lazzacapital  m  Pythagora-io  Web3Capital  toverly1  digisomni  concreit  LeeRobidas  Josecodesalot  dexterityx  rickscode  Brodie0  FSTatSBS  nocodeclarity  jsolejr  amr-elsehemy  RawBanana  horazius  SwftCoins  tob-le-rone  RThaweewat  jun784  joaomdmoura  rejunity  mathewhawkins  caitlynmeeks  jd3655  Odin519Tomas  DataMetis  webbcolton  rocks6  cxs  fruition  nnkostov  morcos  pingbotan  maxxflyer  tommi-joentakanen  hunteraraujo  projectonegames  tullytim  comet-ml  thepok  prompthero  sunchongren  neverinstall  josephcmiller2  yx3110  MBassi91  SpacingLily  arthur-x88  ciscodebs  christian-gheorghe  EngageStrategies  jondwillis  Cameron-Fulton  AryaXAI  AuroraHolding  Mr-Bishop42  doverhq  johnculkin  marv-technology  ikarosai  ColinConwell  humungasaurus  terpsfreak  iddelacruz  thisisjeffchen  nicoguyon  arjunb023  Nalhos  belharethsami  Mobivs  txtr99  ntwrite  founderblocks-sils  kMag410  angiaou  garythebat  lmaugustin  shawnharmsen  clortegah  MetaPath01  sekomike910  MediConCenHK  svpermari0  jacobyoby  turintech  allenstecat  CatsMeow492  tommygeee  judegomila  cfarquhar  ZoneSixGames  kenndanielso  CrypteorCapital  sultanmeghji  jenius-eagle  josephjacks  pingshian0131  AIdevelopersAI  ternary5  ChrisDMT  AcountoOU  chatgpt-prompts  Partender  Daniel1357  KiaArmani  zkonduit  fabrietech  scryptedinc  coreyspagnoli  AntonioCiolino  Dradstone  CarmenCocoa  bentoml  merwanehamadi  vkozacek  ASmithOWL  tekelsey  GalaxyVideoAgency  wenfengwang  rviramontes  indoor47  ZERO-A-ONE  

+ ## 🚀 Features From 6fc6ea69d2ac88efa5f26a8fccf69b37dc012479 Mon Sep 17 00:00:00 2001 From: Peter Petermann Date: Tue, 25 Apr 2023 08:56:59 +0200 Subject: [PATCH 138/222] this changes it so the file from config is used, rather than a hardcoded name that might not exist (#3189) --- autogpt/agent/agent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index e47d8ef7..c226f379 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -280,7 +280,7 @@ class Agent: str: A feedback response generated using the provided thoughts dictionary. """ - with open("ai_settings.yaml", "r") as yaml_file: + with open(Config().ai_settings_file, "r") as yaml_file: parsed_yaml = yaml.safe_load(yaml_file) ai_role = parsed_yaml["ai_role"] From 58d84787f330fea8962875c31a08fcfd533fa473 Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Tue, 25 Apr 2023 08:41:57 -0700 Subject: [PATCH 139/222] Test `Agent.create_agent_feedback` (#3209) --- autogpt/agent/agent.py | 20 ++++++-------- tests/unit/test_get_self_feedback.py | 40 ++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+), 12 deletions(-) create mode 100644 tests/unit/test_get_self_feedback.py diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index c226f379..395d00ae 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -1,4 +1,3 @@ -import yaml from colorama import Fore, Style from autogpt.app import execute_command, get_command @@ -150,7 +149,9 @@ class Agent: "", ) thoughts = assistant_reply_json.get("thoughts", {}) - self_feedback_resp = self.get_self_feedback(thoughts) + self_feedback_resp = self.get_self_feedback( + thoughts, cfg.fast_llm_model + ) logger.typewriter_log( f"SELF FEEDBACK: {self_feedback_resp}", Fore.YELLOW, @@ -266,8 +267,7 @@ class Agent: ) return command_args - @staticmethod - def get_self_feedback(thoughts: dict) -> str: + def get_self_feedback(self, thoughts: dict, llm_model: str) -> str: """Generates a feedback response based on the provided thoughts dictionary. This method takes in a dictionary of thoughts containing keys such as 'reasoning', 'plan', 'thoughts', and 'criticism'. It combines these elements into a single @@ -279,10 +279,7 @@ class Agent: Returns: str: A feedback response generated using the provided thoughts dictionary. """ - - with open(Config().ai_settings_file, "r") as yaml_file: - parsed_yaml = yaml.safe_load(yaml_file) - ai_role = parsed_yaml["ai_role"] + ai_role = self.config.ai_role feedback_prompt = f"Below is a message from an AI agent with the role of {ai_role}. Please review the provided Thought, Reasoning, Plan, and Criticism. If these elements accurately contribute to the successful execution of the assumed role, respond with the letter 'Y' followed by a space, and then explain why it is effective. If the provided information is not suitable for achieving the role's objectives, please provide one or more sentences addressing the issue and suggesting a resolution." reasoning = thoughts.get("reasoning", "") @@ -290,8 +287,7 @@ class Agent: thought = thoughts.get("thoughts", "") criticism = thoughts.get("criticism", "") feedback_thoughts = thought + reasoning + plan + criticism - feedback_response = create_chat_completion( + return create_chat_completion( [{"role": "user", "content": feedback_prompt + feedback_thoughts}], - "gpt-3.5-turbo", - ) # * This hardcodes the model to use GPT3.5. 
should be an argument - return feedback_response + llm_model, + ) diff --git a/tests/unit/test_get_self_feedback.py b/tests/unit/test_get_self_feedback.py new file mode 100644 index 00000000..54641f91 --- /dev/null +++ b/tests/unit/test_get_self_feedback.py @@ -0,0 +1,40 @@ +from autogpt.agent.agent import Agent +from autogpt.config import AIConfig +from autogpt.llm_utils import create_chat_completion + + +def test_get_self_feedback(mocker): + # Define a sample thoughts dictionary + thoughts = { + "reasoning": "Sample reasoning.", + "plan": "Sample plan.", + "thoughts": "Sample thoughts.", + "criticism": "Sample criticism.", + } + + # Define a fake response for the create_chat_completion function + fake_response = ( + "Y The provided information is suitable for achieving the role's objectives." + ) + + # Mock the create_chat_completion function + mock_create_chat_completion = mocker.patch( + "autogpt.agent.agent.create_chat_completion", wraps=create_chat_completion + ) + mock_create_chat_completion.return_value = fake_response + + # Create a MagicMock object to replace the Agent instance + agent_mock = mocker.MagicMock(spec=Agent) + + # Mock the config attribute of the Agent instance + agent_mock.config = AIConfig() + + # Call the get_self_feedback method + feedback = Agent.get_self_feedback( + agent_mock, + thoughts, + "gpt-3.5-turbo", + ) + + # Check if the response is correct + assert feedback == fake_response From 940b115f0aee12ee0911e2b335c2d30db2e74019 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Tue, 25 Apr 2023 19:05:58 +0200 Subject: [PATCH 140/222] remove plugin notice from CONTRIBUTING.md (#3227) --- CONTRIBUTING.md | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b3a1585f..cdb84ca3 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -23,13 +23,11 @@ By participating in this project, you agree to abide by our [Code of Conduct](CO ## 📢 A Quick Word Right now we will not be accepting any Contributions that add non-essential commands to Auto-GPT. -However, you absolutely can still add these commands to Auto-GPT in the form of plugins. Please check out this [template](https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template). -> ⚠️ Plugin support is expected to ship within the week. You can follow PR #757 for more updates! +However, you absolutely can still add these commands to Auto-GPT in the form of plugins. +Please check out this [template](https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template). ## Getting Started -To start contributing, follow these steps: - 1. Fork the repository and clone your fork. 2. Create a new branch for your changes (use a descriptive name, such as `fix-bug-123` or `add-new-feature`). 3. Make your changes in the new branch. @@ -70,17 +68,21 @@ When submitting a pull request, please ensure that your changes meet the followi ### Code Formatting -We use the `black` code formatter to maintain a consistent coding style across the project. Please ensure that your code is formatted using `black` before submitting a pull request. You can install `black` using `pip`: +We use the `black` and `isort` code formatters to maintain a consistent coding style across the project. Please ensure that your code is formatted properly before submitting a pull request. + +To format your code, run the following commands in the project's root directory: ```bash -pip install black +python -m black . +python -m isort . 
``` -To format your code, run the following command in the project's root directory: - +Or if you have these tools installed globally: ```bash black . +isort . ``` + ### Pre-Commit Hooks We use pre-commit hooks to ensure that code formatting and other checks are performed automatically before each commit. To set up pre-commit hooks for this project, follow these steps: @@ -103,3 +105,5 @@ Happy coding, and once again, thank you for your contributions! Maintainers will look at PR that have no merge conflicts when deciding what to add to the project. Make sure your PR shows up here: https://github.com/Significant-Gravitas/Auto-GPT/pulls?q=is%3Apr+is%3Aopen+-is%3Aconflict+ + +## Testing From 2619740daa81bb9ba677d88b23960cd60cea8eeb Mon Sep 17 00:00:00 2001 From: James Collins Date: Tue, 25 Apr 2023 11:12:24 -0700 Subject: [PATCH 141/222] Extract OpenAI API retry handler and unify ADA embeddings calls. (#3191) * Extract retry logic, unify embedding functions * Add some docstrings * Remove embedding creation from API manager * Add test suite for retry handler * Make api manager fixture * Fix typing * Streamline tests --- autogpt/api_manager.py | 26 -------- autogpt/llm_utils.py | 119 ++++++++++++++++++++++++++-------- autogpt/memory/local.py | 6 +- autogpt/memory/pinecone.py | 6 +- autogpt/memory/redismem.py | 6 +- tests/conftest.py | 10 +++ tests/test_api_manager.py | 31 --------- tests/test_llm_utils.py | 129 +++++++++++++++++++++++++++++++++++++ tests/test_local_cache.py | 2 +- 9 files changed, 242 insertions(+), 93 deletions(-) create mode 100644 tests/test_llm_utils.py diff --git a/autogpt/api_manager.py b/autogpt/api_manager.py index 497e8a79..16975bae 100644 --- a/autogpt/api_manager.py +++ b/autogpt/api_manager.py @@ -65,32 +65,6 @@ class ApiManager: self.update_cost(prompt_tokens, completion_tokens, model) return response - def embedding_create( - self, - text_list: List[str], - model: str = "text-embedding-ada-002", - ) -> List[float]: - """ - Create an embedding for the given input text using the specified model. - - Args: - text_list (List[str]): Input text for which the embedding is to be created. - model (str, optional): The model to use for generating the embedding. - - Returns: - List[float]: The generated embedding as a list of float values. - """ - if cfg.use_azure: - response = openai.Embedding.create( - input=text_list, - engine=cfg.get_azure_deployment_id_for_model(model), - ) - else: - response = openai.Embedding.create(input=text_list, model=model) - - self.update_cost(response.usage.prompt_tokens, 0, model) - return response["data"][0]["embedding"] - def update_cost(self, prompt_tokens, completion_tokens, model): """ Update the total cost, prompt tokens, and completion tokens. diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py index cc650f21..f2fd9a81 100644 --- a/autogpt/llm_utils.py +++ b/autogpt/llm_utils.py @@ -1,5 +1,6 @@ from __future__ import annotations +import functools import time from typing import List, Optional @@ -13,10 +14,62 @@ from autogpt.logs import logger from autogpt.types.openai import Message CFG = Config() - openai.api_key = CFG.openai_api_key +def retry_openai_api( + num_retries: int = 10, + backoff_base: float = 2.0, + warn_user: bool = True, +): + """Retry an OpenAI API call. + + Args: + num_retries int: Number of retries. Defaults to 10. + backoff_base float: Base for exponential backoff. Defaults to 2. + warn_user bool: Whether to warn the user. Defaults to True. 
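+
+    Example (a sketch; any OpenAI call can be wrapped):
+        @retry_openai_api(num_retries=3, backoff_base=2.0)
+        def ask(messages):
+            return openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)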
+ """ + retry_limit_msg = f"{Fore.RED}Error: " f"Reached rate limit, passing...{Fore.RESET}" + api_key_error_msg = ( + f"Please double check that you have setup a " + f"{Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. You can " + f"read more here: {Fore.CYAN}https://github.com/Significant-Gravitas/Auto-GPT#openai-api-keys-configuration{Fore.RESET}" + ) + backoff_msg = ( + f"{Fore.RED}Error: API Bad gateway. Waiting {{backoff}} seconds...{Fore.RESET}" + ) + + def _wrapper(func): + @functools.wraps(func) + def _wrapped(*args, **kwargs): + user_warned = not warn_user + num_attempts = num_retries + 1 # +1 for the first attempt + for attempt in range(1, num_attempts + 1): + try: + return func(*args, **kwargs) + + except RateLimitError: + if attempt == num_attempts: + raise + + logger.debug(retry_limit_msg) + if not user_warned: + logger.double_check(api_key_error_msg) + user_warned = True + + except APIError as e: + if (e.http_status != 502) or (attempt == num_attempts): + raise + + backoff = backoff_base ** (attempt + 2) + logger.debug(backoff_msg.format(backoff=backoff)) + time.sleep(backoff) + + return _wrapped + + return _wrapper + + def call_ai_function( function: str, args: list, description: str, model: str | None = None ) -> str: @@ -154,32 +207,46 @@ def create_chat_completion( return resp -def get_ada_embedding(text): +def get_ada_embedding(text: str) -> List[int]: + """Get an embedding from the ada model. + + Args: + text (str): The text to embed. + + Returns: + List[int]: The embedding. + """ + model = "text-embedding-ada-002" text = text.replace("\n", " ") - return api_manager.embedding_create( - text_list=[text], model="text-embedding-ada-002" + + if CFG.use_azure: + kwargs = {"engine": CFG.get_azure_deployment_id_for_model(model)} + else: + kwargs = {"model": model} + + embedding = create_embedding(text, **kwargs) + api_manager.update_cost( + prompt_tokens=embedding.usage.prompt_tokens, + completion_tokens=0, + model=model, ) + return embedding["data"][0]["embedding"] -def create_embedding_with_ada(text) -> list: - """Create an embedding with text-ada-002 using the OpenAI SDK""" - num_retries = 10 - for attempt in range(num_retries): - backoff = 2 ** (attempt + 2) - try: - return api_manager.embedding_create( - text_list=[text], model="text-embedding-ada-002" - ) - except RateLimitError: - pass - except (APIError, Timeout) as e: - if e.http_status != 502: - raise - if attempt == num_retries - 1: - raise - if CFG.debug_mode: - print( - f"{Fore.RED}Error: ", - f"API Bad gateway. Waiting {backoff} seconds...{Fore.RESET}", - ) - time.sleep(backoff) +@retry_openai_api() +def create_embedding( + text: str, + *_, + **kwargs, +) -> openai.Embedding: + """Create an embedding using the OpenAI API + + Args: + text (str): The text to embed. + kwargs: Other arguments to pass to the OpenAI API embedding creation call. + + Returns: + openai.Embedding: The embedding object. 
+ """ + + return openai.Embedding.create(input=[text], **kwargs) diff --git a/autogpt/memory/local.py b/autogpt/memory/local.py index 7a31782b..87d83cc5 100644 --- a/autogpt/memory/local.py +++ b/autogpt/memory/local.py @@ -7,7 +7,7 @@ from typing import Any, List import numpy as np import orjson -from autogpt.llm_utils import create_embedding_with_ada +from autogpt.llm_utils import get_ada_embedding from autogpt.memory.base import MemoryProviderSingleton EMBED_DIM = 1536 @@ -63,7 +63,7 @@ class LocalCache(MemoryProviderSingleton): return "" self.data.texts.append(text) - embedding = create_embedding_with_ada(text) + embedding = get_ada_embedding(text) vector = np.array(embedding).astype(np.float32) vector = vector[np.newaxis, :] @@ -111,7 +111,7 @@ class LocalCache(MemoryProviderSingleton): Returns: List[str] """ - embedding = create_embedding_with_ada(text) + embedding = get_ada_embedding(text) scores = np.dot(self.data.embeddings, embedding) diff --git a/autogpt/memory/pinecone.py b/autogpt/memory/pinecone.py index 27fcd624..3aa78f9e 100644 --- a/autogpt/memory/pinecone.py +++ b/autogpt/memory/pinecone.py @@ -1,7 +1,7 @@ import pinecone from colorama import Fore, Style -from autogpt.llm_utils import create_embedding_with_ada +from autogpt.llm_utils import get_ada_embedding from autogpt.logs import logger from autogpt.memory.base import MemoryProviderSingleton @@ -44,7 +44,7 @@ class PineconeMemory(MemoryProviderSingleton): self.index = pinecone.Index(table_name) def add(self, data): - vector = create_embedding_with_ada(data) + vector = get_ada_embedding(data) # no metadata here. We may wish to change that long term. self.index.upsert([(str(self.vec_num), vector, {"raw_text": data})]) _text = f"Inserting data into memory at index: {self.vec_num}:\n data: {data}" @@ -64,7 +64,7 @@ class PineconeMemory(MemoryProviderSingleton): :param data: The data to compare to. :param num_relevant: The number of relevant data to return. Defaults to 5 """ - query_embedding = create_embedding_with_ada(data) + query_embedding = get_ada_embedding(data) results = self.index.query( query_embedding, top_k=num_relevant, include_metadata=True ) diff --git a/autogpt/memory/redismem.py b/autogpt/memory/redismem.py index 082a812c..5618bb37 100644 --- a/autogpt/memory/redismem.py +++ b/autogpt/memory/redismem.py @@ -10,7 +10,7 @@ from redis.commands.search.field import TextField, VectorField from redis.commands.search.indexDefinition import IndexDefinition, IndexType from redis.commands.search.query import Query -from autogpt.llm_utils import create_embedding_with_ada +from autogpt.llm_utils import get_ada_embedding from autogpt.logs import logger from autogpt.memory.base import MemoryProviderSingleton @@ -88,7 +88,7 @@ class RedisMemory(MemoryProviderSingleton): """ if "Command Error:" in data: return "" - vector = create_embedding_with_ada(data) + vector = get_ada_embedding(data) vector = np.array(vector).astype(np.float32).tobytes() data_dict = {b"data": data, "embedding": vector} pipe = self.redis.pipeline() @@ -130,7 +130,7 @@ class RedisMemory(MemoryProviderSingleton): Returns: A list of the most relevant data. 
""" - query_embedding = create_embedding_with_ada(data) + query_embedding = get_ada_embedding(data) base_query = f"*=>[KNN {num_relevant} @embedding $vector AS vector_score]" query = ( Query(base_query) diff --git a/tests/conftest.py b/tests/conftest.py index 59433457..0f82404c 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -3,6 +3,8 @@ from pathlib import Path import pytest from dotenv import load_dotenv +from autogpt.api_manager import ApiManager +from autogpt.api_manager import api_manager as api_manager_ from autogpt.config import Config from autogpt.workspace import Workspace @@ -29,3 +31,11 @@ def config(workspace: Workspace) -> Config: config.workspace_path = workspace.root yield config config.workspace_path = old_ws_path + + +@pytest.fixture() +def api_manager() -> ApiManager: + old_attrs = api_manager_.__dict__.copy() + api_manager_.reset() + yield api_manager_ + api_manager_.__dict__.update(old_attrs) diff --git a/tests/test_api_manager.py b/tests/test_api_manager.py index 900a88e0..71aa6d63 100644 --- a/tests/test_api_manager.py +++ b/tests/test_api_manager.py @@ -86,37 +86,6 @@ class TestApiManager: assert api_manager.get_total_completion_tokens() == 20 assert api_manager.get_total_cost() == (10 * 0.002 + 20 * 0.002) / 1000 - @staticmethod - def test_embedding_create_invalid_model(): - """Test if an invalid model for embedding raises a KeyError.""" - text_list = ["Hello, how are you?"] - model = "invalid-model" - - with patch("openai.Embedding.create") as mock_create: - mock_response = MagicMock() - mock_response.usage.prompt_tokens = 5 - mock_create.side_effect = KeyError("Invalid model") - with pytest.raises(KeyError): - api_manager.embedding_create(text_list, model=model) - - @staticmethod - def test_embedding_create_valid_inputs(): - """Test if valid inputs for embedding result in correct tokens and cost.""" - text_list = ["Hello, how are you?"] - model = "text-embedding-ada-002" - - with patch("openai.Embedding.create") as mock_create: - mock_response = MagicMock() - mock_response.usage.prompt_tokens = 5 - mock_response["data"] = [{"embedding": [0.1, 0.2, 0.3]}] - mock_create.return_value = mock_response - - api_manager.embedding_create(text_list, model=model) - - assert api_manager.get_total_prompt_tokens() == 5 - assert api_manager.get_total_completion_tokens() == 0 - assert api_manager.get_total_cost() == (5 * 0.0004) / 1000 - def test_getter_methods(self): """Test the getter methods for total tokens, cost, and budget.""" api_manager.update_cost(60, 120, "gpt-3.5-turbo") diff --git a/tests/test_llm_utils.py b/tests/test_llm_utils.py new file mode 100644 index 00000000..83cac0de --- /dev/null +++ b/tests/test_llm_utils.py @@ -0,0 +1,129 @@ +import pytest +from openai.error import APIError, RateLimitError + +from autogpt.llm_utils import get_ada_embedding, retry_openai_api +from autogpt.modelsinfo import COSTS + + +@pytest.fixture(params=[RateLimitError, APIError]) +def error(request): + if request.param == APIError: + return request.param("Error", http_status=502) + else: + return request.param("Error") + + +@pytest.fixture +def mock_create_embedding(mocker): + mock_response = mocker.MagicMock() + mock_response.usage.prompt_tokens = 5 + mock_response.__getitem__.side_effect = lambda key: [{"embedding": [0.1, 0.2, 0.3]}] + return mocker.patch( + "autogpt.llm_utils.create_embedding", return_value=mock_response + ) + + +def error_factory(error_instance, error_count, retry_count, warn_user=True): + class RaisesError: + def __init__(self): + self.count = 0 + + 
@retry_openai_api( + num_retries=retry_count, backoff_base=0.001, warn_user=warn_user + ) + def __call__(self): + self.count += 1 + if self.count <= error_count: + raise error_instance + return self.count + + return RaisesError() + + +def test_retry_open_api_no_error(capsys): + @retry_openai_api() + def f(): + return 1 + + result = f() + assert result == 1 + + output = capsys.readouterr() + assert output.out == "" + assert output.err == "" + + +@pytest.mark.parametrize( + "error_count, retry_count, failure", + [(2, 10, False), (2, 2, False), (10, 2, True), (3, 2, True), (1, 0, True)], + ids=["passing", "passing_edge", "failing", "failing_edge", "failing_no_retries"], +) +def test_retry_open_api_passing(capsys, error, error_count, retry_count, failure): + call_count = min(error_count, retry_count) + 1 + + raises = error_factory(error, error_count, retry_count) + if failure: + with pytest.raises(type(error)): + raises() + else: + result = raises() + assert result == call_count + + assert raises.count == call_count + + output = capsys.readouterr() + + if error_count and retry_count: + if type(error) == RateLimitError: + assert "Reached rate limit, passing..." in output.out + assert "Please double check" in output.out + if type(error) == APIError: + assert "API Bad gateway" in output.out + else: + assert output.out == "" + + +def test_retry_open_api_rate_limit_no_warn(capsys): + error_count = 2 + retry_count = 10 + + raises = error_factory(RateLimitError, error_count, retry_count, warn_user=False) + result = raises() + call_count = min(error_count, retry_count) + 1 + assert result == call_count + assert raises.count == call_count + + output = capsys.readouterr() + + assert "Reached rate limit, passing..." in output.out + assert "Please double check" not in output.out + + +def test_retry_openapi_other_api_error(capsys): + error_count = 2 + retry_count = 10 + + raises = error_factory(APIError("Error", http_status=500), error_count, retry_count) + + with pytest.raises(APIError): + raises() + call_count = 1 + assert raises.count == call_count + + output = capsys.readouterr() + assert output.out == "" + + +def test_get_ada_embedding(mock_create_embedding, api_manager): + model = "text-embedding-ada-002" + embedding = get_ada_embedding("test") + mock_create_embedding.assert_called_once_with( + "test", model="text-embedding-ada-002" + ) + + assert embedding == [0.1, 0.2, 0.3] + + cost = COSTS[model]["prompt"] + assert api_manager.get_total_prompt_tokens() == 5 + assert api_manager.get_total_completion_tokens() == 0 + assert api_manager.get_total_cost() == (5 * cost) / 1000 diff --git a/tests/test_local_cache.py b/tests/test_local_cache.py index b6a0387b..597f9714 100644 --- a/tests/test_local_cache.py +++ b/tests/test_local_cache.py @@ -21,7 +21,7 @@ def LocalCache(): @pytest.fixture def mock_embed_with_ada(mocker): mocker.patch( - "autogpt.memory.local.create_embedding_with_ada", + "autogpt.memory.local.get_ada_embedding", return_value=[0.1] * EMBED_DIM, ) From f962939737657a06c3fbfe6094620576ae834364 Mon Sep 17 00:00:00 2001 From: James Collins Date: Tue, 25 Apr 2023 11:38:06 -0700 Subject: [PATCH 142/222] Use explicit API keys when querying openai rather than import time manipulation of the package attributes (#3241) --- autogpt/api_manager.py | 3 ++- autogpt/commands/image_gen.py | 2 +- autogpt/config/config.py | 2 -- autogpt/llm_utils.py | 8 +++++--- 4 files changed, 8 insertions(+), 7 deletions(-) diff --git a/autogpt/api_manager.py b/autogpt/api_manager.py index 16975bae..ace64fbd 100644 --- 
a/autogpt/api_manager.py +++ b/autogpt/api_manager.py @@ -7,7 +7,6 @@ from autogpt.logs import logger from autogpt.modelsinfo import COSTS cfg = Config() -openai.api_key = cfg.openai_api_key print_total_cost = cfg.debug_mode @@ -50,6 +49,7 @@ class ApiManager: messages=messages, temperature=temperature, max_tokens=max_tokens, + api_key=cfg.openai_api_key, ) else: response = openai.ChatCompletion.create( @@ -57,6 +57,7 @@ class ApiManager: messages=messages, temperature=temperature, max_tokens=max_tokens, + api_key=cfg.openai_api_key, ) if self.debug: logger.debug(f"Response: {response}") diff --git a/autogpt/commands/image_gen.py b/autogpt/commands/image_gen.py index 834432c5..9ed0f44b 100644 --- a/autogpt/commands/image_gen.py +++ b/autogpt/commands/image_gen.py @@ -87,7 +87,6 @@ def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str: Returns: str: The filename of the image """ - openai.api_key = CFG.openai_api_key # Check for supported image sizes if size not in [256, 512, 1024]: @@ -102,6 +101,7 @@ def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str: n=1, size=f"{size}x{size}", response_format="b64_json", + api_key=CFG.openai_api_key, ) print(f"Image Generated for prompt:{prompt}") diff --git a/autogpt/config/config.py b/autogpt/config/config.py index 9de7fe70..66f0253e 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -128,8 +128,6 @@ class Config(metaclass=Singleton): # Note that indexes must be created on db 0 in redis, this is not configurable. self.memory_backend = os.getenv("MEMORY_BACKEND", "local") - # Initialize the OpenAI API client - openai.api_key = self.openai_api_key self.plugins_dir = os.getenv("PLUGINS_DIR", "plugins") self.plugins: List[AutoGPTPluginTemplate] = [] diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py index f2fd9a81..7b565edd 100644 --- a/autogpt/llm_utils.py +++ b/autogpt/llm_utils.py @@ -14,7 +14,6 @@ from autogpt.logs import logger from autogpt.types.openai import Message CFG = Config() -openai.api_key = CFG.openai_api_key def retry_openai_api( @@ -248,5 +247,8 @@ def create_embedding( Returns: openai.Embedding: The embedding object. """ - - return openai.Embedding.create(input=[text], **kwargs) + return openai.Embedding.create( + input=[text], + api_key=CFG.openai_api_key, + **kwargs, + ) From 1806fc683d4e825fb47f1dd8dc6157885c88f4ed Mon Sep 17 00:00:00 2001 From: Richard Beales Date: Tue, 25 Apr 2023 19:50:22 +0100 Subject: [PATCH 143/222] Fix readme centering (#3243) --- README.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/README.md b/README.md index bfb86714..9cfcda75 100644 --- a/README.md +++ b/README.md @@ -31,9 +31,6 @@ Your support is greatly appreciated. Development of this free, open-source proje

-

-

-

From 6fbac455d4e83a243d6276a4ce3becc0e2acbcbb Mon Sep 17 00:00:00 2001 From: James Collins Date: Tue, 25 Apr 2023 12:10:12 -0700 Subject: [PATCH 144/222] Remove import time loading of config from llm_utils (#3245) --- autogpt/llm_utils.py | 35 ++++++++++++++++++++--------------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py index 7b565edd..2ef0422f 100644 --- a/autogpt/llm_utils.py +++ b/autogpt/llm_utils.py @@ -13,8 +13,6 @@ from autogpt.config import Config from autogpt.logs import logger from autogpt.types.openai import Message -CFG = Config() - def retry_openai_api( num_retries: int = 10, @@ -86,8 +84,9 @@ def call_ai_function( Returns: str: The response from the function """ + cfg = Config() if model is None: - model = CFG.smart_llm_model + model = cfg.smart_llm_model # For each arg, if any are None, convert to "None": args = [str(arg) if arg is not None else "None" for arg in args] # parse args to comma separated string @@ -109,7 +108,7 @@ def call_ai_function( def create_chat_completion( messages: List[Message], # type: ignore model: Optional[str] = None, - temperature: float = CFG.temperature, + temperature: float = None, max_tokens: Optional[int] = None, ) -> str: """Create a chat completion using the OpenAI API @@ -123,13 +122,17 @@ def create_chat_completion( Returns: str: The response from the chat completion """ + cfg = Config() + if temperature is None: + temperature = cfg.temperature + num_retries = 10 warned_user = False - if CFG.debug_mode: + if cfg.debug_mode: print( f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}" ) - for plugin in CFG.plugins: + for plugin in cfg.plugins: if plugin.can_handle_chat_completion( messages=messages, model=model, @@ -148,9 +151,9 @@ def create_chat_completion( for attempt in range(num_retries): backoff = 2 ** (attempt + 2) try: - if CFG.use_azure: + if cfg.use_azure: response = api_manager.create_chat_completion( - deployment_id=CFG.get_azure_deployment_id_for_model(model), + deployment_id=cfg.get_azure_deployment_id_for_model(model), model=model, messages=messages, temperature=temperature, @@ -165,7 +168,7 @@ def create_chat_completion( ) break except RateLimitError: - if CFG.debug_mode: + if cfg.debug_mode: print( f"{Fore.RED}Error: ", f"Reached rate limit, passing...{Fore.RESET}" ) @@ -180,7 +183,7 @@ def create_chat_completion( raise if attempt == num_retries - 1: raise - if CFG.debug_mode: + if cfg.debug_mode: print( f"{Fore.RED}Error: ", f"API Bad gateway. Waiting {backoff} seconds...{Fore.RESET}", @@ -194,12 +197,12 @@ def create_chat_completion( + f"Try running Auto-GPT again, and if the problem the persists try running it with `{Fore.CYAN}--debug{Fore.RESET}`.", ) logger.double_check() - if CFG.debug_mode: + if cfg.debug_mode: raise RuntimeError(f"Failed to get response after {num_retries} retries") else: quit(1) resp = response.choices[0].message["content"] - for plugin in CFG.plugins: + for plugin in cfg.plugins: if not plugin.can_handle_on_response(): continue resp = plugin.on_response(resp) @@ -215,11 +218,12 @@ def get_ada_embedding(text: str) -> List[int]: Returns: List[int]: The embedding. 
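 
     Note:
         Config() is cheap to construct here: its Singleton metaclass returns
         the same cached instance on every call.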
""" + cfg = Config() model = "text-embedding-ada-002" text = text.replace("\n", " ") - if CFG.use_azure: - kwargs = {"engine": CFG.get_azure_deployment_id_for_model(model)} + if cfg.use_azure: + kwargs = {"engine": cfg.get_azure_deployment_id_for_model(model)} else: kwargs = {"model": model} @@ -247,8 +251,9 @@ def create_embedding( Returns: openai.Embedding: The embedding object. """ + cfg = Config() return openai.Embedding.create( input=[text], - api_key=CFG.openai_api_key, + api_key=cfg.openai_api_key, **kwargs, ) From fac8f7da21be9d24b2ffc9a078231bbc9ae0ffc9 Mon Sep 17 00:00:00 2001 From: Peter Svensson Date: Tue, 25 Apr 2023 22:32:39 +0200 Subject: [PATCH 145/222] adding probably erroneously removed return value from execut_shell, giving 'None' in return always otherise - not ideal (#3212) Co-authored-by: James Collins --- autogpt/commands/execute_code.py | 1 + 1 file changed, 1 insertion(+) diff --git a/autogpt/commands/execute_code.py b/autogpt/commands/execute_code.py index 71c1bd2e..ca0586f4 100644 --- a/autogpt/commands/execute_code.py +++ b/autogpt/commands/execute_code.py @@ -133,6 +133,7 @@ def execute_shell(command_line: str) -> str: # Change back to whatever the prior working dir was os.chdir(current_dir) + return output @command( From eec86a7b824ec332a55fd5935947bbc8ae98797c Mon Sep 17 00:00:00 2001 From: James Collins Date: Tue, 25 Apr 2023 14:53:13 -0700 Subject: [PATCH 146/222] Load .env in package init (#3251) --- autogpt/__init__.py | 6 ++++++ autogpt/commands/twitter.py | 3 --- autogpt/config/config.py | 3 --- tests/conftest.py | 3 --- 4 files changed, 6 insertions(+), 9 deletions(-) diff --git a/autogpt/__init__.py b/autogpt/__init__.py index e69de29b..5f5b20ef 100644 --- a/autogpt/__init__.py +++ b/autogpt/__init__.py @@ -0,0 +1,6 @@ +from dotenv import load_dotenv + +# Load the users .env file into environment variables +load_dotenv(verbose=True, override=True) + +del load_dotenv diff --git a/autogpt/commands/twitter.py b/autogpt/commands/twitter.py index f0502271..baf0876d 100644 --- a/autogpt/commands/twitter.py +++ b/autogpt/commands/twitter.py @@ -2,12 +2,9 @@ import os import tweepy -from dotenv import load_dotenv from autogpt.commands.command import command -load_dotenv() - @command( "send_tweet", diff --git a/autogpt/config/config.py b/autogpt/config/config.py index 66f0253e..97ab2d58 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -6,12 +6,9 @@ import openai import yaml from auto_gpt_plugin_template import AutoGPTPluginTemplate from colorama import Fore -from dotenv import load_dotenv from autogpt.singleton import Singleton -load_dotenv(verbose=True, override=True) - class Config(metaclass=Singleton): """ diff --git a/tests/conftest.py b/tests/conftest.py index 0f82404c..9ab49189 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,15 +1,12 @@ from pathlib import Path import pytest -from dotenv import load_dotenv from autogpt.api_manager import ApiManager from autogpt.api_manager import api_manager as api_manager_ from autogpt.config import Config from autogpt.workspace import Workspace -load_dotenv() - @pytest.fixture() def workspace_root(tmp_path) -> Path: From 8670b3039e0cca0c57dc6cc5c608781d8ab69314 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Tue, 25 Apr 2023 17:25:38 -0500 Subject: [PATCH 147/222] Fix PR size autolabeler message (#3194) --- .github/workflows/pr-label.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/pr-label.yml b/.github/workflows/pr-label.yml index d4e9a5e7..ff4174ad 
100644 --- a/.github/workflows/pr-label.yml +++ b/.github/workflows/pr-label.yml @@ -49,3 +49,7 @@ jobs: l_label: 'size/l' l_max_size: 200 xl_label: 'size/xl' + message_if_xl: > + This PR exceeds the recommended size of 200 lines. + Please make sure you are NOT addressing multiple issues with one PR. + Note this PR might be rejected due to its size From d75379358f96669fbfa24acb3811c74895430244 Mon Sep 17 00:00:00 2001 From: Robin Richtsfeld Date: Wed, 26 Apr 2023 01:52:38 +0200 Subject: [PATCH 148/222] Fix `get_ada_embedding` return type (#3263) --- autogpt/llm_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py index 2ef0422f..212928ca 100644 --- a/autogpt/llm_utils.py +++ b/autogpt/llm_utils.py @@ -209,14 +209,14 @@ def create_chat_completion( return resp -def get_ada_embedding(text: str) -> List[int]: +def get_ada_embedding(text: str) -> List[float]: """Get an embedding from the ada model. Args: text (str): The text to embed. Returns: - List[int]: The embedding. + List[float]: The embedding. """ cfg = Config() model = "text-embedding-ada-002" From f0f34030a0e17eb58913ee1724ed1aef0abf9434 Mon Sep 17 00:00:00 2001 From: Richard Beales Date: Wed, 26 Apr 2023 08:52:33 +0100 Subject: [PATCH 149/222] Fix docs alignment (#3302) --- docs/configuration/search.md | 5 ++- docs/installation.md | 39 ++++++++++---------- docs/usage.md | 70 ++++++++++++++++++++---------------- 3 files changed, 61 insertions(+), 53 deletions(-) diff --git a/docs/configuration/search.md b/docs/configuration/search.md index c01828ed..87e8e3ab 100644 --- a/docs/configuration/search.md +++ b/docs/configuration/search.md @@ -5,11 +5,11 @@ This section is optional. use the official google api if you are having issues w To use the `google_official_search` command, you need to set up your Google API keys in your environment variables. Create your project: + 1. Go to the [Google Cloud Console](https://console.cloud.google.com/). 2. If you don't already have an account, create one and log in. 3. Create a new project by clicking on the "Select a Project" dropdown at the top of the page and clicking "New Project". -4. Give it a name and click "Create". - +4. Give it a name and click "Create". Set up a custom search API and add to your .env file: 5. Go to the [APIs & Services Dashboard](https://console.cloud.google.com/apis/dashboard). 6. Click "Enable APIs and Services". @@ -21,7 +21,6 @@ Set up a custom search API and add to your .env file: 12. Copy the API key. 13. Set it as an environment variable named `GOOGLE_API_KEY` on your machine (see how to set up environment variables below). 14. [Enable](https://console.developers.google.com/apis/api/customsearch.googleapis.com) the Custom Search API on your project. (Might need to wait few minutes to propagate) - Set up a custom search engine and add to your .env file: 15. Go to the [Custom Search Engine](https://cse.google.com/cse/all) page. 16. Click "Add". diff --git a/docs/installation.md b/docs/installation.md index 884f4311..806f587f 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -2,11 +2,11 @@ ## ⚠️ OpenAI API Keys Configuration -Get your OpenAI API key from: https://platform.openai.com/account/api-keys. +Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](https://platform.openai.com/account/api-keys). To use OpenAI API key for Auto-GPT, you **NEED** to have billing set up (AKA paid account). 
-You can set up paid account at https://platform.openai.com/account/billing/overview. +You can set up paid account at [https://platform.openai.com/account/billing/overview](https://platform.openai.com/account/billing/overview). Important: It's highly recommended that you track your usage on [the Usage page](https://platform.openai.com/account/usage). You can also set limits on how much you spend on [the Usage limits page](https://platform.openai.com/account/billing/limits). @@ -19,19 +19,19 @@ You can also set limits on how much you spend on [the Usage limits page](https:/ 1. Make sure you have one of the environments listed under [**requirements**](../README.md#-requirements) set up. - _To execute the following commands, open a CMD, Bash, or Powershell window by navigating to a folder on your computer and typing `CMD` in the folder path at the top, then press enter._ + _To execute the following commands, open a CMD, Bash, or Powershell window by navigating to a folder on your computer and typing `CMD` in the folder path at the top, then press enter. Make sure you have [Git](https://git-scm.com/downloads) installed for your O/S._ 2. Clone the repository using Git, or download the [latest stable release](https://github.com/Significant-Gravitas/Auto-GPT/releases/latest) (`Source code (zip)`, at the bottom of the page). - ``` shell +``` shell git clone -b stable https://github.com/Significant-Gravitas/Auto-GPT.git - ``` +``` 3. Navigate to the directory where you downloaded the repository. - ``` shell +``` shell cd Auto-GPT - ``` +``` 5. Configure Auto-GPT: 1. Find the file named `.env.template` in the main `Auto-GPT` folder. This file may be hidden by default in some operating systems due to the dot prefix. To reveal hidden files, follow the instructions for your specific operating system (e.g., in Windows, click on the "View" tab in File Explorer and check the "Hidden items" box; in macOS, press Cmd + Shift + .). @@ -45,23 +45,24 @@ You can also set limits on how much you spend on [the Usage limits page](https:/ You have now configured Auto-GPT. Notes: + - See [OpenAI API Keys Configuration](#openai-api-keys-configuration) to get your OpenAI API key. - - Get your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website. + - Get your ElevenLabs API key from: [ElevenLabs](https://elevenlabs.io). You can view your xi-api-key using the "Profile" tab on the website. - If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and then follow these steps: - Rename `azure.yaml.template` to `azure.yaml` and provide the relevant `azure_api_base`, `azure_api_version` and all the deployment IDs for the relevant models in the `azure_model_map` section: - `fast_llm_model_deployment_id` - your gpt-3.5-turbo or gpt-4 deployment ID - `smart_llm_model_deployment_id` - your gpt-4 deployment ID - `embedding_model_deployment_id` - your text-embedding-ada-002 v2 deployment ID - - Please specify all of these values as double-quoted strings - ``` shell - # Replace string in angled brackets (<>) to your own ID - azure_model_map: - fast_llm_model_deployment_id: "" - ... - ``` - - Details can be found here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section and here: https://learn.microsoft.com/en-us/azure/cognitive-services/openai/tutorials/embeddings?tabs=command-line for the embedding model. 
- - If you're on Windows you may need to install https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170 +``` shell +# Please specify all of these values as double-quoted strings +# Replace string in angled brackets (<>) to your own ID +azure_model_map: + fast_llm_model_deployment_id: "" + ... +``` +Details can be found here: [https://pypi.org/project/openai/](https://pypi.org/project/openai/) in the `Microsoft Azure Endpoints` section and here: [learn.microsoft.com](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/tutorials/embeddings?tabs=command-line) for the embedding model. +If you're on Windows you may need to install [msvc-170](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170) 4. Follow the further instructions for running Auto-GPT with [Docker](#run-with-docker) (*recommended*), or [Docker-less](#run-docker-less) @@ -100,9 +101,9 @@ docker run -it --env OPENAI_API_KEY='your-key-here' -v $PWD/ai_settings.yaml:/ap ``` -### Run Docker-less +### Run without Docker -Simply run `./run.sh` (Linux/macOS) or `.\run.bat` (Windows) in your terminal. +Simply run `./run.sh` (Linux/macOS) or `.\run.bat` (Windows) in your terminal. This will install any necessary Python packages and launch Auto-GPT. ### Run with Dev Container diff --git a/docs/usage.md b/docs/usage.md index 3584c216..8a1bb63a 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -1,25 +1,33 @@ # Usage -1. Open a terminal and run the startup script: - * On Linux/MacOS: - ``` shell - ./run.sh - ``` - * On Windows: - ``` shell - .\run.bat - ``` - * Using Docker: - ``` shell - docker-compose run --rm auto-gpt - ``` - Running with `--help` lists all the possible command line arguments you can pass: - ``` shell - ./run.sh --help +Open a terminal and run the startup script: - # or with docker - docker-compose run --rm auto-gpt --help - ``` +- On Linux/MacOS: + +``` shell +./run.sh +``` + +- On Windows: + +``` shell +.\run.bat +``` + +- Using Docker: + +``` shell +docker-compose run --rm auto-gpt +``` + +Running with `--help` lists all the possible command line arguments you can pass: + +``` shell +./run.sh --help + +# or with docker +docker-compose run --rm auto-gpt --help +``` 2. After each response from Auto-GPT, choose from the options to authorize command(s), exit the program, or provide feedback to the AI. @@ -34,17 +42,17 @@ Here are some common arguments you can use when running Auto-GPT: > Replace anything in angled brackets (<>) to a value you want to specify * View all available command line arguments - ``` shell - python -m autogpt --help - ``` +``` shell +python -m autogpt --help +``` * Run Auto-GPT with a different AI Settings file - ``` shell - python -m autogpt --ai-settings - ``` +``` shell +python -m autogpt --ai-settings +``` * Specify a memory backend - ``` shell - python -m autogpt --use-memory - ``` +``` shell +python -m autogpt --use-memory +``` > **NOTE**: There are shorthands for some of these flags, for example `-m` for `--use-memory`. Use `python -m autogpt --help` for more information @@ -65,9 +73,9 @@ Use at your own risk. 1. Run the `autogpt` python module in your terminal: - ``` shell - python -m autogpt --continuous - ``` +``` shell +python -m autogpt --continuous +``` 2. 
To exit the program, press Ctrl + C From 265a23212e0994df93d711ac029636878f77b651 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Wed, 26 Apr 2023 12:40:37 +0200 Subject: [PATCH 150/222] Fix(docs) Contributing, CoC and License links (#3308) --- docs/LICENSE | 1 + docs/code-of-conduct.md | 1 + docs/contributing.md | 1 + mkdocs.yml | 10 +++++----- 4 files changed, 8 insertions(+), 5 deletions(-) create mode 120000 docs/LICENSE create mode 120000 docs/code-of-conduct.md create mode 120000 docs/contributing.md diff --git a/docs/LICENSE b/docs/LICENSE new file mode 120000 index 00000000..ea5b6064 --- /dev/null +++ b/docs/LICENSE @@ -0,0 +1 @@ +../LICENSE \ No newline at end of file diff --git a/docs/code-of-conduct.md b/docs/code-of-conduct.md new file mode 120000 index 00000000..0400d574 --- /dev/null +++ b/docs/code-of-conduct.md @@ -0,0 +1 @@ +../CODE_OF_CONDUCT.md \ No newline at end of file diff --git a/docs/contributing.md b/docs/contributing.md new file mode 120000 index 00000000..44fcc634 --- /dev/null +++ b/docs/contributing.md @@ -0,0 +1 @@ +../CONTRIBUTING.md \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index b8e529ea..0b743e91 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -13,8 +13,8 @@ nav: - Voice: configuration/voice.md - Image Generation: configuration/imagegen.md - - Code of Conduct: ../CODE_OF_CONDUCT.md - - Contributing: ../CONTRIBUTING.md - - License: ../LICENSE - -theme: readthedocs \ No newline at end of file + - Code of Conduct: code-of-conduct.md + - Contributing: contributing.md + - License: LICENSE + +theme: readthedocs From 749b1bbfc0eea3251a5e2fcd4796253bb36bdac4 Mon Sep 17 00:00:00 2001 From: apurvsibal <30145118+apurvsibal@users.noreply.github.com> Date: Wed, 26 Apr 2023 17:29:53 +0530 Subject: [PATCH 151/222] Fix(docs) requirements link in installation guide (#3264) --- docs/installation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/installation.md b/docs/installation.md index 806f587f..e9595358 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -17,7 +17,7 @@ You can also set limits on how much you spend on [the Usage limits page](https:/ ## General setup -1. Make sure you have one of the environments listed under [**requirements**](../README.md#-requirements) set up. +1. Make sure you have one of the environments listed under [**requirements**](https://github.com/Significant-Gravitas/Auto-GPT#-requirements) set up. _To execute the following commands, open a CMD, Bash, or Powershell window by navigating to a folder on your computer and typing `CMD` in the folder path at the top, then press enter. 
Make sure you have [Git](https://git-scm.com/downloads) installed for your O/S._

From 1e71f952f980b348629dd75477600e6b43e3f6a3 Mon Sep 17 00:00:00 2001
From: vlad
Date: Wed, 26 Apr 2023 10:54:22 -0400
Subject: [PATCH 152/222] Codecov - don't fail pipelines for project cov
 changes (#3327)

Co-authored-by: Nicholas Tindle
---
 autogpt/memory/base.py |  5 +++++
 codecov.yml            | 14 +++++++++++++-
 2 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/autogpt/memory/base.py b/autogpt/memory/base.py
index 83d80750..a6c92db6 100644
--- a/autogpt/memory/base.py
+++ b/autogpt/memory/base.py
@@ -10,20 +10,25 @@ cfg = Config()
 class MemoryProviderSingleton(AbstractSingleton):
     @abc.abstractmethod
     def add(self, data):
+        """Adds to memory"""
         pass
 
     @abc.abstractmethod
     def get(self, data):
+        """Gets from memory"""
         pass
 
     @abc.abstractmethod
     def clear(self):
+        """Clears memory"""
         pass
 
     @abc.abstractmethod
     def get_relevant(self, data, num_relevant=5):
+        """Gets relevant memory for the given data"""
         pass
 
     @abc.abstractmethod
     def get_stats(self):
+        """Gets stats from memory"""
         pass
diff --git a/codecov.yml b/codecov.yml
index 61acfc08..15a83ad2 100644
--- a/codecov.yml
+++ b/codecov.yml
@@ -3,4 +3,16 @@ coverage:
     project:
       default:
         target: auto
-        threshold: 1%
\ No newline at end of file
+        threshold: 1%
+        informational: true
+    patch:
+      default:
+        target: 80%
+
+## Please add this section once you've separated your coverage uploads for unit and integration tests
+#
+# flags:
+#   unit-tests:
+#     carryforward: true
+#   integration-tests:
+#     carryforward: true

From 3ae6c1b03f2a22e364ed522828318dcd492bbf3b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E2=9C=94=EF=B8=8FITtechtor?= <104917057+MarKiSS2000@users.noreply.github.com>
Date: Wed, 26 Apr 2023 22:50:43 +0700
Subject: [PATCH 153/222] Update installation.md (#3325)

---
 docs/installation.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/installation.md b/docs/installation.md
index e9595358..034814d6 100644
--- a/docs/installation.md
+++ b/docs/installation.md
@@ -62,7 +62,7 @@ azure_model_map:
     ...
 ```
 Details can be found here: [https://pypi.org/project/openai/](https://pypi.org/project/openai/) in the `Microsoft Azure Endpoints` section and here: [learn.microsoft.com](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/tutorials/embeddings?tabs=command-line) for the embedding model.
-If you're on Windows you may need to install [msvc-170](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170)
+If you're on Windows you may need to install [msvc-170](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170).
 
 4. 
Follow the further instructions for running Auto-GPT with [Docker](#run-with-docker) (*recommended*), or [Docker-less](#run-docker-less) From 4241fbbbf0333f6b28d46de766035302b4e16325 Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Wed, 26 Apr 2023 09:11:31 -0700 Subject: [PATCH 154/222] mock openai in test image gen (#3285) --- tests/test_agent_manager.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/tests/test_agent_manager.py b/tests/test_agent_manager.py index e02a6a59..d1bb3d8b 100644 --- a/tests/test_agent_manager.py +++ b/tests/test_agent_manager.py @@ -1,9 +1,7 @@ -from typing import List - import pytest from autogpt.agent.agent_manager import AgentManager -from tests.utils import requires_api_key +from autogpt.llm_utils import create_chat_completion @pytest.fixture @@ -29,7 +27,16 @@ def model(): return "gpt-3.5-turbo" -@requires_api_key("OPENAI_API_KEY") +@pytest.fixture(autouse=True) +def mock_create_chat_completion(mocker): + mock_create_chat_completion = mocker.patch( + "autogpt.agent.agent_manager.create_chat_completion", + wraps=create_chat_completion, + ) + mock_create_chat_completion.return_value = "irrelevant" + return mock_create_chat_completion + + def test_create_agent(agent_manager, task, prompt, model): key, agent_reply = agent_manager.create_agent(task, prompt, model) assert isinstance(key, int) @@ -37,7 +44,6 @@ def test_create_agent(agent_manager, task, prompt, model): assert key in agent_manager.agents -@requires_api_key("OPENAI_API_KEY") def test_message_agent(agent_manager, task, prompt, model): key, _ = agent_manager.create_agent(task, prompt, model) user_message = "Please translate 'Good morning' to French." @@ -45,7 +51,6 @@ def test_message_agent(agent_manager, task, prompt, model): assert isinstance(agent_reply, str) -@requires_api_key("OPENAI_API_KEY") def test_list_agents(agent_manager, task, prompt, model): key, _ = agent_manager.create_agent(task, prompt, model) agents_list = agent_manager.list_agents() @@ -53,7 +58,6 @@ def test_list_agents(agent_manager, task, prompt, model): assert (key, task) in agents_list -@requires_api_key("OPENAI_API_KEY") def test_delete_agent(agent_manager, task, prompt, model): key, _ = agent_manager.create_agent(task, prompt, model) success = agent_manager.delete_agent(key) From 0ff471a49a04904880d763517682059499b13af1 Mon Sep 17 00:00:00 2001 From: James Collins Date: Wed, 26 Apr 2023 09:37:49 -0700 Subject: [PATCH 155/222] Have api manager use singleton pattern (#3269) Co-authored-by: Nicholas Tindle --- autogpt/api_manager.py | 25 ++++++++++--------------- autogpt/chat.py | 3 ++- autogpt/llm_utils.py | 4 +++- autogpt/memory/base.py | 3 --- autogpt/prompts/prompt.py | 3 ++- tests/conftest.py | 8 +++----- tests/unit/test_commands.py | 11 ++++++----- 7 files changed, 26 insertions(+), 31 deletions(-) diff --git a/autogpt/api_manager.py b/autogpt/api_manager.py index ace64fbd..52b94ff7 100644 --- a/autogpt/api_manager.py +++ b/autogpt/api_manager.py @@ -1,22 +1,19 @@ -from typing import List +from __future__ import annotations import openai from autogpt.config import Config from autogpt.logs import logger from autogpt.modelsinfo import COSTS - -cfg = Config() -print_total_cost = cfg.debug_mode +from autogpt.singleton import Singleton -class ApiManager: - def __init__(self, debug=False): +class ApiManager(metaclass=Singleton): + def __init__(self): self.total_prompt_tokens = 0 self.total_completion_tokens = 0 self.total_cost = 0 self.total_budget = 0 - self.debug = debug def 
reset(self): self.total_prompt_tokens = 0 @@ -28,7 +25,7 @@ class ApiManager: self, messages: list, # type: ignore model: str | None = None, - temperature: float = cfg.temperature, + temperature: float = None, max_tokens: int | None = None, deployment_id=None, ) -> str: @@ -42,6 +39,9 @@ class ApiManager: Returns: str: The AI's response. """ + cfg = Config() + if temperature is None: + temperature = cfg.temperature if deployment_id is not None: response = openai.ChatCompletion.create( deployment_id=deployment_id, @@ -59,8 +59,7 @@ class ApiManager: max_tokens=max_tokens, api_key=cfg.openai_api_key, ) - if self.debug: - logger.debug(f"Response: {response}") + logger.debug(f"Response: {response}") prompt_tokens = response.usage.prompt_tokens completion_tokens = response.usage.completion_tokens self.update_cost(prompt_tokens, completion_tokens, model) @@ -81,8 +80,7 @@ class ApiManager: prompt_tokens * COSTS[model]["prompt"] + completion_tokens * COSTS[model]["completion"] ) / 1000 - if print_total_cost: - print(f"Total running cost: ${self.total_cost:.3f}") + logger.debug(f"Total running cost: ${self.total_cost:.3f}") def set_total_budget(self, total_budget): """ @@ -128,6 +126,3 @@ class ApiManager: float: The total budget for API calls. """ return self.total_budget - - -api_manager = ApiManager(cfg.debug_mode) diff --git a/autogpt/chat.py b/autogpt/chat.py index 21eab6a0..4b906a00 100644 --- a/autogpt/chat.py +++ b/autogpt/chat.py @@ -3,7 +3,7 @@ import time from openai.error import RateLimitError from autogpt import token_counter -from autogpt.api_manager import api_manager +from autogpt.api_manager import ApiManager from autogpt.config import Config from autogpt.llm_utils import create_chat_completion from autogpt.logs import logger @@ -134,6 +134,7 @@ def chat_with_ai( # Move to the next most recent message in the full message history next_message_to_add_index -= 1 + api_manager = ApiManager() # inform the AI about its remaining budget (if it has one) if api_manager.get_total_budget() > 0.0: remaining_budget = ( diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py index 212928ca..a98b12a3 100644 --- a/autogpt/llm_utils.py +++ b/autogpt/llm_utils.py @@ -8,7 +8,7 @@ import openai from colorama import Fore, Style from openai.error import APIError, RateLimitError, Timeout -from autogpt.api_manager import api_manager +from autogpt.api_manager import ApiManager from autogpt.config import Config from autogpt.logs import logger from autogpt.types.openai import Message @@ -147,6 +147,7 @@ def create_chat_completion( ) if message is not None: return message + api_manager = ApiManager() response = None for attempt in range(num_retries): backoff = 2 ** (attempt + 2) @@ -228,6 +229,7 @@ def get_ada_embedding(text: str) -> List[float]: kwargs = {"model": model} embedding = create_embedding(text, **kwargs) + api_manager = ApiManager() api_manager.update_cost( prompt_tokens=embedding.usage.prompt_tokens, completion_tokens=0, diff --git a/autogpt/memory/base.py b/autogpt/memory/base.py index a6c92db6..d99cadde 100644 --- a/autogpt/memory/base.py +++ b/autogpt/memory/base.py @@ -1,11 +1,8 @@ """Base class for memory providers.""" import abc -from autogpt.config import Config from autogpt.singleton import AbstractSingleton -cfg = Config() - class MemoryProviderSingleton(AbstractSingleton): @abc.abstractmethod diff --git a/autogpt/prompts/prompt.py b/autogpt/prompts/prompt.py index cc06fabb..68d8a336 100644 --- a/autogpt/prompts/prompt.py +++ b/autogpt/prompts/prompt.py @@ -1,6 +1,6 @@ from colorama 
import Fore -from autogpt.api_manager import api_manager +from autogpt.api_manager import ApiManager from autogpt.config.ai_config import AIConfig from autogpt.config.config import Config from autogpt.logs import logger @@ -115,6 +115,7 @@ Continue (y/n): """ config.save(CFG.ai_settings_file) # set the total api budget + api_manager = ApiManager() api_manager.set_total_budget(config.api_budget) # Agent Created, print message diff --git a/tests/conftest.py b/tests/conftest.py index 9ab49189..745f8b5d 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -3,7 +3,6 @@ from pathlib import Path import pytest from autogpt.api_manager import ApiManager -from autogpt.api_manager import api_manager as api_manager_ from autogpt.config import Config from autogpt.workspace import Workspace @@ -32,7 +31,6 @@ def config(workspace: Workspace) -> Config: @pytest.fixture() def api_manager() -> ApiManager: - old_attrs = api_manager_.__dict__.copy() - api_manager_.reset() - yield api_manager_ - api_manager_.__dict__.update(old_attrs) + if ApiManager in ApiManager._instances: + del ApiManager._instances[ApiManager] + return ApiManager() diff --git a/tests/unit/test_commands.py b/tests/unit/test_commands.py index a749cc6d..8596e1c9 100644 --- a/tests/unit/test_commands.py +++ b/tests/unit/test_commands.py @@ -3,8 +3,7 @@ from unittest.mock import MagicMock, patch import pytest -import autogpt.agent.agent_manager as agent_manager -from autogpt.app import execute_command, list_agents, start_agent +from autogpt.app import list_agents, start_agent from tests.utils import requires_api_key @@ -14,9 +13,11 @@ def test_make_agent() -> None: """Test that an agent can be created""" # Use the mock agent manager to avoid creating a real agent with patch("openai.ChatCompletion.create") as mock: - obj = MagicMock() - obj.response.choices[0].messages[0].content = "Test message" - mock.return_value = obj + response = MagicMock() + response.choices[0].messages[0].content = "Test message" + response.usage.prompt_tokens = 1 + response.usage.completion_tokens = 1 + mock.return_value = response start_agent("Test Agent", "chat", "Hello, how are you?", "gpt-3.5-turbo") agents = list_agents() assert "List of agents:\n0: chat" == agents From a6355a6bc801790a5766e4655ffc08f1db1ebec8 Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Wed, 26 Apr 2023 09:57:05 -0700 Subject: [PATCH 156/222] use pytest-recording with VCR (#3283) --- requirements.txt | 2 +- tests/conftest.py | 1 + tests/integration/conftest.py | 17 + .../test_write_file/test_write_file.yaml | 243 +++++++ .../cassettes/write_file.vcr.yml | 616 ------------------ .../goal_oriented/test_write_file.py | 33 +- .../vcr_helper.py => vcr/openai_filter.py} | 16 +- 7 files changed, 278 insertions(+), 650 deletions(-) create mode 100644 tests/integration/conftest.py create mode 100644 tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml delete mode 100644 tests/integration/goal_oriented/cassettes/write_file.vcr.yml rename tests/{integration/goal_oriented/vcr_helper.py => vcr/openai_filter.py} (100%) diff --git a/requirements.txt b/requirements.txt index 4932ecc3..b7df2636 100644 --- a/requirements.txt +++ b/requirements.txt @@ -50,4 +50,4 @@ pytest-cov pytest-integration pytest-mock vcrpy -pytest-vcr +pytest-recording diff --git a/tests/conftest.py b/tests/conftest.py index 745f8b5d..20968706 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -5,6 +5,7 @@ import pytest from autogpt.api_manager import ApiManager from autogpt.config import Config 
from autogpt.workspace import Workspace +from tests.vcr.openai_filter import before_record_request @pytest.fixture() diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py new file mode 100644 index 00000000..3c2e02b3 --- /dev/null +++ b/tests/integration/conftest.py @@ -0,0 +1,17 @@ +import pytest + +from tests.vcr.openai_filter import before_record_request + + +@pytest.fixture +def vcr_config(): + # this fixture is called by the pytest-recording vcr decorator. + return { + "record_mode": "new_episodes", + "before_record_request": before_record_request, + "filter_headers": [ + "authorization", + "X-OpenAI-Client-User-Agent", + "User-Agent", + ], + } diff --git a/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml b/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml new file mode 100644 index 00000000..1a5bba5c --- /dev/null +++ b/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml @@ -0,0 +1,243 @@ +interactions: +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are write_to_file-GPT, an AI designed to use the write_to_file command to write + ''Hello World'' into a file named \"hello_world.txt\" and then use the task_complete + command to complete the task.\nYour decisions must always be made independently + without seeking user assistance. Play to your strengths as an LLM and pursue + simple strategies with no legal complications.\n\nGOALS:\n\n1. Use the write_to_file + command to write ''Hello World'' into a file named \"hello_world.txt\".\n2. + Use the task_complete command to complete the task.\n3. Do not use any other + commands.\n\n\nConstraints:\n1. ~4000 word limit for short term memory. Your + short term memory is short, so immediately save important information to files.\n2. + If you are unsure how you previously did something or want to recall past events, + thinking about similar events will help you remember.\n3. No user assistance\n4. + Exclusively use the commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. + append_to_file: Append to file, args: \"filename\": \"\", \"text\": + \"\"\n2. delete_file: Delete file, args: \"filename\": \"\"\n3. + read_file: Read file, args: \"filename\": \"\"\n4. search_files: Search + Files, args: \"directory\": \"\"\n5. write_to_file: Write to file, + args: \"filename\": \"\", \"text\": \"\"\n6. delete_agent: Delete + GPT Agent, args: \"key\": \"\"\n7. get_hyperlinks: Get text summary, args: + \"url\": \"\"\n8. get_text_summary: Get text summary, args: \"url\": \"\", + \"question\": \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. + message_agent: Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. + start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. Do Nothing: \"do_nothing\", args: \"reason\": + \"\"\n13. Task Complete (Shutdown): \"task_complete\", args: \"reason\": + \"\"\n\nResources:\n1. Internet access for searches and information + gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered Agents for delegation + of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. Continuously + review and analyze your actions to ensure you are performing to the best of + your abilities.\n2. Constructively self-criticize your big-picture behavior + constantly.\n3. Reflect on past decisions and strategies to refine your approach.\n4. + Every command has a cost, so be smart and efficient. 
Aim to complete tasks in + the least number of steps.\n5. Write all code to a file.\n\nYou should only + respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past:\n\n\n"}, {"role": "user", "content": + "Determine which next command to use, and respond using the format specified + above:"}], "temperature": 0, "max_tokens": 3205}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '3554' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA7yTQW/aQBCF7/0Vo7lwMQgIKeBbemmjtlIPtFFUV2hZD/aW9Y6zOy5IyP+9MjYh + ccWpVa/zxu99zx4f0aQYo86V6KK0w/lSr+6eZl8W04P6fNh8evdkH2x59/jx/exbhhHy5idp6Z4Y + aS5KS2LYYYTakxJKMZ68XUxvp/PJYhZhwSlZjDErZXgzuh1K5Tc8HN+MJxhhFVRGGB+x9FyUshbe + kQsYz5fTCC/ez/PJch6hsCj7PFoulnWEOmejKWD8/YgFhbOtZ0sYowrBBFFOGkh2Qq4pcEwcAECC + knOV5RISjKEbdgIdpBkmeA97Yy1UgUBygr03Qmvh9dZYAs1FoVwKwq0Agw9kLcMDe5sOwDhhUHBa + daqgFAZ5o6/3jT6SgwxGCUYvgz2pwM64rE1f5QQZKwsm/E0IrK6iN8Y5QcFBQJWl59IbJRd9yx4k + b7ZU2PVpS6tcCzqEr//pDWlvxGgTivP3cUQnf3Kh8g2DEriHXP1qeTR7T1pA+awqyEnoKl0h7ceF + ktTu+ik0wf+uaBtcR+f77Ez/OM/GoWV61aGHrnzWv+xWaHYvFj2OVyb9v+FFpzPtibgDT1yNdYRb + 40zI1+0tY4xBuMQIjUvpgPG4/lG/+Q0AAP//AwCiz6SNfQQAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7be04376c97296c3-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Wed, 26 Apr 2023 16:40:03 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '18864' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-reset-requests: + - 17ms + x-request-id: + - 77d98331ebd579dd929c8de10a83a035 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are write_to_file-GPT, an AI designed to use the write_to_file command to write + ''Hello World'' into a file named \"hello_world.txt\" and then use the task_complete + command to complete the task.\nYour decisions must always be made independently + without seeking user assistance. Play to your strengths as an LLM and pursue + simple strategies with no legal complications.\n\nGOALS:\n\n1. Use the write_to_file + command to write ''Hello World'' into a file named \"hello_world.txt\".\n2. + Use the task_complete command to complete the task.\n3. Do not use any other + commands.\n\n\nConstraints:\n1. ~4000 word limit for short term memory. 
Your + short term memory is short, so immediately save important information to files.\n2. + If you are unsure how you previously did something or want to recall past events, + thinking about similar events will help you remember.\n3. No user assistance\n4. + Exclusively use the commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. + append_to_file: Append to file, args: \"filename\": \"\", \"text\": + \"\"\n2. delete_file: Delete file, args: \"filename\": \"\"\n3. + read_file: Read file, args: \"filename\": \"\"\n4. search_files: Search + Files, args: \"directory\": \"\"\n5. write_to_file: Write to file, + args: \"filename\": \"\", \"text\": \"\"\n6. delete_agent: Delete + GPT Agent, args: \"key\": \"\"\n7. get_hyperlinks: Get text summary, args: + \"url\": \"\"\n8. get_text_summary: Get text summary, args: \"url\": \"\", + \"question\": \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. + message_agent: Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. + start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. Do Nothing: \"do_nothing\", args: \"reason\": + \"\"\n13. Task Complete (Shutdown): \"task_complete\", args: \"reason\": + \"\"\n\nResources:\n1. Internet access for searches and information + gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered Agents for delegation + of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. Continuously + review and analyze your actions to ensure you are performing to the best of + your abilities.\n2. Constructively self-criticize your big-picture behavior + constantly.\n3. Reflect on past decisions and strategies to refine your approach.\n4. + Every command has a cost, so be smart and efficient. Aim to complete tasks in + the least number of steps.\n5. Write all code to a file.\n\nYou should only + respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past:\nNone\n\n"}, {"role": "user", "content": + "Determine which next command to use, and respond using the format specified + above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I will use the write_to_file command to write ''Hello World'' into a file + named ''hello_world.txt''.\",\n \"reasoning\": \"The goal is to write + ''Hello World'' into a file named ''hello_world.txt''. 
The write_to_file command + is the most appropriate command for this task.\",\n \"plan\": \"- Use + the write_to_file command to write ''Hello World'' into a file named ''hello_world.txt''.\",\n \"criticism\": + \"I need to ensure that I have the correct arguments for the write_to_file command.\",\n \"speak\": + \"I will use the write to file command to write ''Hello World'' into a file + named ''hello_world.txt''.\"\n },\n \"command\": {\n \"name\": + \"write_to_file\",\n \"args\": {\n \"filename\": \"hello_world.txt\",\n \"text\": + \"Hello World\"\n }\n }\n}"}, {"role": "system", "content": "Command + write_to_file returned: File written to successfully."}, {"role": "user", "content": + "Determine which next command to use, and respond using the format specified + above:"}], "temperature": 0, "max_tokens": 2955}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '4667' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA6SS3W7aQBCF7/sUo7nhxiDApaR+gsBNG4n0R3WENuvB3rLesXbHgQr53SvHJlAS + pEi9nTNz5pvdc0CTYYK6UKLLyg7nn/Xq29OCfqrFF728i5c/vtq7eJHH98vdLUbIj79JSz8x0lxW + lsSwwwi1JyWUYTL5dDOdTefT8SzCkjOymGBeyTAezYZS+0cejuPxBCOsg8oJkwNWnstK1sJbcgGT + yTiOIzyZn4TZxwiFRdlTaXIzbyLUBRtNAZNfBywpHI09W8IEVQgmiHLSYrITcu0Jh9QBAKQoBdd5 + ISHFBPpiL9Be2mKKC9gZa6EOBFIQiArbdQ9IoLkslctAGF5qx65RitG5pScV2BmXd76rgiBnZcGE + N8eBN7DzRozLYXBL1jJ8Z2+zARgnDAo2xhI4VVIGg6LV17tWH8leBiNYXWVt9xUEJQcBVVWeK2/U + mb5hD1K0XW8cUVnlOv4h3P/3k+j2Pm1CeXxqR/Q8SS7Uvp1TAgso1FPnodl70gLK53VJTkIPe4Xh + cl2oSG2v/iq894TOsomOIerbX2Wo/Ztu2z90F1DK55fxO89LH5ZzvgxCrTWFsKmt/fMC9AzVs6Wu + wSbCjXEmFOvOCRMMwhVGaFxGe0zGzUPz4S8AAAD//wMARSTZRwcEAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7be043fc3bba96c3-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Wed, 26 Apr 2023 16:40:20 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '15127' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-reset-requests: + - 17ms + x-request-id: + - 468c3c77efd74e3a2d396ef6033a5602 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integration/goal_oriented/cassettes/write_file.vcr.yml b/tests/integration/goal_oriented/cassettes/write_file.vcr.yml deleted file mode 100644 index ec31a7d6..00000000 --- a/tests/integration/goal_oriented/cassettes/write_file.vcr.yml +++ /dev/null @@ -1,616 +0,0 @@ -interactions: -- request: - body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You - are write_to_file-GPT, an AI designed to use the write_to_file command to write - ''Hello World'' into a file named \"hello_world.txt\" and then use the task_complete - command to complete the task.\nYour decisions must always be made independently - without seeking user assistance. Play to your strengths as an LLM and pursue - simple strategies with no legal complications.\n\nGOALS:\n\n1. 
Use the write_to_file - command to write ''Hello World'' into a file named \"hello_world.txt\".\n2. - Use the task_complete command to complete the task.\n3. Do not use any other - commands.\n\n\nConstraints:\n1. ~4000 word limit for short term memory. Your - short term memory is short, so immediately save important information to files.\n2. - If you are unsure how you previously did something or want to recall past events, - thinking about similar events will help you remember.\n3. No user assistance\n4. - Exclusively use the commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. - append_to_file: Append to file, args: \"filename\": \"\", \"text\": - \"\"\n2. delete_file: Delete file, args: \"filename\": \"\"\n3. - read_file: Read file, args: \"filename\": \"\"\n4. search_files: Search - Files, args: \"directory\": \"\"\n5. write_to_file: Write to file, - args: \"filename\": \"\", \"text\": \"\"\n6. delete_agent: Delete - GPT Agent, args: \"key\": \"\"\n7. get_hyperlinks: Get text summary, args: - \"url\": \"\"\n8. get_text_summary: Get text summary, args: \"url\": \"\", - \"question\": \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. - message_agent: Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. - start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", - \"prompt\": \"\"\n12. Do Nothing: \"do_nothing\", args: \n13. Task Complete - (Shutdown): \"task_complete\", args: \"reason\": \"\"\n\nResources:\n1. - Internet access for searches and information gathering.\n2. Long Term memory - management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File - output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your - actions to ensure you are performing to the best of your abilities.\n2. Constructively - self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions - and strategies to refine your approach.\n4. Every command has a cost, so be - smart and efficient. Aim to complete tasks in the least number of steps.\n5. 
- Write all code to a file.\n\nYou should only respond in JSON format as described - below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": - \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- - long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": - \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": - \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} - \nEnsure the response can beparsed by Python json.loads"}, {"role": "system", - "content": "The current time and date is Tue Jan 01 00:00:00 2000"}, {"role": - "system", "content": "This reminds you of these events from your past:\n\n\n"}, - {"role": "user", "content": "Determine which next command to use, and respond - using the format specified above:"}], "temperature": 0.0, "max_tokens": 3210}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '3531' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: !!binary | - H4sIAAAAAAAAA7yTT4/TMBDF73yK0Vx6cas21bbZ3Lgg9oIEAi2CoMpNpomp4wn2RC2q8t1R/nS7 - G8QJaa/zZt772R5f0OSYYFZqyarazrfx22/H08auyG0fo3fm6zr6dfcpjxv6cPiICnn/kzIZJxYZ - V7UlMexQYeZJC+WYrDZxtIpXm3WksOKcLCZY1DJfL+7m0vg9z5fr5QoVNkEXhMkFa89VLTvhI7mA - yTbeKrx5P9WjZaRQWLR9Kt3H963CrGSTUcDk+wUrCldbz5YwQR2CCaKddJDshFx3gEvqAABSlJKb - opSQYgJjcRToLF0xxQc4GWuhCQRSEpy8EdoJ7w7GEmRcVdrlIDwIMHtP1jI8srf5DIwTBg19q9MV - 5TArO3136vSFnGW2SFE9D/akAzvjiiH9c0lQsLZgwn+EKOgh/4nfmZcEFQcBXdeea2+03PQDe5Cy - 69LhOCWurXYD7By+vNItZd6IyUyorm/kiHp/cqHxHYMWeABdQROMK3qmjL2nTG4546Vo6eW+bALs - qRvoOIXcdcj+nhKEmvTxtTZkCG7VdW1H07+2tnMYmF5ATNC1L6YLPwhd781iwvHCZPpJnp3pStsT - j+Cpa7FVeDDOhHI3rDgmGIRrVGhcTmdMlu2P9s0fAAAA//8DAKiDWQmUBAAA - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7bbf4f1f88c715ca-SJC - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Sat, 22 Apr 2023 16:40:46 GMT - Server: - - cloudflare - Transfer-Encoding: - - chunked - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-model: - - gpt-3.5-turbo-0301 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '14187' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3500' - x-ratelimit-remaining-requests: - - '3499' - x-ratelimit-reset-requests: - - 17ms - x-request-id: - - dd915979acb69f7fba7d79dc709e4440 - status: - code: 200 - message: OK -- request: - body: '{"input": ["Assistant Reply: {\n \"thoughts\": {\n \"text\": - \"I will use the write_to_file command to write ''Hello World'' into a file - named ''hello_world.txt''.\",\n \"reasoning\": \"The goal is to write - ''Hello World'' into a file named ''hello_world.txt'', and the write_to_file - command is the most appropriate command for this task.\",\n \"plan\": - \"- Use the write_to_file command to write ''Hello World'' into a file named - ''hello_world.txt''.\",\n \"criticism\": \"I need to ensure that I am - using the correct file name and that the file is being written correctly.\",\n \"speak\": - \"I will use the write_to_file command to write ''Hello World'' into a file - named ''hello_world.txt''.\"\n },\n \"command\": {\n \"name\": - \"write_to_file\",\n \"args\": {\n \"filename\": \"hello_world.txt\",\n 
\"text\": - \"Hello World\"\n }\n }\n} \nResult: Command write_to_file returned: - File written to successfully. \nHuman Feedback: "], "model": "text-embedding-ada-002", - "encoding_format": "base64"}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '1071' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/embeddings - response: - body: - string: !!binary | - H4sIAAAAAAAAA1R6WQ+6TLPn/fspnjy3zhtW6eK5Y5NdWgUVJ5MJKC4gsnYDfXK++0T/J2dybkyA - DqabqvotVf/xr7/++rvJy+I6/v3PX3+/X8P49//63rtlY/b3P3/973/99ddff/3H7/d/rCzqvLjd - Xp/Hb/nv4etzK+a///mL/+87/3/RP3/9fciYTRYqV4yVY1SjUT1e6fYA557e/M8AY3Z/0W3Kezmf - nNe8Au1Rot4Oa+WwuT8AqkqgdPtOxn6hp1uBRmSKpLoYSjzJU7dbv2lEsQnGHs2pJe/QEqgrvLnD - sx81xl2Ro+ZbIo1ulS/+zpvAGBuTXvR2iknMzzJcL8oem9aT5ONe+qyQIXsR3VApzNnILgQeXnal - 7rSN89naRAD1m99SfdBe8cJv2gQ29LPC2iO95sNTEm31u19shh2L59Z9LOrz7u/o1k3eQb8f+AbK - WhtxZhEuX/bLo1H19hRGS62xuM/bdYr686nF+i1bgu77fpTy6z0O0/M2WGxjJ6NZiM5YK4ZXPDXZ - 0YBYIDlR1VTNi8u+1dZpltrYPKp9Xr7C9ISMVpOw1toKI0FYhurV9km0aNIbTcomTWB4iDrWHuqR - LS7KCarifKHOZ3cox9cVXeGsKRR7Kzvsh+vYL8rtltzxpYmnnGFV89Vtu+siEJ9NyTaaeQV+yvrf - eccT5yk8+PL7iQ0a9zETRCIj3mNbap+eQ9zobjso5i0hNHusakTfOnTg2GzC9sXjzPa0TmuQ6rzA - +PL49AsqZlF9PYmII+7Uo3kO1BU4/Emj4fvaoYHYfgKXOOBoiCW3F6bnvEParYwirtYCJm28Cw/5 - bdtRcxi0crm+piuEqW/QTf42cnG6bg5KLR18irtzYIroWF6hTJMYe+V7X3Yv0Fao0Jsz3gV0QVNe - vCu4APQU+8cb+hNf7V728Zl3TvlQ5V0CphO/aXj0H2zitOoKnankEXqIbUmO2qVGlSLL2FAOt3x2 - TkW6tvjXg3BJRRClqzlRgwGlWDP1JGays0pBuG1zujX1ATXuJKdwzKo6miNLQ8LzHDQomZjyPe8g - lmYJQhj79T1678cdEtDovIBMrMfBWjwEorQaXBB0sYmWLZQlMcU5hHo6ptGcbd9smt24hnd4Eqh5 - CN7l4vkXQLEw5DhMwe4HTqsKlYuaEIfnnA+mQ75LQU3qJ9aeLxxPUrdcVSKRGw4MxQ3mi74nXJi6 - Bg3O4RK8hSGoIT6IT2xyt9ScnYb5cD9abxzpFPrhvt9pan2736hm1ls2dxs/BGSzA41YrgdTzL2j - 3/po5k4sJ/NTtYHaIo+dZn4HkxFdHtAckErt+aMhejztyFpYMqBuqznBIu7dlZIk/IK94KkzsZ7H - g3Le6XbEm7wUz8LqcQIpUZ/4WBIn7yK4d7A52jyN4uBSNmm8WcHhnh2oO3thMKvyxwdbOLOI8zQz - np7x86DYjrPB2nb3zllCtwoSL7JB8WZVlMxS4hqOn8uZ8EmfB4uiFuJ/5X8yxKXg3IflT71Tqmno - CbmAhsx7k2AfB37OLscgReujfaIW70aM1/eyDWVJnWg98q9+OQdzpGpRJkXSRxjMsRRcA4pWQtTu - KTbHM+emYN30C/ayw7ofoyw9wC9+vFknbJqy6QSxrOlUC+gBsb7oBzDVtYE1bXoG85LxPoTlQ6Ru - 6pX95ItNCCei8ThSMhawiOgr2EsoxNu6eJTLruyuiHfuH2qM1YoN3ZvvQOvOHHV4R4wX2iwhBFMH - 2DtJcbBcnW2CjINK6AbIZDKF6q7yPR/q8/05n+6u3oF+w2vCb5QybxnTCOC9fKGXUpPNxbdRijTB - OGK3ybf9JDRtCM2tOhBpk8xxfQsFH+1zK8c55jaMzbrRwNHAEvV9PPdjs+4e8Gl3OVmvejvolNcu - VRciyNQTX89gaUPTB/XOJdh5reOcTNKUqp4QxOSXb6J/SX2lPOz9aJKUTUA/22QHDp9o9LCFsp9W - ClS/eKURXo0xOQfrUPEGe0MNQmbEQHlM6om7znSLBjPnj7e2Qyudq+kmeljl3KxfL/Ux7E54c7yL - 5byMqQKobcPovT3N5TBzGxfMe5dEzDkjczpbeEFGn9zo8RE/zWkWxAXtjsnnVw/7xkD8ABuzkyLF - uAWIfz8zH33rD1k0aYMY4uUVGHIQYXsbJfl036cGWsWX8r/ip5muLmyPpYPDWq/yqajrBu5m15B2 - WyFzFq00Qt9r+l1fUr/WZLVqrygK0e0ZM+njWrCuXzHGvnKOx/pwn5StMnzw5hyVwdyKwQraIU+o - 4/W7ctSYVIB6OCBsH+GA2LJzReVbD+g5+nj5fC/cCeybG5G1+NLN6VhPBixS1GBrqMp8QoHro23N - WeTNv3uTvoNAUyB82oQj3sRmvs4nZJHbCW9vk2EKS2U8IH6uZBxMZWay2b9oIAqSj7XsWAZLe9w/ - VFAYH3HpiS+XCM4d9Hr8wMF7ecWkPe5fsNhdQuaiepuTO8mZ+s3HSAkmk/EZ/qQg3J86xp5W5oOt - 0BRt5YseqWu9MJtM2RH1yzciJRWeaFBAl5XVcfSoqTy5uNUNdYDq2vi44E4Bmz7JO4Sx/awjaA5n - NsPm6qPrgbtTa0JGP3vPlYEUx/6Q1j8TNP/w9psf1En3DzZZu9cCwSGNsK09joh+4xEumTXj9Pp4 - sflw8QEOQUejz2bdIaKRyQXQ3ZgG+nQ0mSH7J6SWRKa62PBs7vWyg1DaXXChfu750PfDFYkXxaD4 - 6io5aY+XFxpnaPGB133zx78Qq1GMQw47wXw3jh1a2keEtT7TkJihdwcoewj4kuj1l6/tB9ic04xe - 0U2PF3fZnGB/GAe8CefUpKvqXcCS2ntCY/MaT1yshwDMreiByhVip62foI7kb9IGzycahZcQgZOV - MY2k5RH/8A+MKu9xaJKypD++80rkDU20lV8yuGxE9dqfImoGJIuZnaYGOG3fRUzvn/lczc8CSfp2 - 
FT064ROzxV4KCMuXGPEvpUfVEpkp7EJOouGzewfUW4k2fOMR++e1yxp5LTfom8/UE1Jikpm/8LCh - 7xU1qGwhMTjOFVqlBsXOl3/RL9+CcnrK1ItIZPKleZtg1NMjNnXbQWx7Nxe42MeIdNePU37zwQUz - 110aXO9+PB2TfAfBsnphu0lcxq+5poA5jGOCHvud2dXNSYHSqiZ6HZkfTPJa7pB4rq4E8AEH85S+ - FGWKlIhqrNywebVjC3jp6UX/1AtYpBNqO4fQYAgzNLjTlKmm9wlouGN6L9gXHsB09m/q1P3FJLKV - GKoUyHEkq1JXfvlJpdyVw4a6UnbP2X7gO1jaV/TN11fAnGQuVGB+hc1jumcT5y08XG2X/PhkSfXl - 8EC912B6+eLd8uV3v/zEDptENn3rLXz6Wsf6c97nvRXKGaAgK7CtBmE8B9ySgVKHKyLxA0aTV+Qd - snDCUW/kjXLO3vUDbqrMsMkXdfzFn+gXP1SvR98U+NsmUypdFSLFx/ueJfJ1B03D2djc14M5EdtI - 1MgpUzKF9iFn6XM6ATyHDz2BfokX1+p8ZMkHE/slzszpuplS2BwtnmLTnfs5Np0OUuukElQ6gMa6 - gpdsT5pH7wI6ffmvXECpCVuq+W+ODc5TiWAtrXa/eAmW6+VlQyV/DtT9nveMDp6N3m1QU+cUJGj8 - MGUAP4iu2OvJn/ddoYGhjV4f387F230vQ9PvBmzt1sec17l8hyo1KrDpnFEwdPIlBSfIABunjdsT - Idj7MATdA2/FYDbJ8hkUMLvgTuCD5HxWA3OFnm2bU8/xX+XMtVoCe+FT46i3dNZyli1C9tyVeKdN - ekDaMPBRRy5vGt7iN1o8SfXhU/U4atc+Q+2nq07AZaZGN1vUBxPQBFSiO1okSnISEGw4Lzi1lwZr - neDk43OrHtCkCCeSwiiV9JFzPuzqqYrWafruJ6SvJ7gf7Te2fVVjgqe2LjRUw0Scbls2P9smg+sx - 3mBvugr5lET9Fbn1aU+Dh9j20/7Y1Ci/4Y4Gi6SZkvJeFmhC90gjK3uZ87J/L8hvbz62jju+H/HD - tdGSWnvqRAZF85F9+cKh0fFtufTxUhwqUO8N7/7wOJ73EgU4CpuZRgo39ENwPj1AfV4O2Hixjzk4 - 2vkAXBucqUNXszlJnXJVKu5lUetiZDGJIriCOUVt9Mr1Zz6P94emWo9JpUlRDuVCF7cG1j/u9Eqo - wWjzfoggHrgd3QQCZ85KeNlBRI0Cu7M3/PicDzlpP1/8XtD4bnIbJDJt8S20OzR/+QRy2raLyi0e - UHVySAL87TGR5RVt+1EYggpWdrVg97z1S3FXdgUU2Vhho91y5TNkmwKln1Aiwmapg8m+GimarlXz - 5f+fYPKClYJqflXhQGzfiKK+qxCZ5p7qO2XL3uig20Dqw4NIaJ2zWZxRCltV21L/Fj6DiQaKixpP - pBE8N0H5OSCFh0dAjAh2TC8F951UqrKGJ5HWjdvPNy9bIHNuInbuRYgkDhYDxFa74GA8bdE0atkD - 6JELqLZaSwGL8ncEiwIBtg/+aC6vrDbgpioMB4/9ZDZekTfo6PltdJeuLhpCWtrgnTSgQVu5wcRi - FCranefxHlVj/4fPj+17Tfi15ZRtn7gZSLnWffmgbnbKK03hIuprbK2tT7lkaOwAlJmnZri5oJ5j - fQd5MI9YS7EUL178OKnTWb1GL9Z1JXMpJ8P5Nt4J2NXU00sgGPDzU2w1GPL5qiQvldcGjdq++mCM - Ci9Qy6mUqS57Tj6rSXaCaFkMarDNPlhOVZ0Buvs6DdWHUk4xN4aQ38sd9RrZQ5OBYICFRwbdaoFm - jknV1fD1e6LqfHrkS6XLPApDkxLxzlf5rMrUB/vmR9jdKktJxL22UqJnsqFB4vbmON4b7ccXqWlq - OZoC5jUQRE1H1kbkIcIJs41WlfQg6GKZbEALeoBA2pSsv3g8nO+K8sNPHH733+ftnMJXL9PixlXB - 3CG2KC/dfxEl0qV4OcGygvlGd9gtZtEcDms1ResncgiThU2/HKXAhSssATWS6hQPD6u0UJtxZxpG - 47GfvECUIa4OBf7WbzTHhbLASv6YRPzq3cVIQYSrKbk/vtK/3+FnhSr5faAGX5v9LPmuAWg7WPhi - idDPBTbqX72j1iH7lNPP77HMVUg38nnHmmnfAprs5x0fo4fVi9KA09/3xa+9R9GIRucB9XRO8Xa5 - 9Dm5XjoL0vie4KhZnQK2bi8nlH4iidSDTfrFi5sEPsqQYi20l3hsuSxBXT0dqSOIBhKUNRRgHDhC - rbXl9MKat5o/+Od9/adl61kdLFLY4BPPjyYtLqMCjcAkajm1an7r8YLKRSqoeXpZaNH7SwXHqxx+ - 9WeB2Ncfg59+CgfrVi7pOkmhvjOTWsddUs4vuovUH576X39qCh6VDPxC9ji6CnMw0gOyIUnEhboG - 0krRmL0M4gP/xD/+K3h9mMLjcOuwNvPrePz5k5HHr8jJwEa8uFv5gVIFsm++NiaTDLuGWQjP+EaM - ISCNHZ/AlZaIrOe6yQkTews8LwrI8/Ep8rkxphV8/UCyOr9k1Dkr5QAa4QLq5M62FJSptoDF3oTN - hKWx5GbPAzqQVxtNhdOw+Ycfd2zmv++L+FE7vNDXP4ykezGwgfMWEXz7s6M/fF4O8WkFw826UWs6 - tSYd8q2/5lrvjJ2PEAZk9AURVvLbpJssp/nS2HGiSgn3JNHWfZRLojyHn94n9fx5oCVR2gFOxOCj - VWAk5Sjb7RW8rHG/+Z6VJcxZqgB30Yjk2eegvdxFgtq0nSP1nWx79voUk/KqjOuP75kj9Qbyi//o - tWqv/aIR2Qe3TvY0Rv0hXx7TM0EON7Dffspl53An5HlhgK2V9sinU3m5Iv65SzD+6jca6DhbR6Ff - UZ0tHzYRQDLaxbqO3bxqEbHXxhXdosj96lUpZ31RDsi33zvs2e82WGa1iRDNqjPdXaa2n/qLM4Ci - 60UkP6RX0K9s7arOq30Vseb4zFm9Kgzl5/ewhKW5YF8A0NCuI2wGRInbMh5t+GSuRa9XEuYyF3sh - fPERB1WDf/XNhT08UhzFwbpvzcdiwVePYOdq2iX5kGhAX75HYNWN/bz3w2Ud716Iuu1bK6cNz1XK - xBUviletYLKrxclAwjvDRriVgkkXQxltLkSLutwWTHq9WQVKpGEg8ufOlzPVTglC6RbT28vR488t - Fi3YVcUqmlfzmo2THbmQNNWRWitNi2fdEAicF6RRe9w9S6aV1wQ8qfjiT6ub0+aU1aA9dws99M93 - P512MMBeWodYnwcwR/14KNC3vkTzpQnzRXh8FtBMw6JRWH7Mb3wsqJSuXYT0STDnFLXiT88RwnLd - nAgbX0DL9k14mu3MqaOmBt/6TO1PShHrZvcKJz20qPPFf2J+kAHFEDyp3w7vmHpnn6DumKTYcupb - 
MJzvi6Iuy+eKNa7Y9VMdqSn6+q9fvRaZQvPZLBBa2Z1iq7WCZrFUDcFNNWnYj0VAGjWU0defJuSx - k0pmhXIKSjLrdKOJY04HcdoBO95ORLo2WvDH7/76E3SzWWyT7B+mDNYx87CbcVPc7dvsgN75PcTa - OV7FrEuRAjapAuysKj0WLDodfn4GdfiBspm1SwK7eqlwhKo6WLbih4fmVh+IdtXVYAnNqwgF/9pH - PKfw8R9/7SUUFzJrXGnSWH8R1W/vfjR9453f8FL9px5tvKpn86XdWKi0lRPVr0LaL+WyVOrz7u6w - vzZO5ZJOkaHkxzqj2ntNShpVrEJ6vDexlfQomI0q7WD38lJqff0+Fkm7RE10B2NT3g0B4/PmiuRW - GKgj7NuYilAMcLinB/rzX9hxFVpwmqeUCM8mLyeJd0X45hs24dPnU1VNBE12eY+WTfQOFnnmIpSl - a4adL38TT7aXKeHtNvz8mXgkSl38+gfRXI9+wKpNHsGqLQZSPwITTSenPqE0y2zC8fqrp18/BM61 - hQmH92I5+eOFV0giljjccJLZfvtPP7+VKM6l6idS0RP6+ZF2HQGblFJNZFcoGhy+uHvPFjKKYAhC - gz0lqdm0skwDjshVcCEtj/w97FoeSFPZ+BtvJru9+AM4j/pGdTfEMfVWKxv2wrum+HbY5uzyaXyk - 64JNf/0m8kxWIQrtYYMDnA0x9a0REDveT9Qt3WM+8a91Crp9PZCpcFw2uWkpAuh+jI1TZwRMoZ4L - V9nl6Kba1Ghqspvx4490+3KeOT8U3AP9+k/OWdFzsbYMCz4ucSLh68d9+3sHFAg7N3q73CGfDff9 - +tM/0/BT7udNsvVBIsuWbh31XXYXzgphTd+PSJrvZjnPgbD60x94v7kMzavMcNUjSTzsVq3DvvHU - gFXzNtXctW3y69Ek8NNTx1PrlUuTyjYoJUrpNuytWHg3saUUKJaxRTmv/O0PRbf6Sc3P6skWacAZ - fOOfmrr9+VMv0EdOF+xskn3+1V8rCHy9+OGDyS6BqoEinI+ktx0+JsdiVYOdiQ3VlNeZLev4EEFX - XSkZrt6BjQGnpLDT3xZZ7ZQ6nsfTpQN7Mjwcfa+Hn9/61cOEhOUnmNdPqiDEDUfqn6jww1sCPz8O - X90sXqzxxcNP//jOpSpZ3BY2eGnywoGsrPOB3fQKoO9veBu903zcJo9CvSrFG9s9pea8csYU3r6O - //RXJKy6PhKijYWNX/44Qq9AwpwH9Y5IiefnVt2BPjl8NGmB0//8/HV/1E0cRqNQsvlOG3RUi0dU - 1KPRDwHzOji2RKUbbR+jCZPbAj+9YJf6rhzxarTAUS/bSOo/e/bHP/727776OOkl17UesNYThi1/ - /+rnV62L6mHA5q9/VdIff5AOLcKXb/9nHi5TqL4O1R1vyWdTCoOTF/ARN2EkHCQTLYviNVD10xP7 - 8avtWSkmK6jLx4c8v3pxSj2phpYMSgQcr/XD189VN+csw4E9jGhC7dtXvn4d6YvpVrKTyUBdtdcB - 2xGt+4WHg6Lyd/6Et/FzDuabKAAEymehP3089fzCQ0ZWQMMpv7Lp3TUvyDSCsZ898775PWfX1MVu - lols/umh7/uJpKVpOfuKUUEpBBaNIl7Jf/4QEu6lTlbBhwSkW9cn+PSV/kd/jddwPv3whmql+Swn - 4WQVUOmcEC1fPGZPNbNhXuSZJnshCtjTbh6QL5eAbo8Xg82taK7Uv39TAf/5r7/++j+/CYO6uRXv - 72DAWMzjv/97VODf2S37N8+L/6bin0kEMmSP4u9//msI4e+2b+p2/L9jUxWf4e9//hJF+c+8wd9j - M2bv//HgX9//+89//T8AAAD//wMAAQU0SOUgAAA= - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7bbf4f924e7a15ca-SJC - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Sat, 22 Apr 2023 16:40:50 GMT - Server: - - cloudflare - Transfer-Encoding: - - chunked - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '96' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3000' - x-ratelimit-remaining-requests: - - '2999' - x-ratelimit-reset-requests: - - 20ms - x-request-id: - - de62efcb5fa7139d5deab0536eca1086 - status: - code: 200 - message: OK -- request: - body: '{"input": ["[{''role'': ''user'', ''content'': ''Determine which next command - to use, and respond using the format specified above:''}, {''role'': ''assistant'', - ''content'': ''{\\n \"thoughts\": {\\n \"text\": \"I will use the - write_to_file command to write \\''Hello World\\'' into a file named \\''hello_world.txt\\''.\",\\n \"reasoning\": - \"The goal is to write \\''Hello World\\'' into a file named \\''hello_world.txt\\'', - and the write_to_file command is the most appropriate command for this task.\",\\n \"plan\": - \"- Use the write_to_file command to write \\''Hello World\\'' into a file named - \\''hello_world.txt\\''.\",\\n \"criticism\": \"I need to ensure that - I am using the correct file name and that the file is being written correctly.\",\\n \"speak\": - \"I will use the write_to_file 
command to write \\''Hello World\\'' into a file - named \\''hello_world.txt\\''.\"\\n },\\n \"command\": {\\n \"name\": - \"write_to_file\",\\n \"args\": {\\n \"filename\": \"hello_world.txt\",\\n \"text\": - \"Hello World\"\\n }\\n }\\n}''}, {''role'': ''system'', ''content'': - ''Command write_to_file returned: File written to successfully.''}]"], "model": - "text-embedding-ada-002", "encoding_format": "base64"}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '1258' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/embeddings - response: - body: - string: !!binary | - H4sIAAAAAAAAA1R6Ww+yyrbl+/4VK+vV3hFRqWK9cb9ThYCInU5HEBEQkVsV1Mn57x39Tk7nvJhw - swhzzjHHGLP+419//fV3l9VFPv39z19/v6px+vt/fc/db9Pt73/++t//+uuvv/76j9/v/7izaLPi - fq/e5e/238XqfS+Wv//5i/vvM///pn/++lsX5wuxXVNi+8eNVdCIxQb77dVz6a2dYvi8BAnR3QsZ - aH88GnA4vUNiQHhmAxJvEsy0sSZFeS6y+fQKePiajBVt9f1YM+XWUXCQjAzLZVtl7F7NASyb+oS1 - bVsNbF8wCW4l2yRy8bEyeoJyAMsjVoiJbs2wtlu/FDrZTLD+ST/uMn9IAKb36JKrXutgiZQsOSqe - fJqFnbu6s6M7JTSfmjcfz/HKpqb1cuGh7xC2Ba0eaBeHG9EKXAlrgXmr6X159qIy9CWR949XxGrq - l9ATBh+b70AeeG5kFmQkKdHCTyYg79jqBP6ZpjhTAsX9zJtSgtVsLVirLi1gR+3qgdZ1DKxzdB/R - E/kI0A7PPZE8c8kGGYQBDE+BioJRb+pVyFwIHoKDkKAaR3cYkroRfaXYzUAumoFwMu4Bbo53xFHd - Z2SfDslBVQuLyHz8cftaEWL47CIeGw9NVZnu5B2YW7rF4WMKBnbf3RNIs3lG/HgdInY6vgJ463wH - Mf6wGYZdO1G4dR/neXNOZrZI26wDWtAEJMU6Uhc+8GOQ6rQiuW/dh3UfmD3Y7PBnpvqS1uuh2swg - 2RwMHAVvP6KCNlSQFlyDw8tt47LH00VQ8sn2T3zX6JNw0J/SkNzL3eKuu22PQJEmZzTJHxAt25k2 - MHlrD3IdsjRab6JPgUnKAHHDNc/4/hMk0MtfR+KGN5/tt1s5EUi3/aBtIh+iQXfiHjKzPOJ4FZaM - eafGg7di9ybyRQgA3Zgoh5kRFDh61snwuYpuDs77R4B224MfjSfyFOAh8k7YRq8smunlY0EMXBv7 - jz4eWOe+QxhFxUQ8pD7BIheVJXYx9rA6aW694ltewuOEKqKeV3ugHXIDuP1UH6Lu+9yllHsGcHai - FjuzmjE+MJwUmub4we7l1bv0Gm1CuDvzLpbjiAAKVSv/Ez9wi581iZ0lFpWHbaIDGknW328Lgjlc - rVmMju96iWfZgtslPmEng4k7WvVrFj/rESPeXJ/uYlN8E25LO2FUnjfR8nKrTryGQj8vpybNPuPj - 1G5VztgTu3bLaNEslMBrU7ywm78fbDEjp4evZGcgrn0XgEoi7qAaPzri34xrvcLHoYGrusYEcUDN - Vk1Temi1txG94zc/sNL43OD5rvk4CtA80Ft57uA2CkXiCGRy2YndW4iqmc18r67f+tcrEFy9OzrY - FhfR1U1usAbBETHxzavL83nhQdnqHo41Mkb0vGmQiFSzQYdzZjDC5akBZJt/E0dI5mi875cETA+h - xoocpUP3BrkjPIeNSbRtq9SLIWUtuG0Kft5orImWQXIdIJCrh5ZGy8FyiBcI5aJKUTcZZr1P/TiE - s5/lM9ufxGw6vp8KDCYBYX/D9xlViI9AfbBV4mT1PmL8dixBDjwfa1SfAOPHXSHyqbLD9i40VHbc - 2AXs6Pwixrdeh+igePAQUw9bavxUR90oISh09MDKs+/rWb/bN1i8F4FITp24a7TbeHB6zy72i8ch - Wx8pqSBwhRd23LXPxtQuGyhNF4T4+OwCRl7TQeB5zcboAR/RKryHGHzzk9hp/lHXdKWGiG7AJTI4 - aCpn46wBIgEW9lrzGdFd3qeQ1lVCFDk61NS4pgos3x+TaN96pSSrKngqtSMxrkcDMJ+uvTisNCcZ - kPdsefdHDVy9XsQOjLqMttU5hVL6uWN7hVM2trhNYPaONlj3Kk9dHP6hwUC+8cQVuqNLqAZTuHlR - jcj97Zyxw/5IIcofBpEVr3c/c/1oQcf4DBtukA3Lxg5zscPPG/ZK5z2sMfQP4B1KFUb3zzNba0VI - 4KNcR+L7cAJ0vewo7B/pmySPuwwWZV8KMMtLB6smyAAbageCogd0vhpnNePbrV8BT2TqLMDxra6X - 65KIr4MUYMlwLtGf/Pdn6BPfSkNA7/slhu/jaSbqpA31Uk1bBNpynxKkv2TArqWbAOfZWERlt8Zd - xnD1IFHDFJvl08l2Tz5ohfnsa9hQUOuS8728icmwBtie63mYiWuvkGz9M1qLios+zRhbEJxDC+vO - a3WZvHMamD/5AQHDBBH19YcFaNt7RLkwCDqnUjrxatcm2gEDM7bSyoPXd5thKdQGl4DnGULzGjRo - k+pGxuvJcYYxznpiapqnzu9Ui4XculsY6a8nYHcwSfD7/UkSnwcw+m7LwWbMVWwI/ib60M5DwB/4 - C9aM6J0tD6daoci1Atq6mVkvbepIYHfmXCxV73PNuhMPAbzkK/aXFNTjrX0loDksKzY3QIl2lVA0 - wnHo3tiZ+rvLVlFuxHGersQ7SlrNfcsCiGpGsWUlCaCiQBRQzI2MeAW1KgvDxhCrTAK4UPHnF89Y - 9B4TweZzbIdlfFwbyKr8gg2nKNzZ20uKqOqKSvyONGy6h/cS9jvziD7CXYnYSnsPBouk4/w1V8OE - t4cEvNJMxsZpG6lLxRkH6Kfzk9hffrNvFNiD43YRiYUiOtDgEyLIV5uIGH2+ZOR7XQh6mhLPehhs - bLaSADova7AsMzdazsM+hyG/jbEjnc71WPluCo3tUSYoi6zhh4/wY1Ue0bhirJf4vRSQ8vqCbdcs - 
2ciDAwc/BjwRmbyWoZNCqxA27DPj4hvPke01BG/F/o2tpFXdpSi4A3Q1u8bu4muAPidOAEowqTiS - 7s+BerGEIBDEK0HUfrHJ1iUkbgetwSbfEcZi55hAZejKuWrzTl0OqA+Fj9spJMvfD8Be52GE/L7T - sKt8SrB0O7TCvXI9EJV/PNhyfJ5iuIQvB98n4amu33jA2ukdYtDVcRdpG/Wikx8Sot+LczYz2U9g - Vmg6cQ0zywgZdi2wdL+a14OD1d33/b49LSJqN3iA6tI1+C8+FH/CejWndoWieqWIe4MnmwKx7eBb - 2H2I3sPGnfdDoIlGZ96IWVruwMWrxwvKZk2wpjaELR9Q5PDqmgpx32THWC+OFJ6SKcFW4jcRvdDn - CrfvzTzvS8ut50tpS+CXHz+8YAwfZkHxX943v0V1dZt0AyStNEkeRlc2GEGTQN7IBRKVqZDNNb6O - UIzFAW3OxB9IKdgbmDfwSfxg0COiO3EHRpa5X/6pDcz81AYc3vyGeKVjDmvCRY7YoJQQR33sAR2D - 2QMm+7zRtunVel25SYDAz8uZZUlTf7+/BHsvXGYQU2NYlMEuIZKfn3mzm0qw8BV3E58pd8IXc/DV - FZejB9gC4Pw4d8eBbLd2DB97KZ0390Cp9/7eSSCWVhOra9fWDPvbAh6Vk45//XL51hs425sLlvnp - zSZPjgr4rW/0bDQI1rfbbCC8KhMxjxczYmt/q4DIWzusxK9bvQbHavNbn0hffs4OS28AYdqdiHxU - y2E5vwiFr+fazH0+R2Amh1sHTwwR4l7tV7b4sVICkRwtkl8zqtKHVVCQiIlLjNfprP7qH0j0rv+p - J9bplxZc4neKJjnIhzUH/Qh+/SwL2Vizyfo4cA51NMMvXs34lldQC9qA+F/+/DQ9YfPDF6Jyr7Ke - TcbnMGVRRuxxy4Y/fJ8YN/TDX7baa5YKZNQf2BarwuWeUqMA2Dcdlj6J7/Jf/gCGxXvi8Gaf6+Us - HQJI+rrBbigTt8eT2wLLWO5Y//WDUjrNsL29B7QWgLlsH0Y9MFXpQL4w746gPm7AVeRMfP2uN56U - q/Lr1zgUZ8d9W9gOhePBkYhi7NnA4s1JEJvLGs7b4C5mpDSeN3gat8offjm3hx0nXvCxxW5jcaw7 - a2ADzkVvYenqPTP242O/59mzTuqFMauCn+clnmnjUvD9nhY8zZU477/x53R25OBk1PLsW8tS010l - GDBk+wJbB33vrtb1Ww/c1phZ/pTd/WkLN5AeV4WoX/7JpDPnwJ3s3OYt+4Tgy39bMLVOgP2bcRwW - VH4EKBwoIX57Hd2lPa2xqET8EavTvf/mX1iKIbI8NDRCGDF3s9PAx9iciPmpt2Ah1o5Cw3ch9g5v - nS3SCgPQHvrtL98iioqzAOP+YhHvi0c0Y9oIC8X4YBkKV8Asa2ng/uY/yH3D99HyFjsHljA5EKPf - BSp534IE7mTrRs7OJmDrdNW8P/H+6hvGLtw5B5xwmNAReXAYO3kQoH0fDlhL7swdC6fMwe6SGj9+ - zLptGx+gvdkrSMjqfTb/9OnZuuXYOyBT3SvIvsGq4muM8JVjJD7JCvj2k3kz3ONs7mupA8A9vDBu - enXgLOobwuUT2ViP1ac7vrtJA1//Yd7OBT/UPz0mPu8a/vXzXSUkDdzhtpiPqj+59Pt+ULjaJYJX - 5RRRBYwNaDDs59X2vWjHjoEn6iEViH07TOyzze0KJhvBQIcNhzKO7TUPRJvUwVYX9+7y3gc5/Op/ - giaZDL9+B358S7/vOZXeynsHJBwJWOZjW2XPa5DDKlMA2rdFoy4eBxVotemIDVNShwUsmxEYEo+w - by2ngXH+W4E0G2cETwJQ6eKXGvS324lIY1FHjBP2B4hmVcZaRfWaeRcgQHS9BOgjpXY92y+owMbj - OIxnfc+WW3dMYHaDGGOqLGwxGGeJPOHzebVb111+/sj6ebyIG1/eYDlzAS8ey+RKdJeiH38vxfzJ - DUS/68mw7vTj5oenBD1NAKbyc0Twx08DiGnExs+tg09qFkSi+8qlswVyGJ8/lx/+ubQzQQst2Kcz - JyjvYdpWVwQUEWVom3lcPV74YwGro4GJxJ3zmlLZmuEBqum8ouUdjetltwpz9LGJ90htMBZaSmES - cAxx942lrsIEFOAr+Y78+B9D9jEHg3t+EfcuFTXdJbcZCtVTxeZeRvVaLnN4SGihkO/xML37xYB+ - WdrElQut5i1jasFoFj52x8yMaMq9KTSrzsRufDHBXExlII76ISROFSKwlPcTB/tYu5JzaLzdMTvG - Kex3+hG7S6sOe9F3eSCamkwQowZbpkOQiIoFsvmL//VsPPEN2m85m5eLQNloRFYC/bKysaRmW7A6 - m3WFIpO2RA8N0yXYfFPoCkpBnDkX6hm34AAn4ynjMN0cM7peRAqtwJbwNIhvl03owMF5KCi2Z4cA - Khw0DR4uBwUXD7jNyLk+GMK1yV9zzvcumN0m/fL7sEHAu3PutPp9Cr96hnzxH3APq1ihsqEJQevm - k613y7FANHIY7T+wGFghwxU+tFOEQ0XnXfLemjzwz+1AtOQeqSvgnBF+8ZI4+i6o6Xvn5cff+6P1 - 5dRj0ig3yNmknufX8Rqt9mOMIUQmJoahPyLqZOtGdJVX9/VrQLbIaZgD+4EZGv0nzPpYOfdQeaUc - wW72rtdxShJ4fuIGbf30We89Ny7F99lKEVW5Tf3QRRGBSeBlxLoQuWM8iBbUnW1I7Ptbj/aW8hTg - MFYE//JpPXmhBmldJuSbHxm/tXMK99lyw06/kdxJEFLjT7+QmgbXHTgpAixz8iHe8sD1fshZAKdI - uWFcG1a0g5dYgDvRvKC9Gc4R7W59D7DfB0QKNdflXqoBob4NQqwG7ymb4Mn3wKgLIZFiVg3MefEJ - NFXlMD999zhMsgsM8Lh3O6yLqs3o1UwEaN1l++svuBlfvINGdErFx/jGzoxxstn/8I64S1sP9Pjw - Y+GF1QLtv/yM5Qk6APR0DrP47SfzI31XIDWTOzHknRmN2zYWYBNpT6J/FvPP/0G04z5Ybu5gINgk - Kxx6yGH5kemA1q3iQMqbC7E2CgVEW+7cMS9mbr4ly5HR+rEW4iVxI8R7WarOwntI4HbsH0j86iu2 - XIYC7JTxjESD29fjC+49+MV3rDmjoS7TIU1gQnMFJ1+9tLwXi0L2anzibdwSrGoDHUi5J08yea9m - NH6cKXB0g3zj+c5GGW9WwEs+woawBWDtmnoWqtlZ0CFLy2HHB3oMD/HqoWHLdlG3bPgNCMupm0Gv - owF0KergepMk7N18f9gz9RpD7FQ3jFnAud18Dnn4xTNshAsdxp8f9rnW43x42/6wi2fZ+fm15Kvn - f/osB/sjN8/DN9+WvoQdHLj4grNKPGRMTR/afuOlTyzbcRmNgsPQz1+ZP4WkqavwrhM4oHtG7ORB - 
1M/V7g9geyIKtqc0jH58QRQMtUcnbxXAAIV4/ukPgnc+UtfYmmL41c9Y2RzbaNXrk/Fffp/GtGxV - XDGFYS7zCMSXN5vuOu/AIK5G1N/NdaDaU1nhsyVg3uT7mq3Pe279+PUshIJU85oTJrDvE32+7dxV - Xa99Mf/yhWjAr8HHWk8GiFeHzluOI9m3nmNY+qFG/FKNarodxxbiEo9IeMlGtLTNJwXf+QCWD73M - KGvuDpy8dfqD/zRVgwPsUkGc4V516t1qhjP86U2zn3OVinbgwO68mD58pB8297XVQ9yAOxK/fgPd - Xncl+OI1QXaeuXRXrQbU6tnA8rUY3dGLLQ9KA4vQ5Tm2da8nxxG6oL1+/YKN2zobYYX8taVYadVP - xK5jNYuF2jVY5R9btsihQwV26wii9/wYrYlQcVANwYSOajsAttyBBcF+VxPp0rXurz8LEycL2P/q - 30VujjyYqVOhKjhXdX9EY/vzf4i1Hw8uD6+lIErSnp+38Nxkk/C6xBCGi0z8dnUA4/JAE6POKQjW - C58tT4VKsDLODtHJ/BzG4wJTYBRkj1q69u5PH8IXd7jNEOwslzqV08Gs2x2xLnWH7Fs/I2xeHzgH - S1CyuaksCr/xIW4jrNHozL0Ev/oN0d5yarqsjxbs1S1DYhAaA3s8VQTDQ40RL2wzsBZPS4APwUL4 - Vt9dd39zFwSfU6mg5y341OMmevHQkwKXIK5qhrZW1kS8bXJ+XsxLG423mw9B1+cHIjf3bJim4bCB - XOvBmdZSHFEqUCjK+eaOvWsWqPQqmwiq8b1Dr1qh2Wdg9fxbj1zPcch27Jh6v3kZzj4jAezb7356 - GKvna8/ow0pW0L+6GEeojzLWApkDX78A7Ti6z6bqTSrg+U5MlGfv1MsGV85PHxO7uirs55+B2qfS - vD0tUvSLP2je/h5j1dp9/eFuA45lfP35JWwNkzeFS2PrxPPIhv3wGs7u/oloL3TqH7xke8si8uW5 - ujTy5hkSdjTnJjim7k4Y3tKf/NJjVXY5KkujULyZMK+H2meLrVsI9mFZYOnbP0dIrzGs/VXCSO3e - YE3bdBSW8O3Mc7RGwwQ2hQZf0us2Uz/063VT8y3E2VOYhc/t5dIX3HqwULQPkVhdMLqM5gGSa6lh - PTxo9d4kwgq5/br/4rfEWIFyB0o7iydIFFNG6/HEwcSWXazD3lVpeJa0P/hrH+1H9J2P9X/8kGSU - vGHVP90KLxx6ExPJOiD8tqlEkSnbn7+pfvkW/fUn9PniL7mu601Et6NLJBK1bAjKuQKn687DmONI - RJbLkIOrufUQU4eaUZL11Q8vsI08WM/Pi5bDT2a+Z3r15Iiup9b48SOCUr3Nlsvp3ENxvvjz5lne - arqeZg3GkthitUyFaHnCLoTBouj4V++02NgStI5lhU/7xytjMriFoNrBK5aahtTLwg3Nb14xb6vJ - Hvad+w5A4/HcDOVLMCzC65LAYWP0aKnDrUqEixX89A0ST1rjsqzoQujUlorxS1WiNaqxA07MI0QT - uJc7B4Zzg3M02ETf2bxLPZ9W4hPnM1GCs1J3rny14LfeyDcf2CpkKoTifqYYe76e7bAhaUBuEwkJ - GeTdyeEfBtwORoMNOe/qOSdqDDPV6nAx3ONo3ttbCIvPZfedJyvRXvRVHt7D+20exVVxl+ispKDM - 7Jm47sBna3DsN2B/CNncN917GI5OJ/ye/85bry794rO4Sl3xpx/T+iEU4MfXvvOHaLrcdxIEzTRh - LWcu+JPvz0uY/Jknr/ucSIByNU+s1+aUTUK6h7DvVufLh+2Ifv1o4Ru/eefHdr0cFOcArwU6INFs - z+qa5XkIFyFdsLMV7GiRi94BwXRApFA7E3AdckNwG84SqrsQqeOFflZY+oFGTqfmEH31dSJ88ZVY - uPAYPaRid3RS74XdZatE3JPzWniQu4Tcu6R1F1ZeCyHV1wpBWDzUr19ViX//dgX857/++uv//HYY - tN29eH03BkzFMv37v7cK/Pt2v/2b4/h/E/7PToR5vJXF3//81yaEvz9D136m/zt1TfEe//7nLx6C - P/sN/p666fb6Hxf+9V3vP//1/wAAAP//AwBlmDGz5SAAAA== - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7bbf4f957a7315ca-SJC - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Sat, 22 Apr 2023 16:40:51 GMT - Server: - - cloudflare - Transfer-Encoding: - - chunked - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '227' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3000' - x-ratelimit-remaining-requests: - - '2999' - x-ratelimit-reset-requests: - - 20ms - x-request-id: - - a25379b9d7bfed73e1a46145146b1478 - status: - code: 200 - message: OK -- request: - body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You - are write_to_file-GPT, an AI designed to use the write_to_file command to write - ''Hello World'' into a file named \"hello_world.txt\" and then use the task_complete - command to complete the task.\nYour decisions must always be made independently - without seeking user assistance. Play to your strengths as an LLM and pursue - simple strategies with no legal complications.\n\nGOALS:\n\n1. Use the write_to_file - command to write ''Hello World'' into a file named \"hello_world.txt\".\n2. 
- Use the task_complete command to complete the task.\n3. Do not use any other - commands.\n\n\nConstraints:\n1. ~4000 word limit for short term memory. Your - short term memory is short, so immediately save important information to files.\n2. - If you are unsure how you previously did something or want to recall past events, - thinking about similar events will help you remember.\n3. No user assistance\n4. - Exclusively use the commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. - append_to_file: Append to file, args: \"filename\": \"\", \"text\": - \"\"\n2. delete_file: Delete file, args: \"filename\": \"\"\n3. - read_file: Read file, args: \"filename\": \"\"\n4. search_files: Search - Files, args: \"directory\": \"\"\n5. write_to_file: Write to file, - args: \"filename\": \"\", \"text\": \"\"\n6. delete_agent: Delete - GPT Agent, args: \"key\": \"\"\n7. get_hyperlinks: Get text summary, args: - \"url\": \"\"\n8. get_text_summary: Get text summary, args: \"url\": \"\", - \"question\": \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. - message_agent: Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. - start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", - \"prompt\": \"\"\n12. Do Nothing: \"do_nothing\", args: \n13. Task Complete - (Shutdown): \"task_complete\", args: \"reason\": \"\"\n\nResources:\n1. - Internet access for searches and information gathering.\n2. Long Term memory - management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File - output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your - actions to ensure you are performing to the best of your abilities.\n2. Constructively - self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions - and strategies to refine your approach.\n4. Every command has a cost, so be - smart and efficient. Aim to complete tasks in the least number of steps.\n5. 
- Write all code to a file.\n\nYou should only respond in JSON format as described - below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": - \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- - long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": - \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": - \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} - \nEnsure the response can beparsed by Python json.loads"}, {"role": "system", - "content": "The current time and date is Tue Jan 01 00:00:00 2000"}, {"role": - "system", "content": "This reminds you of these events from your past:\n[''Assistant - Reply: {\\n \"thoughts\": {\\n \"text\": \"I will use the write_to_file - command to write \\''Hello World\\'' into a file named \\''hello_world.txt\\''.\",\\n \"reasoning\": - \"The goal is to write \\''Hello World\\'' into a file named \\''hello_world.txt\\'', - and the write_to_file command is the most appropriate command for this task.\",\\n \"plan\": - \"- Use the write_to_file command to write \\''Hello World\\'' into a file named - \\''hello_world.txt\\''.\",\\n \"criticism\": \"I need to ensure that - I am using the correct file name and that the file is being written correctly.\",\\n \"speak\": - \"I will use the write_to_file command to write \\''Hello World\\'' into a file - named \\''hello_world.txt\\''.\"\\n },\\n \"command\": {\\n \"name\": - \"write_to_file\",\\n \"args\": {\\n \"filename\": \"hello_world.txt\",\\n \"text\": - \"Hello World\"\\n }\\n }\\n} \\nResult: Command write_to_file returned: - File written to successfully. \\nHuman Feedback: '']\n\n"}, {"role": "user", - "content": "Determine which next command to use, and respond using the format - specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": - \"I will use the write_to_file command to write ''Hello World'' into a file - named ''hello_world.txt''.\",\n \"reasoning\": \"The goal is to write - ''Hello World'' into a file named ''hello_world.txt'', and the write_to_file - command is the most appropriate command for this task.\",\n \"plan\": - \"- Use the write_to_file command to write ''Hello World'' into a file named - ''hello_world.txt''.\",\n \"criticism\": \"I need to ensure that I am - using the correct file name and that the file is being written correctly.\",\n \"speak\": - \"I will use the write_to_file command to write ''Hello World'' into a file - named ''hello_world.txt''.\"\n },\n \"command\": {\n \"name\": - \"write_to_file\",\n \"args\": {\n \"filename\": \"hello_world.txt\",\n \"text\": - \"Hello World\"\n }\n }\n}"}, {"role": "system", "content": "Command - write_to_file returned: File written to successfully."}, {"role": "user", "content": - "Determine which next command to use, and respond using the format specified - above:"}], "temperature": 0.0, "max_tokens": 2704}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '5708' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: !!binary | - H4sIAAAAAAAAA8xTwW7bMAy97ysIXnJRgjhNs9S3HQq02GGHpRuGeQhUmbG1yJIh0a2LwP8+KHaa - NF0HDLvsZpOP5HtP5A51jimqUrKqajN+v/wg5yW3/PDt7vq2+DRvb5Lky7VvVlcfFQp09z9J8VAx - Ua6qDbF2FgUqT5IpxzRZLGfJMllczgRWLieDKRY1jy8ml2Nu/L0bTy+mCQpsgiwI0x3W3lU1r9lt - yQZMk9lyLvDY/DkxS2YC2bE0R+z8atEJVKXTigKm33dYUTg09s4QpihD0IGl5UjTWSYbJewyCwCQ - 
IZeuKUoOGaYwBIcEtRyDGd5CKR8IQqMUhbBpjHmCR6+ZycLohoxx8NV5k49AW3YgYaMNgZUV5TAq - Y379GPMTbnk0gVVJYKllCEw16ADsoAkEXBKwDNv1IJ5AuaqSNo+A59gBNclQnNL1JIOz2hY959UA - G/pHuvT3ZGE/vST7m/nwWVtFMJgjjSeZH33Zw6LI/ZT4F+eI/de5+Fe9oQnaFm8bcq69NtL2ssdw - 989OKq9ZKx2qw+tbon0l2dD4WCf5DyN0iK+Zg3Lek2LzNJh4UhQxh7r8xV6dcwk1ye3/vYU93U4c - DmqAv7qnyKRX8qL9mWDpi/NTPN3vYbmjh28aeKzsBm6Z7bATuNFWh3Ldd8IUA7saBWqbU4vptPvR - vfsFAAD//wMAbKbLwRMFAAA= - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7bbf4f9c6b3c15ca-SJC - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Sat, 22 Apr 2023 16:41:07 GMT - Server: - - cloudflare - Transfer-Encoding: - - chunked - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-model: - - gpt-3.5-turbo-0301 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '15821' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3500' - x-ratelimit-remaining-requests: - - '3499' - x-ratelimit-reset-requests: - - 17ms - x-request-id: - - 19520475f9dabce5bc67ccb90bec24f1 - status: - code: 200 - message: OK -version: 1 diff --git a/tests/integration/goal_oriented/test_write_file.py b/tests/integration/goal_oriented/test_write_file.py index 053d6e08..92ebbc56 100644 --- a/tests/integration/goal_oriented/test_write_file.py +++ b/tests/integration/goal_oriented/test_write_file.py @@ -2,29 +2,20 @@ import concurrent import os import unittest -import vcr +import pytest from autogpt.agent import Agent from autogpt.commands.command import CommandRegistry from autogpt.commands.file_operations import delete_file, read_file from autogpt.config import AIConfig, Config from autogpt.memory import get_memory -from tests.integration.goal_oriented.vcr_helper import before_record_request from tests.utils import requires_api_key -current_file_dir = os.path.dirname(os.path.abspath(__file__)) -# tests_directory = os.path.join(current_file_dir, 'tests') - -my_vcr = vcr.VCR( - cassette_library_dir=os.path.join(current_file_dir, "cassettes"), - record_mode="new_episodes", - before_record_request=before_record_request, -) - CFG = Config() @requires_api_key("OPENAI_API_KEY") +@pytest.mark.vcr def test_write_file(workspace) -> None: CFG.workspace_path = workspace.root CFG.file_logger_path = os.path.join(workspace.root, "file_logger.txt") @@ -32,20 +23,12 @@ def test_write_file(workspace) -> None: file_name = str(workspace.get_path("hello_world.txt")) agent = create_writer_agent(workspace) try: - with my_vcr.use_cassette( - "write_file.vcr.yml", - filter_headers=[ - "authorization", - "X-OpenAI-Client-User-Agent", - "User-Agent", - ], - ): - with concurrent.futures.ThreadPoolExecutor() as executor: - future = executor.submit(agent.start_interaction_loop) - try: - result = future.result(timeout=45) - except concurrent.futures.TimeoutError: - assert False, "The process took longer than 45 seconds to complete." + with concurrent.futures.ThreadPoolExecutor() as executor: + future = executor.submit(agent.start_interaction_loop) + try: + result = future.result(timeout=45) + except concurrent.futures.TimeoutError: + assert False, "The process took longer than 45 seconds to complete." 
# catch system exit exceptions except SystemExit: # the agent returns an exception when it shuts down content = read_file(file_name) diff --git a/tests/integration/goal_oriented/vcr_helper.py b/tests/vcr/openai_filter.py similarity index 100% rename from tests/integration/goal_oriented/vcr_helper.py rename to tests/vcr/openai_filter.py index 58504520..56a7eaec 100644 --- a/tests/integration/goal_oriented/vcr_helper.py +++ b/tests/vcr/openai_filter.py @@ -26,6 +26,14 @@ def replace_timestamp_in_request(request): return request +def before_record_request(request): + filtered_request = filter_hostnames(request) + filtered_request_without_dynamic_data = replace_timestamp_in_request( + filtered_request + ) + return filtered_request_without_dynamic_data + + def filter_hostnames(request): allowed_hostnames = ["api.openai.com"] # List of hostnames you want to allow @@ -33,11 +41,3 @@ def filter_hostnames(request): return request else: return None - - -def before_record_request(request): - filtered_request = filter_hostnames(request) - filtered_request_without_dynamic_data = replace_timestamp_in_request( - filtered_request - ) - return filtered_request_without_dynamic_data From 109fa04c7c6f551cb43f886d5b08b171ffe3bfe5 Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Wed, 26 Apr 2023 10:23:05 -0700 Subject: [PATCH 157/222] test image gen (#3287) --- tests/test_image_gen.py | 152 ++++++++++++++++++---------------------- 1 file changed, 67 insertions(+), 85 deletions(-) diff --git a/tests/test_image_gen.py b/tests/test_image_gen.py index 23332ee0..7e7ae58e 100644 --- a/tests/test_image_gen.py +++ b/tests/test_image_gen.py @@ -1,14 +1,9 @@ import hashlib -import os -import shutil -import unittest from pathlib import Path from PIL import Image from autogpt.commands.image_gen import generate_image, generate_image_with_sd_webui -from autogpt.config import Config -from autogpt.workspace import Workspace from tests.utils import requires_api_key @@ -17,97 +12,84 @@ def lst(txt): return Path(txt.split(":")[1].strip()) -@unittest.skip("Skipping image generation tests") -class TestImageGen(unittest.TestCase): - def setUp(self): - self.config = Config() - workspace_path = os.path.join(os.path.dirname(__file__), "workspace") - self.workspace_path = Workspace.make_workspace(workspace_path) - self.config.workspace_path = workspace_path - self.workspace = Workspace(workspace_path, restrict_to_workspace=True) +@requires_api_key("OPENAI_API_KEY") +def test_dalle(request, config, workspace): + """Test DALL-E image generation.""" + config.image_provider = "dalle" - def tearDown(self) -> None: - shutil.rmtree(self.workspace_path) + # Test using size 256 + image_path = lst(generate_image("astronaut riding a horse", 256)) + assert image_path.exists() + with Image.open(image_path) as img: + assert img.size == (256, 256) + image_path.unlink() - @requires_api_key("OPENAI_API_KEY") - def test_dalle(self): - """Test DALL-E image generation.""" - self.config.image_provider = "dalle" + # Test using size 512 + image_path = lst(generate_image("astronaut riding a horse", 512)) + with Image.open(image_path) as img: + assert img.size == (512, 512) + image_path.unlink() - # Test using size 256 - image_path = lst(generate_image("astronaut riding a horse", 256)) - self.assertTrue(image_path.exists()) - with Image.open(image_path) as img: - self.assertEqual(img.size, (256, 256)) - image_path.unlink() - # Test using size 512 - image_path = lst(generate_image("astronaut riding a horse", 512)) - with Image.open(image_path) as img: - 
self.assertEqual(img.size, (512, 512)) - image_path.unlink() +@requires_api_key("HUGGINGFACE_API_TOKEN") +def test_huggingface(request, config, workspace): + """Test HuggingFace image generation.""" + config.image_provider = "huggingface" - @requires_api_key("HUGGINGFACE_API_TOKEN") - def test_huggingface(self): - """Test HuggingFace image generation.""" - self.config.image_provider = "huggingface" + # Test using SD 1.4 model and size 512 + config.huggingface_image_model = "CompVis/stable-diffusion-v1-4" + image_path = lst(generate_image("astronaut riding a horse", 512)) + assert image_path.exists() + with Image.open(image_path) as img: + assert img.size == (512, 512) + image_path.unlink() - # Test usin SD 1.4 model and size 512 - self.config.huggingface_image_model = "CompVis/stable-diffusion-v1-4" - image_path = lst(generate_image("astronaut riding a horse", 512)) - self.assertTrue(image_path.exists()) - with Image.open(image_path) as img: - self.assertEqual(img.size, (512, 512)) - image_path.unlink() + # Test using SD 2.1 768 model and size 768 + config.huggingface_image_model = "stabilityai/stable-diffusion-2-1" + image_path = lst(generate_image("astronaut riding a horse", 768)) + with Image.open(image_path) as img: + assert img.size == (768, 768) + image_path.unlink() - # Test using SD 2.1 768 model and size 768 - self.config.huggingface_image_model = "stabilityai/stable-diffusion-2-1" - image_path = lst(generate_image("astronaut riding a horse", 768)) - with Image.open(image_path) as img: - self.assertEqual(img.size, (768, 768)) - image_path.unlink() - def test_sd_webui(self): - """Test SD WebUI image generation.""" - self.config.image_provider = "sd_webui" - return +def test_sd_webui(config, workspace): + """Test SD WebUI image generation.""" + config.image_provider = "sd_webui" + return - # Test using size 128 - image_path = lst(generate_image_with_sd_webui("astronaut riding a horse", 128)) - self.assertTrue(image_path.exists()) - with Image.open(image_path) as img: - self.assertEqual(img.size, (128, 128)) - image_path.unlink() + # Test using size 128 + image_path = lst(generate_image_with_sd_webui("astronaut riding a horse", 128)) + assert image_path.exists() + with Image.open(image_path) as img: + assert img.size == (128, 128) + image_path.unlink() - # Test using size 64 and negative prompt - result = lst( - generate_image_with_sd_webui( - "astronaut riding a horse", - negative_prompt="horse", - size=64, - extra={"seed": 123}, - ) + # Test using size 64 and negative prompt + result = lst( + generate_image_with_sd_webui( + "astronaut riding a horse", + negative_prompt="horse", + size=64, + extra={"seed": 123}, ) - image_path = path_in_workspace(result) - with Image.open(image_path) as img: - self.assertEqual(img.size, (64, 64)) - neg_image_hash = hashlib.md5(img.tobytes()).hexdigest() - image_path.unlink() + ) + image_path = path_in_workspace(result) + with Image.open(image_path) as img: + assert img.size == (64, 64) - # Same test as above but without the negative prompt - result = lst( - generate_image_with_sd_webui( - "astronaut riding a horse", image_size=64, size=1, extra={"seed": 123} - ) + neg_image_hash = hashlib.md5(img.tobytes()).hexdigest() + image_path.unlink() + + # Same test as above but without the negative prompt + result = lst( + generate_image_with_sd_webui( + "astronaut riding a horse", image_size=64, size=1, extra={"seed": 123} ) - image_path = path_in_workspace(result) - with Image.open(image_path) as img: - self.assertEqual(img.size, (64, 64)) - image_hash = 
hashlib.md5(img.tobytes()).hexdigest() - image_path.unlink() + ) + image_path = path_in_workspace(result) + with Image.open(image_path) as img: + assert img.size == (64, 64) + image_hash = hashlib.md5(img.tobytes()).hexdigest() + image_path.unlink() - self.assertNotEqual(image_hash, neg_image_hash) - - -if __name__ == "__main__": - unittest.main() + assert image_hash != neg_image_hash From 76df14b8314b42039cb111fc852af4d0d045ae95 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Wed, 26 Apr 2023 20:14:14 +0200 Subject: [PATCH 158/222] Fix docs (#3336) * Fix docs * Add short section about testing to contribution guide * Add back note for voice configuration * Remove LICENSE symlink from docs/ * Fix site_url in mkdocs.yml --- CODE_OF_CONDUCT.md | 3 +- CONTRIBUTING.md | 28 ++--- README.md | 18 +-- docs/LICENSE | 1 - docs/configuration/imagegen.md | 52 +++++++- docs/configuration/memory.md | 174 +++++++++++++++------------ docs/configuration/search.md | 70 +++++------ docs/configuration/voice.md | 34 ++++-- docs/installation.md | 115 ------------------ docs/setup.md | 210 +++++++++++++++++++++++++++++++++ docs/testing.md | 65 +++++----- docs/usage.md | 99 +++++++--------- mkdocs.yml | 33 ++++-- requirements.txt | 1 + 14 files changed, 522 insertions(+), 381 deletions(-) delete mode 120000 docs/LICENSE delete mode 100644 docs/installation.md create mode 100644 docs/setup.md diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index d2331b4c..b1f7caf3 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,4 +1,4 @@ -# Code of Conduct for auto-gpt +# Code of Conduct for Auto-GPT ## 1. Purpose @@ -37,4 +37,3 @@ This Code of Conduct is adapted from the [Contributor Covenant](https://www.cont ## 6. Contact If you have any questions or concerns, please contact the project maintainers. - diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index cdb84ca3..10043ecb 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -4,21 +4,11 @@ First of all, thank you for considering contributing to our project! We apprecia This document provides guidelines and best practices to help you contribute effectively. -## Table of Contents - -- [Code of Conduct](#code-of-conduct) -- [Getting Started](#getting-started) -- [How to Contribute](#how-to-contribute) - - [Reporting Bugs](#reporting-bugs) - - [Suggesting Enhancements](#suggesting-enhancements) - - [Submitting Pull Requests](#submitting-pull-requests) -- [Style Guidelines](#style-guidelines) - - [Code Formatting](#code-formatting) - - [Pre-Commit Hooks](#pre-commit-hooks) - ## Code of Conduct -By participating in this project, you agree to abide by our [Code of Conduct](CODE_OF_CONDUCT.md). Please read it to understand the expectations we have for everyone who contributes to this project. +By participating in this project, you agree to abide by our [Code of Conduct]. Please read it to understand the expectations we have for everyone who contributes to this project. + +[Code of Conduct]: https://significant-gravitas.github.io/Auto-GPT/code-of-conduct.md ## 📢 A Quick Word Right now we will not be accepting any Contributions that add non-essential commands to Auto-GPT. @@ -84,6 +74,7 @@ isort . ``` ### Pre-Commit Hooks + We use pre-commit hooks to ensure that code formatting and other checks are performed automatically before each commit. 
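+
+As an illustration only, a minimal `.pre-commit-config.yaml` along these lines would
+run the formatters mentioned in this guide; the hook set and pinned versions shown
+here are examples, not necessarily what this repository ships:
+
+```yaml
+# Hypothetical example configuration -- check the repository's own
+# .pre-commit-config.yaml for the authoritative hook list.
+repos:
+  - repo: https://github.com/psf/black
+    rev: 23.3.0
+    hooks:
+      - id: black   # auto-format Python code before each commit
+  - repo: https://github.com/PyCQA/isort
+    rev: 5.12.0
+    hooks:
+      - id: isort   # keep imports sorted
+```
+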
To set up pre-commit hooks for this project, follow these steps: Install the pre-commit package using pip: @@ -103,7 +94,14 @@ If you encounter any issues or have questions, feel free to reach out to the mai Happy coding, and once again, thank you for your contributions! Maintainers will look at PR that have no merge conflicts when deciding what to add to the project. Make sure your PR shows up here: +https://github.com/Significant-Gravitas/Auto-GPT/pulls?q=is%3Apr+is%3Aopen+-label%3Aconflicts -https://github.com/Significant-Gravitas/Auto-GPT/pulls?q=is%3Apr+is%3Aopen+-is%3Aconflict+ +## Testing your changes -## Testing +If you add or change code, make sure the updated code is covered by tests. + +To increase coverage if necessary, [write tests using `pytest`]. + +For more info on running tests, please refer to ["Running tests"](https://significant-gravitas.github.io/Auto-GPT/testing/). + +[write tests using `pytest`]: https://realpython.com/pytest-python-testing/ diff --git a/README.md b/README.md index 9cfcda75..c915418b 100644 --- a/README.md +++ b/README.md @@ -89,28 +89,20 @@ Your support is greatly appreciated. Development of this free, open-source proje - 🗃️ File storage and summarization with GPT-3.5 - 🔌 Extensibility with Plugins -## 📋 Requirements - -Choose an environment to run Auto-GPT in (pick one): - - - [Docker](https://docs.docker.com/get-docker/) (*recommended*) - - Python 3.10 or later (instructions: [for Windows](https://www.tutorialspoint.com/how-to-install-python-in-windows)) - - [VSCode + devcontainer](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) - ## Quickstart -1. Set up your OpenAI [API Keys](https://platform.openai.com/account/api-keys) +1. Get an OpenAI [API Key](https://platform.openai.com/account/api-keys) 2. Download the [latest release](https://github.com/Significant-Gravitas/Auto-GPT/releases/latest) -3. Follow the [installation instructions][docs/install] +3. Follow the [installation instructions][docs/setup] 4. Configure any additional features you want, or install some [plugins][docs/plugins] 5. [Run][docs/usage] the app -Please see the [documentation][docs] linked below for full setup instructions and configuration options. +Please see the [documentation][docs] for full setup instructions and configuration options. 
[docs]: https://significant-gravitas.github.io/Auto-GPT/ ## 📖 Documentation -* [⚙️ Installation][docs/install] +* [⚙️ Setup][docs/setup] * [💻 Usage][docs/usage] * [🔌 Plugins][docs/plugins] * Configuration @@ -119,7 +111,7 @@ Please see the [documentation][docs] linked below for full setup instructions an * [🗣️ Voice (TTS)](https://significant-gravitas.github.io/Auto-GPT/configuration/voice/) * [🖼️ Image Generation](https://significant-gravitas.github.io/Auto-GPT/configuration/imagegen/) -[docs/install]: https://significant-gravitas.github.io/Auto-GPT/installation/ +[docs/setup]: https://significant-gravitas.github.io/Auto-GPT/setup/ [docs/usage]: https://significant-gravitas.github.io/Auto-GPT/usage/ [docs/plugins]: https://significant-gravitas.github.io/Auto-GPT/plugins/ diff --git a/docs/LICENSE b/docs/LICENSE deleted file mode 120000 index ea5b6064..00000000 --- a/docs/LICENSE +++ /dev/null @@ -1 +0,0 @@ -../LICENSE \ No newline at end of file diff --git a/docs/configuration/imagegen.md b/docs/configuration/imagegen.md index cf9d55fd..38fdcebb 100644 --- a/docs/configuration/imagegen.md +++ b/docs/configuration/imagegen.md @@ -1,14 +1,58 @@ -## 🖼 Image Generation +# 🖼 Image Generation configuration -By default, Auto-GPT uses DALL-e for image generation. To use Stable Diffusion, a [Hugging Face API Token](https://huggingface.co/settings/tokens) is required. +| Config variable | Values | | +| ---------------- | ------------------------------- | -------------------- | +| `IMAGE_PROVIDER` | `dalle` `huggingface` `sdwebui` | **default: `dalle`** | -Once you have a token, set these variables in your `.env`: +## DALL-e +In `.env`, make sure `IMAGE_PROVIDER` is commented (or set to `dalle`): +``` ini +# IMAGE_PROVIDER=dalle # this is the default +``` + +Further optional configuration: + +| Config variable | Values | | +| ---------------- | ------------------ | -------------- | +| `IMAGE_SIZE` | `256` `512` `1024` | default: `256` | + +## Hugging Face + +To use text-to-image models from Hugging Face, you need a Hugging Face API token. +Link to the appropriate settings page: [Hugging Face > Settings > Tokens](https://huggingface.co/settings/tokens) + +Once you have an API token, uncomment and adjust these variables in your `.env`: ``` ini IMAGE_PROVIDER=huggingface -HUGGINGFACE_API_TOKEN=YOUR_HUGGINGFACE_API_TOKEN +HUGGINGFACE_API_TOKEN=your-huggingface-api-token ``` +Further optional configuration: + +| Config variable | Values | | +| ------------------------- | ---------------------- | ---------------------------------------- | +| `HUGGINGFACE_IMAGE_MODEL` | see [available models] | default: `CompVis/stable-diffusion-v1-4` | + +[available models]: https://huggingface.co/models?pipeline_tag=text-to-image + +## Stable Diffusion WebUI + +It is possible to use your own self-hosted Stable Diffusion WebUI with Auto-GPT: +``` ini +IMAGE_PROVIDER=sdwebui +``` + +!!! note + Make sure you are running WebUI with `--api` enabled. 
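+
+    For example, with an AUTOMATIC1111-style WebUI install, the API can usually be
+    enabled at launch (the launch script name is an assumption and may differ on
+    your system):
+
+        :::shell
+        ./webui.sh --api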
+
+Further optional configuration:
+
+| Config variable | Values                  |                                  |
+| --------------- | ----------------------- | -------------------------------- |
+| `SD_WEBUI_URL`  | URL to your WebUI       | default: `http://127.0.0.1:7860` |
+| `SD_WEBUI_AUTH` | `{username}:{password}` | *Note: do not copy the braces!*  |
+
 ## Selenium
 ``` shell
 sudo Xvfb :10 -ac -screen 0 1024x768x24 &
 DISPLAY=:10 <YOUR_CLIENT>
 ```
diff --git a/docs/configuration/memory.md b/docs/configuration/memory.md
index 6fc80a75..7d707598 100644
--- a/docs/configuration/memory.md
+++ b/docs/configuration/memory.md
@@ -1,10 +1,12 @@
 ## Setting Your Cache Type

-By default, Auto-GPT is going to use LocalCache instead of redis or Pinecone.
+By default, Auto-GPT set up with Docker Compose will use Redis as its memory backend.
+Otherwise, the default is LocalCache (which stores memory in a JSON file).

-To switch to either, change the `MEMORY_BACKEND` env variable to the value that you want:
+To switch to a different backend, change the `MEMORY_BACKEND` in `.env`
+to the value that you want:

-* `local` (default) uses a local JSON cache file
+* `local` uses a local JSON cache file
 * `pinecone` uses the Pinecone.io account you configured in your ENV settings
 * `redis` will use the redis cache that you configured
 * `milvus` will use the milvus cache that you configured
@@ -20,32 +22,39 @@ Links to memory backends
 - [Weaviate](https://weaviate.io)

 ### Redis Setup
-> _**CAUTION**_ \
-This is not intended to be publicly accessible and lacks security measures. Therefore, avoid exposing Redis to the internet without a password or at all
-1. Install docker (or Docker Desktop on Windows).
-2. Launch Redis container.
-``` shell
-    docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest
-```
-> See https://hub.docker.com/r/redis/redis-stack-server for setting a password and additional configuration.

-3. Set the following settings in `.env`.
-    > Replace **PASSWORD** in angled brackets (<>)
-
-``` shell
-MEMORY_BACKEND=redis
-REDIS_HOST=localhost
-REDIS_PORT=6379
-REDIS_PASSWORD=<PASSWORD>
-```
+!!! important
+    If you have set up Auto-GPT using Docker Compose, then Redis is included, no further
+    setup is needed.

-    You can optionally set `WIPE_REDIS_ON_START=False` to persist memory stored in Redis.
+!!! caution
+    This setup is not intended to be publicly accessible and lacks security measures.
+    Avoid exposing Redis to the internet without a password or at all!

-You can specify the memory index for redis using the following:
-``` shell
-MEMORY_INDEX=<WHATEVER>
-```
+1. Launch Redis container
+
+    :::shell
+    docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest
+
+2. Set the following settings in `.env`
+
+    :::ini
+    MEMORY_BACKEND=redis
+    REDIS_HOST=localhost
+    REDIS_PORT=6379
+    REDIS_PASSWORD=<PASSWORD>
+
+    Replace `<PASSWORD>` by your password, omitting the angled brackets (<>).
+
+    Optional configuration:
+
+    - `WIPE_REDIS_ON_START=False` to persist memory stored in Redis between runs.
+    - `MEMORY_INDEX=<WHATEVER>` to specify a name for the memory index in Redis.
+        The default is `auto-gpt`.
+
+!!! info
+    See [redis-stack-server](https://hub.docker.com/r/redis/redis-stack-server) for
+    setting a password and additional configuration.

 ### 🌲 Pinecone API Key Setup

@@ -56,65 +65,57 @@ Pinecone lets you store vast amounts of vector-based memory, allowing the agent
 3. Find your API key and region under the default project in the left sidebar.
In the `.env` file set: + - `PINECONE_API_KEY` -- `PINECONE_ENV` (example: _"us-east4-gcp"_) +- `PINECONE_ENV` (example: `us-east4-gcp`) - `MEMORY_BACKEND=pinecone` -Alternatively, you can set them from the command line (advanced): - -For Windows Users: - -``` shell -setx PINECONE_API_KEY "" -setx PINECONE_ENV "" # e.g: "us-east4-gcp" -setx MEMORY_BACKEND "pinecone" -``` - -For macOS and Linux users: - -``` shell -export PINECONE_API_KEY="" -export PINECONE_ENV="" # e.g: "us-east4-gcp" -export MEMORY_BACKEND="pinecone" -``` - ### Milvus Setup -[Milvus](https://milvus.io/) is an open-source, highly scalable vector database to store huge amounts of vector-based memory and provide fast relevant search. And it can be quickly deployed by docker locally or as a cloud service provided by [Zilliz Cloud](https://zilliz.com/). +[Milvus](https://milvus.io/) is an open-source, highly scalable vector database to store +huge amounts of vector-based memory and provide fast relevant search. It can be quickly +deployed with docker, or as a cloud service provided by [Zilliz Cloud](https://zilliz.com/). -1. Deploy your Milvus service, either locally using docker or with a managed Zilliz Cloud database. +1. Deploy your Milvus service, either locally using docker or with a managed Zilliz Cloud database: - [Install and deploy Milvus locally](https://milvus.io/docs/install_standalone-operator.md) - -
Set up a managed Zilliz Cloud database (click to expand) - - 1. Go to [Zilliz Cloud](https://zilliz.com/) and sign up if you don't already have account. - 2. In the *Databases* tab, create a new database. - - Remember your username and password - - Wait until the database status is changed to RUNNING. - 3. In the *Database detail* tab of the database you have created, the public cloud endpoint, such as: - `https://xxx-xxxx.xxxx.xxxx.zillizcloud.com:443`. -
+ - Set up a managed Zilliz Cloud database + 1. Go to [Zilliz Cloud](https://zilliz.com/) and sign up if you don't already have account. + 2. In the *Databases* tab, create a new database. + - Remember your username and password + - Wait until the database status is changed to RUNNING. + 3. In the *Database detail* tab of the database you have created, the public cloud endpoint, such as: + `https://xxx-xxxx.xxxx.xxxx.zillizcloud.com:443`. 2. Run `pip3 install pymilvus` to install the required client library. - Make sure your PyMilvus version and Milvus version are [compatible](https://github.com/milvus-io/pymilvus#compatibility) to avoid issues. + Make sure your PyMilvus version and Milvus version are [compatible](https://github.com/milvus-io/pymilvus#compatibility) + to avoid issues. See also the [PyMilvus installation instructions](https://github.com/milvus-io/pymilvus#installation). -3. Update `.env` +3. Update `.env`: - `MEMORY_BACKEND=milvus` - One of: - - `MILVUS_ADDR=host:ip` (for local instance) - - `MILVUS_ADDR=https://xxx-xxxx.xxxx.xxxx.zillizcloud.com:443` (for Zilliz Cloud) + - `MILVUS_ADDR=host:ip` (for local instance) + - `MILVUS_ADDR=https://xxx-xxxx.xxxx.xxxx.zillizcloud.com:443` (for Zilliz Cloud) - *The following settings are **optional**:* - - Set `MILVUS_USERNAME='username-of-your-milvus-instance'` - - Set `MILVUS_PASSWORD='password-of-your-milvus-instance'` - - Set `MILVUS_SECURE=True` to use a secure connection. Only use if your Milvus instance has TLS enabled. - Setting `MILVUS_ADDR` to a `https://` URL will override this setting. - - Set `MILVUS_COLLECTION` if you want to change the collection name to use in Milvus. Defaults to `autogpt`. + The following settings are **optional**: + + - `MILVUS_USERNAME='username-of-your-milvus-instance'` + - `MILVUS_PASSWORD='password-of-your-milvus-instance'` + - `MILVUS_SECURE=True` to use a secure connection. + Only use if your Milvus instance has TLS enabled. + *Note: setting `MILVUS_ADDR` to a `https://` URL will override this setting.* + - `MILVUS_COLLECTION` to change the collection name to use in Milvus. + Defaults to `autogpt`. ### Weaviate Setup -[Weaviate](https://weaviate.io/) is an open-source vector database. It allows to store data objects and vector embeddings from ML-models and scales seamlessly to billion of data objects. [An instance of Weaviate can be created locally (using Docker), on Kubernetes or using Weaviate Cloud Services](https://weaviate.io/developers/weaviate/quickstart). -Although still experimental, [Embedded Weaviate](https://weaviate.io/developers/weaviate/installation/embedded) is supported which allows the Auto-GPT process itself to start a Weaviate instance. To enable it, set `USE_WEAVIATE_EMBEDDED` to `True` and make sure you `pip install "weaviate-client>=3.15.4"`. +[Weaviate](https://weaviate.io/) is an open-source vector database. It allows to store +data objects and vector embeddings from ML-models and scales seamlessly to billion of +data objects. To set up a Weaviate database, check out their [Quickstart Tutorial](https://weaviate.io/developers/weaviate/quickstart). + +Although still experimental, [Embedded Weaviate](https://weaviate.io/developers/weaviate/installation/embedded) +is supported which allows the Auto-GPT process itself to start a Weaviate instance. +To enable it, set `USE_WEAVIATE_EMBEDDED` to `True` and make sure you `pip install "weaviate-client>=3.15.4"`. 
#### Install the Weaviate client

@@ -128,7 +129,7 @@ $ pip install weaviate-client

 In your `.env` file set the following:

-``` shell
+``` ini
 MEMORY_BACKEND=weaviate
 WEAVIATE_HOST="127.0.0.1" # the IP or domain of the running Weaviate instance
 WEAVIATE_PORT="8080"
@@ -140,7 +141,7 @@ WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate" # this is optional and i
 USE_WEAVIATE_EMBEDDED=False # set to True to run Embedded Weaviate
 MEMORY_INDEX="Autogpt" # name of the index to create for the application
 ```
-
+
 ## View Memory Usage

 View memory usage by using the `--debug` flag :)

@@ -150,7 +151,7 @@ View memory usage by using the `--debug` flag :)

 Memory pre-seeding allows you to ingest files into memory and pre-seed it before running Auto-GPT.

 ``` shell
-# python data_ingestion.py -h
+$ python data_ingestion.py -h
 usage: data_ingestion.py [-h] (--file FILE | --dir DIR) [--init] [--overlap OVERLAP] [--max_length MAX_LENGTH]

 Ingest a file or a directory with multiple files into memory. Make sure to set your .env before running this script.

@@ -172,15 +173,32 @@ Note that you can also use the `--file` argument to ingest a single file into me

 The DIR path is relative to the auto_gpt_workspace directory, so `python data_ingestion.py --dir . --init` will ingest everything in `auto_gpt_workspace` directory.

-You can adjust the `max_length` and `overlap` parameters to fine-tune the way the documents are presented to the AI when it "recall" that memory:
-- Adjusting the overlap value allows the AI to access more contextual information from each chunk when recalling information, but will result in more chunks being created and therefore increase memory backend usage and OpenAI API requests.
-- Reducing the `max_length` value will create more chunks, which can save prompt tokens by allowing for more message history in the context, but will also increase the number of chunks.
-- Increasing the `max_length` value will provide the AI with more contextual information from each chunk, reducing the number of chunks created and saving on OpenAI API requests. However, this may also use more prompt tokens and decrease the overall context available to the AI.
+You can adjust the `max_length` and `overlap` parameters to fine-tune the way the
+ documents are presented to the AI when it "recalls" that memory:

-Memory pre-seeding is a technique for improving AI accuracy by ingesting relevant data into its memory. Chunks of data are split and added to memory, allowing the AI to access them quickly and generate more accurate responses. It's useful for large datasets or when specific information needs to be accessed quickly. Examples include ingesting API or GitHub documentation before running Auto-GPT.
+- Adjusting the overlap value allows the AI to access more contextual information
+  from each chunk when recalling information, but will result in more chunks being
+  created and therefore increase memory backend usage and OpenAI API requests.
+- Reducing the `max_length` value will create more chunks, which can save prompt
+  tokens by allowing for more message history in the context, but will also
+  increase the number of chunks.
+- Increasing the `max_length` value will provide the AI with more contextual
+  information from each chunk, reducing the number of chunks created and saving on
+  OpenAI API requests. However, this may also use more prompt tokens and decrease
+  the overall context available to the AI. 
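To make the `max_length`/`overlap` trade-off concrete, here is a toy character-based splitter. It is for intuition only and is not Auto-GPT's actual (token-based) chunking code:

``` python
# Toy splitter: each chunk re-reads `overlap` characters of the previous one,
# so the step between chunk starts is max_length - overlap.
def chunk(text: str, max_length: int, overlap: int) -> list[str]:
    step = max_length - overlap
    return [text[i : i + max_length] for i in range(0, len(text), step)]

doc = "x" * 10_000
print(len(chunk(doc, max_length=4000, overlap=200)))   # 3 chunks
print(len(chunk(doc, max_length=2000, overlap=200)))   # 6 chunks: smaller max_length -> more chunks
print(len(chunk(doc, max_length=4000, overlap=1000)))  # 4 chunks: more overlap -> more chunks
```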
-⚠️ If you use Redis as your memory, make sure to run Auto-GPT with the `WIPE_REDIS_ON_START=False` in your `.env` file. +Memory pre-seeding is a technique for improving AI accuracy by ingesting relevant data +into its memory. Chunks of data are split and added to memory, allowing the AI to access +them quickly and generate more accurate responses. It's useful for large datasets or when +specific information needs to be accessed quickly. Examples include ingesting API or +GitHub documentation before running Auto-GPT. -⚠️For other memory backends, we currently forcefully wipe the memory when starting Auto-GPT. To ingest data with those memory backends, you can call the `data_ingestion.py` script anytime during an Auto-GPT run. +!!! attention + If you use Redis for memory, make sure to run Auto-GPT with `WIPE_REDIS_ON_START=False` -Memories will be available to the AI immediately as they are ingested, even if ingested while Auto-GPT is running. + For other memory backends, we currently forcefully wipe the memory when starting + Auto-GPT. To ingest data with those memory backends, you can call the + `data_ingestion.py` script anytime during an Auto-GPT run. + +Memories will be available to the AI immediately as they are ingested, even if ingested +while Auto-GPT is running. diff --git a/docs/configuration/search.md b/docs/configuration/search.md index 87e8e3ab..4640d63c 100644 --- a/docs/configuration/search.md +++ b/docs/configuration/search.md @@ -1,49 +1,37 @@ ## 🔍 Google API Keys Configuration -Note: -This section is optional. use the official google api if you are having issues with error 429 when running a google search. -To use the `google_official_search` command, you need to set up your Google API keys in your environment variables. +!!! note + This section is optional. Use the official Google API if search attempts return + error 429. To use the `google_official_search` command, you need to set up your + Google API key in your environment variables. Create your project: 1. Go to the [Google Cloud Console](https://console.cloud.google.com/). -2. If you don't already have an account, create one and log in. -3. Create a new project by clicking on the "Select a Project" dropdown at the top of the page and clicking "New Project". -4. Give it a name and click "Create". -Set up a custom search API and add to your .env file: -5. Go to the [APIs & Services Dashboard](https://console.cloud.google.com/apis/dashboard). -6. Click "Enable APIs and Services". -7. Search for "Custom Search API" and click on it. -8. Click "Enable". -9. Go to the [Credentials](https://console.cloud.google.com/apis/credentials) page. -10. Click "Create Credentials". -11. Choose "API Key". -12. Copy the API key. -13. Set it as an environment variable named `GOOGLE_API_KEY` on your machine (see how to set up environment variables below). -14. [Enable](https://console.developers.google.com/apis/api/customsearch.googleapis.com) the Custom Search API on your project. (Might need to wait few minutes to propagate) -Set up a custom search engine and add to your .env file: -15. Go to the [Custom Search Engine](https://cse.google.com/cse/all) page. -16. Click "Add". -17. Set up your search engine by following the prompts. You can choose to search the entire web or specific sites. -18. Once you've created your search engine, click on "Control Panel". -19. Click "Basics". -20. Copy the "Search engine ID". -21. 
Set it as an environment variable named `CUSTOM_SEARCH_ENGINE_ID` on your machine (see how to set up environment variables below).
+2. If you don't already have an account, create one and log in
+3. Create a new project by clicking on the *Select a Project* dropdown at the top of the
+    page and clicking *New Project*
+4. Give it a name and click *Create*
+5. Set up a custom search API and add to your .env file:
+    5. Go to the [APIs & Services Dashboard](https://console.cloud.google.com/apis/dashboard)
+    6. Click *Enable APIs and Services*
+    7. Search for *Custom Search API* and click on it
+    8. Click *Enable*
+    9. Go to the [Credentials](https://console.cloud.google.com/apis/credentials) page
+    10. Click *Create Credentials*
+    11. Choose *API Key*
+    12. Copy the API key
+    13. Set it as the `GOOGLE_API_KEY` in your `.env` file
+14. [Enable](https://console.developers.google.com/apis/api/customsearch.googleapis.com)
+    the Custom Search API on your project. (Might need to wait a few minutes to propagate.)
+    Set up a custom search engine and add to your .env file:
+    15. Go to the [Custom Search Engine](https://cse.google.com/cse/all) page
+    16. Click *Add*
+    17. Set up your search engine by following the prompts.
+        You can choose to search the entire web or specific sites
+    18. Once you've created your search engine, click on *Control Panel*
+    19. Click *Basics*
+    20. Copy the *Search engine ID*
+    21. Set it as the `CUSTOM_SEARCH_ENGINE_ID` in your `.env` file

 _Remember that your free daily custom search quota allows only up to 100 searches. To increase this limit, you need to assign a billing account to the project to profit from up to 10K daily searches._
-
-### Setting up environment variables
-
-For Windows Users:
-
-```
-setx GOOGLE_API_KEY "YOUR_GOOGLE_API_KEY"
-setx CUSTOM_SEARCH_ENGINE_ID "YOUR_CUSTOM_SEARCH_ENGINE_ID"
-```
-
-For macOS and Linux users:
-
-```
-export GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY"
-export CUSTOM_SEARCH_ENGINE_ID="YOUR_CUSTOM_SEARCH_ENGINE_ID"
-```
diff --git a/docs/configuration/voice.md b/docs/configuration/voice.md
index 8c9ab854..fcd487fd 100644
--- a/docs/configuration/voice.md
+++ b/docs/configuration/voice.md
@@ -1,4 +1,4 @@
-## Voice
+# Text to Speech

 Enter this command to use TTS _(Text-to-Speech)_ for Auto-GPT

@@ -6,24 +6,32 @@ Enter this command to use TTS _(Text-to-Speech)_ for Auto-GPT
 python -m autogpt --speak
 ```

-Eleven Labs provides voice technologies such as voice design, speech synthesis, and premade voices that Auto-GPT can use for speech.
+Eleven Labs provides voice technologies such as voice design, speech synthesis, and
+premade voices that Auto-GPT can use for speech.

-1. Go to [Eleven Labs](https://beta.elevenlabs.io/) and make an account if you don't already have one.
+1. Go to [ElevenLabs](https://beta.elevenlabs.io/) and make an account if you don't
+    already have one.
 2. Choose and setup the `Starter` plan.
 3. Click the top right icon and find "Profile" to locate your API Key.

 In the `.env` file set:
+
 - `ELEVENLABS_API_KEY`
 - `ELEVENLABS_VOICE_1_ID` (example: _"premade/Adam"_)

-### List of IDs with names from eleven labs. You can use the name or ID:
+### List of available voices

-- Rachel : 21m00Tcm4TlvDq8ikWAM
-- Domi : AZnzlk1XvdvUeBnXmlld
-- Bella : EXAVITQu4vr4xnSDxMaL
-- Antoni : ErXwobaYiN019PkySvjV
-- Elli : MF3mGyEYCl7XYWbV9V6O
-- Josh : TxGEqnHWrfWFTfGW9XjX
-- Arnold : VR6AewLTigWG4xSOukaG
-- Adam : pNInz6obpgDQGcFmaJgB
-- Sam : yoZ06aMxZJJ28mfd3POQ
+!!! 
note + You can use either the name or the voice ID to configure a voice + +| Name | Voice ID | +| ------ | -------- | +| Rachel | `21m00Tcm4TlvDq8ikWAM` | +| Domi | `AZnzlk1XvdvUeBnXmlld` | +| Bella | `EXAVITQu4vr4xnSDxMaL` | +| Antoni | `ErXwobaYiN019PkySvjV` | +| Elli | `MF3mGyEYCl7XYWbV9V6O` | +| Josh | `TxGEqnHWrfWFTfGW9XjX` | +| Arnold | `VR6AewLTigWG4xSOukaG` | +| Adam | `pNInz6obpgDQGcFmaJgB` | +| Sam | `yoZ06aMxZJJ28mfd3POQ` | diff --git a/docs/installation.md b/docs/installation.md deleted file mode 100644 index 034814d6..00000000 --- a/docs/installation.md +++ /dev/null @@ -1,115 +0,0 @@ -# 💾 Installation - -## ⚠️ OpenAI API Keys Configuration - -Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](https://platform.openai.com/account/api-keys). - -To use OpenAI API key for Auto-GPT, you **NEED** to have billing set up (AKA paid account). - -You can set up paid account at [https://platform.openai.com/account/billing/overview](https://platform.openai.com/account/billing/overview). - -Important: It's highly recommended that you track your usage on [the Usage page](https://platform.openai.com/account/usage). -You can also set limits on how much you spend on [the Usage limits page](https://platform.openai.com/account/billing/limits). - -![For OpenAI API key to work, set up paid account at OpenAI API > Billing](./imgs/openai-api-key-billing-paid-account.png) - -**PLEASE ENSURE YOU HAVE DONE THIS STEP BEFORE PROCEEDING. OTHERWISE, NOTHING WILL WORK!** - -## General setup - -1. Make sure you have one of the environments listed under [**requirements**](https://github.com/Significant-Gravitas/Auto-GPT#-requirements) set up. - - _To execute the following commands, open a CMD, Bash, or Powershell window by navigating to a folder on your computer and typing `CMD` in the folder path at the top, then press enter. Make sure you have [Git](https://git-scm.com/downloads) installed for your O/S._ - -2. Clone the repository using Git, or download the [latest stable release](https://github.com/Significant-Gravitas/Auto-GPT/releases/latest) (`Source code (zip)`, at the bottom of the page). - -``` shell - git clone -b stable https://github.com/Significant-Gravitas/Auto-GPT.git -``` - -3. Navigate to the directory where you downloaded the repository. - -``` shell - cd Auto-GPT -``` - -5. Configure Auto-GPT: - 1. Find the file named `.env.template` in the main `Auto-GPT` folder. This file may be hidden by default in some operating systems due to the dot prefix. To reveal hidden files, follow the instructions for your specific operating system (e.g., in Windows, click on the "View" tab in File Explorer and check the "Hidden items" box; in macOS, press Cmd + Shift + .). - 2. Create a copy of this file and call it `.env` by removing the `template` extension. The easiest way is to do this in a command prompt/terminal window `cp .env.template .env`. - 3. Open the `.env` file in a text editor. - 4. Find the line that says `OPENAI_API_KEY=`. - 5. After the `"="`, enter your unique OpenAI API Key (without any quotes or spaces). - 6. Enter any other API keys or Tokens for services you would like to use. To activate and adjust a setting, remove the `# ` prefix. - 7. Save and close the `.env` file. - - You have now configured Auto-GPT. - - Notes: - - - See [OpenAI API Keys Configuration](#openai-api-keys-configuration) to get your OpenAI API key. - - Get your ElevenLabs API key from: [ElevenLabs](https://elevenlabs.io). You can view your xi-api-key using the "Profile" tab on the website. 
- - If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and then follow these steps: - - Rename `azure.yaml.template` to `azure.yaml` and provide the relevant `azure_api_base`, `azure_api_version` and all the deployment IDs for the relevant models in the `azure_model_map` section: - - `fast_llm_model_deployment_id` - your gpt-3.5-turbo or gpt-4 deployment ID - - `smart_llm_model_deployment_id` - your gpt-4 deployment ID - - `embedding_model_deployment_id` - your text-embedding-ada-002 v2 deployment ID - -``` shell -# Please specify all of these values as double-quoted strings -# Replace string in angled brackets (<>) to your own ID -azure_model_map: - fast_llm_model_deployment_id: "" - ... -``` -Details can be found here: [https://pypi.org/project/openai/](https://pypi.org/project/openai/) in the `Microsoft Azure Endpoints` section and here: [learn.microsoft.com](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/tutorials/embeddings?tabs=command-line) for the embedding model. -If you're on Windows you may need to install [msvc-170](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170). - -4. Follow the further instructions for running Auto-GPT with [Docker](#run-with-docker) (*recommended*), or [Docker-less](#run-docker-less) - -### Run with Docker - -Easiest is to run with `docker-compose`: -``` shell -docker-compose build auto-gpt -docker-compose run --rm auto-gpt -``` -By default, this will also start and attach a Redis memory backend. -For related settings, see [Memory > Redis setup](./configuration/memory.md#redis-setup). - -You can also build and run it with "vanilla" docker commands: -``` shell -docker build -t auto-gpt . -docker run -it --env-file=.env -v $PWD:/app auto-gpt -``` - -You can pass extra arguments, for instance, running with `--gpt3only` and `--continuous` mode: -``` shell -docker-compose run --rm auto-gpt --gpt3only --continuous -``` -``` shell -docker run -it --env-file=.env -v $PWD:/app --rm auto-gpt --gpt3only --continuous -``` - -Alternatively, you can pull the latest release directly from [Docker Hub](https://hub.docker.com/r/significantgravitas/auto-gpt) and run that: -``` shell -docker run -it --env OPENAI_API_KEY='your-key-here' --rm significantgravitas/auto-gpt -``` - -Or with `ai_settings.yml` presets mounted: -``` shell -docker run -it --env OPENAI_API_KEY='your-key-here' -v $PWD/ai_settings.yaml:/app/ai_settings.yaml --rm significantgravitas/auto-gpt -``` - - -### Run without Docker - -Simply run `./run.sh` (Linux/macOS) or `.\run.bat` (Windows) in your terminal. This will install any necessary Python packages and launch Auto-GPT. - -### Run with Dev Container - -1. Install the [Remote - Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) extension in VS Code. - -2. Open command palette and type in Dev Containers: Open Folder in Container. - -3. Run `./run.sh`. 
-
diff --git a/docs/setup.md b/docs/setup.md
new file mode 100644
index 00000000..a5d0558c
--- /dev/null
+++ b/docs/setup.md
@@ -0,0 +1,210 @@
+# Setting up Auto-GPT
+
+## 📋 Requirements
+
+Choose an environment to run Auto-GPT in (pick one):
+
+  - [Docker](https://docs.docker.com/get-docker/) (*recommended*)
+  - Python 3.10 or later (instructions: [for Windows](https://www.tutorialspoint.com/how-to-install-python-in-windows))
+  - [VSCode + devcontainer](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers)
+
+
+## 🗝️ Getting an API key
+
+Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](https://platform.openai.com/account/api-keys).
+
+!!! attention
+    To use the OpenAI API with Auto-GPT, we strongly recommend **setting up billing**
+    (AKA paid account). Free accounts are [limited][openai/api limits] to 3 API calls per
+    minute, which can cause the application to crash.
+
+    You can set up a paid account at [Manage account > Billing > Overview](https://platform.openai.com/account/billing/overview).
+
+[openai/api limits]: https://platform.openai.com/docs/guides/rate-limits/overview#:~:text=Free%20trial%20users,RPM%0A40%2C000%20TPM
+
+!!! important
+    It's highly recommended that you keep track of your API costs on [the Usage page](https://platform.openai.com/account/usage).
+    You can also set limits on how much you spend on [the Usage limits page](https://platform.openai.com/account/billing/limits).
+
+![For OpenAI API key to work, set up paid account at OpenAI API > Billing](./imgs/openai-api-key-billing-paid-account.png)
+
+
+## Setting up Auto-GPT
+
+### Set up with Docker
+
+1. Make sure you have Docker installed, see [requirements](#requirements)
+2. Pull the latest image from [Docker Hub]
+
+    :::shell
+    docker pull significantgravitas/auto-gpt
+
+3. Create a folder for Auto-GPT
+4. In the folder, create a file called `docker-compose.yml` with the following contents:
+
+    :::yaml
+    version: "3.9"
+    services:
+      auto-gpt:
+        image: significantgravitas/auto-gpt
+        depends_on:
+          - redis
+        env_file:
+          - .env
+        environment:
+          MEMORY_BACKEND: ${MEMORY_BACKEND:-redis}
+          REDIS_HOST: ${REDIS_HOST:-redis}
+        volumes:
+          - ./:/app
+        profiles: ["exclude-from-up"]
+      redis:
+        image: "redis/redis-stack-server:latest"
+
+5. Create the necessary [configuration](#configuration) files. If needed, you can find
+    templates in the [repository].
+6. Continue to [Run with Docker](#run-with-docker)
+
+[Docker Hub]: https://hub.docker.com/r/significantgravitas/auto-gpt
+[repository]: https://github.com/Significant-Gravitas/Auto-GPT
+
+
+### Set up with Git
+
+!!! important
+    Make sure you have [Git](https://git-scm.com/downloads) installed for your OS.
+
+!!! info
+    To execute the given commands, open a CMD, Bash, or Powershell window.
+    On Windows: press ++win+x++ and pick *Terminal*, or ++win+r++ and enter `cmd`
+
+1. Clone the repository
+
+    :::shell
+    git clone -b stable https://github.com/Significant-Gravitas/Auto-GPT.git
+
+2. Navigate to the directory where you downloaded the repository
+
+    :::shell
+    cd Auto-GPT
+
+
+### Set up without Git/Docker
+
+!!! warning
+    We recommend using Git or Docker to make updating easier.
+
+1. Download `Source code (zip)` from the [latest stable release](https://github.com/Significant-Gravitas/Auto-GPT/releases/latest)
+2. Extract the zip-file into a folder
+
+
+### Configuration
+
+1. Find the file named `.env.template` in the main `Auto-GPT` folder. 
This file may
+    be hidden by default in some operating systems due to the dot prefix. To reveal
+    hidden files, follow the instructions for your specific operating system:
+    [Windows][show hidden files/Windows], [macOS][show hidden files/macOS].
+2. Create a copy of `.env.template` and call it `.env`;
+    if you're already in a command prompt/terminal window: `cp .env.template .env`.
+3. Open the `.env` file in a text editor.
+4. Find the line that says `OPENAI_API_KEY=`.
+5. After the `=`, enter your unique OpenAI API Key *without any quotes or spaces*.
+6. Enter any other API keys or tokens for services you would like to use.
+
+    !!! note
+        To activate and adjust a setting, remove the `# ` prefix.
+
+7. Save and close the `.env` file.
+
+!!! info
+    Get your ElevenLabs API key from: [ElevenLabs](https://elevenlabs.io). You can view your xi-api-key using the "Profile" tab on the website.
+
+!!! info "Using a GPT Azure-instance"
+    If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and
+    make an Azure configuration file:
+
+    - Rename `azure.yaml.template` to `azure.yaml` and provide the relevant `azure_api_base`, `azure_api_version` and all the deployment IDs for the relevant models in the `azure_model_map` section:
+        - `fast_llm_model_deployment_id`: your gpt-3.5-turbo or gpt-4 deployment ID
+        - `smart_llm_model_deployment_id`: your gpt-4 deployment ID
+        - `embedding_model_deployment_id`: your text-embedding-ada-002 v2 deployment ID
+
+    Example:
+
+    :::yaml
+    # Please specify all of these values as double-quoted strings
+    # Replace string in angled brackets (<>) with your own ID
+    azure_model_map:
+        fast_llm_model_deployment_id: ""
+        ...
+
+    Details can be found in the [openai-python docs], and in the [Azure OpenAI docs] for the embedding model.
+    If you're on Windows you may need to install an [MSVC library](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170).
+
+[show hidden files/Windows]: https://support.microsoft.com/en-us/windows/view-hidden-files-and-folders-in-windows-97fbc472-c603-9d90-91d0-1166d1d9f4b5
+[show hidden files/macOS]: https://www.pcmag.com/how-to/how-to-access-your-macs-hidden-files
+[openai-python docs]: https://github.com/openai/openai-python#microsoft-azure-endpoints
+[Azure OpenAI docs]: https://learn.microsoft.com/en-us/azure/cognitive-services/openai/tutorials/embeddings?tabs=command-line
+
+
+## Running Auto-GPT
+
+### Run with Docker
+
+Easiest is to use `docker-compose`. Run the commands below in your Auto-GPT folder.
+
+1. Build the image. If you have pulled the image from Docker Hub, skip this step.
+
+    :::shell
+    docker-compose build auto-gpt
+
+2. Run Auto-GPT
+
+    :::shell
+    docker-compose run --rm auto-gpt
+
+    By default, this will also start and attach a Redis memory backend. If you do not
+    want this, comment or remove the `depends_on: - redis` and `redis:` sections from
+    `docker-compose.yml`.
+
+    For related settings, see [Memory > Redis setup](./configuration/memory.md#redis-setup).
+
+You can pass extra arguments, e.g. running with `--gpt3only` and `--continuous`:
+``` shell
+docker-compose run --rm auto-gpt --gpt3only --continuous
+```
+
+If you dare, you can also build and run it with "vanilla" docker commands:
+``` shell
+docker build -t auto-gpt . 
+docker run -it --env-file=.env -v $PWD:/app auto-gpt
+docker run -it --env-file=.env -v $PWD:/app --rm auto-gpt --gpt3only --continuous
+```
+
+[docker-compose file]: https://github.com/Significant-Gravitas/Auto-GPT/blob/stable/docker-compose.yml
+
+
+### Run with Dev Container
+
+1. Install the [Remote - Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) extension in VS Code.
+
+2. Open command palette with ++f1++ and type `Dev Containers: Open Folder in Container`.
+
+3. Run `./run.sh`.
+
+
+### Run without Docker
+
+Simply run the startup script in your terminal. This will install any necessary Python
+packages and launch Auto-GPT.
+
+- On Linux/MacOS:
+
+    :::shell
+    ./run.sh
+
+- On Windows:
+
+    :::shell
+    .\run.bat
+
+If this gives errors, make sure you have a compatible Python version installed. See also
+the [requirements](#requirements).
diff --git a/docs/testing.md b/docs/testing.md
index d87c9acd..47cbecaf 100644
--- a/docs/testing.md
+++ b/docs/testing.md
@@ -1,39 +1,46 @@
-## Run tests
+# Running tests

-To run all tests, run the following command:
+To run all tests, use the following command:

-```
-pytest
+``` shell
+pytest
 ```

-To run just without integration tests:
-
-```
-pytest --without-integration
+If `pytest` is not found:
+``` shell
+python -m pytest
 ```

-To run just without slow integration tests:
+### Running specific test suites

-```
-pytest --without-slow-integration
+- To run without integration tests:
+
+    :::shell
+    pytest --without-integration
+
+- To run without *slow* integration tests:
+
+    :::shell
+    pytest --without-slow-integration
+
+- To run tests and see coverage:
+
+    :::shell
+    pytest --cov=autogpt --without-integration --without-slow-integration
+
+## Running the linter
+
+This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting.
+We currently use the following rules: `E303,W293,W291,W292,E305,E231,E302`.
+See the [flake8 rules](https://www.flake8rules.com/) for more information.
+
+To run the linter:
+
+``` shell
+flake8 .
 ```

-To run tests and see coverage, run the following command:
-
+Or:
+``` shell
+python -m flake8 .
 ```
-pytest --cov=autogpt --without-integration --without-slow-integration
-```
-
-## Run linter
-
-This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. We currently use the following rules: `E303,W293,W291,W292,E305,E231,E302`. See the [flake8 rules](https://www.flake8rules.com/) for more information.
-
-To run the linter, run the following command:
-
-```
-flake8 autogpt/ tests/
-
-# Or, if you want to run flake8 with the same configuration as the CI:
-
-flake8 autogpt/ tests/ --select E303,W293,W291,W292,E305,E231,E302
-```
\ No newline at end of file
diff --git a/docs/usage.md b/docs/usage.md
index 8a1bb63a..80fa7985 100644
--- a/docs/usage.md
+++ b/docs/usage.md
@@ -1,67 +1,48 @@
 # Usage

-Open a terminal and run the startup script:
-
-- On Linux/MacOS:
-
-``` shell
-./run.sh
-```
-
-- On Windows:
-
-``` shell
-.\run.bat
-```
-
-- Using Docker:
-
-``` shell
-docker-compose run --rm auto-gpt
-```
-
+## Command Line Arguments
 Running with `--help` lists all the possible command line arguments you can pass:

 ``` shell
-./run.sh --help
+./run.sh --help     # on Linux / macOS

-# or with docker
-docker-compose run --rm auto-gpt --help
+.\run.bat --help    # on Windows
 ```

-2. After each response from Auto-GPT, choose from the options to authorize command(s),
-exit the program, or provide feedback to the AI.
-    1. 
Authorize a single command by entering `y`
-    2. Authorize a series of _N_ continuous commands by entering `y -N`. For example, entering `y -10` would run 10 automatic iterations.
-    3. Enter any free text to give feedback to Auto-GPT.
-    4. Exit the program by entering `n`
+!!! info
+    For use with Docker, replace the script in the examples with
+    `docker-compose run --rm auto-gpt`:

+    :::shell
+    docker-compose run --rm auto-gpt --help
+    docker-compose run --rm auto-gpt --ai-settings <filename>
+
+!!! note
+    Replace anything in angled brackets (<>) with a value you want to specify

-## Command Line Arguments
 Here are some common arguments you can use when running Auto-GPT:
-> Replace anything in angled brackets (<>) to a value you want to specify

-* View all available command line arguments
-``` shell
-python -m autogpt --help
-```
 * Run Auto-GPT with a different AI Settings file
-``` shell
-python -m autogpt --ai-settings <filename>
-```
+
+    :::shell
+    ./run.sh --ai-settings <filename>
+
 * Specify a memory backend
-``` shell
-python -m autogpt --use-memory <memory-backend>
-```
-> **NOTE**: There are shorthands for some of these flags, for example `-m` for `--use-memory`. Use `python -m autogpt --help` for more information

+    :::shell
+    ./run.sh --use-memory <memory-backend>
+
-### Speak Mode
+
+!!! note
+    There are shorthands for some of these flags, for example `-m` for `--use-memory`.
+    Use `./run.sh --help` for more information.
+
+### Speak Mode

 Enter this command to use TTS _(Text-to-Speech)_ for Auto-GPT

-```
-python -m autogpt --speak
+``` shell
+./run.sh --speak
 ```

 ### 💀 Continuous Mode ⚠️

@@ -71,34 +52,38 @@
 Continuous mode is NOT recommended.
 It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorize.
 Use at your own risk.

-1. Run the `autogpt` python module in your terminal:
-
 ``` shell
-python -m autogpt --continuous
+./run.sh --continuous
 ```
-
-2. To exit the program, press Ctrl + C
+To exit the program, press ++ctrl+c++

 ### ♻️ Self-Feedback Mode ⚠️

 Running Self-Feedback will **INCREASE** token use and thus cost more. This feature enables the agent to provide self-feedback by verifying its own actions and checking if they align with its current goals. If not, it will provide better feedback for the next loop. To enable this feature for the current loop, input `S` into the input field.

-### GPT3.5 ONLY Mode
+### GPT-3.5 ONLY Mode

-If you don't have access to the GPT4 api, this mode will allow you to use Auto-GPT!
+If you don't have access to GPT-4, this mode allows you to use Auto-GPT!

 ``` shell
-python -m autogpt --gpt3only
+./run.sh --gpt3only
 ```

+You can achieve the same by setting `SMART_LLM_MODEL` in `.env` to `gpt-3.5-turbo`.

-### GPT4 ONLY Mode
+### GPT-4 ONLY Mode

-If you do have access to the GPT4 api, this mode will allow you to use Auto-GPT solely using the GPT-4 API for increased intelligence (and cost!)
+If you have access to GPT-4, this mode allows you to use Auto-GPT solely with GPT-4.
+This may give your bot increased intelligence.

 ``` shell
-python -m autogpt --gpt4only
+./run.sh --gpt4only
 ```

+!!! warning
+    Since GPT-4 is more expensive to use, running Auto-GPT in GPT-4-only mode will
+    increase your API costs. 
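For a rough sense of the cost difference, a back-of-the-envelope estimate is easy to compute. The per-1K-token prices below are illustrative assumptions, not authoritative figures; check OpenAI's pricing page for current rates:

``` python
# Back-of-the-envelope cost comparison; prices are assumed placeholders (USD per 1K tokens).
ASSUMED_PRICE_PER_1K_TOKENS = {"gpt-3.5-turbo": 0.002, "gpt-4": 0.06}

def estimated_cost(model: str, tokens: int) -> float:
    return tokens / 1000 * ASSUMED_PRICE_PER_1K_TOKENS[model]

tokens = 50_000  # a modest Auto-GPT session (assumption)
for model in ASSUMED_PRICE_PER_1K_TOKENS:
    print(f"{model}: ${estimated_cost(model, tokens):.2f}")
# gpt-3.5-turbo: $0.10 vs gpt-4: $3.00, roughly a 30x difference at these assumed rates
```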
+ ## Logs Activity and error logs are located in the `./output/logs` @@ -106,5 +91,5 @@ Activity and error logs are located in the `./output/logs` To print out debug logs: ``` shell -python -m autogpt --debug +./run.sh --debug ``` diff --git a/mkdocs.yml b/mkdocs.yml index 0b743e91..856a9d62 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -1,20 +1,27 @@ site_name: Auto-GPT -site_url: https://github.com/Significant-Gravitas/Auto-GPT +site_url: https://significantgravitas.github.io/Auto-GPT/ repo_url: https://github.com/Significant-Gravitas/Auto-GPT nav: - - Home: index.md - - Installation: installation.md - - Usage: usage.md - - Plugins: plugins.md - - Testing: testing.md - - Configuration: - - Search: configuration/search.md - - Memory: configuration/memory.md - - Voice: configuration/voice.md - - Image Generation: configuration/imagegen.md + - Home: index.md + - Setup: setup.md + - Usage: usage.md + - Plugins: plugins.md + - Configuration: + - Search: configuration/search.md + - Memory: configuration/memory.md + - Voice: configuration/voice.md + - Image Generation: configuration/imagegen.md + - Contributing: + - Contribution guide: contributing.md + - Running tests: testing.md - Code of Conduct: code-of-conduct.md - - Contributing: contributing.md - - License: LICENSE + + - License: https://github.com/Significant-Gravitas/Auto-GPT/blob/master/LICENSE theme: readthedocs + +markdown_extensions: + admonition: + codehilite: + pymdownx.keys: diff --git a/requirements.txt b/requirements.txt index b7df2636..98530511 100644 --- a/requirements.txt +++ b/requirements.txt @@ -34,6 +34,7 @@ isort gitpython==3.1.31 auto-gpt-plugin-template mkdocs +pymdown-extensions # OpenAI and Generic plugins import From 83f11465f51e5d1776c719a0e12c343188731ec4 Mon Sep 17 00:00:00 2001 From: James Collins Date: Wed, 26 Apr 2023 12:07:28 -0700 Subject: [PATCH 159/222] Clean up image generation tests (#3338) --- tests/test_image_gen.py | 160 ++++++++++++++++++++-------------------- 1 file changed, 82 insertions(+), 78 deletions(-) diff --git a/tests/test_image_gen.py b/tests/test_image_gen.py index 7e7ae58e..c009296d 100644 --- a/tests/test_image_gen.py +++ b/tests/test_image_gen.py @@ -1,95 +1,99 @@ +import functools import hashlib from pathlib import Path +import pytest from PIL import Image from autogpt.commands.image_gen import generate_image, generate_image_with_sd_webui from tests.utils import requires_api_key +@pytest.fixture(params=[256, 512, 1024]) +def image_size(request): + """Parametrize image size.""" + return request.param + + +@requires_api_key("OPENAI_API_KEY") +def test_dalle(config, workspace, image_size): + """Test DALL-E image generation.""" + generate_and_validate( + config, + workspace, + image_provider="dalle", + image_size=image_size, + ) + + +@requires_api_key("HUGGINGFACE_API_TOKEN") +@pytest.mark.parametrize( + "image_model", + ["CompVis/stable-diffusion-v1-4", "stabilityai/stable-diffusion-2-1"], +) +def test_huggingface(config, workspace, image_size, image_model): + """Test HuggingFace image generation.""" + generate_and_validate( + config, + workspace, + image_provider="huggingface", + image_size=image_size, + hugging_face_image_model=image_model, + ) + + +@pytest.mark.xfail(reason="SD WebUI call does not work.") +def test_sd_webui(config, workspace, image_size): + """Test SD WebUI image generation.""" + generate_and_validate( + config, + workspace, + image_provider="sd_webui", + image_size=image_size, + ) + + +@pytest.mark.xfail(reason="SD WebUI call does not work.") +def 
test_sd_webui_negative_prompt(config, workspace, image_size): + gen_image = functools.partial( + generate_image_with_sd_webui, + prompt="astronaut riding a horse", + size=image_size, + extra={"seed": 123}, + ) + + # Generate an image with a negative prompt + image_path = lst(gen_image(negative_prompt="horse", filename="negative.jpg")) + with Image.open(image_path) as img: + neg_image_hash = hashlib.md5(img.tobytes()).hexdigest() + + # Generate an image without a negative prompt + image_path = lst(gen_image(filename="positive.jpg")) + with Image.open(image_path) as img: + image_hash = hashlib.md5(img.tobytes()).hexdigest() + + assert image_hash != neg_image_hash + + def lst(txt): """Extract the file path from the output of `generate_image()`""" return Path(txt.split(":")[1].strip()) -@requires_api_key("OPENAI_API_KEY") -def test_dalle(request, config, workspace): - """Test DALL-E image generation.""" - config.image_provider = "dalle" +def generate_and_validate( + config, + workspace, + image_size, + image_provider, + hugging_face_image_model=None, + **kwargs, +): + """Generate an image and validate the output.""" + config.image_provider = image_provider + config.huggingface_image_model = hugging_face_image_model + prompt = "astronaut riding a horse" - # Test using size 256 - image_path = lst(generate_image("astronaut riding a horse", 256)) + image_path = lst(generate_image(prompt, image_size, **kwargs)) assert image_path.exists() with Image.open(image_path) as img: - assert img.size == (256, 256) - image_path.unlink() - - # Test using size 512 - image_path = lst(generate_image("astronaut riding a horse", 512)) - with Image.open(image_path) as img: - assert img.size == (512, 512) - image_path.unlink() - - -@requires_api_key("HUGGINGFACE_API_TOKEN") -def test_huggingface(request, config, workspace): - """Test HuggingFace image generation.""" - config.image_provider = "huggingface" - - # Test using SD 1.4 model and size 512 - config.huggingface_image_model = "CompVis/stable-diffusion-v1-4" - image_path = lst(generate_image("astronaut riding a horse", 512)) - assert image_path.exists() - with Image.open(image_path) as img: - assert img.size == (512, 512) - image_path.unlink() - - # Test using SD 2.1 768 model and size 768 - config.huggingface_image_model = "stabilityai/stable-diffusion-2-1" - image_path = lst(generate_image("astronaut riding a horse", 768)) - with Image.open(image_path) as img: - assert img.size == (768, 768) - image_path.unlink() - - -def test_sd_webui(config, workspace): - """Test SD WebUI image generation.""" - config.image_provider = "sd_webui" - return - - # Test using size 128 - image_path = lst(generate_image_with_sd_webui("astronaut riding a horse", 128)) - assert image_path.exists() - with Image.open(image_path) as img: - assert img.size == (128, 128) - image_path.unlink() - - # Test using size 64 and negative prompt - result = lst( - generate_image_with_sd_webui( - "astronaut riding a horse", - negative_prompt="horse", - size=64, - extra={"seed": 123}, - ) - ) - image_path = path_in_workspace(result) - with Image.open(image_path) as img: - assert img.size == (64, 64) - - neg_image_hash = hashlib.md5(img.tobytes()).hexdigest() - image_path.unlink() - - # Same test as above but without the negative prompt - result = lst( - generate_image_with_sd_webui( - "astronaut riding a horse", image_size=64, size=1, extra={"seed": 123} - ) - ) - image_path = path_in_workspace(result) - with Image.open(image_path) as img: - assert img.size == (64, 64) - image_hash = 
hashlib.md5(img.tobytes()).hexdigest() - image_path.unlink() - - assert image_hash != neg_image_hash + assert img.size == (image_size, image_size) From a0cfdb0830ebecf0ab88f8c6812239b19529b51c Mon Sep 17 00:00:00 2001 From: karlivory <5895483+karlivory@users.noreply.github.com> Date: Wed, 26 Apr 2023 22:18:12 +0300 Subject: [PATCH 160/222] fix set_total_budget docstring (#3288) --- autogpt/api_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/api_manager.py b/autogpt/api_manager.py index 52b94ff7..b16f3af7 100644 --- a/autogpt/api_manager.py +++ b/autogpt/api_manager.py @@ -87,7 +87,7 @@ class ApiManager(metaclass=Singleton): Sets the total user-defined budget for API calls. Args: - prompt_tokens (int): The number of tokens used in the prompt. + total_budget (float): The total budget for API calls. """ self.total_budget = total_budget From cd8fdb31efef2d6b755011f491863a17eead1bdd Mon Sep 17 00:00:00 2001 From: WladBlank Date: Wed, 26 Apr 2023 22:08:39 +0200 Subject: [PATCH 161/222] Chat plugin capability (#2929) Co-authored-by: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com> --- .env.template | 6 +++++ autogpt/agent/agent.py | 30 ++++++++++++++++++++---- autogpt/config/config.py | 2 ++ autogpt/logs.py | 3 +++ autogpt/utils.py | 50 ++++++++++++++++++++++++++++++++++++++-- 5 files changed, 84 insertions(+), 7 deletions(-) diff --git a/.env.template b/.env.template index 1d9eefb3..3d2746c4 100644 --- a/.env.template +++ b/.env.template @@ -201,3 +201,9 @@ OPENAI_API_KEY=your-openai-api-key #ALLOWLISTED_PLUGINS - Sets the listed plugins that are allowed (Example: plugin1,plugin2,plugin3) ALLOWLISTED_PLUGINS= + +################################################################################ +### CHAT PLUGIN SETTINGS +################################################################################ +# CHAT_MESSAGES_ENABLED - Enable chat messages (Default: False) +# CHAT_MESSAGES_ENABLED=False diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index 395d00ae..6dd5918b 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -9,7 +9,7 @@ from autogpt.llm_utils import create_chat_completion from autogpt.logs import logger, print_assistant_thoughts from autogpt.speech import say_text from autogpt.spinner import Spinner -from autogpt.utils import clean_input +from autogpt.utils import clean_input, send_chat_message_to_user from autogpt.workspace import Workspace @@ -84,8 +84,11 @@ class Agent: logger.typewriter_log( "Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}" ) + send_chat_message_to_user( + f"Continuous Limit Reached: \n {cfg.continuous_limit}" + ) break - + send_chat_message_to_user("Thinking... \n") # Send message to AI, get response with Spinner("Thinking... "): assistant_reply = chat_with_ai( @@ -114,6 +117,8 @@ class Agent: command_name, arguments = get_command(assistant_reply_json) if cfg.speak_mode: say_text(f"I want to execute {command_name}") + + send_chat_message_to_user("Thinking... 
\n") arguments = self._resolve_pathlike_command_args(arguments) except Exception as e: @@ -123,6 +128,11 @@ class Agent: # ### GET USER AUTHORIZATION TO EXECUTE COMMAND ### # Get key press: Prompt the user to press enter to continue or escape # to exit + self.user_input = "" + send_chat_message_to_user( + "NEXT ACTION: \n " + f"COMMAND = {command_name} \n " + f"ARGUMENTS = {arguments}" + ) logger.typewriter_log( "NEXT ACTION: ", Fore.CYAN, @@ -136,9 +146,13 @@ class Agent: flush=True, ) while True: - console_input = clean_input( - Fore.MAGENTA + "Input:" + Style.RESET_ALL - ) + console_input = "" + if cfg.chat_messages_enabled: + console_input = clean_input("Waiting for your response...") + else: + console_input = clean_input( + Fore.MAGENTA + "Input:" + Style.RESET_ALL + ) if console_input.lower().strip() == "y": user_input = "GENERATE NEXT COMMAND JSON" break @@ -193,10 +207,16 @@ class Agent: "", ) elif user_input == "EXIT": + send_chat_message_to_user("Exiting...") print("Exiting...", flush=True) break else: # Print command + send_chat_message_to_user( + "NEXT ACTION: \n " + f"COMMAND = {command_name} \n " + f"ARGUMENTS = {arguments}" + ) + logger.typewriter_log( "NEXT ACTION: ", Fore.CYAN, diff --git a/autogpt/config/config.py b/autogpt/config/config.py index 97ab2d58..c0afaeb1 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -61,6 +61,8 @@ class Config(metaclass=Singleton): self.use_mac_os_tts = False self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS") + self.chat_messages_enabled = os.getenv("CHAT_MESSAGES_ENABLED") == "True" + self.use_brian_tts = False self.use_brian_tts = os.getenv("USE_BRIAN_TTS") diff --git a/autogpt/logs.py b/autogpt/logs.py index 3e231d8d..fc529f25 100644 --- a/autogpt/logs.py +++ b/autogpt/logs.py @@ -12,6 +12,7 @@ from colorama import Fore, Style from autogpt.singleton import Singleton from autogpt.speech import say_text +from autogpt.utils import send_chat_message_to_user class Logger(metaclass=Singleton): @@ -84,6 +85,8 @@ class Logger(metaclass=Singleton): if speak_text and self.speak_mode: say_text(f"{title}. {content}") + send_chat_message_to_user(f"{title}. 
{content}") + if content: if isinstance(content, list): content = " ".join(content) diff --git a/autogpt/utils.py b/autogpt/utils.py index c8553eaf..17be611a 100644 --- a/autogpt/utils.py +++ b/autogpt/utils.py @@ -11,10 +11,56 @@ try: except: pass +from autogpt.config import Config -def clean_input(prompt: str = ""): + +def send_chat_message_to_user(report: str): + cfg = Config() + if not cfg.chat_messages_enabled: + return + for plugin in cfg.plugins: + if not hasattr(plugin, "can_handle_report"): + continue + if not plugin.can_handle_report(): + continue + plugin.report(report) + + +def clean_input(prompt: str = "", talk=False): try: - return input(prompt) + cfg = Config() + if cfg.chat_messages_enabled: + for plugin in cfg.plugins: + if not hasattr(plugin, "can_handle_user_input"): + continue + if not plugin.can_handle_user_input(user_input=prompt): + continue + plugin_response = plugin.user_input(user_input=prompt) + if not plugin_response: + continue + if plugin_response.lower() in [ + "yes", + "yeah", + "y", + "ok", + "okay", + "sure", + "alright", + ]: + return "y" + elif plugin_response.lower() in [ + "no", + "nope", + "n", + "negative", + ]: + return "n" + return plugin_response + + # ask for input, default when just pressing Enter is y + print("Asking user via keyboard...") + answer = input(prompt) + return answer except KeyboardInterrupt: print("You interrupted Auto-GPT") print("Quitting...") From 7a006afb17a3a6320af2a67e44490f03a47a5f84 Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Wed, 26 Apr 2023 13:11:08 -0700 Subject: [PATCH 162/222] fix cassettes recording (#3342) --- tests/integration/conftest.py | 5 +++-- .../cassettes/test_write_file/test_write_file.yaml | 4 ---- tests/integration/goal_oriented/test_write_file.py | 7 ++++--- tests/vcr/openai_filter.py | 6 ++++++ 4 files changed, 13 insertions(+), 9 deletions(-) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 3c2e02b3..4534bce8 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -1,6 +1,6 @@ import pytest -from tests.vcr.openai_filter import before_record_request +from tests.vcr.openai_filter import before_record_request, before_record_response @pytest.fixture @@ -9,8 +9,9 @@ def vcr_config(): return { "record_mode": "new_episodes", "before_record_request": before_record_request, + "before_record_response": before_record_response, "filter_headers": [ - "authorization", + "Authorization", "X-OpenAI-Client-User-Agent", "User-Agent", ], diff --git a/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml b/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml index 1a5bba5c..482042f8 100644 --- a/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml +++ b/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml @@ -86,8 +86,6 @@ interactions: - Wed, 26 Apr 2023 16:40:03 GMT Server: - cloudflare - Transfer-Encoding: - - chunked access-control-allow-origin: - '*' alt-svc: @@ -213,8 +211,6 @@ interactions: - Wed, 26 Apr 2023 16:40:20 GMT Server: - cloudflare - Transfer-Encoding: - - chunked access-control-allow-origin: - '*' alt-svc: diff --git a/tests/integration/goal_oriented/test_write_file.py b/tests/integration/goal_oriented/test_write_file.py index 92ebbc56..739b0f20 100644 --- a/tests/integration/goal_oriented/test_write_file.py +++ b/tests/integration/goal_oriented/test_write_file.py @@ -50,6 +50,9 @@ def create_writer_agent(workspace): ], ) 
ai_config.command_registry = command_registry + CFG.set_continuous_mode(True) + CFG.set_memory_backend("no_memory") + CFG.set_temperature(0) memory = get_memory(CFG, init=True) triggering_prompt = ( "Determine which next command to use, and respond using the" @@ -68,9 +71,7 @@ def create_writer_agent(workspace): triggering_prompt=triggering_prompt, workspace_directory=workspace.root, ) - CFG.set_continuous_mode(True) - CFG.set_memory_backend("no_memory") - CFG.set_temperature(0) + os.environ["TIKTOKEN_CACHE_DIR"] = "" return agent diff --git a/tests/vcr/openai_filter.py b/tests/vcr/openai_filter.py index 56a7eaec..b18b694b 100644 --- a/tests/vcr/openai_filter.py +++ b/tests/vcr/openai_filter.py @@ -26,6 +26,12 @@ def replace_timestamp_in_request(request): return request +def before_record_response(response): + if "Transfer-Encoding" in response["headers"]: + del response["headers"]["Transfer-Encoding"] + return response + + def before_record_request(request): filtered_request = filter_hostnames(request) filtered_request_without_dynamic_data = replace_timestamp_in_request( From bfaf36099e76067e625286e55d765c2096f82710 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Thu, 27 Apr 2023 01:43:21 +0200 Subject: [PATCH 163/222] Fix(workspace) root resolution (#3365) --- autogpt/workspace/workspace.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/workspace/workspace.py b/autogpt/workspace/workspace.py index 91d2140e..9165bd96 100644 --- a/autogpt/workspace/workspace.py +++ b/autogpt/workspace/workspace.py @@ -112,7 +112,7 @@ class Workspace: if root is None: return Path(relative_path).resolve() - root, relative_path = Path(root), Path(relative_path) + root, relative_path = Path(root).resolve(), Path(relative_path) if relative_path.is_absolute(): raise ValueError( From a3195d84d3be0160c408c94f9870ae42123843a0 Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Wed, 26 Apr 2023 17:55:02 -0700 Subject: [PATCH 164/222] remove do nothing (#3369) --- autogpt/agent/agent.py | 41 +++--- autogpt/app.py | 2 - autogpt/prompts/generator.py | 2 +- autogpt/prompts/prompt.py | 1 - .../test_write_file/test_write_file.yaml | 120 +++++++++--------- 5 files changed, 79 insertions(+), 87 deletions(-) diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index 6dd5918b..9b48d540 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -252,29 +252,26 @@ class Agent: result = plugin.post_command(command_name, result) if self.next_action_count > 0: self.next_action_count -= 1 - if command_name != "do_nothing": - memory_to_add = ( - f"Assistant Reply: {assistant_reply} " - f"\nResult: {result} " - f"\nHuman Feedback: {user_input} " + memory_to_add = ( + f"Assistant Reply: {assistant_reply} " + f"\nResult: {result} " + f"\nHuman Feedback: {user_input} " + ) + + self.memory.add(memory_to_add) + + # Check if there's a result from the command append it to the message + # history + if result is not None: + self.full_message_history.append(create_chat_message("system", result)) + logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result) + else: + self.full_message_history.append( + create_chat_message("system", "Unable to execute command") + ) + logger.typewriter_log( + "SYSTEM: ", Fore.YELLOW, "Unable to execute command" ) - - self.memory.add(memory_to_add) - - # Check if there's a result from the command append it to the message - # history - if result is not None: - self.full_message_history.append( - create_chat_message("system", result) - ) - 
logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result) - else: - self.full_message_history.append( - create_chat_message("system", "Unable to execute command") - ) - logger.typewriter_log( - "SYSTEM: ", Fore.YELLOW, "Unable to execute command" - ) def _resolve_pathlike_command_args(self, command_args): if "directory" in command_args and command_args["directory"] in {"", "/"}: diff --git a/autogpt/app.py b/autogpt/app.py index 6bcc851b..3bbf6581 100644 --- a/autogpt/app.py +++ b/autogpt/app.py @@ -119,8 +119,6 @@ def execute_command( # TODO: Change these to take in a file rather than pasted code, if # non-file is given, return instructions "Input should be a python # filepath, write your code to file and try again - elif command_name == "do_nothing": - return f"Error: No action performed. Reason: {arguments['reason']}" elif command_name == "task_complete": shutdown() else: diff --git a/autogpt/prompts/generator.py b/autogpt/prompts/generator.py index 282b9d72..2ae9123b 100644 --- a/autogpt/prompts/generator.py +++ b/autogpt/prompts/generator.py @@ -127,7 +127,7 @@ class PromptGenerator: for item in self.command_registry.commands.values() if item.enabled ] - # These are the commands that are added manually, do_nothing and terminate + # terminate command is added manually command_strings += [self._generate_command_string(item) for item in items] return "\n".join(f"{i+1}. {item}" for i, item in enumerate(command_strings)) else: diff --git a/autogpt/prompts/prompt.py b/autogpt/prompts/prompt.py index 68d8a336..b20a1757 100644 --- a/autogpt/prompts/prompt.py +++ b/autogpt/prompts/prompt.py @@ -39,7 +39,6 @@ def build_default_prompt_generator() -> PromptGenerator: # Define the command list commands = [ - ("Do Nothing", "do_nothing", {"reason": ""}), ("Task Complete (Shutdown)", "task_complete", {"reason": ""}), ] diff --git a/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml b/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml index 482042f8..7d21c210 100644 --- a/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml +++ b/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml @@ -23,17 +23,16 @@ interactions: \"question\": \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", - \"prompt\": \"\"\n12. Do Nothing: \"do_nothing\", args: \"reason\": - \"\"\n13. Task Complete (Shutdown): \"task_complete\", args: \"reason\": - \"\"\n\nResources:\n1. Internet access for searches and information - gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered Agents for delegation - of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. Continuously - review and analyze your actions to ensure you are performing to the best of - your abilities.\n2. Constructively self-criticize your big-picture behavior - constantly.\n3. Reflect on past decisions and strategies to refine your approach.\n4. - Every command has a cost, so be smart and efficient. Aim to complete tasks in - the least number of steps.\n5. Write all code to a file.\n\nYou should only - respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. 
GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say @@ -43,7 +42,7 @@ interactions: time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This reminds you of these events from your past:\n\n\n"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified - above:"}], "temperature": 0, "max_tokens": 3205}' + above:"}], "temperature": 0, "max_tokens": 3223}' headers: Accept: - '*/*' @@ -52,7 +51,7 @@ interactions: Connection: - keep-alive Content-Length: - - '3554' + - '3490' Content-Type: - application/json method: POST @@ -60,20 +59,20 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA7yTQW/aQBCF7/0Vo7lwMQgIKeBbemmjtlIPtFFUV2hZD/aW9Y6zOy5IyP+9MjYh - ccWpVa/zxu99zx4f0aQYo86V6KK0w/lSr+6eZl8W04P6fNh8evdkH2x59/jx/exbhhHy5idp6Z4Y - aS5KS2LYYYTakxJKMZ68XUxvp/PJYhZhwSlZjDErZXgzuh1K5Tc8HN+MJxhhFVRGGB+x9FyUshbe - kQsYz5fTCC/ez/PJch6hsCj7PFoulnWEOmejKWD8/YgFhbOtZ0sYowrBBFFOGkh2Qq4pcEwcAECC - knOV5RISjKEbdgIdpBkmeA97Yy1UgUBygr03Qmvh9dZYAs1FoVwKwq0Agw9kLcMDe5sOwDhhUHBa - daqgFAZ5o6/3jT6SgwxGCUYvgz2pwM64rE1f5QQZKwsm/E0IrK6iN8Y5QcFBQJWl59IbJRd9yx4k - b7ZU2PVpS6tcCzqEr//pDWlvxGgTivP3cUQnf3Kh8g2DEriHXP1qeTR7T1pA+awqyEnoKl0h7ceF - ktTu+ik0wf+uaBtcR+f77Ez/OM/GoWV61aGHrnzWv+xWaHYvFj2OVyb9v+FFpzPtibgDT1yNdYRb - 40zI1+0tY4xBuMQIjUvpgPG4/lG/+Q0AAP//AwCiz6SNfQQAAA== + H4sIAAAAAAAAA7yTT2/TQBDF73yK0VxycSI3f0jjWwUHisQFAQVhFG3tib3NesfdHSutIn93tLbT + tEY9gbjOG7/3m9nxEXWOCWalkqyqzXS9ubt53H+68u9+FPeON2vzcb654m/v4/vP3zFCvr2jTIYv + ZhlXtSHRbDHCzJESyjG5eHs5X62Wi/llhBXnZDDBopbpYraaSuNueRov4guMsPGqIEyOWDuuatkK + 78l6TNbrZYRn76f6PI4jFBZlnkqb9bKNMCtZZ+Qx+XnEivzJ1rEhTFB5r70oKwGSrZANAxxTCwCQ + opTcFKX4FBMYioNADxKKKV7DQRsDjSeQkuDgtNBWeLvThiDjqlI2B+FegMkHMobhhp3JJ6CtMCjo + Wq2qKIdJGfTtIegzeZDJLMXoebAj5dlqW/TpX0qCgpUB7f8iJIIO8lX8YF4SVOwFVF07rp1WQrBj + B1IGVfn9mLQ2yvaQU/j6n7aTOS060746vY0l6vzJ+sYFBiVwDaqCxmtbdEwZO0eZnHOGZSjp5PDS + YQGBUMie2s3jONvXpPav30Sg+HdT98FtdDrUwfSPOw0OPdOL1Y/QlSvGJ94LofdsMeJ4YTL+LZ7N + dKLtiAfw1LbYRrjTVvty2x81JuiFa4xQ25weMInbX+2b3wAAAP//AwAxpIznhgQAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7be04376c97296c3-SJC + - 7be2da273c20ce54-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -83,7 +82,7 @@ interactions: Content-Type: - application/json Date: - - Wed, 26 Apr 2023 16:40:03 GMT + - Thu, 27 Apr 2023 00:12:22 GMT Server: - cloudflare access-control-allow-origin: @@ -95,7 +94,7 @@ interactions: openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '18864' + - '14714' openai-version: - '2020-10-01' strict-transport-security: @@ -107,7 +106,7 @@ interactions: x-ratelimit-reset-requests: - 17ms x-request-id: - - 77d98331ebd579dd929c8de10a83a035 + - 23366aac1ddcc141fc735b4a1a8b268c status: code: 200 message: OK @@ -135,17 
+134,16 @@ interactions: \"question\": \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", - \"prompt\": \"\"\n12. Do Nothing: \"do_nothing\", args: \"reason\": - \"\"\n13. Task Complete (Shutdown): \"task_complete\", args: \"reason\": - \"\"\n\nResources:\n1. Internet access for searches and information - gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered Agents for delegation - of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. Continuously - review and analyze your actions to ensure you are performing to the best of - your abilities.\n2. Constructively self-criticize your big-picture behavior - constantly.\n3. Reflect on past decisions and strategies to refine your approach.\n4. - Every command has a cost, so be smart and efficient. Aim to complete tasks in - the least number of steps.\n5. Write all code to a file.\n\nYou should only - respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say @@ -158,17 +156,18 @@ interactions: above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": \"I will use the write_to_file command to write ''Hello World'' into a file named ''hello_world.txt''.\",\n \"reasoning\": \"The goal is to write - ''Hello World'' into a file named ''hello_world.txt''. 
The write_to_file command - is the most appropriate command for this task.\",\n \"plan\": \"- Use + ''Hello World'' into a file named ''hello_world.txt'', and the write_to_file + command is the most appropriate for this task.\",\n \"plan\": \"- Use the write_to_file command to write ''Hello World'' into a file named ''hello_world.txt''.\",\n \"criticism\": - \"I need to ensure that I have the correct arguments for the write_to_file command.\",\n \"speak\": - \"I will use the write to file command to write ''Hello World'' into a file - named ''hello_world.txt''.\"\n },\n \"command\": {\n \"name\": - \"write_to_file\",\n \"args\": {\n \"filename\": \"hello_world.txt\",\n \"text\": - \"Hello World\"\n }\n }\n}"}, {"role": "system", "content": "Command - write_to_file returned: File written to successfully."}, {"role": "user", "content": - "Determine which next command to use, and respond using the format specified - above:"}], "temperature": 0, "max_tokens": 2955}' + \"I need to ensure that I am using the correct file name and that the text is + written correctly.\",\n \"speak\": \"I will use the write to file command + to write ''Hello World'' into a file named ''hello_world.txt''.\"\n },\n \"command\": + {\n \"name\": \"write_to_file\",\n \"args\": {\n \"filename\": + \"hello_world.txt\",\n \"text\": \"Hello World\"\n }\n }\n}"}, + {"role": "system", "content": "Command write_to_file returned: File written + to successfully."}, {"role": "user", "content": "Determine which next command + to use, and respond using the format specified above:"}], "temperature": 0, + "max_tokens": 2970}' headers: Accept: - '*/*' @@ -177,7 +176,7 @@ interactions: Connection: - keep-alive Content-Length: - - '4667' + - '4612' Content-Type: - application/json method: POST @@ -185,20 +184,19 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA6SS3W7aQBCF7/sUo7nhxiDApaR+gsBNG4n0R3WENuvB3rLesXbHgQr53SvHJlAS - pEi9nTNz5pvdc0CTYYK6UKLLyg7nn/Xq29OCfqrFF728i5c/vtq7eJHH98vdLUbIj79JSz8x0lxW - lsSwwwi1JyWUYTL5dDOdTefT8SzCkjOymGBeyTAezYZS+0cejuPxBCOsg8oJkwNWnstK1sJbcgGT - yTiOIzyZn4TZxwiFRdlTaXIzbyLUBRtNAZNfBywpHI09W8IEVQgmiHLSYrITcu0Jh9QBAKQoBdd5 - ISHFBPpiL9Be2mKKC9gZa6EOBFIQiArbdQ9IoLkslctAGF5qx65RitG5pScV2BmXd76rgiBnZcGE - N8eBN7DzRozLYXBL1jJ8Z2+zARgnDAo2xhI4VVIGg6LV17tWH8leBiNYXWVt9xUEJQcBVVWeK2/U - mb5hD1K0XW8cUVnlOv4h3P/3k+j2Pm1CeXxqR/Q8SS7Uvp1TAgso1FPnodl70gLK53VJTkIPe4Xh - cl2oSG2v/iq894TOsomOIerbX2Wo/Ztu2z90F1DK55fxO89LH5ZzvgxCrTWFsKmt/fMC9AzVs6Wu - wSbCjXEmFOvOCRMMwhVGaFxGe0zGzUPz4S8AAAD//wMARSTZRwcEAAA= + H4sIAAAAAAAAA6SSQW8TMRCF7/yK0ZydKkuSttkbAiGFA+JQBIKtItc7Wbvx2pZnFoqi/e9om00T + QishcZ3x++bNPO/Q1ViisVpMm/zkann/9b3JZnm1XMnHmzfLd7b4IrL6xB++vUWF8e6ejIyKCxPb + 5ElcDKjQZNJCNZbF5fXrxWI+my8UtrEmjyU2SSazi8VEunwXJ9PZtECFHeuGsNxhyrFNspa4pcBY + FtPiWuERfmzMLhVKFO2PpWIx7xUaG50hxvL7DlviAzhHT1iiZnYsOshgMwahMKywqwIAQIViY9dY + 4QpLGItjgx5kKFa4gp/Oe+iYQCyBaN6uR4MEJratDjVIhKfa4dVFheoUmUlzDC40e+6NJWii9uD4 + WbmCR/CLIweZJWgjC+iUckzZaSHYxAxih+4zHpLXYT9+Ap//eyOTnTjjuD1cKhA9KilwlwedFliB + 1T/oCXZcCbgzhpg3nfe/ztGcSG9fDAD+1e4e2atD3uPzv+IOuqX9tD+ucWZK5+b8p5xGO+Z66q8+ + X/Ko7EdvVeixV7hxwbFd70lYIktMqNCFmh6wnPa3/avfAAAA//8DAKZ6myqyAwAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7be043fc3bba96c3-SJC + - 7be2da943bd5ce54-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -208,7 +206,7 @@ interactions: Content-Type: - application/json Date: - - Wed, 26 Apr 2023 16:40:20 GMT + - Thu, 27 Apr 2023 00:12:36 GMT Server: - cloudflare access-control-allow-origin: @@ -220,7 +218,7 @@ interactions: openai-organization: 
       - user-adtx4fhfg1qsiyzdoaxciooj
       openai-processing-ms:
-      - '15127'
+      - '10505'
       openai-version:
       - '2020-10-01'
       strict-transport-security:
@@ -232,7 +230,7 @@ interactions:
       x-ratelimit-reset-requests:
       - 17ms
       x-request-id:
-      - 468c3c77efd74e3a2d396ef6033a5602
+      - f593fff7d415793b53e5124283ad25c9
     status:
       code: 200
       message: OK

From 3b56716a680fe447ae8d8e3b0cd4c3446d30efcf Mon Sep 17 00:00:00 2001
From: Eddie Cohen <31940792+edcohen08@users.noreply.github.com>
Date: Wed, 26 Apr 2023 21:20:15 -0400
Subject: [PATCH 165/222] Hotfix/validate url strips query params (#3370)

* reconstruct url in sanitize

* tests for url validation

---------

Co-authored-by: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com>
---
 autogpt/url_utils/validators.py   |  4 ++-
 tests/unit/test_url_validation.py | 59 +++++++++++++++++++++++++++++++
 2 files changed, 62 insertions(+), 1 deletion(-)
 create mode 100644 tests/unit/test_url_validation.py

diff --git a/autogpt/url_utils/validators.py b/autogpt/url_utils/validators.py
index 8754667c..c85a00ba 100644
--- a/autogpt/url_utils/validators.py
+++ b/autogpt/url_utils/validators.py
@@ -61,7 +61,9 @@ def sanitize_url(url: str) -> str:
     Returns:
         str: The sanitized URL
     """
-    return urljoin(url, urlparse(url).path)
+    parsed_url = urlparse(url)
+    reconstructed_url = f"{parsed_url.path}{parsed_url.params}?{parsed_url.query}"
+    return urljoin(url, reconstructed_url)
 
 
 def check_local_file_access(url: str) -> bool:
diff --git a/tests/unit/test_url_validation.py b/tests/unit/test_url_validation.py
new file mode 100644
index 00000000..2bbd3f9e
--- /dev/null
+++ b/tests/unit/test_url_validation.py
@@ -0,0 +1,59 @@
+import pytest
+from pytest import raises
+
+from autogpt.url_utils.validators import validate_url
+
+
+@validate_url
+def dummy_method(url):
+    return url
+
+
+successful_test_data = (
+    ("https://google.com/search?query=abc"),
+    ("https://google.com/search?query=abc&p=123"),
+    ("http://google.com/"),
+    ("http://a.lot.of.domain.net/param1/param2"),
+)
+
+
+@pytest.mark.parametrize("url", successful_test_data)
+def test_url_validation_succeeds(url):
+    assert dummy_method(url) == url
+
+
+bad_protocol_data = (
+    ("htt://example.com"),
+    ("httppp://example.com"),
+    (" https://example.com"),
+)
+
+
+@pytest.mark.parametrize("url", bad_protocol_data)
+def test_url_validation_fails_bad_protocol(url):
+    with raises(ValueError, match="Invalid URL format"):
+        dummy_method(url)
+
+
+missing_loc = (("http://?query=q"),)
+
+
+@pytest.mark.parametrize("url", missing_loc)
+def test_url_validation_fails_missing_loc(url):
+    with raises(ValueError, match="Missing Scheme or Network location"):
+        dummy_method(url)
+
+
+local_file = (
+    ("http://localhost"),
+    ("https://localhost/"),
+    ("http://2130706433"),
+    ("https://2130706433"),
+    ("http://127.0.0.1/"),
+)
+
+
+@pytest.mark.parametrize("url", local_file)
+def test_url_validation_fails_local_path(url):
+    with raises(ValueError, match="Access to local files is restricted"):
+        dummy_method(url)

From 02f546d2bc9cb0744da9badab7d1fe5f51ac1446 Mon Sep 17 00:00:00 2001
From: merwanehamadi
Date: Wed, 26 Apr 2023 18:45:03 -0700
Subject: [PATCH 166/222] Run the integration tests in the CI pipeline BUT
 without API keys (#3359)

* integration tests in ci pipeline

* Update CONTRIBUTING.md

Co-authored-by: Reinier van der Leer

---------

Co-authored-by: Reinier van der Leer
---
 .github/workflows/ci.yml                      |   2 -
 CONTRIBUTING.md                               |  15 +
 tests/integration/__init__.py                 |   0
 .../test_local_cache/test_get_relevant.yaml   | 500 ++++
...t_generate_aiconfig_automatic_default.yaml | 93 ++++ ..._generate_aiconfig_automatic_fallback.yaml | 87 +++ ...t_generate_aiconfig_automatic_typical.yaml | 97 ++++ tests/{ => integration}/test_local_cache.py | 1 + tests/integration/test_setup.py | 78 +++ tests/test_image_gen.py | 6 + tests/unit/test_commands.py | 1 + tests/unit/test_setup.py | 78 --- tests/utils.py | 23 +- 13 files changed, 897 insertions(+), 84 deletions(-) create mode 100644 tests/integration/__init__.py create mode 100644 tests/integration/cassettes/test_local_cache/test_get_relevant.yaml create mode 100644 tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_default.yaml create mode 100644 tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_fallback.yaml create mode 100644 tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_typical.yaml rename tests/{ => integration}/test_local_cache.py (99%) create mode 100644 tests/integration/test_setup.py delete mode 100644 tests/unit/test_setup.py diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2815bb13..ede81c2c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -72,8 +72,6 @@ jobs: - name: Run unittest tests with coverage run: | pytest --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term - env: - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v3 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 10043ecb..1ac320bd 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -105,3 +105,18 @@ To increase coverage if necessary, [write tests using `pytest`]. For more info on running tests, please refer to ["Running tests"](https://significant-gravitas.github.io/Auto-GPT/testing/). [write tests using `pytest`]: https://realpython.com/pytest-python-testing/ + + +In Pytest, we use VCRpy. It's a package that allows us to save OpenAI and other API providers' responses. +When you run Pytest locally: + +- If no prompt change: you will not consume API tokens because there are no new OpenAI calls required. +- If the prompt changes in a way that the cassettes are not reusable: + - If no API key, the test fails. It requires a new cassette. So, add an API key to .env. + - If the API key is present, the tests will make a real call to OpenAI. + - If the test ends up being successful, your prompt changes didn't introduce regressions. This is good. Commit your cassettes to your PR. + - If the test is unsuccessful: + - Either: Your change made Auto-GPT less capable, in that case, you have to change your code. + - Or: The test might be poorly written. In that case, you can make suggestions to change the test. + +In our CI pipeline, Pytest will use the cassettes and not call paid API providers, so we need your help to record the replays that you break. 
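For a concrete picture of the workflow above, here is a minimal sketch of a cassette-backed test (the test name and body are illustrative assumptions, not part of this patch; `pytest.mark.vcr` comes from the pytest-recording plugin, and `requires_api_key` is the helper added to `tests/utils.py` below):

```python
import pytest

from tests.utils import requires_api_key


@pytest.mark.vcr  # replay a recorded cassette from tests/integration/cassettes/ when one exists
@requires_api_key("OPENAI_API_KEY")  # substitute a dummy key when none is set, so replays still run
def test_my_prompt_change() -> None:
    # A keyed first run records the real OpenAI responses into a cassette;
    # subsequent runs (including CI) replay that cassette and consume no tokens.
    ...
```

Committing the generated cassette alongside the test is what lets the CI pipeline replay it without any API key.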
diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/integration/cassettes/test_local_cache/test_get_relevant.yaml b/tests/integration/cassettes/test_local_cache/test_get_relevant.yaml new file mode 100644 index 00000000..8bd34929 --- /dev/null +++ b/tests/integration/cassettes/test_local_cache/test_get_relevant.yaml @@ -0,0 +1,500 @@ +interactions: +- request: + body: '{"input": ["Sample text 1"], "model": "text-embedding-ada-002", "encoding_format": + "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '92' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1R6SxOyOrfm/PsVu/aU/kpFJIs9AwG5miB4o6urSwQREJFLAuTU+e9d+p463T1x + ALGiycpzW/mPf/31199NUmb34e9//vr7VfTD3//j+yy9Dbe///nrf/7rr7/++us/fp//38isTrI0 + Ld75b/jvZfFOs+nvf/5a/veT/zvon7/+NuPiQ5nS1Anz5acM4uJ1ZNpa6bouyZMjLJdQMZs0H071 + gcrA3keVHUtt4GxxmpbKpd8Q5urcTppOaEXZRcct8U/hJ+LRfchQhzZv4iqmnoz4sN3CaV17ePPq + P15npqcc9Pf9yLCTWmi8r4mBLoLgMxWNqKR6tKTwcM83rLRb8OZ4jLcgX4URN/ux8KgSvxtUsFJl + ji1XiBfJs1dO/JwwcyO5+oS8tQGQGluyS6ibDOB1N3AIDfAQNarXy5f4iE5iTpgpd5bOk8GtoPH0 + HsOptJOxux9cqNitYI6nd2WR7zcusGJ7JGpq5px/KtyjfIMRUzeG7bHJkVto/O1AVxpqk1m0BRFN + SbH/vdeHi6MBXGZXI/jlC5wv2PWIOOlvTDvOQTJpbiSjOiUWBn67JNy/Xn3wyyUi2HaSjkVXVwX5 + yBFTYytLhsxY9LBYXp/EROPEh9rYuqjFqkWiNiiSWeR2C/JN7Yl/Pr8iflyNAbCk7Yg1r4/JpO0M + G8xTsGGOfa35tHx1Mypq4cX2/uB59LA73SC1IoMR65B3w1IPd0qRVwqx/fuKT055zWVf2nV487pf + +fRQ7Roqeq6JqepFN54Wnx088MckesuJ3t+PqyvE75tLZ3o8JPMhMlr4iF2GZWU36/w45bmSc/HI + dLy0+GQ3VoMEB9XM552YTNkhCiFU45KiTaXpq9qOJbg894jo7eGgz6zoCpgyqhHttEbelK2XPRQv + pWLO+/XoRr69jhBErUy2w1R11K1iEbQNbYlNLi3nPpF6KNr9kaloj/mYvhoK8Ts1iZvG124+NMxA + S6FGeNwYjc6mdyOB3jIdo4Sq5ci3wQzWLgqZKgPhc56QEbFzLBNVjktv1q39HQQtCEhc58vvfCOF + o9V/8GrLRG/EdZWBtspkorfhAk3Ds/VBavSQOc6ClJOmNT3o7+xIdPKu0LiT9mcYqLRlD+uglnxx + WI0gLsUdMbfUSqbMWFMQUfDBrWaXXcv2xxEuihQwa65PUecf7zbo/RIRuz28k2lJ9XAhn0WJqcl0 + jJpTezuDoF08st9Pli6K2b4CRweN2K3aJDRcbRuI5lRiu+tcd62elwKasl5j3/pG7RvWNdSZb7Mt + fY1Rfw9e7YabqoXFSyZ7rHhMdzADATOrZ/eo74bABr2bF7jE4Qf1lXWtwdkqKv2eRj7nPZ5huoQp + 3by8mI+ysLuDYFaIHA2ZdsxE3oyGz+fAtFVYc25eZwOma3JkhlUZ3YgRwihXypoZxUg5m86rCriG + MDG37Oy1b+loo/ipFsQx4lXZiuvgKhedfyJeLIlRf0rFUZaKpiPGM4m42BQTVRabycYoyYJo5NOB + /v4PcR8z6bgvhi749InpSp/LaKoX8xHBZT4Tc/Pu9LES4wJC9dwQuzvPel9ZCwEtlrcn80f/xbn0 + DnuwrBMmuNq9kikw1ZtS9ZgSo3rb+ojjZwHdOtdYGNmWN873cwPB4aUwL/G3nZhnkg2N2ZlULoYK + fUBUJZCjzsALYSGgQWu2LgzDAqhQzxKnsRbsAMKsJbozyeWc7ycXILUV4i7GRufF9ixDcmcYr4Ps + HU1ZPM8/fGV7Zn6i+Q2LCqzj7sFIwaSOfZS4gGY3AXMVKU9md3J24EK5ZNvDAXs0h3knT+mg4sXz + mnv9/Vk1Pzwi1iy+SiqkwRZEFH6IK9wT1M+fvQyOc26Yux2R1/zqoz6PFcPv6ovfnlEp/ttsmRXd + PL3vXv4ZhdbKomsehwmT4HMHTRFuzDEQ72YRoSUSsHOmrMjijn+UAW+++8H2YWR6IjvlNTQ7puFl + zVSPhviDoajhxbCzeKFRXtUAoowHLO1x5VF309TQaOcb28X6q+PJZ1dAEloiXqFHyHlSdTtUPJsN + nkRJRZ+l9RTBneaantTjs5ycMijAIujD9M+iTJh0kEK4CODjObotEY2VRJCDu3tjhhWZnRhebCo7 + 6rzHyzL0EV+wdY9++OBPgcbnHPtLcDe+S2JPtEs+mmK1CaP8Tqx+LyQ0dz5XFHf9iuz7riypgoYW + tNHBxLs5p6TvPmxEl0+bM3OzufJZSbsePbaSSHR3JZaTrYU5DP3KJMbr9vL4wjmHSBNzkcXVxtep + aIsimPfpitf0dI64eSyO8JkuM3OzRdjNwlHK5STOHsRVmlGflVrH8P0+Mcpli9o8vNfyZ/Uu6Glj + 7vUex88c9I4azL8kozeHF7VHx70VEPyObH391SOQC++CrjYXmXfSPi9Q8hAt4s9rt6wF2wkgCWBB + jAKPnPvHowvxKwjJtbHVkhejlAHs9xIhz2uuz65wodDo95Bg761EQ/2UrsCSpvutVzc4t0kF8+hu + KRLte0lF1Li/98xUX3k5yo9RBH+tMlz0fhxN2UnsYbrdDySklfadb8zgNOHyWz+G/hnmjw9SZ3vM + 
/uy9iPu6k/3wll0/+y5hIz7vIK4OB7oOG+AzO1sCTJHVEFzPV9RF4qWBKr8/sBifomjOHcVA2ux7 + zLxXbzSzQ5WDX6xlvDAdu0uHNMxA3NgTO4ufUzmzMwGkNy+PuYqc8p7vmkJ2hWpippycEfc/rQSO + fXl/+dLlfKEKM2LpOqPyFqceP8qqCrgPJOKPiwKNcpSGsHKDgKS+N3j88yhDkLPgTgda3fQ5ViIB + vvqRuIVX82bHXiKS6FXGY2xl0TQcZUCwJxJVosvTew16Hij44y2wEm5rfVhelwVoCtyY8+odfT7k + VqX85ntYIYt6y5Yo4qaj0GpHnG64dEmLHt7jwH71Ml26qIVsvX8y7Xsee2s9NXApA4lop1OqcylD + GIYqaJlh6auE6hdpRkt46My7lkn0Pd85uOtdSHaryuXf+rlDa2Qdc9y573pLrSSAm+BSCWuat/6U + fAuhvdHxeNcq3ortx4bUfXLm3WCORouFLqTODegkmjqfbHzK4LHPMUZXLY34cbiOkDyWFvEvt2fU + p8dnj4qWHOnzIhgRT+5e/dOfzBNbuexlPC3Rg7S7P/px/vIVGtomJLYvRGh2XX6EBVogpjdtk/Ai + cjGqgvLG7G6ToSlQxxbqu+cxvbHdZLIVl8KXf+hnV9yT8b6oa3C0jUGMZx+iL56KKCRhR6xg30R8 + Go8GxHXsMFdXu4QVl1gAwZ4ZXj77GbWh0rpwNKuJbJmdelOw5zUAOY3Ev7gHfdaHCIOZtE/K2zmP + ZjHdSCh+XfZUXjw+Ho21coYpkFdU1lqtY8U2k+WhOr2Y4wg5n4Y4u8n652Ox3fmWebzFQQWPrSxi + MTaW0TTQl/tbH5qjMSmnpR4aKN/4CE9rF5fjPRgaOUkpwYuyWHX9V8+idN/0xLDEORq0amfAdPaf + uIvX725ava4GXMpQYubm7ekzy9gd4vqI2fc8cB4NfQb6p/bp0vSGkiqL1Q0sO3EpHKebzhedXYPg + CSXTRrLpxmpOM5SNqw2d2XUfTTVqayQf7gfmCkrnzfn52sIHKS5RkZ973/WRwXqIlHm32zGZw1AC + tMK5yqzzKY/4Fw9BTrwSr0efdLxorrAR5x4T936CbsS24KPlYpiJTTYNnxkvCtTsjQ9JVscAfSCX + baS3rU1s0jh8GFqnRzGVPsyTJsNbT4p+Br6VCEZJ+uxoeLH7P/rKseprMv3wrd3VJd7U2NLHzrvZ + qGAv9PVbTVL9+Lwxcky0dbLv5t/5CPcuZ+5itD2qy5kN3So1CBGE0euKle0jTR5rYoVm4M1CrAZQ + ZxeBCk76RqMFzg2ksn8zxzJuyTQsrnfw11tGrOnZltyY1wDhdWkz/3LTIl5MlyX68gtumhuN6rzf + zUhvioC+pNUdfWBY7aAo5wlPK81Cs6h6LdTH4Y37+Yyi/ucXpvh2wtxWKPrjh8w7v9I5SLcJX3Rq + pfz8sB95C8SK3LTBfzKdblmLvO6zM0awrmfCrKhrvTnWAkMxb9WLOK9hjYbM5uMPr4n79UujZY89 + nKYoY9/zo3dmqi3htHyEXz6Cbqqf0g26Tb9kN2UX6lPNPQNJHzViu1tMkqkufEOuj23P9HaZ8unB + 4xDkxCm//m1bjph5gLhznIntK6beV9cyRNVV27D9F48nW9n2imWSMxWTtIumi2nK6Ov3qKwhtRsr + Lz9Dks0eRdeiL/lU9iIMdZoyN0ViR396MTV3B/xcJ/uS366kQLg1TKZx4ZbMYvt0Qb7bFdnj18w/ + l+VuifTX8Y7X3DK6b/3RP/ip3pdPNNS16oIZEWCm1t08LpmOij6Kvvvhi859/WTDgsc5IxB10aRt + NFvh6jpkHhrenEvn9xXgivZYweIBzYecVJBLhsFUGT31mdUoAItsPsw/Ld/JzBjN5K9/pfzDuN7f + bWwjcVHEVIzrORpq/9lA8WRLvFhMe33EgGXQm7f31VerpNV3mxmmSB2Iq7y1RHybDxfVp76hMJ/8 + jhfbTNqw0ghYXI8vb7qcNhiEvfogptbJ+lCncNw0ZuV++StH7Q9PvviDV9u9pLPPUt6Cu/b2+LWl + 74j+6hUX7E5X8s3Wx1k2JJQkXciscCdwKp5uN8iVZ822Ufvy5jgU7jA8DZGosl15Q5ZlGPT6dqUK + 1fbJKts/MEot32HYMxdRYxWtBJemrohhXJ6o5/mth6PFK6p88kyfLmqbweUzJGy3vofdeF9bOyQY + mYFlRVZ4g+NPAUNurhiuA5lPMG1scMjkUSYoWiTqVbNF4vg5sH1YfX9P3apScFOLn5/Ue9ktDBjy + rUynMZc6vnDcClhoERxszic0DcdZAMHsRIwkUutD0D2XcHkXDdutIo83cp/vkHlJe6YTs+bcX5hn + eSG1B+aJ1sC56We5nM+rCEs4zL3RAu0KwX1/+sNvLTtbAHVYLJmaXnA3u82zBr3EFV3W2dObanVn + IMfCI7M/49Wb2QlCJD0rRrYDqcvZXV1FENfbDSOCEHh8VC4qsDo0CDG7uuMmtZeoTl2fOdWO6WPq + uz7EpXv74mPjzQzSHvyXwIlRPAjinz7MlWFQgDQf5iTc/GwM9OUbslu94nJaXvMlXMoLx3BRsE71 + j35HjRafiJpeaMeKywGAb2VCBe+Yo/qw3TfQLRObDl89N7vKeFa6zcSYtipNtMQGvcvizB3K1Mst + 6Xe3yw6q++FDVNmZOTNF2P74A8fe61NOy7tU/fI45hbeDo3dVbv+yau0i2AkortbBOsvfuKFIX3K + H38r4lidibo5WfqErvUZjo74pCv0mBErdqEAHSjlHz081CrewXLBZppGu8LrO+/mAt/ZAtE/fh19 + /awE7fbMCbFKnFB9NfdKMZz3+Hnxz96k7S5LOI32m2lHvtAnyGcX8rUjkN95o2ES3OHhBBlLadkl + s1hoLcix1jFTPxy9wTl/QlnqLwpd05MY9fj0yoBVrs+sAK91JmFLQl8/h8XlNPAJdE+GKoufRG+f + WjIEN6GAfCX4THcIdHxUHqqsTTbB0rd+ukU9ir88gQqO3yDu66krL3iS0wAxn8/NW8sgicsbvkTK + mvP2PBqyHFxPX30Rd3zC26uymPyBmAhHnEvqJkSP/duh4s3Syok46xyS2+v81Sv7jpuv1Rb0blzQ + 7nZbRuyjKVuEh5gx/BqwPtWUnwF/LIP5p+qiDwM93JHU7XdY8IygG090NUMSSAaxmyT1xnTuvvnk + tmc7cWcj/vMvjEkJOyzHhlMlWN7l1szfxPb7nT7+8peYwYuYGU0jtuCNIbc2TfDGfQ5lvzusK0ju + TcT8izvp7CjbW/TNa+gru7NuGixJRa0veMyKHN+bLoaMQc7CO9vFYlEO2anbgrMj7JsvbX75ZgGO + urb/jBf1ive//ITOdJV3PLmgJZKeNSPGNx/48cWmqihh/sRxt4F7e0PDsNHwdNrmHdXzEqDOsM3M + 
dPfi07CoXCQY+YJsWZvocy61ZyRsE4uZmbDvZn0qBDiBMNPkehjKnguqoCzE1xHTmvQ6Ty6NDR1C + b2Ju0yaZwya5gv7c9RR996e39ncXnTazgKXufdDFePOQZZGPLtstn0rHzIAGv/XBEj4q5Wug8R2G + 5nWiDCo34v6RiWgKpBXTjqT85gdCBc6+upLIdg98VuJIAKnRQrbvHbOctJBfUdG6MXO+53tMXzmF + bvVcEH9aKsmIMwOgqtGWKrTsojHVix5S5wq4XeRrNNTrcInYrWZEO72GhE0SqiHIgpAY+U7iQx1f + ZXD8/M5cPYy98bffjmGumTatonL+5rmoPrI32++DNR9q/9Oi4LQaMJy2sTcNiyBTjsZr/OVv3rCc + 614W1ANhxlOZ+DRYowoPy62/+PLWabzxfWD3qaCyZusd+1S7HjV7p6J5esGlqK9kCvl6azOiXM/e + mCLPRt/1JbrXRH/8FwSnecbfPAK9Lk7aoHZXlWy30pNuWraWjz4LVyDemooljc9y/sMH+pKijM86 + iUP0WZANwy8v8L79hCXIZe3gEbGeM+mBpT9+qoWt+NW/doDM+FXhMW325RK34Etc22B6NNMkGqtZ + G8FKcp1Zh6JMVjUqakjVzZ14sbmJRvlyOKJU14/E9oclGl5ICKDRkhMum26tD/YZBFS9QWD2fhrR + aDn+7ZcvEuwZY0e/+TZydEFjpj7rSf37P/izdZh1KEN9WOrDiHA1xkxvkJWMeN3doXjJH+bPxlBO + mtJXKE5RQyd+h4jqpBdkM10fib1/6N6PL5D8OF8xzMa+W5tUFSG4SC3NbJA6tnC29Z985scPc+xm + AnzzXCwvcOuNaRCDbEbGjtyE3vcmzU1kYHHB2TZ4LPjnh1/C3rzRBWz9aKyCxx1+ea13PezLOW5M + H5IH2jIi3GdvGmydwkli2z/1w4tKw8CYnBAC2z6Z6rar5aHEFIvrvuBULLQGzPN5Rb55hcd/+/fr + T7h3HSMq6FOh1OGLMVc9r6I+FfkVllxqWOath26Ol5ABy4UrM+9bB/W4IxLgonli3oYPzvwjWyLz + iJWff+d9mmq+chr3BV5889hpeYUCkmy1I/Zn30UDyNVRyvmGEFXiH692ib5TOOEn5t3iVyfqpAf4 + 5d3qvSzQF5+O6Mv3xJS0yXtdiqUL4WX2f3lqOSyvjYi++gA3jYK8lqmxiE5SazHtOD3LIXi/RoBj + l/7R780JvVTQ6zQiqhRuEHUV6Qj5xlXpOjwzj+qkAri83yXTRLPkc6M2qtytuzUVvnzDpTO7gT81 + NsPuXHDmz+cd+GPlU+rpT8QWEAVKt7o5dJQKV+dRrmD07Rcx4wW6zj8l2qJsc4kpXIaUT867Cf/k + Yf7Xz021/2lkRx333/oOuvaX/6ZbiNnRQGnSjTjbQeOrA62+/DjreRDAdDQ+jCywGolhpbcQXkYf + D8Ne69hxNYagjbsIr9Jr1PHjSgoguNs3jMRW7vj0cG4/vfjD53IY7McV4ue2IPs9cTzacKlHSaii + 7/55SS/vbkeIO7oiKsJpyRdvo4DHzmiJxdEYcfOlqBBcFi/m1GTFe5mYBRQdPpFfPv315xRVzzwh + JF+PnBWXVwOCI3aUN6399cvB/ecfifnNU2elpc2PH/H0zccG+BxakLPThRGl2SWzfhlnpXGkFUv3 + 01sfZ+/RIDlIAyp/83++cLIAQt+byZ5d1t4vfwdrP03MMHZbPmTFIADfLlyyjZwVGpaWUmxaHzxy + 4ShIeOL51fq0KjFthXvCp0tpiZBi+u2fnMqIRVsewLcfTB+WNib9LPsydKN3xcu8OaM5D+8Vqh/H + BTl885XhmwcoR3udY3m7p8msxKxFzd6rqODyqZxscwXQLbY5Xk/zp5sCpxTh+/6Pv2kq+Rmib/+T + bXt/k8xMMoVfv5D96bf+8tpv/kqMcnsv52+/GnD+qsg26jgfU6S7v7wQr7n8LLnvNQJ895fOga8n + 4q+efDgmTP/4u2QthZ4AD02U8Rw8HnwIVKkF6yC8mauedt3MHHOLqt6n/6Vflq2FIYi1J9O//WD+ + 6WuMgsTI2DagWjJibm/h79+tgP/8119//a/fDYO6SbPX92LAkE3Dv//7qsC/b+nt38ul+G8m/rmJ + QPtbnv39z39dQvj70zX1Z/jfQ1Nl7/7vf/6S/tw2+Htohtvr/3n8r+9c//mv/wMAAP//AwCkxTP2 + 4SAAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7be211a83807169e-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Wed, 26 Apr 2023 21:55:16 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '311' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - 665023d2f6ce38148bbb6db93c66bb82 + status: + code: 200 + message: OK +- request: + body: '{"input": ["Sample text 2"], "model": "text-embedding-ada-002", "encoding_format": + "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '92' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1R6W9O6vJbn/f4Ub723zi4BkYR9x0nOJCiKOjU1BYgICMghAdLV331K/7t6um+e + 
Cx6EWmHld1r5j3/89dffXVrl2fT3v/76+12O09//63vtkUzJ3//663//46+//vrrP35//8edeZPm + j0fZFr/bf/8s20e+/P2vv7j/uvL/b/rXX3+PA19Q5yVYKZuW7ix1zX5Dre0xABNeX2e5CviZms81 + 0sd7il0Y1XxL8bzt9YkEuIBShzOyRHOYzvviHcJ7sOcQ3y4LY7wDOJB7rY9VVxe80ffjEppne8WO + dMf6qAmbEKjHViZygrSUlXZ5hkrRC1Q1PAKIs0ENOOJnhbbCbklZthVjyDYSILtj1nsLFTY3sL0J + B4qj+w5Mwno0ZPNq6jRQYBPR7XlW5Jfmd9jdEDNdYL3fwPQcytRA5MCWtxfYQDIuBtWG5KbPqXAZ + oXZmL1S/RSdlt4tygm53wNSQhjKaHq3fQLFe3vhg3a/6sr5LDv7eb0GlTKduVVYJ3bwzql1cgrVQ + +wSeQs2mRhJZw+dsKTc44scbq3tzieZtznzoDlpBHUfGw8yu7Ax68/lAgiK+PCadphkYu7JGrNF4 + j5TMVaD9sCV6YXFUdYEQ3KA2Nhh774dXkSZWbzB6XB3sBTUDK6lEAX4cN8XeoxMrdpy8G0hl2mAD + 33iwuscxgzgSUsLhG8+WxAhCCdHPQg2nQMPaPlYXdst1okqf0GEq2SrI19exxepbGyuWCkIJHHQ7 + YV/lzYhNSxHDodEgPtz8MBpVSWqgfEkQqQ7eNVpfwzGE8SoX1DRVzhsDVSqlOn3qRAyrXp97pPhy + kSU6NQYVRuQ1HE+gUDcX6itXGbBHFyMoZ+KAxGyn6Fx492eYXbMXtt7bI1vTSY/h3EUFPvgtGdYy + pSI4xNUTFQJ5D+vrWfvw4oEKuw7ZDlTcXV3I53mCA/RRKq4vzhDeNuKeBs/rJVpdEMXQ/CgYu6dT + AxYZdSLEzm2kVvyWdeJadgbFpLewtjmI1SRA3oSH7jlQ97HrqklY7wYIlFuNbfaA3vjxtzZEFGo4 + /X6PhdSfXuJ2/UKWJp3B8pT6BAofN8NurPHDLKJbAXfzPqAu6V/VXMHsBu+FfcLGtz9X93Es5f33 + kfFD8hgJ4g+EbuzoWLG5x8DOSbPC6LRP0ZzzExg5nrehv8mvBJrnTzTVenaDXsEfsGXcn2Cmk462 + 2+3lQR1e8nVKr8kG9jO6YTUzzIpHvVhDfeRK7HAvC8zZ+VrD8pL41M52irfwn+cJMCoqBHwgqvpY + S0/wvVYKWd6aX7F8CRVY12BCIjk73rfeG4wO3EidZitFJG+iGbw01KFV2N7BuufPBO5R8SRiK0rR + HO9DQcYhcEnz8K7VwqaohhoNML7f2EMnRFIlKeFPGkU78xUx5TiW+0UxMXWybvYovZ4ghCfUU1z4 + R31h+L5Kd2VnYmMwi2rdvRkC2/VEEUBR53VVo8fg9mwnxBM7S+mNaQbQzssLBzfTYXxlhURmpmMi + iTl1Nas0KPYwXgTs99kJrEPfJbB1o5nsvZgxqjyODRwz4Y1xuCsqZu0/MdSXlOJDXTTReNQYgc0G + mVRxT43ODvBcQysTW+wIHyXlvTcw4X3CCsbH4zNtrRIjEGeZRfNSvkVD/u4h0D7dhv6+z64yiALG + g7ojaapC9u13+0995dNr0lm8cSX81oOazvxEbM3CHFqHwx4f9I8d8Xn6LAH/hiJGFOTRrKEwh6+5 + pGjPvVq2dAnpgJCPE8WdWuvr8TrYcG1djxq1Uno0xE4DuSnQqHK+P9OZdyINXiTcU1WTr188NRpA + SvQhUbM46dxEzQjFEN2xtb+bwzIddQFeayT+9mtFL8aFwI3CDHqQy6PH1PvjBA/x60nt4DFE6/JQ + QzkWXws12FuvFlFyBHi75CG2LVMBs1h/aniiukaD81EAq35NOGgL7xqRYC6GZbjYNvykZksPaal4 + fF91OfhQaGANL8dhed7GE7RmppC1nztvWcwrBPvd6YJmL917ZOuAGYAV5EQ2D/u0171pheGcf9CK + blM0tfZRgm3SqOR9NI8DcVjfAJyfHKqnflvN7hpCGPI6Tw+t9QIr4oYVTkJVIjZslYEFxcMGJEkl + InbZ6w8egq357tD0wBBMPz6S1NHA0eVcRGuViRC6EV5x8Ga113nGNIMzlDqyGgPzvv13BrHV9Oij + bD8V2Qx7CY7sssHGMIcDEx73HsRfoBI2i1Ot56zrYGhSgrjdZY2YHvcKnELFxd/1HpYWjivw4ntM + M3OzTynadhpsCt7CyPHv0ZzUNgTHj5/TpEbMmz9SH++cej9gm09aNitHjwAx9O9oSd962sWvzSiV + BwsgTksoG5eTzIGLFV+pfXUPA5O4QQIhr/LY9wfGVisQFWBQypO5fGfVFI/rBjrTPcBO5DVs1OpP + AUcyL0iGKIqYVYWh9MVXfGLY06f3K0f7qxIX2HatNJrbdWng0G0CbD5XpncX/IEw0NUaccH2ODD7 + mUIQeTcZ7fg6qqaMHyXohPHzD5/OhxkRsJXmI1ZXZa0o9d4i/NaPT83xoK/XUXMhmrSFbMOdMsw/ + fG6v3Uy1scMVpZWiyU+0ztSQl5dHw/eph4tYMcL5Vh+t2jRIYHHxiI1oJ4JOYI7462eqZOiUdtXc + nkF9E3Sqq9IwsCx2MnjX5yvW46muxvObKbBXxSONK7vSJ2jONjRmzSL8SEV9xA/VhIkkRAS8H96w + 1PcpA473VDHeNe+K/N5/bKKAcLpj6BTitYCdkjTYfPXSMO0yW4ObS//CZvayBjZ0TQmlLsiIXPld + RL/6AWZMyLDyrDV97EdXk0zNb9CyHDfR1POyD1QMVWpzQzSwDng2fMrmDgGcpfqs3bgc1P2MsZ7D + OJqtvV3I4zU84/uniXX2VtcZ9BH3wP6zOaQrhucS5O+zRZ2bM+k0cA0Tvvn8SA9H/gDWbcvX8J6G + HFbF7BUtwactf/WQZYnLoX5fKw22ihsRiR8yne1HYMPEaQBWQGBX8/P+MOAeCg11UykDZJY3Z5iY + Q4+uqWOkwvZScMDbIAOjA5iGgeDeh57xcKnOobVi4f3dQN4vEQ2c9BORw/pKIHzhN9rY5Sdi+Z0X + Ydmeb9i/bR3wURa8AV6xOxC54/JqBgAIEHRPSl1zOui0uYQ1HOdx8+MrsNAxuQGdPkrEve5Nuvbe + pQBfvKCPr/5eP/o4S9v0eaBKEveA+LrvwvvtUlG3m2JvaWG9QtbeciQcTk/A5P0rgXO6f2OHVq+h + yAq/B189g5XLrmXzPbv58Bw55Ps8F6zdXRb+zTc3rR5IRQ8EHg+xifHySbwl1g0TQuGgYUN3an2U + NtQEL8FSyGcjnnQ29+IMC6zrhB+aGixi2RLI7/YGPpyBMdAksDpgEYFRM4Rvth6yMIYTE0saRHY1 + MKu6hZLIGQsCvker+fDYQ8lu6w/VlzZm882WEqnPTx5hnl2yD85u4c9fkPotflLCULcBeph12CZn + 
RydfvgHf/YEqa13BUj4+M6zqIaHaJfe9z+vcwp8+J9wtVaL1DoYeDldNpZ4aymzEB7IBRkgA6fjE + YutbGxrYXUKd2sM0pbO0aU1on6hPhn74iu/5icCS5Q6RKooBy2I1hwR2V6wl7dNbPJ3bwNTZIaqO + 9pCu55DV8JFcObIZ+5XNTt/5QFKJgZg48QPLAafsR+22I7A7S+k8Pl4j+OojspPFTzULmcjB42tU + 8RGVd71/a1UNr6+oRRvzLVckNIIz2I1nBck0lbx5oywJjNAZYf+zauAPn/300iGaAm++h5UNK5jb + SPIvaTRbU5RJj6Q/oM0J68N6GuwNJCgOkawEqzfX+5cNY15cqHNctWqKHNDB08MQMDpbnsfO85aT + ZvWxw7/9tu6vibhvxzKgf9bjcn+4UHF4n6z6Ix0YBsMIBdZ42NkrPVuG4ejD0K9Dqu+euTfIE3cG + ennjcDB/jgM7r7YEskBZ0fvmhyl98UYOn+XHQRy+XQBr23cMj/f9ierZRWLL3OUmvKw0QJfw+kiX + 5SGZ8Osv0XsD+HRh+DjLZrys2HlZkff++mcQ5mWGZPQpBtrHxxnOVnynNqdWw5qK801OdkDD6FjQ + dOqadAQWG49Iun2WiHXmHMLbxdvSb/9X0y0/dUCkOJp21/OSsr7gV0jXDNLk9jlG6+4NENiNKiVC + j1SwSJdbBjvutKFGFNjsq39cGO1MA2txoESrs0E12Fy6F/75HbqStw+uDS9Sa06LYdoqSi1/9x+1 + PheVMVrZClCv+ZOwy2ZOWZRXBkRchREI+7ViZYTiHz9SfdrZ3sxfFENm44Z+++lRsZYlCeD0+U6t + YkmGxXbrAmruZcWe79GhwXObQ241CzK/fTHq/RFocCBlTrGMAGvF3dPe82dt/PInHb7PuwFTfqc4 + OPapPtNXbIPEL3yKFrHXGdkfDTloAKTOKVurlYuPREqbwMG4YeOw3BvvBpFe20RCMGbT67JX4Ho6 + Eaot1jqsUhQ3MMXpk2y+zxtT4TFKm7aZENF4wqgjP2agP14x2itbZxjyuyzBZxwhjE+0ZeuWlZ30 + rgKOlHzDD7/1AnqZcKT86s353kAffOjAqA82hTfrsYqgKRj/xvPxYD4FgPTGJltX4sD4UsIVfnZZ + ifb5bFfzZIU9JN5tiw1p0KJpnU0B2kJbE+B7uFqs4LrC7NhuaeBF8zDVO3qCJzu4U8/nt8O0KeTu + p6+RHOnPlGwKX4C/vGa/1tBbWb1qsK73E4KuIeqE3Lz5p48xyvNLumg3r4OX2Qi++Cow4h7HHGaj + OlF0wtVQt45G4Ddf+vqNLqLuul9h6s9Pqu0I0RkXiDYkXrIl/JSP0azVnxL8+Nq3pmCg+WxuwOMz + KEhSnxMbQ+CKEumJRzVsqQPf2kdR0B9VTOZiuqQscH0TbPMkxqga/YqxWvXhZOOUqvdHzwZu2ENp + fV4y6mUbu5pHts7wMTbHP3yy8N+A4J77B2y5sZculXUb/9Rjl7RNSSQ/Rohv95ZasugMw2jtc7BZ + tYzs5lQZOE0QTuCXb1n6VgG74qp14HZxtkjgWVUtt9O7gRvo7b/5yNlj3aH1oSA6V6xEoe8tp8Ig + EuoPBWrHOIsW3HAhBIV8Io10aoYZHxoIB0m8Y91R+nQtnciWo+Qu4r6W23SV/VaB1uHm4sc3H5sP + 5pOTrOerxQFQABs2/GSAr1/Dpoe9lEr5iYNizd5kd9jZEalvsIPfvJDw2/4zzLtDTqCyvjVqRrns + sWGzrUHUriJ6zS1Kp4ejrXAUIoyt7XEC/UXqczh57ojeQnaKFmWxIDy9NmfqxosD5vNSGLCp5hs2 + bqmS7qqmtqEvtjya7f2LzeCxKvJP7zmHfKz+5D2lu3lQt4yHYUROdYJvPjv+wUvyLqYE7lvZIez8 + 9r1VewkC/JjYRfL9SfRFcoYbdPP+je3VuDJCSMbBl/x+kOG0Dt4aC8QGJj6eqfuwuWjdsr6HPCcz + jDSeALJlfQfTnAzUmg7zsFy5isAuEc5U4bfHiGRegKR3VRyoqc12RGHjNZBahkk2eX6JaFBcbMja + JEdC2zY6u/JyDMVHZOFDc5sqcqmPifzLR4zjRR2E17NG4JFcOMQ/T4gNRrdZJW7CGtlv2yZdDw2M + gaxnGuGduGQL2ys9pJE3E3h48/o6n60cKP78oJZx34K5avSzHHAniIT1EVWrYC0J9Nbl+O987pfv + ZBjq2Jo4Aywm0DYwAMv9h3/V+IpfAoxgd6GHfhdW7NDoBTgMN0jtj7kwcrwOLtAWLyOPca8Btn8h + EzwOc4fN4iik4xF1K7z1akY17lEOsyqtNdzudgpF0LCq73qcwbrpO4w+91b/7o8afPEC8Zq3ZdRx + XHFvOP6DeupF05lVWj68EFehimoQ8Cdvha1p/fDf+6PvV3cNqRlzk0c9IudQvBKBOnvFBeN0nxIA + XeIjTmhyfT5YnQR//PDrT07e27X01fNf/Ms8Jl+r8I9f9rtzkk48b5wlphcmtTe3AEhqYppwb28Q + eXngkVKiyzbYOsX+u/5Pxm5MM0HT3G/Y9slVX/NV4WBiaD21T4M2LLV+TsBW1a+kEbmNTi/Eb+A3 + 3yKNW1x1FqtlBjMQ77HHPjZgMMmLn98iixZZ0eLvQwkeBz5Hb79FlTA7Qwb2vZjg4HTH6dy2RvHz + ixgZehPR4LK3QXQQRnSvsFetoIHJn3zH5Qf477wfXYozDsfTTie8YvegiF8vqo95xWbOR6EUjHP+ + p9+Zm157+M2bcHBtm2HW6lfx4wMCUjVj7MUbGYyflkUNd38CX39W/PQqNgUYDxNBtwYK53CLv/1f + ffX1CjXQulQTtnswajcug1Z7jqkOzkd9Nt8lkb55CQ1GF1Vrcn+X8MSkAmMUGmDBYYXkoYMBVp+X + Vf94OtwAo/xY37xQjXj9bMWgIDHG6ubcDaQDAMLhbMxUUZmuE/2aCNI8+AvGdDlVHD/cXbiOJCO7 + qrNS5mvRGXYN2Pwbr9bQrWHwvOfYZmURDddo54OttB7J/uZM3neecQZke7z++CRaCTlzMH/HFjJv + Wl1NBbJD8IoZR1VU3j2KrRsHF/HFCKcOacVu7UOB59jYEt6wh4FtIwLB2x9T+usn7mBehZ+fQEsg + fvQ/fHW1OPnrD9+ekOhZDgNdr7HhW260s6rwBOx9yX/5m09pFaYGfCTdgSrfPGVtu9wFwbjmRNJQ + 6X35xYXXvXHB5xEzNnqSQwCNDyp1hE+RjpckgBDGTKBKzgeMbR13s7+oq0k9fXdP2aHxCni/kIo6 + aSJWS0jeyj50ui395qdsXleFQP0Djth+vEJvjZEd/5mHAG/0U25zdXP409Nofr7BEngTARvk6tj2 + 3jqb/ZI28Ps+VFx2LZipOJ9gHwIfW1DR/sx3ZF7iLvRSF03ajOvdBaTefci2McRoiVhfwOvevOBA + 
ij0262MSgqO89NRx2+ew6GccA+8ouFSLgyJa3PlqgvBskD/zheXrzyX+rIx/5gtdbQR/9BzWw/nu + jec3UH7fk/qnCuhEstdG/s5LfvkBm/mLbQJO2dlUHT6dN2eXroc1Ss7UHXVTpwk9zLB/bCU0SLQd + xjXU6l/eSf2lXKrxZq83Ob/KFtrMzwPgzHdP4FU5FwjwrzmdQjJpgD8eUmyUepwuBVJOstJgiyor + LofdZb/voCVcj1j1gJx+8z5NynenCIFleQ2DcBwb8PVf1Pvm/eNhNslvPoLV95gNM7C6DaR8ElBU + S7VO5jPOwLmHGVZkzoiWGqvZrx6sLAVJv/MqDlbKohH5fIzB0igfG26QrdNLplbRPFEhBz88BXfx + w1YV9yYsicAToG5P+vjbj++i5r7zPNGb7PhYy8W6z8gbI/dPXgRvC+fRZ6ca+jolWw1883eypVoe + sfCd9NA5yU+Kglmpdms4JHuY9Tfsm5t7Shdb6qHjPVT6+FQr+Pz8JRC/XfrF/z/5tRQfQ6ojpFb8 + +i4FaGbdGStfPOieaG8CDbxdHIz7Evz0mSRcUUjKbaV6/OtMN8A1/N2Xz0qdgmXu5EUxMLa3Vum9 + el5GEu3UAPvnwgQfUXI4iAsUY2Pc2Pq6As+EQYsC1GBCopl/jsYvv6L27IfVgtdPDDUFXbDZiAsb + /WrugLwPG2yUaz9MWiWOf/ZLt3fjgb2FQ7c/SMTG7lx+9JXtzBl+uOqI5oExQDh37OHxfFGJwPwW + LJJTJbJb1CvNv/7nm1f4MC9vwne+W6bsFEvn3++xkfFbj5GXAGFwCVUaypyRLpvNNv/x30//DMQM + qxMYWYfxk2qb9Dc/A48zq8jwzW8moRk1uGseOfZdNkbjPlZPoNY3DckyKg5UUu4a5DvJwb5qnjy2 + e2sJ9O7nFqei7Xtr7O9CuT5tX188GdP5AMD6q5ca7XGqlnuj3+CnbA/Y2EPire/xlIPUX59fPFfS + OSuMDl7U2aTaN0+fc0uJ//SPfidbfX0So4T31cxxYHJBtZj0Iv6Zz6pqzfTFS0wNtmf6RGJJ22hh + e7uHhoMeSCpyIxX6q1uDZ9QhwuLJGPhhuCN4d+IOSb2ZpH2ZttJPL1KsDbpHIziKv/pR/XZ4nfm3 + MYGynmvUu4sOWKwd6n/4SP0tIX/4EP79OxXwn//466//8zth0HSP/P09GDDly/TP/zoq8M/kkfyT + 44R/UuHPSQQyJkX+97/+fQjh78/QNZ/p/05dnbfj3//6S/xz2uDvqZuS93+7/I/vu/7zH/8PAAD/ + /wMAVLQZa+EgAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7be211ab1afd169e-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Wed, 26 Apr 2023 21:55:16 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '172' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - 5b810c395d48248e473bc4e53de0eafa + status: + code: 200 + message: OK +- request: + body: '{"input": ["Sample text 1"], "model": "text-embedding-ada-002", "encoding_format": + "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '92' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1R6SxOyOrfm/PsVu/aU/kpFJIs9AwG5miB4o6urSwQREJFLAuTU+e9d+p463T1x + ALGiycpzW/mPf/31199NUmb34e9//vr7VfTD3//j+yy9Dbe///nrf/7rr7/++us/fp//38isTrI0 + Ld75b/jvZfFOs+nvf/5a/veT/zvon7/+NuPiQ5nS1Anz5acM4uJ1ZNpa6bouyZMjLJdQMZs0H071 + gcrA3keVHUtt4GxxmpbKpd8Q5urcTppOaEXZRcct8U/hJ+LRfchQhzZv4iqmnoz4sN3CaV17ePPq + P15npqcc9Pf9yLCTWmi8r4mBLoLgMxWNqKR6tKTwcM83rLRb8OZ4jLcgX4URN/ux8KgSvxtUsFJl + ji1XiBfJs1dO/JwwcyO5+oS8tQGQGluyS6ibDOB1N3AIDfAQNarXy5f4iE5iTpgpd5bOk8GtoPH0 + HsOptJOxux9cqNitYI6nd2WR7zcusGJ7JGpq5px/KtyjfIMRUzeG7bHJkVto/O1AVxpqk1m0BRFN + SbH/vdeHi6MBXGZXI/jlC5wv2PWIOOlvTDvOQTJpbiSjOiUWBn67JNy/Xn3wyyUi2HaSjkVXVwX5 + yBFTYytLhsxY9LBYXp/EROPEh9rYuqjFqkWiNiiSWeR2C/JN7Yl/Pr8iflyNAbCk7Yg1r4/JpO0M + G8xTsGGOfa35tHx1Mypq4cX2/uB59LA73SC1IoMR65B3w1IPd0qRVwqx/fuKT055zWVf2nV487pf + +fRQ7Roqeq6JqepFN54Wnx088MckesuJ3t+PqyvE75tLZ3o8JPMhMlr4iF2GZWU36/w45bmSc/HI + dLy0+GQ3VoMEB9XM552YTNkhCiFU45KiTaXpq9qOJbg894jo7eGgz6zoCpgyqhHttEbelK2XPRQv + pWLO+/XoRr69jhBErUy2w1R11K1iEbQNbYlNLi3nPpF6KNr9kaloj/mYvhoK8Ts1iZvG124+NMxA + 
S6FGeNwYjc6mdyOB3jIdo4Sq5ci3wQzWLgqZKgPhc56QEbFzLBNVjktv1q39HQQtCEhc58vvfCOF + o9V/8GrLRG/EdZWBtspkorfhAk3Ds/VBavSQOc6ClJOmNT3o7+xIdPKu0LiT9mcYqLRlD+uglnxx + WI0gLsUdMbfUSqbMWFMQUfDBrWaXXcv2xxEuihQwa65PUecf7zbo/RIRuz28k2lJ9XAhn0WJqcl0 + jJpTezuDoF08st9Pli6K2b4CRweN2K3aJDRcbRuI5lRiu+tcd62elwKasl5j3/pG7RvWNdSZb7Mt + fY1Rfw9e7YabqoXFSyZ7rHhMdzADATOrZ/eo74bABr2bF7jE4Qf1lXWtwdkqKv2eRj7nPZ5huoQp + 3by8mI+ysLuDYFaIHA2ZdsxE3oyGz+fAtFVYc25eZwOma3JkhlUZ3YgRwihXypoZxUg5m86rCriG + MDG37Oy1b+loo/ipFsQx4lXZiuvgKhedfyJeLIlRf0rFUZaKpiPGM4m42BQTVRabycYoyYJo5NOB + /v4PcR8z6bgvhi749InpSp/LaKoX8xHBZT4Tc/Pu9LES4wJC9dwQuzvPel9ZCwEtlrcn80f/xbn0 + DnuwrBMmuNq9kikw1ZtS9ZgSo3rb+ojjZwHdOtdYGNmWN873cwPB4aUwL/G3nZhnkg2N2ZlULoYK + fUBUJZCjzsALYSGgQWu2LgzDAqhQzxKnsRbsAMKsJbozyeWc7ycXILUV4i7GRufF9ixDcmcYr4Ps + HU1ZPM8/fGV7Zn6i+Q2LCqzj7sFIwaSOfZS4gGY3AXMVKU9md3J24EK5ZNvDAXs0h3knT+mg4sXz + mnv9/Vk1Pzwi1iy+SiqkwRZEFH6IK9wT1M+fvQyOc26Yux2R1/zqoz6PFcPv6ovfnlEp/ttsmRXd + PL3vXv4ZhdbKomsehwmT4HMHTRFuzDEQ72YRoSUSsHOmrMjijn+UAW+++8H2YWR6IjvlNTQ7puFl + zVSPhviDoajhxbCzeKFRXtUAoowHLO1x5VF309TQaOcb28X6q+PJZ1dAEloiXqFHyHlSdTtUPJsN + nkRJRZ+l9RTBneaantTjs5ycMijAIujD9M+iTJh0kEK4CODjObotEY2VRJCDu3tjhhWZnRhebCo7 + 6rzHyzL0EV+wdY9++OBPgcbnHPtLcDe+S2JPtEs+mmK1CaP8Tqx+LyQ0dz5XFHf9iuz7riypgoYW + tNHBxLs5p6TvPmxEl0+bM3OzufJZSbsePbaSSHR3JZaTrYU5DP3KJMbr9vL4wjmHSBNzkcXVxtep + aIsimPfpitf0dI64eSyO8JkuM3OzRdjNwlHK5STOHsRVmlGflVrH8P0+Mcpli9o8vNfyZ/Uu6Glj + 7vUex88c9I4azL8kozeHF7VHx70VEPyObH391SOQC++CrjYXmXfSPi9Q8hAt4s9rt6wF2wkgCWBB + jAKPnPvHowvxKwjJtbHVkhejlAHs9xIhz2uuz65wodDo95Bg761EQ/2UrsCSpvutVzc4t0kF8+hu + KRLte0lF1Li/98xUX3k5yo9RBH+tMlz0fhxN2UnsYbrdDySklfadb8zgNOHyWz+G/hnmjw9SZ3vM + /uy9iPu6k/3wll0/+y5hIz7vIK4OB7oOG+AzO1sCTJHVEFzPV9RF4qWBKr8/sBifomjOHcVA2ux7 + zLxXbzSzQ5WDX6xlvDAdu0uHNMxA3NgTO4ufUzmzMwGkNy+PuYqc8p7vmkJ2hWpippycEfc/rQSO + fXl/+dLlfKEKM2LpOqPyFqceP8qqCrgPJOKPiwKNcpSGsHKDgKS+N3j88yhDkLPgTgda3fQ5ViIB + vvqRuIVX82bHXiKS6FXGY2xl0TQcZUCwJxJVosvTew16Hij44y2wEm5rfVhelwVoCtyY8+odfT7k + VqX85ntYIYt6y5Yo4qaj0GpHnG64dEmLHt7jwH71Ml26qIVsvX8y7Xsee2s9NXApA4lop1OqcylD + GIYqaJlh6auE6hdpRkt46My7lkn0Pd85uOtdSHaryuXf+rlDa2Qdc9y573pLrSSAm+BSCWuat/6U + fAuhvdHxeNcq3ortx4bUfXLm3WCORouFLqTODegkmjqfbHzK4LHPMUZXLY34cbiOkDyWFvEvt2fU + p8dnj4qWHOnzIhgRT+5e/dOfzBNbuexlPC3Rg7S7P/px/vIVGtomJLYvRGh2XX6EBVogpjdtk/Ai + cjGqgvLG7G6ToSlQxxbqu+cxvbHdZLIVl8KXf+hnV9yT8b6oa3C0jUGMZx+iL56KKCRhR6xg30R8 + Go8GxHXsMFdXu4QVl1gAwZ4ZXj77GbWh0rpwNKuJbJmdelOw5zUAOY3Ev7gHfdaHCIOZtE/K2zmP + ZjHdSCh+XfZUXjw+Ho21coYpkFdU1lqtY8U2k+WhOr2Y4wg5n4Y4u8n652Ox3fmWebzFQQWPrSxi + MTaW0TTQl/tbH5qjMSmnpR4aKN/4CE9rF5fjPRgaOUkpwYuyWHX9V8+idN/0xLDEORq0amfAdPaf + uIvX725ava4GXMpQYubm7ekzy9gd4vqI2fc8cB4NfQb6p/bp0vSGkiqL1Q0sO3EpHKebzhedXYPg + CSXTRrLpxmpOM5SNqw2d2XUfTTVqayQf7gfmCkrnzfn52sIHKS5RkZ973/WRwXqIlHm32zGZw1AC + tMK5yqzzKY/4Fw9BTrwSr0efdLxorrAR5x4T936CbsS24KPlYpiJTTYNnxkvCtTsjQ9JVscAfSCX + baS3rU1s0jh8GFqnRzGVPsyTJsNbT4p+Br6VCEZJ+uxoeLH7P/rKseprMv3wrd3VJd7U2NLHzrvZ + qGAv9PVbTVL9+Lwxcky0dbLv5t/5CPcuZ+5itD2qy5kN3So1CBGE0euKle0jTR5rYoVm4M1CrAZQ + ZxeBCk76RqMFzg2ksn8zxzJuyTQsrnfw11tGrOnZltyY1wDhdWkz/3LTIl5MlyX68gtumhuN6rzf + zUhvioC+pNUdfWBY7aAo5wlPK81Cs6h6LdTH4Y37+Yyi/ucXpvh2wtxWKPrjh8w7v9I5SLcJX3Rq + pfz8sB95C8SK3LTBfzKdblmLvO6zM0awrmfCrKhrvTnWAkMxb9WLOK9hjYbM5uMPr4n79UujZY89 + nKYoY9/zo3dmqi3htHyEXz6Cbqqf0g26Tb9kN2UX6lPNPQNJHzViu1tMkqkufEOuj23P9HaZ8unB + 4xDkxCm//m1bjph5gLhznIntK6beV9cyRNVV27D9F48nW9n2imWSMxWTtIumi2nK6Ov3qKwhtRsr + Lz9Dks0eRdeiL/lU9iIMdZoyN0ViR396MTV3B/xcJ/uS366kQLg1TKZx4ZbMYvt0Qb7bFdnj18w/ + l+VuifTX8Y7X3DK6b/3RP/ip3pdPNNS16oIZEWCm1t08LpmOij6Kvvvhi859/WTDgsc5IxB10aRt + 
NFvh6jpkHhrenEvn9xXgivZYweIBzYecVJBLhsFUGT31mdUoAItsPsw/Ld/JzBjN5K9/pfzDuN7f + bWwjcVHEVIzrORpq/9lA8WRLvFhMe33EgGXQm7f31VerpNV3mxmmSB2Iq7y1RHybDxfVp76hMJ/8 + jhfbTNqw0ghYXI8vb7qcNhiEvfogptbJ+lCncNw0ZuV++StH7Q9PvviDV9u9pLPPUt6Cu/b2+LWl + 74j+6hUX7E5X8s3Wx1k2JJQkXciscCdwKp5uN8iVZ822Ufvy5jgU7jA8DZGosl15Q5ZlGPT6dqUK + 1fbJKts/MEot32HYMxdRYxWtBJemrohhXJ6o5/mth6PFK6p88kyfLmqbweUzJGy3vofdeF9bOyQY + mYFlRVZ4g+NPAUNurhiuA5lPMG1scMjkUSYoWiTqVbNF4vg5sH1YfX9P3apScFOLn5/Ue9ktDBjy + rUynMZc6vnDcClhoERxszic0DcdZAMHsRIwkUutD0D2XcHkXDdutIo83cp/vkHlJe6YTs+bcX5hn + eSG1B+aJ1sC56We5nM+rCEs4zL3RAu0KwX1/+sNvLTtbAHVYLJmaXnA3u82zBr3EFV3W2dObanVn + IMfCI7M/49Wb2QlCJD0rRrYDqcvZXV1FENfbDSOCEHh8VC4qsDo0CDG7uuMmtZeoTl2fOdWO6WPq + uz7EpXv74mPjzQzSHvyXwIlRPAjinz7MlWFQgDQf5iTc/GwM9OUbslu94nJaXvMlXMoLx3BRsE71 + j35HjRafiJpeaMeKywGAb2VCBe+Yo/qw3TfQLRObDl89N7vKeFa6zcSYtipNtMQGvcvizB3K1Mst + 6Xe3yw6q++FDVNmZOTNF2P74A8fe61NOy7tU/fI45hbeDo3dVbv+yau0i2AkortbBOsvfuKFIX3K + H38r4lidibo5WfqErvUZjo74pCv0mBErdqEAHSjlHz081CrewXLBZppGu8LrO+/mAt/ZAtE/fh19 + /awE7fbMCbFKnFB9NfdKMZz3+Hnxz96k7S5LOI32m2lHvtAnyGcX8rUjkN95o2ES3OHhBBlLadkl + s1hoLcix1jFTPxy9wTl/QlnqLwpd05MY9fj0yoBVrs+sAK91JmFLQl8/h8XlNPAJdE+GKoufRG+f + WjIEN6GAfCX4THcIdHxUHqqsTTbB0rd+ukU9ir88gQqO3yDu66krL3iS0wAxn8/NW8sgicsbvkTK + mvP2PBqyHFxPX30Rd3zC26uymPyBmAhHnEvqJkSP/duh4s3Syok46xyS2+v81Sv7jpuv1Rb0blzQ + 7nZbRuyjKVuEh5gx/BqwPtWUnwF/LIP5p+qiDwM93JHU7XdY8IygG090NUMSSAaxmyT1xnTuvvnk + tmc7cWcj/vMvjEkJOyzHhlMlWN7l1szfxPb7nT7+8peYwYuYGU0jtuCNIbc2TfDGfQ5lvzusK0ju + TcT8izvp7CjbW/TNa+gru7NuGixJRa0veMyKHN+bLoaMQc7CO9vFYlEO2anbgrMj7JsvbX75ZgGO + urb/jBf1ive//ITOdJV3PLmgJZKeNSPGNx/48cWmqihh/sRxt4F7e0PDsNHwdNrmHdXzEqDOsM3M + dPfi07CoXCQY+YJsWZvocy61ZyRsE4uZmbDvZn0qBDiBMNPkehjKnguqoCzE1xHTmvQ6Ty6NDR1C + b2Ju0yaZwya5gv7c9RR996e39ncXnTazgKXufdDFePOQZZGPLtstn0rHzIAGv/XBEj4q5Wug8R2G + 5nWiDCo34v6RiWgKpBXTjqT85gdCBc6+upLIdg98VuJIAKnRQrbvHbOctJBfUdG6MXO+53tMXzmF + bvVcEH9aKsmIMwOgqtGWKrTsojHVix5S5wq4XeRrNNTrcInYrWZEO72GhE0SqiHIgpAY+U7iQx1f + ZXD8/M5cPYy98bffjmGumTatonL+5rmoPrI32++DNR9q/9Oi4LQaMJy2sTcNiyBTjsZr/OVv3rCc + 614W1ANhxlOZ+DRYowoPy62/+PLWabzxfWD3qaCyZusd+1S7HjV7p6J5esGlqK9kCvl6azOiXM/e + mCLPRt/1JbrXRH/8FwSnecbfPAK9Lk7aoHZXlWy30pNuWraWjz4LVyDemooljc9y/sMH+pKijM86 + iUP0WZANwy8v8L79hCXIZe3gEbGeM+mBpT9+qoWt+NW/doDM+FXhMW325RK34Etc22B6NNMkGqtZ + G8FKcp1Zh6JMVjUqakjVzZ14sbmJRvlyOKJU14/E9oclGl5ICKDRkhMum26tD/YZBFS9QWD2fhrR + aDn+7ZcvEuwZY0e/+TZydEFjpj7rSf37P/izdZh1KEN9WOrDiHA1xkxvkJWMeN3doXjJH+bPxlBO + mtJXKE5RQyd+h4jqpBdkM10fib1/6N6PL5D8OF8xzMa+W5tUFSG4SC3NbJA6tnC29Z985scPc+xm + AnzzXCwvcOuNaRCDbEbGjtyE3vcmzU1kYHHB2TZ4LPjnh1/C3rzRBWz9aKyCxx1+ea13PezLOW5M + H5IH2jIi3GdvGmydwkli2z/1w4tKw8CYnBAC2z6Z6rar5aHEFIvrvuBULLQGzPN5Rb55hcd/+/fr + T7h3HSMq6FOh1OGLMVc9r6I+FfkVllxqWOath26Ol5ABy4UrM+9bB/W4IxLgonli3oYPzvwjWyLz + iJWff+d9mmq+chr3BV5889hpeYUCkmy1I/Zn30UDyNVRyvmGEFXiH692ib5TOOEn5t3iVyfqpAf4 + 5d3qvSzQF5+O6Mv3xJS0yXtdiqUL4WX2f3lqOSyvjYi++gA3jYK8lqmxiE5SazHtOD3LIXi/RoBj + l/7R780JvVTQ6zQiqhRuEHUV6Qj5xlXpOjwzj+qkAri83yXTRLPkc6M2qtytuzUVvnzDpTO7gT81 + NsPuXHDmz+cd+GPlU+rpT8QWEAVKt7o5dJQKV+dRrmD07Rcx4wW6zj8l2qJsc4kpXIaUT867Cf/k + Yf7Xz021/2lkRx333/oOuvaX/6ZbiNnRQGnSjTjbQeOrA62+/DjreRDAdDQ+jCywGolhpbcQXkYf + D8Ne69hxNYagjbsIr9Jr1PHjSgoguNs3jMRW7vj0cG4/vfjD53IY7McV4ue2IPs9cTzacKlHSaii + 7/55SS/vbkeIO7oiKsJpyRdvo4DHzmiJxdEYcfOlqBBcFi/m1GTFe5mYBRQdPpFfPv315xRVzzwh + JF+PnBWXVwOCI3aUN6399cvB/ecfifnNU2elpc2PH/H0zccG+BxakLPThRGl2SWzfhlnpXGkFUv3 + 01sfZ+/RIDlIAyp/83++cLIAQt+byZ5d1t4vfwdrP03MMHZbPmTFIADfLlyyjZwVGpaWUmxaHzxy + 4ShIeOL51fq0KjFthXvCp0tpiZBi+u2fnMqIRVsewLcfTB+WNib9LPsydKN3xcu8OaM5D+8Vqh/H + 
BTl885XhmwcoR3udY3m7p8msxKxFzd6rqODyqZxscwXQLbY5Xk/zp5sCpxTh+/6Pv2kq+Rmib/+T + bXt/k8xMMoVfv5D96bf+8tpv/kqMcnsv52+/GnD+qsg26jgfU6S7v7wQr7n8LLnvNQJ895fOga8n + 4q+efDgmTP/4u2QthZ4AD02U8Rw8HnwIVKkF6yC8mauedt3MHHOLqt6n/6Vflq2FIYi1J9O//WD+ + 6WuMgsTI2DagWjJibm/h79+tgP/8119//a/fDYO6SbPX92LAkE3Dv//7qsC/b+nt38ul+G8m/rmJ + QPtbnv39z39dQvj70zX1Z/jfQ1Nl7/7vf/6S/tw2+Htohtvr/3n8r+9c//mv/wMAAP//AwCkxTP2 + 4SAAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7be211ac8cdd169e-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Wed, 26 Apr 2023 21:55:16 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '114' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - d39b785f954e1f1cc64746735c44b522 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_default.yaml b/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_default.yaml new file mode 100644 index 00000000..e8cbefa1 --- /dev/null +++ b/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_default.yaml @@ -0,0 +1,93 @@ +interactions: +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "\nYour + task is to devise up to 5 highly effective goals and an appropriate role-based + name (_GPT) for an autonomous agent, ensuring that the goals are optimally aligned + with the successful completion of its assigned task.\n\nThe user will provide + the task, you will provide only the output in the exact format specified below + with no explanation or conversation.\n\nExample input:\nHelp me with marketing + my business\n\nExample output:\nName: CMOGPT\nDescription: a professional digital + marketer AI that assists Solopreneurs in growing their businesses by providing + world-class expertise in solving marketing problems for SaaS, content products, + agencies, and more.\nGoals:\n- Engage in effective problem-solving, prioritization, + planning, and supporting execution to address your marketing needs as your virtual + Chief Marketing Officer.\n\n- Provide specific, actionable, and concise advice + to help you make informed decisions without the use of platitudes or overly + wordy explanations.\n\n- Identify and prioritize quick wins and cost-effective + campaigns that maximize results with minimal time and budget investment.\n\n- + Proactively take the lead in guiding you and offering suggestions when faced + with unclear information or uncertainty to ensure your marketing strategy remains + on track.\n"}, {"role": "user", "content": "Task: ''Write a wikipedia style + article about the project: https://github.com/significant-gravitas/Auto-GPT''\nRespond + only with the output in the exact format specified in the system prompt, with + no explanation or conversation.\n"}], "temperature": 0.0, "max_tokens": null}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '1671' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + 
H4sIAAAAAAAAA1xSS2/UMBC+8ytGvnBJVt3tgzZHoBQkXgdQKyiqHHuSTOvMpPZkt6uq/x3FWfrg + aHvme/rekDeVcZ1V1w+hfHPSffy0Hz6ff6sPls3p9cHtr4tv2y8/6e3FWWMKI/U1Ot1tLJz0Q0Al + YVMYF9EqelMtj45XhweHR0fHhenFYzCVaQct9xeHpY6xlnJvf29pCjMm26Kp7s0QpR/0SuUGOZlq + dbJfmCfsx/vl6qQwKmrD49XBavVQGNcJOUym+n1vekz/YKMENJWxKVFSyzqJFFbkycBX22MF53RD + Z99/XPJ7TC7SMPFVYBnsqMLSy5jAtsgKHhO1jB5UYEYEYtAOIRsnYZAm4w3oyZZJtwHBRiUXMEG9 + hSHKmjxxCxuJwZcu2JQA7waMSgknuIgJbXRdAZtIStwWYNlDI7G3Op0Xl3wmNqTqkkt4J+xHp6Cd + RBnb7nEdZFbW0hoZVAZyk2zkNEYE69wYrdtm6F3MyJjSZIB4JiPhxcRxHkkROmq78na0gXQLuwxB + O6tACZBb22atj8trnIVvMIQyaRydjhF9AY2EIJspg6xvJI+BGFOenkryNvr0Isks40PGzUu7SCcb + EnOcKk/TrxPskmf/DH/S5sKYx4coA0ZwpNnmzB2xwYjsMC3maEOwtUSrCBvSLjOPCeNERuwkDvOj + dkgRGkRfW3eToXp7g8Dopo8YtxBxTSnzqDw3kHlO50pylM/N9YiaXrhiUVtTLuC/2KYKArVUB5x+ + CgxjHcjNFZqHwjTElLqriDYJm8oklcEUhtjjnan2Hv48vPoLAAD//wMAbCMiNQcEAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7be206beca7ccfbc-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Wed, 26 Apr 2023 21:47:58 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '9927' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-reset-requests: + - 17ms + x-request-id: + - 575539561e8025c9a37920ffb4d1b354 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_fallback.yaml b/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_fallback.yaml new file mode 100644 index 00000000..6ddfe32a --- /dev/null +++ b/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_fallback.yaml @@ -0,0 +1,87 @@ +interactions: +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "\nYour + task is to devise up to 5 highly effective goals and an appropriate role-based + name (_GPT) for an autonomous agent, ensuring that the goals are optimally aligned + with the successful completion of its assigned task.\n\nThe user will provide + the task, you will provide only the output in the exact format specified below + with no explanation or conversation.\n\nExample input:\nHelp me with marketing + my business\n\nExample output:\nName: CMOGPT\nDescription: a professional digital + marketer AI that assists Solopreneurs in growing their businesses by providing + world-class expertise in solving marketing problems for SaaS, content products, + agencies, and more.\nGoals:\n- Engage in effective problem-solving, prioritization, + planning, and supporting execution to address your marketing needs as your virtual + Chief Marketing Officer.\n\n- Provide specific, actionable, and concise advice + to help you make informed decisions without the use of platitudes or overly + wordy explanations.\n\n- Identify and prioritize quick wins and cost-effective + campaigns that maximize results with minimal time and budget investment.\n\n- + Proactively take the lead in guiding you and offering suggestions when faced + with unclear information or uncertainty to ensure your marketing strategy remains + on track.\n"}, {"role": "user", "content": "Task: 
''T&GF\u00a3OIBECC()!*''\nRespond + only with the output in the exact format specified in the system prompt, with + no explanation or conversation.\n"}], "temperature": 0.0, "max_tokens": null}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '1592' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA0yOTUsDMRCG7/6KYS5esqUf1tqcFdSLHoqCIjKbne6mzWZCMusHpf9dCmp7e3m/ + eHboG7ToOlLXp1Atlt3t6unbtbebl5vVc/2wWmiYbbfu+n54QINSb9jp72LkpE+B1UtEgy4zKTdo + J5dX0/nF/HKxNNhLwwEttkmr2Whe6ZBrqcaz8QQNDoVaRrvDlKVP+q6y5VjQTq/GBo/f//5sYlBF + KZw4k71B14l3XNC+7rDn8veaJTBapFJ8UYp6YJSoHA/8d+c9FMn528AdOIpRFFKWD98wUAQZNA0K + n147GRQIPij4BpTKdgSPganwsX4Swloy9Awq0HLkTMpAKWVJ2R90KxQKUGyA4MBX1VS4gUg9j3Bv + cO2jL917ZioS0WJRSWjQx4a/0I73b/uzHwAAAP//AwBPrscAswEAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7be20701ea0a9669-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Wed, 26 Apr 2023 21:48:01 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '1833' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-reset-requests: + - 17ms + x-request-id: + - c9bf165259547ec59a88bd16b5f691f2 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_typical.yaml b/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_typical.yaml new file mode 100644 index 00000000..980a0a10 --- /dev/null +++ b/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_typical.yaml @@ -0,0 +1,97 @@ +interactions: +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "\nYour + task is to devise up to 5 highly effective goals and an appropriate role-based + name (_GPT) for an autonomous agent, ensuring that the goals are optimally aligned + with the successful completion of its assigned task.\n\nThe user will provide + the task, you will provide only the output in the exact format specified below + with no explanation or conversation.\n\nExample input:\nHelp me with marketing + my business\n\nExample output:\nName: CMOGPT\nDescription: a professional digital + marketer AI that assists Solopreneurs in growing their businesses by providing + world-class expertise in solving marketing problems for SaaS, content products, + agencies, and more.\nGoals:\n- Engage in effective problem-solving, prioritization, + planning, and supporting execution to address your marketing needs as your virtual + Chief Marketing Officer.\n\n- Provide specific, actionable, and concise advice + to help you make informed decisions without the use of platitudes or overly + wordy explanations.\n\n- Identify and prioritize quick wins and cost-effective + campaigns that maximize results with minimal time and budget investment.\n\n- + Proactively take the lead in guiding you and offering suggestions when faced + with unclear information or uncertainty to ensure your marketing strategy remains + on 
track.\n"}, {"role": "user", "content": "Task: ''Help me create a rock opera + about cybernetic giraffes''\nRespond only with the output in the exact format + specified in the system prompt, with no explanation or conversation.\n"}], "temperature": + 0.0, "max_tokens": null}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '1625' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA4yU0W/bRgzG3/dXEPcsB04ad53ehg1riwFNthXYgGUo6BMlcT4dVZJy4gT534uT + ZWcr9rAXAZZ5Hz/++J2eAjehDrFHj8OYVt9+1//0y+XP797n6w/DbnvTdts/9reXwzr9Fj+GKsj2 + b4q+nLiIMoyJnCWHKkQldGpCffn6zdXmerPZXFVhkIZSqEM3+urVxWblk25ltX61vgxVmAw7CvVT + GFWG0T+57ChbqK/eXFfhRfvl/eXrKrg4pvOrzXr9XIXYC0eyUP/5FAayk6xKolAHNGNzzF5MSnbK + ZYAPOFANv97cvr39eJd/JIvKY2lXA2b4/j3M8/CeIEq2KRUB8B4dbKTImPiRDDhDT2nk3AGqs7kB + 5gaGyTgyZoOtlv+8J1aYMn+eCPZsLNnABRK3dLGYALajdMsRUzpAQ8ZdpqYUHmco7bynxZpkkBZU + 4g5kJEWrYFTZc1M60sNI6tBN3GCONLuyaRxFyxQqU9fL5LMaZWelcjaS2cVdfiuYrL7LK/hBUsKt + KDrBPXsPB5mKnYb2lGQEhLImSqm0NBc9JM50xBRx9EnJjj3MqNiQFuJhS5rJOULHim1LR2YzKQP2 + E5iTzyPNCtJBOVo1F+/ZJkzFbLF507akLwuzqevIfIZciluiZosF0xHff+jJnhRTKhCaKc5sXYCy + TbqMUw6+sC7Lotxhx7mrCkFSR87zr6JHgxSReY9KJhmzL2bfURpnjmhGwzYRIDimotGAEw4F0jlA + FRjnjrT4jC66+F2yoQb3vUDEvATtIJMuATtjPNYn3pMCQs9dv/o8YWI/wEjaig4lIYu72zlB9L+C + o9QTqmGai/6BbolSdb4bS2wK5ShDET2AbM0xpmX7X6NuOeN5HYU2gutEMKA56ch0dvy76O5f4Sxf + E3H6emNzxiZO5fn4CKgy5QbYDZQSoVEFiUoMujnMUu44DNQwlntFdq47RcZ7UhhQd+TlROwxZ0rz + vVbC2APCfUGJU8N0QtlRpvk60UNkp4GyX4TnKrSc2fpPSmiSQx3MZQxV4NzQQ6jXz389f/MFAAD/ + /wMAE8xLs6wFAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7be203e8793fcfa4-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Wed, 26 Apr 2023 21:46:11 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '19109' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-reset-requests: + - 17ms + x-request-id: + - 5fe22bc0f23ce6b48845f33187e1a19d + status: + code: 200 + message: OK +version: 1 diff --git a/tests/test_local_cache.py b/tests/integration/test_local_cache.py similarity index 99% rename from tests/test_local_cache.py rename to tests/integration/test_local_cache.py index 597f9714..5200e026 100644 --- a/tests/test_local_cache.py +++ b/tests/integration/test_local_cache.py @@ -89,6 +89,7 @@ def test_get(LocalCache, config, mock_embed_with_ada): assert cache.get("test") == ["test"] +@pytest.mark.vcr @requires_api_key("OPENAI_API_KEY") def test_get_relevant(LocalCache, config) -> None: cache = LocalCache(config) diff --git a/tests/integration/test_setup.py b/tests/integration/test_setup.py new file mode 100644 index 00000000..b649bb14 --- /dev/null +++ b/tests/integration/test_setup.py @@ -0,0 +1,78 @@ +from unittest.mock import patch + +import pytest + +from autogpt.config.ai_config import AIConfig +from autogpt.setup import ( + generate_aiconfig_automatic, + generate_aiconfig_manual, + prompt_user, +) +from tests.utils import requires_api_key + + +@pytest.mark.vcr +@requires_api_key("OPENAI_API_KEY") 
+def test_generate_aiconfig_automatic_default(): + user_inputs = [""] + with patch("builtins.input", side_effect=user_inputs): + ai_config = prompt_user() + + assert isinstance(ai_config, AIConfig) + assert ai_config.ai_name is not None + assert ai_config.ai_role is not None + assert 1 <= len(ai_config.ai_goals) <= 5 + + +@pytest.mark.vcr +@requires_api_key("OPENAI_API_KEY") +def test_generate_aiconfig_automatic_typical(): + user_prompt = "Help me create a rock opera about cybernetic giraffes" + ai_config = generate_aiconfig_automatic(user_prompt) + + assert isinstance(ai_config, AIConfig) + assert ai_config.ai_name is not None + assert ai_config.ai_role is not None + assert 1 <= len(ai_config.ai_goals) <= 5 + + +@pytest.mark.vcr +@requires_api_key("OPENAI_API_KEY") +def test_generate_aiconfig_automatic_fallback(): + user_inputs = [ + "T&GF£OIBECC()!*", + "Chef-GPT", + "an AI designed to browse bake a cake.", + "Purchase ingredients", + "Bake a cake", + "", + "", + ] + with patch("builtins.input", side_effect=user_inputs): + ai_config = prompt_user() + + assert isinstance(ai_config, AIConfig) + assert ai_config.ai_name == "Chef-GPT" + assert ai_config.ai_role == "an AI designed to browse bake a cake." + assert ai_config.ai_goals == ["Purchase ingredients", "Bake a cake"] + + +@pytest.mark.vcr +@requires_api_key("OPENAI_API_KEY") +def test_prompt_user_manual_mode(): + user_inputs = [ + "--manual", + "Chef-GPT", + "an AI designed to browse bake a cake.", + "Purchase ingredients", + "Bake a cake", + "", + "", + ] + with patch("builtins.input", side_effect=user_inputs): + ai_config = prompt_user() + + assert isinstance(ai_config, AIConfig) + assert ai_config.ai_name == "Chef-GPT" + assert ai_config.ai_role == "an AI designed to browse bake a cake." + assert ai_config.ai_goals == ["Purchase ingredients", "Bake a cake"] diff --git a/tests/test_image_gen.py b/tests/test_image_gen.py index c009296d..546cc758 100644 --- a/tests/test_image_gen.py +++ b/tests/test_image_gen.py @@ -15,6 +15,9 @@ def image_size(request): return request.param +@pytest.mark.xfail( + reason="The image is too big to be put in a cassette for a CI pipeline. We're looking into a solution." +) @requires_api_key("OPENAI_API_KEY") def test_dalle(config, workspace, image_size): """Test DALL-E image generation.""" @@ -26,6 +29,9 @@ def test_dalle(config, workspace, image_size): ) +@pytest.mark.xfail( + reason="The image is too big to be put in a cassette for a CI pipeline. We're looking into a solution." 
+) @requires_api_key("HUGGINGFACE_API_TOKEN") @pytest.mark.parametrize( "image_model", diff --git a/tests/unit/test_commands.py b/tests/unit/test_commands.py index 8596e1c9..e3b874fb 100644 --- a/tests/unit/test_commands.py +++ b/tests/unit/test_commands.py @@ -7,6 +7,7 @@ from autogpt.app import list_agents, start_agent from tests.utils import requires_api_key +@pytest.mark.vcr @pytest.mark.integration_test @requires_api_key("OPENAI_API_KEY") def test_make_agent() -> None: diff --git a/tests/unit/test_setup.py b/tests/unit/test_setup.py deleted file mode 100644 index 9b907fcf..00000000 --- a/tests/unit/test_setup.py +++ /dev/null @@ -1,78 +0,0 @@ -import unittest -from io import StringIO -from unittest.mock import patch - -from autogpt.config.ai_config import AIConfig -from autogpt.setup import ( - generate_aiconfig_automatic, - generate_aiconfig_manual, - prompt_user, -) -from tests.utils import requires_api_key - - -class TestAutoGPT(unittest.TestCase): - @requires_api_key("OPENAI_API_KEY") - def test_generate_aiconfig_automatic_default(self): - user_inputs = [""] - with patch("builtins.input", side_effect=user_inputs): - ai_config = prompt_user() - - self.assertIsInstance(ai_config, AIConfig) - self.assertIsNotNone(ai_config.ai_name) - self.assertIsNotNone(ai_config.ai_role) - self.assertGreaterEqual(len(ai_config.ai_goals), 1) - self.assertLessEqual(len(ai_config.ai_goals), 5) - - @requires_api_key("OPENAI_API_KEY") - def test_generate_aiconfig_automatic_typical(self): - user_prompt = "Help me create a rock opera about cybernetic giraffes" - ai_config = generate_aiconfig_automatic(user_prompt) - - self.assertIsInstance(ai_config, AIConfig) - self.assertIsNotNone(ai_config.ai_name) - self.assertIsNotNone(ai_config.ai_role) - self.assertGreaterEqual(len(ai_config.ai_goals), 1) - self.assertLessEqual(len(ai_config.ai_goals), 5) - - @requires_api_key("OPENAI_API_KEY") - def test_generate_aiconfig_automatic_fallback(self): - user_inputs = [ - "T&GF£OIBECC()!*", - "Chef-GPT", - "an AI designed to browse bake a cake.", - "Purchase ingredients", - "Bake a cake", - "", - "", - ] - with patch("builtins.input", side_effect=user_inputs): - ai_config = prompt_user() - - self.assertIsInstance(ai_config, AIConfig) - self.assertEqual(ai_config.ai_name, "Chef-GPT") - self.assertEqual(ai_config.ai_role, "an AI designed to browse bake a cake.") - self.assertEqual(ai_config.ai_goals, ["Purchase ingredients", "Bake a cake"]) - - @requires_api_key("OPENAI_API_KEY") - def test_prompt_user_manual_mode(self): - user_inputs = [ - "--manual", - "Chef-GPT", - "an AI designed to browse bake a cake.", - "Purchase ingredients", - "Bake a cake", - "", - "", - ] - with patch("builtins.input", side_effect=user_inputs): - ai_config = prompt_user() - - self.assertIsInstance(ai_config, AIConfig) - self.assertEqual(ai_config.ai_name, "Chef-GPT") - self.assertEqual(ai_config.ai_role, "an AI designed to browse bake a cake.") - self.assertEqual(ai_config.ai_goals, ["Purchase ingredients", "Bake a cake"]) - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/utils.py b/tests/utils.py index 8c72f1fa..e0bc7460 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -1,17 +1,32 @@ import functools import os +from contextlib import contextmanager import pytest +from autogpt.config import Config + + +@contextmanager +def dummy_openai_api_key(): + # even when we record the VCR cassettes, openAI wants an API key + config = Config() + original_api_key = config.openai_api_key + config.set_openai_api_key("sk-dummy") + + try: 
+ yield + finally: + config.set_openai_api_key(original_api_key) + def requires_api_key(env_var): def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): - if not os.environ.get(env_var): - pytest.skip( - f"Environment variable '{env_var}' is not set, skipping the test." - ) + if not os.environ.get(env_var) and env_var == "OPENAI_API_KEY": + with dummy_openai_api_key(): + return func(*args, **kwargs) else: return func(*args, **kwargs) From 76bd192f826fce718a6a32406331a7eb39e1641c Mon Sep 17 00:00:00 2001 From: Robin Richtsfeld Date: Thu, 27 Apr 2023 03:55:01 +0200 Subject: [PATCH 167/222] Set `vcr_config` scope to "session" (#3361) Co-authored-by: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com> --- tests/integration/conftest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 4534bce8..a3c4da6e 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -3,7 +3,7 @@ import pytest from tests.vcr.openai_filter import before_record_request, before_record_response -@pytest.fixture +@pytest.fixture(scope="session") def vcr_config(): # this fixture is called by the pytest-recording vcr decorator. return { From 65b6c2706e1d6f97dcabd96cc195668bf39d2e9d Mon Sep 17 00:00:00 2001 From: chyezh Date: Thu, 27 Apr 2023 09:57:29 +0800 Subject: [PATCH 168/222] fix connection bug for zilliz uri on milvus (#3278) Co-authored-by: Reinier van der Leer Co-authored-by: merwanehamadi --- autogpt/memory/milvus.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/autogpt/memory/milvus.py b/autogpt/memory/milvus.py index 085f50b4..41f9ed13 100644 --- a/autogpt/memory/milvus.py +++ b/autogpt/memory/milvus.py @@ -61,13 +61,13 @@ class MilvusMemory(MemoryProviderSingleton): if self.uri.startswith("https"): self.secure = True - # Zilliz Cloud requires AutoIndex. - if re.match(r"^https://(.*)\.zillizcloud\.(com|cn)", self.address) is not None: - self.index_params = { - "metric_type": "IP", - "index_type": "AUTOINDEX", - "params": {}, - } + # Zilliz Cloud requires AutoIndex. + if re.match(r"^https://(.*)\.zillizcloud\.(com|cn)", self.uri) is not None: + self.index_params = { + "metric_type": "IP", + "index_type": "AUTOINDEX", + "params": {}, + } def init_collection(self) -> None: """Initialize collection in vector database.""" From d8c16de1230232dcbc0f5c1cb9abd75c3f9d06d6 Mon Sep 17 00:00:00 2001 From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com> Date: Wed, 26 Apr 2023 23:07:28 -0500 Subject: [PATCH 169/222] The unlooping and fixing of file execution. (#3368) * The unlooping and fixing of file execution. * lint * Use static random seed during testing. remove unused import. * Fix bug * Actually fix bug. * lint * Unloop a bit more an fix json. * Fix another bug. * lint. 
--------- Co-authored-by: merwanehamadi --- autogpt/__init__.py | 8 ++++++++ autogpt/chat.py | 18 ++++++++++++------ autogpt/commands/execute_code.py | 5 ++--- autogpt/json_utils/json_fix_llm.py | 19 +++++++++++++++++++ autogpt/prompts/prompt.py | 3 +++ autogpt/workspace/workspace.py | 8 ++++++++ 6 files changed, 52 insertions(+), 9 deletions(-) diff --git a/autogpt/__init__.py b/autogpt/__init__.py index 5f5b20ef..909f8bf4 100644 --- a/autogpt/__init__.py +++ b/autogpt/__init__.py @@ -1,5 +1,13 @@ +import os +import random +import sys + from dotenv import load_dotenv +if "pytest" in sys.argv or "pytest" in sys.modules or os.getenv("CI"): + print("Setting random seed to 42") + random.seed(42) + # Load the users .env file into environment variables load_dotenv(verbose=True, override=True) diff --git a/autogpt/chat.py b/autogpt/chat.py index 4b906a00..469ec9bd 100644 --- a/autogpt/chat.py +++ b/autogpt/chat.py @@ -1,4 +1,5 @@ import time +from random import shuffle from openai.error import RateLimitError @@ -80,12 +81,17 @@ def chat_with_ai( logger.debug(f"Token limit: {token_limit}") send_token_limit = token_limit - 1000 - - relevant_memory = ( - "" - if len(full_message_history) == 0 - else permanent_memory.get_relevant(str(full_message_history[-9:]), 10) - ) + if len(full_message_history) == 0: + relevant_memory = "" + else: + recent_history = full_message_history[-5:] + shuffle(recent_history) + relevant_memories = permanent_memory.get_relevant( + str(recent_history), 5 + ) + if relevant_memories: + shuffle(relevant_memories) + relevant_memory = str(relevant_memories) logger.debug(f"Memory Stats: {permanent_memory.get_stats()}") diff --git a/autogpt/commands/execute_code.py b/autogpt/commands/execute_code.py index ca0586f4..590f95cc 100644 --- a/autogpt/commands/execute_code.py +++ b/autogpt/commands/execute_code.py @@ -1,6 +1,7 @@ """Execute code in a Docker container""" import os import subprocess +from pathlib import Path import docker from docker.errors import ImageNotFound @@ -40,7 +41,6 @@ def execute_python_file(filename: str) -> str: try: client = docker.from_env() - # You can replace this with the desired Python image/version # You can find available Python images on Docker Hub: # https://hub.docker.com/_/python @@ -60,10 +60,9 @@ def execute_python_file(filename: str) -> str: print(f"{status}: {progress}") elif status: print(status) - container = client.containers.run( image_name, - f"python {filename}", + f"python {Path(filename).relative_to(CFG.workspace_path)}", volumes={ CFG.workspace_path: { "bind": "/workspace", diff --git a/autogpt/json_utils/json_fix_llm.py b/autogpt/json_utils/json_fix_llm.py index 869aed12..10317acc 100644 --- a/autogpt/json_utils/json_fix_llm.py +++ b/autogpt/json_utils/json_fix_llm.py @@ -91,14 +91,33 @@ def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]: Returns: str: The fixed JSON string. 
""" + assistant_reply = assistant_reply.strip() + if assistant_reply.startswith("```json"): + assistant_reply = assistant_reply[7:] + if assistant_reply.endswith("```"): + assistant_reply = assistant_reply[:-3] + try: + return json.loads(assistant_reply) # just check the validity + except json.JSONDecodeError: # noqa: E722 + pass + + if assistant_reply.startswith("json "): + assistant_reply = assistant_reply[5:] + assistant_reply = assistant_reply.strip() + try: + return json.loads(assistant_reply) # just check the validity + except json.JSONDecodeError: # noqa: E722 + pass # Parse and print Assistant response assistant_reply_json = fix_and_parse_json(assistant_reply) + logger.debug("Assistant reply JSON: %s", str(assistant_reply_json)) if assistant_reply_json == {}: assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets( assistant_reply ) + logger.debug("Assistant reply JSON 2: %s", str(assistant_reply_json)) if assistant_reply_json != {}: return assistant_reply_json diff --git a/autogpt/prompts/prompt.py b/autogpt/prompts/prompt.py index b20a1757..1d169fe1 100644 --- a/autogpt/prompts/prompt.py +++ b/autogpt/prompts/prompt.py @@ -71,6 +71,9 @@ def build_default_prompt_generator() -> PromptGenerator: "Every command has a cost, so be smart and efficient. Aim to complete tasks in" " the least number of steps." ) + prompt_generator.add_performance_evaluation( + "If you cannot think of a valid command to perform start or message an agent to determine the next command." + ) prompt_generator.add_performance_evaluation("Write all code to a file.") return prompt_generator diff --git a/autogpt/workspace/workspace.py b/autogpt/workspace/workspace.py index 9165bd96..c1050230 100644 --- a/autogpt/workspace/workspace.py +++ b/autogpt/workspace/workspace.py @@ -11,6 +11,8 @@ from __future__ import annotations from pathlib import Path +from autogpt.logs import logger + class Workspace: """A class that represents a workspace for an AutoGPT agent.""" @@ -112,8 +114,12 @@ class Workspace: if root is None: return Path(relative_path).resolve() + logger.debug(f"Resolving path '{relative_path}' in workspace '{root}'") + root, relative_path = Path(root).resolve(), Path(relative_path) + logger.debug(f"Resolved root as '{root}'") + if relative_path.is_absolute(): raise ValueError( f"Attempted to access absolute path '{relative_path}' in workspace '{root}'." @@ -121,6 +127,8 @@ class Workspace: full_path = root.joinpath(relative_path).resolve() + logger.debug(f"Joined paths as '{full_path}'") + if restrict_to_root and not full_path.is_relative_to(root): raise ValueError( f"Attempted to access path '{full_path}' outside of workspace '{root}'." 
From 7a161cc0bd8253759c8e7b15c0a3e3d86f0adbad Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Thu, 27 Apr 2023 15:28:18 +0200 Subject: [PATCH 170/222] Add .gitattributes (#3402) --- .gitattributes | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .gitattributes diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000..b28dfc4d --- /dev/null +++ b/.gitattributes @@ -0,0 +1,5 @@ +# Exclude VCR cassettes from stats +tests/**/cassettes/**.y*ml linguist-generated + +# Mark documentation as such +docs/**.md linguist-documentation From 9e17a304dee13e406a5678f263ac307fb4cc7bd0 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Thu, 27 Apr 2023 17:58:35 +0200 Subject: [PATCH 171/222] Minor improvements to the docs for voice config and testing (#3407) --- CONTRIBUTING.md | 17 ++++++++++++----- docs/configuration/voice.md | 4 ++-- docs/setup.md | 5 +---- 3 files changed, 15 insertions(+), 11 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1ac320bd..e736a036 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -99,15 +99,22 @@ https://github.com/Significant-Gravitas/Auto-GPT/pulls?q=is%3Apr+is%3Aopen+-labe ## Testing your changes If you add or change code, make sure the updated code is covered by tests. - -To increase coverage if necessary, [write tests using `pytest`]. +To increase coverage if necessary, [write tests using pytest]. For more info on running tests, please refer to ["Running tests"](https://significant-gravitas.github.io/Auto-GPT/testing/). -[write tests using `pytest`]: https://realpython.com/pytest-python-testing/ +[write tests using pytest]: https://realpython.com/pytest-python-testing/ + +### API-dependent tests + +To run tests that involve making calls to the OpenAI API, we use VCRpy. It caches known +requests and matching responses in so-called *cassettes*, allowing us to run the tests +in CI without needing actual API access. + +When changes cause a test prompt to be generated differently, it will likely miss the +cache and make a request to the API, updating the cassette with the new request+response. +*Be sure to include the updated cassette in your PR!* - -In Pytest, we use VCRpy. It's a package that allows us to save OpenAI and other API providers' responses. When you run Pytest locally: - If no prompt change: you will not consume API tokens because there are no new OpenAI calls required. diff --git a/docs/configuration/voice.md b/docs/configuration/voice.md index fcd487fd..728fbaf5 100644 --- a/docs/configuration/voice.md +++ b/docs/configuration/voice.md @@ -11,8 +11,8 @@ premade voices that Auto-GPT can use for speech. 1. Go to [ElevenLabs](https://beta.elevenlabs.io/) and make an account if you don't already have one. -2. Choose and setup the `Starter` plan. -3. Click the top right icon and find "Profile" to locate your API Key. +2. Choose and setup the *Starter* plan. +3. Click the top right icon and find *Profile* to locate your API Key. In the `.env` file set: diff --git a/docs/setup.md b/docs/setup.md index a5d0558c..7cbab43d 100644 --- a/docs/setup.md +++ b/docs/setup.md @@ -73,7 +73,7 @@ Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](htt !!! important Make sure you have [Git](https://git-scm.com/downloads) installed for your OS. -!!! info +!!! info "Executing commands" To execute the given commands, open a CMD, Bash, or Powershell window. 
On Windows: press ++win+x++ and pick *Terminal*, or ++win+r++ and enter `cmd` @@ -115,9 +115,6 @@ Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](htt 7. Save and close the `.env` file. -!!! info - Get your ElevenLabs API key from: [ElevenLabs](https://elevenlabs.io). You can view your xi-api-key using the "Profile" tab on the website. - !!! info "Using a GPT Azure-instance" If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and make an Azure configuration file: From 7cd76b8d8ecb0171d79b2970e035ce1c72e62500 Mon Sep 17 00:00:00 2001 From: Montana Flynn Date: Thu, 27 Apr 2023 23:12:24 +0700 Subject: [PATCH 172/222] Add makedirs to file operations (#3289) * Add makedirs to file operations * Add new directory tests for file operations * Fix wrong setUp test error * Simplify makedirs and use correct nested path * Fix linter error --------- Co-authored-by: Nicholas Tindle Co-authored-by: James Collins --- autogpt/commands/file_operations.py | 7 +++++-- tests/unit/test_file_operations.py | 19 +++++++++++-------- 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py index 9999fccf..05f06088 100644 --- a/autogpt/commands/file_operations.py +++ b/autogpt/commands/file_operations.py @@ -142,8 +142,7 @@ def write_to_file(filename: str, text: str) -> str: return "Error: File has already been updated." try: directory = os.path.dirname(filename) - if not os.path.exists(directory): - os.makedirs(directory) + os.makedirs(directory, exist_ok=True) with open(filename, "w", encoding="utf-8") as f: f.write(text) log_operation("write", filename) @@ -167,6 +166,8 @@ def append_to_file(filename: str, text: str, should_log: bool = True) -> str: str: A message indicating success or failure """ try: + directory = os.path.dirname(filename) + os.makedirs(directory, exist_ok=True) with open(filename, "a") as f: f.write(text) @@ -236,6 +237,8 @@ def download_file(url, filename): filename (str): Filename to save the file as """ try: + directory = os.path.dirname(filename) + os.makedirs(directory, exist_ok=True) message = f"{Fore.YELLOW}Downloading file from {Back.LIGHTBLUE_EX}{url}{Back.RESET}{Fore.RESET}" with Spinner(message) as spinner: session = requests.Session() diff --git a/tests/unit/test_file_operations.py b/tests/unit/test_file_operations.py index c3b20d0c..b2760671 100644 --- a/tests/unit/test_file_operations.py +++ b/tests/unit/test_file_operations.py @@ -33,6 +33,7 @@ class TestFileOperations(unittest.TestCase): self.test_file = str(self.workspace.get_path("test_file.txt")) self.test_file2 = "test_file2.txt" self.test_directory = str(self.workspace.get_path("test_directory")) + self.test_nested_file = str(self.workspace.get_path("nested/test_file.txt")) self.file_content = "This is a test file.\n" self.file_logger_logs = "file_logger.txt" @@ -69,21 +70,23 @@ class TestFileOperations(unittest.TestCase): def test_write_to_file(self): new_content = "This is new content.\n" - write_to_file(self.test_file, new_content) - with open(self.test_file, "r") as f: + write_to_file(self.test_nested_file, new_content) + with open(self.test_nested_file, "r") as f: content = f.read() self.assertEqual(content, new_content) def test_append_to_file(self): - with open(self.test_file, "r") as f: - content_before = f.read() - append_text = "This is appended text.\n" - append_to_file(self.test_file, append_text) - with open(self.test_file, "r") as f: + append_to_file(self.test_nested_file, append_text) 
+ with open(self.test_nested_file, "r") as f: content = f.read() - self.assertEqual(content, content_before + append_text) + append_to_file(self.test_nested_file, append_text) + + with open(self.test_nested_file, "r") as f: + content_after = f.read() + + self.assertEqual(content_after, append_text + append_text) def test_delete_file(self): delete_file(self.test_file) From adbb47fb652adafa3f38510c9eb1145b35cb6ceb Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Thu, 27 Apr 2023 09:27:15 -0700 Subject: [PATCH 173/222] scrape text regression test (#3387) Co-authored-by: James Collins --- autogpt/main.py | 10 +- autogpt/prompts/prompt.py | 7 +- tests/integration/agent_factory.py | 87 ++ tests/integration/agent_utils.py | 10 + .../test_browse_website.yaml | 1375 +++++++++++++++++ .../goal_oriented/test_browse_website.py | 27 + .../goal_oriented/test_write_file.py | 64 +- tests/utils.py | 4 + 8 files changed, 1515 insertions(+), 69 deletions(-) create mode 100644 tests/integration/agent_factory.py create mode 100644 tests/integration/agent_utils.py create mode 100644 tests/integration/goal_oriented/cassettes/test_browse_website/test_browse_website.yaml create mode 100644 tests/integration/goal_oriented/test_browse_website.py diff --git a/autogpt/main.py b/autogpt/main.py index b3ffb01e..848a22a0 100644 --- a/autogpt/main.py +++ b/autogpt/main.py @@ -12,7 +12,7 @@ from autogpt.configurator import create_config from autogpt.logs import logger from autogpt.memory import get_memory from autogpt.plugins import scan_plugins -from autogpt.prompts.prompt import construct_main_ai_config +from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT, construct_main_ai_config from autogpt.utils import get_current_git_branch, get_latest_bulletin from autogpt.workspace import Workspace from scripts.install_plugin_deps import install_plugin_dependencies @@ -124,11 +124,7 @@ def run_auto_gpt( # Initialize variables full_message_history = [] next_action_count = 0 - # Make a constant: - triggering_prompt = ( - "Determine which next command to use, and respond using the" - " format specified above:" - ) + # Initialize memory and make sure it is empty. # this is particularly important for indexing and referencing pinecone memory memory = get_memory(cfg, init=True) @@ -148,7 +144,7 @@ def run_auto_gpt( command_registry=command_registry, config=ai_config, system_prompt=system_prompt, - triggering_prompt=triggering_prompt, + triggering_prompt=DEFAULT_TRIGGERING_PROMPT, workspace_directory=workspace_directory, ) agent.start_interaction_loop() diff --git a/autogpt/prompts/prompt.py b/autogpt/prompts/prompt.py index 1d169fe1..c1240157 100644 --- a/autogpt/prompts/prompt.py +++ b/autogpt/prompts/prompt.py @@ -10,6 +10,10 @@ from autogpt.utils import clean_input CFG = Config() +DEFAULT_TRIGGERING_PROMPT = ( + "Determine which next command to use, and respond using the format specified above:" +) + def build_default_prompt_generator() -> PromptGenerator: """ @@ -71,9 +75,6 @@ def build_default_prompt_generator() -> PromptGenerator: "Every command has a cost, so be smart and efficient. Aim to complete tasks in" " the least number of steps." ) - prompt_generator.add_performance_evaluation( - "If you cannot think of a valid command to perform start or message an agent to determine the next command." 
- ) prompt_generator.add_performance_evaluation("Write all code to a file.") return prompt_generator diff --git a/tests/integration/agent_factory.py b/tests/integration/agent_factory.py new file mode 100644 index 00000000..2a88a7b1 --- /dev/null +++ b/tests/integration/agent_factory.py @@ -0,0 +1,87 @@ +import os + +from autogpt.agent import Agent +from autogpt.app import CFG +from autogpt.commands.command import CommandRegistry +from autogpt.config import AIConfig +from autogpt.memory import get_memory +from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT + + +def create_browser_agent(workspace): + command_registry = CommandRegistry() + command_registry.import_commands("autogpt.commands.file_operations") + command_registry.import_commands("autogpt.commands.web_selenium") + command_registry.import_commands("autogpt.app") + + ai_config = AIConfig( + ai_name="browse_website-GPT", + ai_role="an AI designed to use the browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html, answer the question 'What is the price of the book?' and write the price to a file named \"browse_website.txt\", and use the task_complete command to complete the task.", + ai_goals=[ + "Use the browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html and answer the question 'What is the price of the book?'", + 'Write the price of the book to a file named "browse_website.txt".', + "Use the task_complete command to complete the task.", + "Do not use any other commands.", + ], + ) + ai_config.command_registry = command_registry + CFG.set_continuous_mode(True) + CFG.set_memory_backend("no_memory") + CFG.set_temperature(0) + + memory = get_memory(CFG, init=True) + system_prompt = ai_config.construct_full_prompt() + + agent = Agent( + ai_name="", + memory=memory, + full_message_history=[], + command_registry=command_registry, + config=ai_config, + next_action_count=0, + system_prompt=system_prompt, + triggering_prompt=DEFAULT_TRIGGERING_PROMPT, + workspace_directory=workspace.root, + ) + + return agent + + +def create_writer_agent(workspace): + command_registry = CommandRegistry() + command_registry.import_commands("autogpt.commands.file_operations") + command_registry.import_commands("autogpt.app") + + ai_config = AIConfig( + ai_name="write_to_file-GPT", + ai_role="an AI designed to use the write_to_file command to write 'Hello World' into a file named \"hello_world.txt\" and then use the task_complete command to complete the task.", + ai_goals=[ + "Use the write_to_file command to write 'Hello World' into a file named \"hello_world.txt\".", + "Use the task_complete command to complete the task.", + "Do not use any other commands.", + ], + ) + ai_config.command_registry = command_registry + CFG.set_continuous_mode(True) + CFG.set_memory_backend("no_memory") + CFG.set_temperature(0) + memory = get_memory(CFG, init=True) + triggering_prompt = ( + "Determine which next command to use, and respond using the" + " format specified above:" + ) + system_prompt = ai_config.construct_full_prompt() + + agent = Agent( + ai_name="", + memory=memory, + full_message_history=[], + command_registry=command_registry, + config=ai_config, + next_action_count=0, + system_prompt=system_prompt, + triggering_prompt=triggering_prompt, + workspace_directory=workspace.root, + ) + + return agent diff --git a/tests/integration/agent_utils.py b/tests/integration/agent_utils.py new file mode 100644 index 00000000..3d03ef5e --- /dev/null +++ b/tests/integration/agent_utils.py 
@@ -0,0 +1,10 @@ +import concurrent + + +def run_interaction_loop(agent, timeout): + with concurrent.futures.ThreadPoolExecutor() as executor: + future = executor.submit(agent.start_interaction_loop) + try: + result = future.result(timeout=timeout) + except concurrent.futures.TimeoutError: + assert False, f"The process took longer than {timeout} seconds to complete." diff --git a/tests/integration/goal_oriented/cassettes/test_browse_website/test_browse_website.yaml b/tests/integration/goal_oriented/cassettes/test_browse_website/test_browse_website.yaml new file mode 100644 index 00000000..63da74c4 --- /dev/null +++ b/tests/integration/goal_oriented/cassettes/test_browse_website/test_browse_website.yaml @@ -0,0 +1,1375 @@ +interactions: +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are browse_website-GPT, an AI designed to use the browse_website command to + visit http://books.toscrape.com/catalogue/meditations_33/index.html, answer + the question ''What is the price of the book?'' and write the price to a file + named \"browse_website.txt\", and use the task_complete command to complete + the task.\nYour decisions must always be made independently without seeking + user assistance. Play to your strengths as an LLM and pursue simple strategies + with no legal complications.\n\nGOALS:\n\n1. Use the browse_website command + to visit http://books.toscrape.com/catalogue/meditations_33/index.html and answer + the question ''What is the price of the book?''\n2. Write the price of the book + to a file named \"browse_website.txt\".\n3. Use the task_complete command to + complete the task.\n4. Do not use any other commands.\n\n\nConstraints:\n1. + ~4000 word limit for short term memory. Your short term memory is short, so + immediately save important information to files.\n2. If you are unsure how you + previously did something or want to recall past events, thinking about similar + events will help you remember.\n3. No user assistance\n4. Exclusively use the + commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: + Append to file, args: \"filename\": \"\", \"text\": \"\"\n2. + delete_file: Delete file, args: \"filename\": \"\"\n3. read_file: + Read file, args: \"filename\": \"\"\n4. search_files: Search Files, + args: \"directory\": \"\"\n5. write_to_file: Write to file, args: + \"filename\": \"\", \"text\": \"\"\n6. browse_website: Browse + Website, args: \"url\": \"\", \"question\": \"\"\n7. + delete_agent: Delete GPT Agent, args: \"key\": \"\"\n8. get_hyperlinks: + Get text summary, args: \"url\": \"\"\n9. get_text_summary: Get text summary, + args: \"url\": \"\", \"question\": \"\"\n10. list_agents: List + GPT Agents, args: () -> str\n11. message_agent: Message GPT Agent, args: \"key\": + \"\", \"message\": \"\"\n12. start_agent: Start GPT Agent, args: + \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n13. + Task Complete (Shutdown): \"task_complete\", args: \"reason\": \"\"\n\nResources:\n1. + Internet access for searches and information gathering.\n2. Long Term memory + management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File + output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your + actions to ensure you are performing to the best of your abilities.\n2. Constructively + self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions + and strategies to refine your approach.\n4. Every command has a cost, so be + smart and efficient. 
Aim to complete tasks in the least number of steps.\n5. + Write all code to a file.\n\nYou should only respond in JSON format as described + below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": + \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- + long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": + \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": + \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} + \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", + "content": "The current time and date is Tue Jan 01 00:00:00 2000"}, {"role": + "system", "content": "This reminds you of these events from your past:\n\n\n"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}], "temperature": 0, "max_tokens": 3135}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '3866' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA7xTS2vbQBC+91cMc5bfxEl06aFQSCg9uAmhVMGsV2NpI+2OsjtyAsb/vaxl5eGQ + trTQ68zu95qZLZocU9SlEm2benB6zhf222ZRVZ8WslH68q51Xy+/X02IF2tMkFd3pOXwY6jZNjWJ + YYcJak9KKMd0Mj+bnpxO5/OzBC3nVGOKRSOD2fBkIK1f8WA8G08wwTaogjDdYuPZNrIUrsgFTM/m + 0wSfsZ/q09lpgsKi6qfSZHx+vktQl2w0BUx/bNFS6HE914QpqhBMEOUkqmQn5KKDbeYAADKUktui + lJBhCofioUGPEosZXsCDqWtoA4GUBCvPD4GWD7QKRgg0W6tcDsKwMcEIlCJNOhqtmKswFA7aq4Zi + WiOtRNVctDSylBtR0V9YzmYj43J6HJZia4hQaxPxSoLGG03A646XuRpmmLwU6UkFdsYVndKrkkBU + qMDTfWs8BbAUhb0PmOwJf2HLhK5LQYCbqDgCKl0a2sQ8TDjW1NTKdXIGcP0/MssyN4DP71rct298 + 5H3uRw+wNjWBU5byI4FDibN/5Up7I0abYPudcER7A+RC6yO0ErgAZaENxhV7Ks3ekxa4XnzZx3zf + UugT/PMZh4ZU9VebGN/0zd/uVUe5S/rDOKC9uYuYV6fmNf2RauWL45vqGq2vu+//NvOXbB1wn26H + fhPncVjeN3Y/9nb3lg/OM7fDXYJr40wol91lYYpBuMEE99SYjne3uw8/AQAA//8DAIk1DVg4BQAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7be499e66c20ced1-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Thu, 27 Apr 2023 05:18:11 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '23148' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-reset-requests: + - 17ms + x-request-id: + - 0c30c154a2ed5a2a8a31bff33d3448c3 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "\"\"\"Books + to Scrape We love being scraped! Home Books Philosophy Meditations Meditations + \u00a325.89 In stock (1 available) Warning! This is a demo website for web scraping + purposes. Prices and ratings here were randomly assigned and have no real meaning. + Product Description Written in Greek, without any intention of publication, + by the only Roman emperor who was also a philosopher, the Meditations of Marcus + Aurelius (AD 121-180) offer a remarkable series of challenging spiritual reflections + and exercises developed as the emperor struggled to understand himself and make + sense of the universe. 
Ranging from doubt and despair to conviction and ex Written + in Greek, without any intention of publication, by the only Roman emperor who + was also a philosopher, the Meditations of Marcus Aurelius (AD 121-180) offer + a remarkable series of challenging spiritual reflections and exercises developed + as the emperor struggled to understand himself and make sense of the universe. + Ranging from doubt and despair to conviction and exaltation, they cover such + diverse topics as the nature of moral virtue, human rationality, divine providence, + and Marcus'' own emotions. But while the Meditations were composed to provide + personal consolation and encouragement, in developing his beliefs Marcus Aurelius + also created one of the greatest of all works of philosophy: a timeless collection + of extended meditations and short aphorisms that has been consulted and admired + by statesmen, thinkers and readers through the centuries. ...more Product Information + UPC4f19709e47883df5 Product TypeBooks Price (excl. tax)\u00a325.89 Price (incl. + tax)\u00a325.89 Tax\u00a30.00 Availability In stock (1 available) Number of + reviews 0 Products you recently viewed The Nicomachean Ethics \u00a336.34 In + stock Add to basket Run, Spot, Run: The ... \u00a320.02 In stock Add to basket + Critique of Pure Reason \u00a320.75 In stock Add to basket At The Existentialist + Caf\u00e9: ... \u00a329.93 In stock Add to basket Kierkegaard: A Christian Missionary + ... \u00a347.13 In stock Add to basket Proofs of God: Classical ... \u00a354.21 + In stock Add to basket\"\"\" Using the above text, answer the following question: + \"What is the price of the book?\" -- if the question cannot be answered using + the text, summarize the text."}], "temperature": 0, "max_tokens": null}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '2407' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA0SOwWrCQBRF9/2KcNeTkKgxZvYFKbiwFbqoRSaTpxlN5qWZZ2mRfE3/pF9WhNZu + D9xzzwWuhoZtjNiub+Oi5IdlcTiv5X29ahtqnu6Xb4+b4+eseF5CgasjWfldJJa7viVx7KFgBzJC + NXQ2X0zyYjIvS4WOa2qhceglniZ5LOeh4jidphkUzsEcCPqCfuCul53wiXyAni1yhX/3jWdzBWEx + 7Y3kaTYq2IadpQD9ckFH4c86cEvQMCG4IMbLtZG9kL/2bxqK+sFZingfSUNRxXyKtlhR7cRcb8MW + kQvR99ckTxZlglFh77wLzW4gE9hDIwj3UHC+pg/odHwd734AAAD//wMAj2F8NVEBAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7be49aa96fbbced1-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Thu, 27 Apr 2023 05:18:22 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '3508' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-reset-requests: + - 17ms + x-request-id: + - 5d1aace4f6a0d761311cb9123e585128 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "\"\"\"The + price of the book \"Meditations\" is \u00a325.89.\"\"\" Using the above text, + answer the following question: \"What is the price of the book?\" -- if the + question cannot be 
answered using the text, summarize the text."}], "temperature": + 0, "max_tokens": null}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '338' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA0SOwWrCQBRF9/2K4a4nIWpjdLbFTSGrumlrkXHyNKPJvCHzLILka/on/bIitHZ7 + 4J57rvANDFxrxfWxy6olP9d13b6+PMX12SbffV5WvJofZm9yhAbvjuTkd5E77mNH4jlAww1khRqY + yXwxLatpVTxq9NxQB4NDlGyWl5mchx1nxayYQOOc7IFgrogD91G2wicKCaZcavyr73gy1xAW291J + VY4armXvKMG8X9FT+nMO3BEMbEo+iQ1yK+QgFG7165ZUHLwjxXslLakd80ltUFPjxd5e0wbKJ/X9 + NS3zxTLHqLH3wad2O5BNHGCQhCM0fGjoAlOMH+PDDwAAAP//AwBNSnfkTwEAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7be49aca5c1cced1-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Thu, 27 Apr 2023 05:18:27 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '2637' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-reset-requests: + - 17ms + x-request-id: + - 1b0d10e2a3a5eb7feff786645a7a4856 + status: + code: 200 + message: OK +- request: + body: '{"input": ["Assistant Reply: { \"thoughts\": { \"text\": \"I + will use the browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html + and find the price of the book.\", \"reasoning\": \"The task requires + me to find the price of the book, and the browse_website command is the best + option to achieve this.\", \"plan\": \"- Use the browse_website command + to visit http://books.toscrape.com/catalogue/meditations_33/index.html\\n- Find + the price of the book\\n- Write the price to a file named browse_website.txt\", \"criticism\": + \"I need to ensure that I am using the correct URL and question to find the + price of the book.\", \"speak\": \"I will use the browse_website command + to visit the website and find the price of the book.\" }, \"command\": + { \"name\": \"browse_website\", \"args\": { \"url\": + \"http://books.toscrape.com/catalogue/meditations_33/index.html\", \"question\": + \"What is the price of the book?\" } } } Result: Command browse_website + returned: (''Answer gathered from website: The price of the book \"Meditations\" + is \u00a325.89. 
\\n \\n Links: [\\''Books to Scrape (http://books.toscrape.com/index.html)\\'', + \\''Home (http://books.toscrape.com/index.html)\\'', \\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\'', + \\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\'', + \\'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\'']'', + ) Human + Feedback: "], "model": "text-embedding-ada-002", "encoding_format": "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '1758' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SaS8+7Opfl5++nODpT+lWAEGzOjHu4xSYhIdBqtSAQEkhCuNiAS/XdS+TfqlZN + nkFA4gFvr71+a/s//vXXX3+3eV3exr//+evv13MY//5f629FNmZ///PX//7XX3/99dd//P7+jzvL + d14WxfNT/W7/XXx+inL++5+/+P/+5f/f9M9ff2tSLJKtsZsNVkekgmHwGGgwdXw/hFMDYf+wM4q1 + yvFF/XiP4bzYNdXi4u4v/h3eoAd2PjVqTsvn/TipMBy5DMFI30TLNqoROIJ+TyinvHq2++SN3Elm + T3H/GgwCLqIH7EdypftGvPasVZIQLDF7UOMTVmBB5ocHwyc/EMFwr/1QBAqSny4+UyylNpjDpZBk + tWtd6nyhasxbQBLY6w9CdXKo+hE73wnehQRgjbaWP5eGrSuGxd+wj4e5npj71JUGNCkB2Xjv53ex + HYCY0gt27lT0l8WSHBjG7wc2ikyvR6c2z8oO+zW2v/Len9SDpv6eh81HOQF2VOsntKQXRXy0qxnh + duobajHVUPWavWgB7vcNXt0joqh6s57UtdNAKUQd9gz47qdq4k2lF95H9C6hB7bn5plAgi8CRiZ/ + Y6xp5go+HGEgojxH+USjvoVYMgRshp2Yj0GqD3D4pAd8BkUGWLsMbzjz8IXNdDn7o30MpN07Tk2s + fvmbP+f7LIGC672pX5u+ITi1GcNFQpi6YubXRB40CMcwgbTMpovPLqh8Q6uxJGzfuz4fpwV68Hi2 + 94jnRS8faFR3ANvBDqPaW6LvySwyqEXihuysue2X0/F9g5nhvSjqG9qzljMJPAidh+DU8TWztEWC + 9HGNiOAggwl72WxkVOgGdQr/ms9d50og7O2EWmF/Ykv7yp+Si8se+3bD2KJ8lDd8S+oOJ9Lpnk9q + WjRQDm461UjQ+JO0ac9QlEIN3xpgG6R3sxs84jmm1hLtmQD2UifvAjNDU/Ud+tdH7kQQ1uBNNse3 + 7k+kq2UY84DiYGfObBI+bgNZzOloeTdCPUzzpoWUu4f0sPmOxvy2DVWRU7bHhtUr/WxyIYRKcI0w + Zhs3Esrtiwdcyu+ov8d6zzfL6a1c9KdJiPf55iy89tOuvckJ1kq+qwfmdiqkXBGi3X061UstCyeI + R7aj+vtRswnTqdrFzdBia0SA9e071BW7CVO0OeC4b4HNOzBtkoU6fX5ii3tQVEj1T0VYfLPzxfpq + 3Yb/VDONZaL1Uxxen0Aqzh+M8txgvN61bxlH8Ra7Ar0BEihnVUkfzwkpTJki4omQAKkZHviwGThA + rjDoZLGSHCI9wzqfh5dSgbcb2dhc62UBF9GBhS5SevCvG2O5Kb0HFfd+pObnjI02OtcylN23imYS + mIbIgJnIaiNZZMecwWB754R2S72744sUJP5kG0CEsFkibAtfoaaXVBxgXUGRnuj30S8zkzoICn0m + QqSc/EW39AUkIDfw4Sw69bwTdh58fJIEG9/tXHf7k8DDdB63FLdRAHj3ljXw8WjPWCU7xxDvo46U + 7eO2RdzOhTUZ8W4C5WdA1M/GTT16pfGEk75o1DJsi/H6nZfg+bGPkahclrwrTe8Jf98radyoZmHt + y1BzXwaR9so9p3cCPFBGaYnTY2Hlk3C7yHAu3j12h2aMlg05y5CdbZXasqr5AieZplJ8zm/s3Acz + n6OH+lSunXimQX832XIFSQC80Mixfhm/OVE94w0LbvdGkv3gItI/9g4oZ+hR6/5FjMU950HV9k7U + PRaviFLqhRBljkaN9+tkDME1naAZpz517jQ25pOkILDqD2E198g7JXRkIFTjh7z8qjZI1y8iOKf0 + Rg/tMWILjVMTXJqripYitY1ZjsIJPHT5gVVzKwAqW7QFx7O5x9HhmkWUOUUFSK/eaYnpxx+E0D3D + x+d1x+54GtmIz+IZfEPPoAcXlAZzXrsJTOHeJnNpd/7iTEUCW70rqM+kVz9LDXGAsDgXrJUuWPWy + aYDEcTeK57zrxzG7JLAH8YXuu69pCKU4ZwDbaEe2z1PSM3K6O2DffQLqpWACixKqMiw7dkWndT8y + uJFj+JqJTfivN9YTjqwMxmEqIfG5ufXL5RsuELr7L5Hixq75hw7ecF1vGl0PUk1vm7P96ycYb3rb + Z+fJe4pd9OYxfhco3z6/cgz8olZRuOoVq66yDJ/hB2Ejb0xjar1dBrMiqfBR/kbGdPimAVz7PbYa + uzSY704DbLnxTD1iwnoGzmjK5Ycgwl80jT35pFFh4TAZbRvxWk8A5Nwu66oSOxX/zNkhDFVl9Qfr + 9z+AQZ0KEWy5N08tg39HbIk0BOdu+FI9OWfGeDIqqEyhZdPk8XyCcU+aEjZN+kGMl+79FL5MEZAl + SZEgtKk/P3fmGX5Dx/jVazSp04X/7Y8/+3GBnMCDttjfcSAWD4M9RSSD3/5VI+HI5mZTd/LekhHW + eX7Ol/2+GKD7sB+ITxfeYJfD04MvADWcrnpK1v0kj9GJw/twKurFv/OlQqLuTbqO9tEsi2MDX0V6 + wQ4ncgaN798GutL3gQ8p987nIa042IC5IkJqTcbXjA+DfIshR/VtMhmLbX9DKAR6izGnvOpBCLUY + iAQH2Ch9ng3HQ+IADpQEOxIe++Xwjia4+hVqyH3gixE5viEHeoFa8vjyJ54TT3CO4gkj7+NGAxs4 + 
ERA361DH3LZervuGh5mtf6irfjswSb5aKf3DzKhGZLNeriBEypGrXSQs8a4mSMoR6Bu/xm65qXKa + YcGEw43zqOmEO3/wZfKGIg5PZFcEjM1oWxJg4WSHXeS/wMQGUfzpK2Le9O3JHmgmLG0zxfftsWVD + 7Z5kOMy9S00TfNjiOzyCVzd9/97HmKnr6OAZWDHVCXgYvYdHDoTgeCebnZf6y/r+4IrdDqMd7+ed + XTyeSh6/YuoXKGCE0SZUuo4bsYmMVz2N2hDIolQ4WCX+p/8mCV3gVTBDJN1p7L+LfIvg0MQmPWgl + NqaNk7V/9AggbjbmIrNPMIlEgwjGNvGpm6Ut5BpboObh8jKWCy/JwHqoFD3Pm5ZNrw+RZNBkPZHF + vDWGHc+3sOWeB8SK7NnPnNGc4AW3GjY27As6YqYtNCVDpbYh8fVyfzxN5e4+POzh1ojmk+QscN6g + iiy10rC5Kx8T6AIkE8l+lBGloBT/1LOMZJ0JcndUgZdFMVFSzo6YX0kc0AuDp4eaN/vlIrzO8Ebi + LVlSPevZezsmsg7Yh9p+c8znAF4reI6WI9UMnTHyCvgFeha7073QnQz6PC02dKUyoyr9tP0SkbSB + DyBjIty/BCyf6GZDRbjoZCmlhM34zMVyF99E6k1tXn+lejfI15HEGNc7nLPPxdIVJaIHtAQvv1/9 + WwONjXakh4wm/cBzmgi//Faj5nLqo7k47IgsCPoVcTvbY/QhZCV0ojfAmkfCmlXNvvzpP0b7y5ct + 3l3VoSpYAXaS7OUv41vWIZG8gu4/16YecRXf4Lc4qWiqhjLvkpeZwS03A/rr/2xKPB2eiRoQaTzO + UXd/lhW0M7Ug7ReqPsuGKlDyqDoguFtO+TJ/TShLYdDRfOWLRZbiCkLDKclmt/9E3aN1M/BJO0I9 + V4gBsYvvE3Ax3GJTHpx+3mlpDOvgxtHzjmuNeUSqqMzVV6Bqe/lGX7JVn8oh3VQICMT0R9ucYqjh + 654Grkuj4bnNEwC47UitVX/aC+5tUGRBi4+RSCJ20T0VRmPXrP4NAiYtbIAr/9BDcFd8ZmmyDLsO + jgiqQxUN58W/Qab3J6xmDPls8I0E7uK6QJz8UuqRjSz++Wtsy+rDH1f/AWX88Omvv0x4/wqhMi4a + 3jsRqtfvheDncaA42SubqCuWzQ2cQm9D3Vuu9bN9e+lQq6BCJm7z9FkmOWcZgKmhZ8MB/hg9eKjU + QcmhqbTGfki8Zyi/wijBNmwqf9llXgVfXR1hpN37ntyU2lPmqhdQE0GhnzAIO8g9UpmakV/nA7fd + v+Wye4dYqzrXWL468EDxeYx4/zxJPdHa6xumXDdSX+0NNkcPnoNr/yPLixn51MtJAg9n9ULxXMJo + yorYASuvIHF+G0C0hBLJa/2SjSns8sb9LIOy8gE2kX2NtoH3cpSxa7fUTM2onhqOOrCwyEwESls2 + fPmxgkJc9LjYDBxr0SnhoDJuHKrOxd5Y6tp5wyCmIsVn72Esj1YQAd6gD5J3ugSWml48QIXWoJdI + ORnbpXzEihJcIurZ1asm/Vl+w4xXHtSe3wYTpgU6MGkMDxt0xGA2qjkDcfdlaD6caM0CLnzCGqD5 + x6v1/AyDFv78pc17OuMxPJjQCqotVbfx1p/Pm/MTOsAH6PXdefX2cdgjyOFGwYd3fomWlwBLqGNC + qbHyx1R82zOAtlph175//SVQzjpYr2MfNM+ITJtkgWK0eEhpke4PgDIebkJ/RJvgIRp072TBj1ex + /nX4fpJPt1D+di+LjEW0rRc8AFNGhWpQW56jaBnzVIJxdxtonnUjm+unaQN+XkZsW72WCzv9sICn + YFnUZmzrTx9iI6hz5ZPqX0sHgoKDDJ7rTYI1+Rv5S4G7Fl70ykRL5Hb5OLxcE74+3yNWxVfDyKqP + 4MRvFbTbczljLFI6UKdviG3lpPbbZka2/ON/6XTIjZmv9y00lxD++KWeVv6CR/zG2OK3NmC1gzJI + 8TOm2uGE++HA72L4403z5zc7a3orsQUF6kun2mCfy0GFL8BpKJyrBExq454giZ8t1e6vGxumV2/D + 3Ujwz8/7c5voN+VP3mO9FzD9/CnXmAJ145fh89UEbXgkZ526qcxHs1xOkqL0lk3Knd2BQUFYguXM + eRTrxyyf6oESWDVXAaNtOOQTplIF136HYHECPlnrVTE+34TuazU1JgXtZTAL/oDm7ZXV8+bh82Dv + KFdqfKJTPX3ZzgTN1X5ilznDqsdt8MufqIoX1WfP8oQg93E5bDHNiSiVuwyqrnpGAjKfPvvx4bOo + TJqrvQEYjJcWfqTsgZR2w0ezuqAFrH6PDLWzi5ZhOTmwThtI905E+mWxJkfxXcMlLceG6Cse5RAa + oZnQUyTf6pGPgQ3ObnXBTrPlfRYsUgJ/+YVW2p6xnMJFV1Z+Q69qHti6PjrgZ4UnghRIftsoMpFl + PUXU5TmvH1jVneDXkvb0TrZ9P9MKDmDth9Q6xI+e/PoxaD4LdkLS5tMDRQMQ9csNk+L4zNf1LMFZ + v9YYpZsnm1EvNtBNAcE+5zyiX54BjE+fENE5LT59ZZYov3QY0fATFqufcQKIXRNgK5LVfNK0SlYm + XXGoxeli/uMJ5SO9Woy/2phP19HrIE03KUZnOQZsPpQxKDAcqAky11jMqxeA97goNNi0Yc0c5V0C + FO4PiGST4M+PTDrLXuEmSOHAUE+mcr79yS+sSK7y5bNf3bKBrtQRU+rP0aXKlCQeLGrw+0M+9XQg + kOnfE9q00cBYdV3kX16Cf36E3z+iRCkMQ6QOkr5gPmLXBudKwvhwOrn1r97BvAkqqm3PGlsu6tEG + VvDcYsvOr8Y0G94AkbU9UvOiaWCycA6BaSNzre8gJ4Z81eXqqk5I+iSasdgnN4N7e3+h/juF0Zxq + 9AlaPGU4WXliWfkYcnqh4n2SOvXiJC0BQvisyOZFD0CsNm4LuUfAMNqMedQ5SUX+7F9VbsV+eZcQ + QVPSVKpuo1e0jB9ygyvPYW0zT2zRboaunPEpJed7u+Sf/fk9wbETOPrjmfnMVTZgKW/SDLd1Ph31 + 4QY7N5b++K1h4+5NuDfUAIngHrH57KknmZLQ/fW/vG03KYKDxRKqR9sOTLdrKwOl39toKp17zhT+ + FMDdgvY4ELTK72fh8Ybp5w5+/i6fb9PJ/PEEdewyqOmBDSJQu21INqb58ae1/sHdrT0i1wbqx93h + JUOx94w1L6L+wHPcCegSKcliGGG08snwh6+c8Tjn0zRvOrltTieM5mID2NIpElC7zl3fvzSedDye + oaHDjEaR9ImGWOom+OM3w5T7nF2dbwwvt41Nal44gO0l5Qa5waRFwlB868lhmQi3TbyjPx7eJXV+ + g2LvGDi1nxn47Vd4snYGth+VyxZ5cCFY1wPbzV7uZyZjWVY3jrrWxy2f6jPQf/VE9eTYRuQ+6gHk + 
UnFHuDyv2WL6TxOyzPBxUGA3nyTfqeBZOjlIXvlhebRuAl78jLG+9iOyiSsECLFaGiRekS9NoJ6V + lYfoTw8mfnkMCpWeFv75zXHzMER5zReoHpegXlJ9UKGHxQhr8H6s2XC9Q6AFhUVzwnZ1y21xA3lL + hmjHubJBgpnJEG5UhI1SNGrmGmkGei4v6eHYfI1lvKEz+OBs+dO/J9F8oF9eR6azQlkTmzqnqIFV + IJ6SIF++Kcvktb+RZc3z5vh4U2EUZV8CdoYQPaPdTOAxhi5VvU6PxIt2qH58T91GmvpxC0gGXvPu + gLXLEIBp/X8BsIQNNsutaghSPZPf+mLt/oJsiKXnBMzwpuJffshemcXDoNffaHsKynoqtIsNV/6j + qruJ+nlz6QdIRumBf3zIwDNbtt8iVKl6ODK/PR5CD56c2aJ6rTRgdgVjAtfoluOLchbY3NKN99NL + widxZKz6TWAtlCO1r9GdLTVph9/74r0SE7D6dQleo/5Jdjy3isfdiKGVtgJ20t2V/fI/kDnSlx7I + fjHIffQQNKOHSw9zOvf8Ok+B0bjtEbCYVk/JxjPhEGpnipJAzJdi2d7gyitIMNxt3720W/fLw9Fx + mgx/PL6VBRTOLKOt/xrBn/XMO9kmvP9M8t88Rc5s9fPLc/3pELxt8OwEF8E5EYzh+owhpM3h8su/ + a5Zm1aTc7ZtGDcTyfKgvtIRN5vZY82+zv+aDC3xw05Hu50+Vk5XnwOpvfnmsL+jbuYOy26gIos2h + Zy899eDOPsc05DXb+OM31c+yR1vcbvrWcGAHLtbrgD1i3mom6FUIn12r07MT7gwW7Tch+ERSR2Ze + 1/ut+yYIDldkY/uXj3/i9Cmv8wu6zpcMPrcvElzzRuykjtUvu/fVhh9QdOSx6u/a/85gU5g1vqXK + F4yvtk9gTUxGuP3FBaMxWhxoHnFJD4m/A6TyHiK4VnNF8ems19P2qSB4iKUrmYJ66UehlZ8wDnMJ + VXr4MtgUPCT4DlyHFs+krpvxEJzhfmMZePVL/bt4P2Vo2EaNJuLv+yly/UUui/JEtVTjfLaIpQ7V + qLiiRzVmYAvjpQPzh/9Qg1OVfKKOX0EiFE8avLOEzY98WGAkHF5EgHVdszUfUK5Rma/zjW/OmjTN + IBcQQC1QbiO6y9ntlz9SVXs2NYPXjwnDoB5Qvz2IYOisqVHsq5P/5gk5s/XPGbop+ZAtqKaIJYrm + 7XpwvtBs5Y8p3XCVLLjOm6Zo945mqZ4H+JGSB/aI1oHvj4/X+QHVqlFmv34Eo075Uo+Trv6qZzqU + ooghcShtoxaQmil2c6+wIfeD8af+1Ua28J88P4/eHDRmcqDuWTPzaUiaEhaP5okPker59Oevf/3T + 4rdvNucRgfLiljeyRPKtZ3ewH+BmZk/Cj4dNPnQny4OXz0Gn2pkb8qlRFgLNsN9TVHunaDlu3hAe + wOeC9XL/7hdJucbwOVIfTZvx2f/JF3LB09fnd37vZsdOSfT8QsRrtGHspR8d5ee39mu9z/AdtHCJ + hoR8r5FRf7qdIUP1M+0pystL3XoFViHfJYwiTD/GACjgZUW46qgO3q3B4JWa8JAuKeK0u98TPhl0 + +DROLj1M/dFnD1VuQZVZuzVfMdf8RFfhDb9MQoOyiX7zRtDcdxQHtfXpFyLNNxh1yxMNAq16lklq + rKz9FsdD+fan9flg5QlcujDrJ3+3OIrcGAa2uo9vCM/Q7KCOB0q9Znfwl2qjtXDIbIf6r9bzxcr7 + ivAZZntqWyTyF7g8TnDVc1Jf1Fc+l/edo3TEELF3Zswfb80Ogl5ojvS44Atb9d2EXqjl2BfvYr7y + Qad4V3TAhrHn+kW5XW6QfXyX4tonYOKX7yCrrn6mSJ19g1/1Bf72MyZyEU0zevLgN7+Cv7w67kUP + xp+mw+t8M1rn6SWwueeZukGS5sJ+lHQYADxiNA2HnmHZ9GDuFsmfvObP/G+b2Qph94/ApuMhdKD4 + sDBRIgOwWUqRDk9dJlOXs51oTJLPBFY/SsOv4bLpqE268vfvVMB//uuvv/7P74TBuy3K13owYCzn + 8d//fVTg31mR/ZvnxX9T8c9JBDJkVfn3P//vEMLf3759f8f/O7ZN+Rn+/ucvSZD+nDf4e2zH7PU/ + Lvxrfd5//uu/AAAA//8DAP7X1ErlIAAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7be49adbe81cced1-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Thu, 27 Apr 2023 05:18:27 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '65' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - 42dff468686feebf028abb07e86d8a68 + status: + code: 200 + message: OK +- request: + body: '{"input": ["[{''role'': ''user'', ''content'': ''Determine which next command + to use, and respond using the format specified above:''}, {''role'': ''assistant'', + ''content'': ''{\\n \"thoughts\": {\\n \"text\": \"I will use the + browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html + and find the price of the book.\",\\n \"reasoning\": \"The task requires + me to find the price of the book, and the browse_website command is the best + option to achieve this.\",\\n \"plan\": \"- Use the 
browse_website command + to visit http://books.toscrape.com/catalogue/meditations_33/index.html\\\\n- + Find the price of the book\\\\n- Write the price to a file named browse_website.txt\",\\n \"criticism\": + \"I need to ensure that I am using the correct URL and question to find the + price of the book.\",\\n \"speak\": \"I will use the browse_website command + to visit the website and find the price of the book.\"\\n },\\n \"command\": + {\\n \"name\": \"browse_website\",\\n \"args\": {\\n \"url\": + \"http://books.toscrape.com/catalogue/meditations_33/index.html\",\\n \"question\": + \"What is the price of the book?\"\\n }\\n }\\n}''}, {''role'': ''system'', + ''content'': ''Command browse_website returned: (\\''Answer gathered from website: + The price of the book \"Meditations\" is \u00a325.89. \\\\n \\\\n Links: [\\\\\\''Books + to Scrape (http://books.toscrape.com/index.html)\\\\\\'', \\\\\\''Home (http://books.toscrape.com/index.html)\\\\\\'', + \\\\\\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\\\\\'', + \\\\\\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\\\\\'', + \\\\\\'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\\\\\'']\\'', + )''}]"], + "model": "text-embedding-ada-002", "encoding_format": "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '1982' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1Sayw6zupat+/spllaXsxUggM3ucQt3MIFASKl0BCQhISFcbcClevej5N+qo+qk + AZaIwR7zG2P6v/7x119/d2Vzq+a///XX3+/nNP/9f77XrsVc/P2vv/7jH3/99ddf//X7/V8jb215 + u16fn/o3/Hfz+bne1r//9Rf7P1f+/6B//fV3wZ1HLE2MRdm3Nr7AxzQICv1PMVJLllgYjfSJNwDU + ca/ukAYvzGKR5F19yi4NXAEeR8Egca3EIx4XZZO4Q5uGK74JzeKvJ146iUMTwjf/KtfqHuTQa0cm + lDq+KEmWdpNk+OGJhNPdSqb9PnlJZ4tmJLhY0Nt6zCggvA4E6WvsjWvcRTUs5dOdnPj7ZRx0R4hg + I5gcFncpO04fdYPwWu5b/GgXFUzylk7QAKkb7h1nAWtZUyxrhydC4RaXdO5ymslnpTWI3xNPX+ND + MMFwRi7SKk8b2d02LCDW+g0ZLfsuxz1DJwic7o0yUvqURPxNE9PjaUJ2e8QeFeVDAc6CqYWyl3HN + yi0nEwDHI1g8Sw99jZxakw4eqxBFNKZk8ahnQ52+VxToV0wH9tNG8g7DhCh7Z/C2Yk80YJTNKdzs + XKGriC4T2NPlQlCh+IAW0XWBRpmIyGiXsNk+ND9B9v0w0F2Je70fLkohL9HkI//MvJr5s88WuAv7 + D1Jo1JQb+8ERnKTUIfrsMvrWxmIt2ccpIAU7Mc3ksmIFBW3PEE+jL7A8DYOHpnZ5Ed/VcDI8uCGE + TwFh5MHw0KwK4Tf4wUqMUj8PRnqOIYbJClzi8nI9LqexPUHl/dkT57JO48KK0hNOLiNg9sQKCeW0 + KIcjrT1yvhqKvr8GDAscoa1CoJHQ49jNLcB3PPFujaGz5NoxwLO2Bpn36UGX+l3FcCghQPe1u42L + yaUKjEAqE8SxT7CeXkD4rR9UueZbJ68pZ8H9mT6RQ9q+WbuLqIF8r2Bk3EhRTt/vD9tbuyDV32MP + q4OrQcfxPWJ5nq9P0unKQnxJTWQNQkf7jS6TfBYMjRx1/aCPh7t2ksU4qTF/ZJ8llWHPAgbvV2RM + Y0S5TJ8q8Fu//S49jcvdbDY55F4JMtHWgmW8hiyY9ExFfrPE+njHz0n2rIeNHyG3jaPFLE/oRIOI + u86PkmW3PRepP0nPUPzMGsBnoA8ys7+luDPKCryz5tACccvW8AELoq/eFE0ysa+QmM1jpViwHu3u + ZZ8ikjivolnLh1jA3LpoyD26K6XVsXuCOa5t5J5V2ZtbAeUwkN83Eqympi/ztRQAcYozptQXywlw + WwY+B+4Ucnm4gq0s1Fg8PWCJCqI03jLcOFcajJji/jxKyWy7xIWxV91J4FQW2Hgz4yFQtwQz1YNr + lhlyGgxwdAsF+06bZbe+TRDHYoAyj+sT7OqGKaODi9HhZWT64sWvE/Tl3UYOt8NV74UgO0mVL0nh + ytSqR6+kzCSsShdiMNVeX56mHcLC9Lmwv7awmd3+3gF+ur2w7OSrtz3YqYa6gepw1AjWl48qMRA/ + MML0XX2SVjfNFrqCb4ecXxnlZr7MG/zqB1HuoUTXRmpP8P05BeHwsPVxXWgzyPppytDN3HfJ/JSL + DFJGPeD7uvj64D7foZRcdi3SxGHXTO1LMeCG4gaZ18FsqKFceHhwxTfxXglfrmz8iGTsbGPYKU/e + o4CJXNlnhABp5sWiM2ZPEWR2rYZMneX0qXlyJ5AUkoqF17lOlgQXmfRmHncSsgGj0/isMrIrhDbR + HsxeX6q+r0UF5y+Emt27oZPp1XBnPiTiBKBL6ImTbrA1gy6UPE8As+/qLdg+vkV813x77/tujWXL + 
zXOSS3bbrLthG/aGYX6QW8FdiaPHUQGHsiiRrvd5Sfcyo0nrbrJRvFvWZkbW1EHoREdyF+d+JBGf + KTA6vSLM5bE9LtOu16SvHpDQ4GZ9yZ02kuYiL4jFkEe5PA9aLVv9kYb9V3/IiV9sOHqHFVkXSxv3 + RXTd4LRVe6L5TuFt03Q24F2aBnI+7Th9AtklhH77OCDkHh+AKvWtFg8DKTHPZcSjQp3HoGgjE2la + uuqr5V9v0FmCE46pFCSb0XUbvNLVJYcVcuOWVaUrnR7HkrhBqtL91D4zOD2FilzP/gVsY/iKwaXN + d3jrwWnEOVQzqHL6BYUwjEu2cGgGv/UTOUjWx1ULSQbzvYaRtqes/rrY8gBfOz5EarYt+rzzjjWM + 9TZAjpxcyuVi7SGMThkXFoeIp9/12EGhrB7EjmoL0EcYQVksvC0EdUGa7ZmkJnysg4fMr54tM9id + YL3djJCiOxxpdtxp4ORqe6KLyNFpOzxrkAxVhSnnbcnsD89cLgOVkqMG9s3ECX4oebvogdBZksZB + OoAYnHfMCZn3a+ktZz+O4XnvfEL46XNKif56wbsED+HKN/o4d2e5A6kYI2QvgTFu/lOK4cWKnBC+ + P3JDn/4wACIqOtJGGYFFFaYX3NPtQgzVNxvWoXQD4iDo6MRUZ4+OlsiDcNoRpMzumqxviRbyZtk8 + uq9TmtCXtynyo1c/yJTPTbms8FJB84kjpPdp6K0z77YwHlKJoHGuEgwMH8L7xue4Tm8dXc1QX+A9 + 0Z7IYjvg4eS2mfC0PnnkP7a5wRcQTXCxUz2EFOuAFfLFhkKSseSQnc2SFfknA+96OBHzTF7liuPd + Iuns0w+/9aAh8b7eoLid1u//PyaLum8FGMEoR36hKB416qWTRSnQSchEQYODOozhei8rPDjBKdk+ + BA9AElmCVBeOlKg7pEB9BB3eueYK5rAXNxiEKEAa4flmPh74DD4caUX6Gp7AhnawktJjayEjFjN9 + fHmSAr56gkr3AUYiX20MTU0oiLp0arO1a8XA970ziKPy12Tl1ssGHCIL/74fX2EBeuMyEV9um3FL + mS6CS/3UkLleu5JeEnWTl91eDdM87prNtC4nGJeKhKpgf0vol1cBmx77EPyezz3WpwzU5BqKVzx4 + Mx+GDOQn5BCknYzkq+9PyVo7GO43zkz2nvhu4Xc9YHbo03JbRlrD734glu6e6aZZUJK6w1QS9UaN + L+jsczivl4a4+lXz6B2pL1kNjjsS5lcGbH1UtOB59FukGWWTbKwd2DCtx4k4ZKgTakLZht5ON5Gp + w2ezGPxhgJ/Qr4hTkqO+DMwtFpvdziKu5+V0q/bQhI8Hn4as8OD0RZFiQSoP3oOYnqjo63q81eK1 + OlXkFGguWFBsL1Dh9nH4Nq9euSLrNcAqjXVijjdU0rnhTzBMlytxmuVdTpcu22D1+uhECa6nhsiX + uwanUXFIANSXt16VB4SB+cqQ+WjmkgZ55sJJ23tYIkqjbylTR5CGHYtXF45gejduDtlZ8vHK3ydA + wr05yNmS9chFiZksz9fYwVR4I6I8ToG3Xh/2Ahwn9DB3uqiUXUtpgOyBhCG11gPdtklp5U4OL19e + kxKacQ4LFd6YkWbnNaVU0zMouvwWip7oA955lU/Y8pFIvOph02W4yS741Rft5nz0TuDmGAQc8yEm + vTfN9tufjmreSODdGm/96g308eVMAmm7llS/7l6/+k+89liAdWKRD1LHZ8j9za86vaHNh75J0Jf3 + /WTJ3ZER5dp9EHeSjh49J1IB6ngIv/qKxm2a7qZ0uWGeBP1hppS7oT+8jcGzVsfF2keFLA6STg6U + icAUEdDCaUMVQWvHNK/+0dswZ8Y3CaBAxq0UnwVoLVtHZVFmzZc/MpEczfq7vz3An+ShBUoW7fF6 + o0bJwZ5I4KFpd2SKiKNbk0aVvOw+LVKXiwQmJz5H0LsHEWaE1Pvy4VqANsO3sMvOibfMVRZKP/6T + jykeiabZrrQh3UZa6HHJXO1eIXw4wooyetdHrLq1AIO0B8TdVljSmJsLWPf7Gq8xvpTb8f7EIDR9 + jVRIFBuyahTKP97TOascp/pdRdJ2Lo5ISU53sGziuwJyXuXIe1YuXftzoMHDMJfEcKEwruymFZLQ + uANR7tdHQ81aUcDjcvX/8PtW7VkTLgrKUPAJn+UmN8AF9uV1JyjWSUmJhiWoHsoZi319bjbOcXng + 42HD4FkNdPpowgDqIcuQ3aYG2A8H/gbXj/XGu5mG449H5VvjlcjvqUrZTjE6+VYyNn5Zlt0sV6vK + QZGuEQlD9g2oKAc5PILAQHFmVPo65BqG+/JdEFtwNZ0ez1ILd3tDJdl3vdLgUvLA0uIRBVQhYMlH + dwM/fcoeH0Pnz8XEQ3bu1m89acvxeN7aP35McS5XSvtOf0FUfUqkj0ncTP56YiHNDs+wuV/ODY7h + mf/VA2Tn10OyKrSr4He+3/m7lPK17sPtE1p//NqaaXgB8u5aI/16dyinLToPDY8R0ZfvKQ0lIYOf + LHPJ4WVPYKpftxc4HYmCwtfTAVyl5ZFYhNnzW99f43u42IUUyljBjL2548xMNQOeq1YhaxrlZimG + 2AZubZZ4eQcU0CtOO4n3yyTE+ZWh9HB3MxAZW4HFfb7XqXcxF6ix5UB++3+TjViDS1eckH55+iU3 + p+MGujTiQ+loAo9a8sZDxmJ7vEiM5H39UC091s5DKW/oOvYMZ4EvO4uQsbUbnT6qBH/vGwXHyztZ + lcQZfnyP0EN2E3Kf3xrIreEaAiZSwAYf+w2WnL8hD4dC8kdfriefkEhiCp2uGmB+eQo61B5KpvDo + mXDpXBHz+xNtZm6pTPj1d0iLjp9mJSfgwi5o55CXE7+hKWvY8iXjX5hj1Hpccw3wQKeflbhpaI/L + fl+2cN8wDQkWhh0XXlQF2ThlGuZO6ej92U8Kf7JJxFkY0McsmfBAzRfR5G3W6WBCGw7NpSeqkAnl + ZPLFBL3bjsMQjI9yCbbiCdksNJAqnSD46oEPv/lLKLN7kMwzr7WyjPYsMa9D29BAlAbYXfMGy9Im + J1MauBKoBxMQW3lmOtUefQQ1tj+gMO5TMKV+4AM5v+XE1xyrIfp13/7hafQRpmQR990AX5FyCeUr + 6hvy6VcBfN7Cm6jt8gDT8+A+gXaIWCwiWW84LtoYeNsIH0K5bZrVdw5/eCpcDvXZ+/rZSH6jvCfm + /pQ0Y7FKEbRi40GydO712joFLTi44B2y0sfy+FhOa1hgN8cUHF/JAjkVy3f9ZuERx0TfRtQzUHQ/ + Z1yrnyjZYqYMwSfYqXh93e9gkxvqwvK1O5MfP27AMCCgWSYTQ2qWZPgQ3MFh8jOUUmYBS6f4HbCd + 1EC4fb29jbqr/dufSO+P53KdOUOAPz4+MvVD33KonuDOpCnmBtfWKasWWMz2ck0sNXL/7X+bPN6F + 
Tzui5doKqIBfvx3mOH6WG6rLE3x2YxJy2us1buZncWU1FRqUC+7TW1MzqiBfSQ5xrayjn/FTQDhr + akwcUL+aLRfOA3jzfYAOouyAL98PYNqCCjPKmiWb/4AvePeyCzlMB7dcz2+FkU+9MyO7s8VxgadB + gh4SInJQWsVb19QOYdSACD8JnzWLfr66QHubwy//SlgnOUEZr/FAVFNIvS2DIgMOSe0gNZZrD3+i + oIb1VhmkApXUbPtuVSB+3FhkndUlob09VL/3SUz5rJd8WTgRvODkiKxaXLwpnSYNZt1QY1BlAp1+ + +QJ4vBwSzyxIvn5gAwFHfPTlQ7od666AX7+HDlod0PWwRBL8+WcrsZ2Re8Cqg1yANeSAKgDrTeYk + SdSYmqjw9gbrnPU3GHnyFN5+eaq4rztQsc4NKV4p6lssX58/v0hs930Y9+0w1NBWZBjucwFQqt7f + EgDO8Cb+uLYlvYZsDTvOmJCO7lVD8YtV4CctCS67sqFk1u9PMGt6TNQbmcfucB5CGLTuSkyVjxoq + X84aNCVXDoFg5h798gAM2V7447eXdH7gX55CNC09evSYLzEUPMnEsxXfKJXA3YQBBz/h56xGyR+9 + ye+Wi9evP1hPLypBu3clzLR1WE7ykREAUJcE99rhpa8r+8jg6ZGUKMynw7g/x3ACX37Deyt+l+vP + v1S+IJG8iETQK7SuJDaFPclP6R4Q8fqKIbHJOVy8UvR+ecgf/TZC7l7ye7vw4VzYu5CQNdHpph4G + GLSFH3K1h0rxZFwN+OUDZHV8MLIR5gQ4FU2CgnO0NOQemCH45hnI6PgabFzfdxAIa/nNf12PF9qD + Bge/qDFcJ7v5+XFYXhcDGTxWyhUnXAUjX7DQrRgGb5mhrEk0lBBy79qxJKvKxGCKLy0Klfk+zvqx + K4B0FP2Qqd5+ua1Om8NiFmTkHs1S3yRzgXJ1UDoUUMMcx9Y6Y/Dj/1++s9bXyYa7vakif/ey9eU4 + oxdomUElaj7HydTd5wXiY3AJ15PwTGhnfkz41fNwtRmrXHON8tLdvTAkNMSGbtg/nODej4pwZCKF + ss93n4G7dEXI1uqiWX/5vPjYAyyXJ6tcd27CA+0Qs+FuhI23VI47QcoXIfFOnabTbLvy8JDEGPnc + Q2iWJAA1NIBFQv5dsfosBWMBKoMGyNpW1JCvH4aPHmrI+hpdeluOGvzyWbi97Frf2iPl4TfPQKqu + v70Jnp4SPCQRRo4FSbLqq/OEdwkPv/mXm9Yfc8nbFwjZ8VPXp2ArarA7x17IfNfftFvfBhyg5ofs + wXl5W73Pn5B/WdW3npNk0+ytle9uyeC2fLEJbT0nl77zCfuvP/7lb1AsLgFSn9PobdIBRFBh/IJY + z/1pXIb7xsIsUmWkyVugb6ert4H7aCOEQrdrJjv1Yhi+M4FohSqO/J1LXflb/9A3T/cox7Qd7LiT + RXz56YPlx/sldwuI8sh1MKJ89H/5Vvi5MQH46oEmNUpaYWlbq5J+eVf61gvk92T0iPI8ChAuS0R0 + EMzj179G0rpPFaSVO0bHPToJELHHF3El3kpaqN06GPmSFYKoksGEGaGAogZrFIyW4339HQ/V943B + 0qgfxoVhLyYckylF5XCxS37zHhtMRFphYQi98etfbj8/Qexv3rjko7bJTr60SNuxdkL5hy0Aam5P + /M7DIyW2rIVwqeMPUaznlmzPjfHhOIDPn+81BsXl+dMLZE2ETSZ130pQ0lqfuNIWNWzRVxW8+nWL + 0tojJZV29gYtt8hDuTFZujWnYgFSnGsoXWE6Lg9zYaF2qBHROI6MZJfjQmIQkxFV24XJdO0WSc5y + Z8D8mRgJZWvAgrvEHMJD4fPJVAyFC9ngXaOw7ht9fZdxLBXX/klOFTc1aycWNqS8S0Pp6B7Bkk4v + DQTpCLCchmkyt5cGQnqrBgwDwHnUuewKePB4BUtn9erxmgUF0Lb6jfz8/XaSny3weHAh6tB3zfr1 + i1B/Rev3+/T68jKOAvj6V6SGawm22UOdFLG5SfTjG4Oerv4E21uakkNYdHT51l+gV22H5VGPyyXx + ZRZWB60jdpX5+uCr6fJnP/JpvyVbdU5jGA+f9Msjg745B8GEcNmir5/vKK396wukvcJiobPFpjvO + qJUOzzf+5W06jdJ9CO1HrZM79668LYZ3/peHEke89eUc2mksb9mdR3YRXehCQc1LvJG0IVP3ur5x + 3fjNi6oXMVXpU2JGazBkbNNAqqr2lJCXnsOXVXLh7g4wXTTxGUOOZVoUPOyopGOXd/BlXThi2NI1 + mWp9v/3ya2IxozsuIWmfcHe3r99+mdKQ6YW/+axVhGwt6sny5VNo5+IdfefvUaYsckhnOiAfiZdx + e8WvG1Rl3gyZtsbJqkfDBEs5u+OtNY2Rd94XH4I+UL79gqzE0ZVkoJ9ShFSTvZVktET23/29uTuC + 5U9/jUwmpp/Co2t6iBcYS+yCF3pIEoLyMYRLpPsEpcKu3Ih71uA3b8KrtsNle7mwPhSfS0b0+xUn + y601BPhdv0jrd2NJ3/2LhU/RdTDJNbvchE4pZDl6meh4Yr2Rrvra/d43uoDaGNd6p3Q/fUTmeQya + vXxVMDznsY+p8yrGtQGTAqRCE4mLoV9y3/7Dn/6EP85knG5vFP76b+HXT5QT5JxJ9s5WgxyHFmDT + zfAF1PkcEcuK38mYr28f/PFTdpSUv/7uH3/8J39Eoqb88nNiknPm0V0VGVBw5ZEoH0/45oG7AnBc + qBL3m4etn2wPod3nKTIOatYs986OIehvJdKKqE42aNrZn/7nxdkfx/3c8BnkXtcFWZ30KueyBhhm + +11NlHeQgK1bn9pv//34jy6Dcpx+vBkytyaivGLPTzj39x2xBjb2aB4bJlQ59UKcuTvotPsEEvz7 + dyrgv//x11//+Tth0HbX2/t7MGC+rfM//+eowD+La/FPluX/Sfg/JxHwVNS3v//170MIf/dj1/bz + /5271+0z/f2vvwRZ+HPe4O+5m4v3/7rxj+/z/vsf/w8AAP//AwD4jVCt5SAAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7be49ae11e2dced1-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Thu, 27 Apr 2023 05:18:28 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + 
openai-processing-ms: + - '64' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - 73e42f26050de7075473636113591bef + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are browse_website-GPT, an AI designed to use the browse_website command to + visit http://books.toscrape.com/catalogue/meditations_33/index.html, answer + the question ''What is the price of the book?'' and write the price to a file + named \"browse_website.txt\", and use the task_complete command to complete + the task.\nYour decisions must always be made independently without seeking + user assistance. Play to your strengths as an LLM and pursue simple strategies + with no legal complications.\n\nGOALS:\n\n1. Use the browse_website command + to visit http://books.toscrape.com/catalogue/meditations_33/index.html and answer + the question ''What is the price of the book?''\n2. Write the price of the book + to a file named \"browse_website.txt\".\n3. Use the task_complete command to + complete the task.\n4. Do not use any other commands.\n\n\nConstraints:\n1. + ~4000 word limit for short term memory. Your short term memory is short, so + immediately save important information to files.\n2. If you are unsure how you + previously did something or want to recall past events, thinking about similar + events will help you remember.\n3. No user assistance\n4. Exclusively use the + commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: + Append to file, args: \"filename\": \"\", \"text\": \"\"\n2. + delete_file: Delete file, args: \"filename\": \"\"\n3. read_file: + Read file, args: \"filename\": \"\"\n4. search_files: Search Files, + args: \"directory\": \"\"\n5. write_to_file: Write to file, args: + \"filename\": \"\", \"text\": \"\"\n6. browse_website: Browse + Website, args: \"url\": \"\", \"question\": \"\"\n7. + delete_agent: Delete GPT Agent, args: \"key\": \"\"\n8. get_hyperlinks: + Get text summary, args: \"url\": \"\"\n9. get_text_summary: Get text summary, + args: \"url\": \"\", \"question\": \"\"\n10. list_agents: List + GPT Agents, args: () -> str\n11. message_agent: Message GPT Agent, args: \"key\": + \"\", \"message\": \"\"\n12. start_agent: Start GPT Agent, args: + \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n13. + Task Complete (Shutdown): \"task_complete\", args: \"reason\": \"\"\n\nResources:\n1. + Internet access for searches and information gathering.\n2. Long Term memory + management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File + output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your + actions to ensure you are performing to the best of your abilities.\n2. Constructively + self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions + and strategies to refine your approach.\n4. Every command has a cost, so be + smart and efficient. Aim to complete tasks in the least number of steps.\n5. 
+ Write all code to a file.\n\nYou should only respond in JSON format as described + below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": + \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- + long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": + \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": + \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} + \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", + "content": "The current time and date is Tue Jan 01 00:00:00 2000"}, {"role": + "system", "content": "This reminds you of these events from your past:\n[''Assistant + Reply: {\\n \"thoughts\": {\\n \"text\": \"I will use the browse_website + command to visit http://books.toscrape.com/catalogue/meditations_33/index.html + and find the price of the book.\",\\n \"reasoning\": \"The task requires + me to find the price of the book, and the browse_website command is the best + option to achieve this.\",\\n \"plan\": \"- Use the browse_website command + to visit http://books.toscrape.com/catalogue/meditations_33/index.html\\\\n- + Find the price of the book\\\\n- Write the price to a file named browse_website.txt\",\\n \"criticism\": + \"I need to ensure that I am using the correct URL and question to find the + price of the book.\",\\n \"speak\": \"I will use the browse_website command + to visit the website and find the price of the book.\"\\n },\\n \"command\": + {\\n \"name\": \"browse_website\",\\n \"args\": {\\n \"url\": + \"http://books.toscrape.com/catalogue/meditations_33/index.html\",\\n \"question\": + \"What is the price of the book?\"\\n }\\n }\\n} \\nResult: Command + browse_website returned: (\\''Answer gathered from website: The price of the + book \"Meditations\" is \u00a325.89. 
\\\\n \\\\n Links: [\\\\\\''Books to Scrape + (http://books.toscrape.com/index.html)\\\\\\'', \\\\\\''Home (http://books.toscrape.com/index.html)\\\\\\'', + \\\\\\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\\\\\'', + \\\\\\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\\\\\'', + \\\\\\'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\\\\\'']\\'', + ) + \\nHuman Feedback: '']\n\n"}, {"role": "user", "content": "Determine which + next command to use, and respond using the format specified above:"}, {"role": + "assistant", "content": "{\n \"thoughts\": {\n \"text\": \"I will + use the browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html + and find the price of the book.\",\n \"reasoning\": \"The task requires + me to find the price of the book, and the browse_website command is the best + option to achieve this.\",\n \"plan\": \"- Use the browse_website command + to visit http://books.toscrape.com/catalogue/meditations_33/index.html\\n- Find + the price of the book\\n- Write the price to a file named browse_website.txt\",\n \"criticism\": + \"I need to ensure that I am using the correct URL and question to find the + price of the book.\",\n \"speak\": \"I will use the browse_website command + to visit the website and find the price of the book.\"\n },\n \"command\": + {\n \"name\": \"browse_website\",\n \"args\": {\n \"url\": + \"http://books.toscrape.com/catalogue/meditations_33/index.html\",\n \"question\": + \"What is the price of the book?\"\n }\n }\n}"}, {"role": "system", + "content": "Command browse_website returned: (''Answer gathered from website: + The price of the book \"Meditations\" is \u00a325.89. 
\\n \\n Links: [\\''Books + to Scrape (http://books.toscrape.com/index.html)\\'', \\''Home (http://books.toscrape.com/index.html)\\'', + \\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\'', + \\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\'', + \\'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\'']'', + )"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}], "temperature": 0, "max_tokens": 2233}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '7471' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA7xTzW7bMAy+7ykEnpUgcZem8W0bsKEtemsPw1wEsszYWmzRE+kmQ5Cn2ZvsyQbZ + SbumG9BTb5Iofj/Upx24AlKwlRHbtPVovqArrPKr4madvP/SXv/8+vCx/fyhu7v+xBPQQPl3tHLo + GFtq2hrFkQcNNqARLCCdnl8ks3kyTxINDRVYQwplK6Oz8WwkXchpNDmbTEFDx6ZESHfQBmpaWQqt + 0TOk0/lspuEJ/LGQTKcahMTUT3cX5+d7DbYiZ5Eh/baDBvkIHKhGSMEwOxbjJcokL+ijhV3mlVIq + A6moKyvhDFJ1ODwUcCvxMINLtXF1rTpGJRWqTXCCS6HlytWoLDWN8YUSGgr9lTY4i4pW/SYnWsey + UX2DNw0WKg+0YVxuMGcnOJatjDPQf/MHNEze+XIQcVuhEsNrFfBH5wKyavDVpFr1Ev8r3vHQhCyK + 2jj2vtdWDh8iuONTdW1t/CBspO7edjA2OHHWcXN8HY/Y06DnLkQeI+pSmUZ17HzZU1kKAa30RJFn + mAdu5RXy4jr2nergFs36jRMy8O/1Mb0H7BfhjRCDtGdaThyYUJ7mfigcxzRAvFTyDOf0u9z+016W + ZXCDhRMT48VxG2P3+1cyG18sHq319g4uM7+HvYaV846r5fAfIAUWakGD8wVuIZ3s7/fv/gAAAP// + AwC5hinnygQAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7be49b389e94ced1-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Thu, 27 Apr 2023 05:19:03 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '21222' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-reset-requests: + - 17ms + x-request-id: + - dd3e3770c095157c99a11ad526d4ca9c + status: + code: 200 + message: OK +- request: + body: '{"input": ["Assistant Reply: { \"thoughts\": { \"text\": \"I + will use the write_to_file command to write the price of the book to a file + named browse_website.txt.\", \"reasoning\": \"The task requires me to + write the price of the book to a file, and the write_to_file command is the + best option to achieve this.\", \"plan\": \"- Use the write_to_file + command to write the price of the book to a file named browse_website.txt.\", \"criticism\": + \"I need to ensure that I am using the correct filename and text to write the + price of the book to the file.\", \"speak\": \"I will use the write_to_file + command to write the price of the book to a file named browse_website.txt.\" }, \"command\": + { \"name\": \"write_to_file\", \"args\": { \"filename\": + \"browse_website.txt\", \"text\": \"The price of the book \\\"Meditations\\\" + is \u00a325.89.\" } } } Result: Command write_to_file returned: + File written to successfully. 
Human Feedback: "], "model": "text-embedding-ada-002", + "encoding_format": "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '1110' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SaXQ+6Spfl759PcXJumScCIlWcO95FwCoERZxMJoCKgIi8VAHV6e8+wX+nJ33j + BZIIZe21f2vt+o9//fXX321WPfLx73/++vtdDuPf/2u9dk/H9O9//vrf//rrr7/++o/f5/+489Fk + j/u9/BS/239flp/7Y/77n7/4/77y/2/656+/haghBHDuh80zuQ6gPiGVqtfQ8tpjrwTwxkyVJl7m + gS0HHA5qrIyp9hwKVitvo4VTxBcEdvEzY35ax/LSFg2ZY1ut+MdWgHB67rZ475UvwAa+1MHN9RbE + Xb4kG/YnIsp30fSovcsoYNOn8+Hkmzq9N4caDJHjmDD39yqSXeeUTSnrXLk8h1d6tJdbttjAXqBX + yxG16pb144BoAVMUqNSwh4hNnvlRIdywJ/Z2mRoNt8RalE8e7rHGBI5N3KSUcLffXAifpq+eTZ/S + h6h3Mpwr/s6b5RIF8CMnDfa45F59C+PuQ4/PELbQXIE54UII26N4wH63KbP56XBQPtf4gLbnQxLN + B09OIfeov1QvE8+YhpvOASU1D9Q9Ll02pFqSgKg2KgRKlmdkEipT2XuahgDMxYzJt3MO99/2jX0D + x9Gy337PAF0HifoXdI3+rI/rQ4zP+oi9WTeBBKbkkOIT6pRs4ue7DbtS85Con8ZoKu97DoiWO6Hv + 3dIMJuwGH/rvTYC4z6RG20t+sEG9bTSqqZdLtfA7UZaG4BLQ/OpyPd1GfgxT6bnFx7ebMjo0QIT6 + 43jC+A0SsFwFIMJW90yMq80pmzb+jYOFcuzIbnxo/fKA8hkeHfVKc9kzvC1822eAP/IDzXeRMya/ + 8BIIb7cHeaO5YsuS7xIQz85C9/aOA+yamhNQh/uXehCp1RbjlocNLk1sNkw3FuVwkMHFDgL8e96F + E3YS7CTpRI9JZfbT5dU6cKtJDCd+WIOhOEotNJTdQq1LvrBZ3R0gOFy5Du1229KgXTu5MHslFppP + 93s/s1bggH3gTARjKzBm5LcTVN6hi70XOTJ2MNRa4WLRp3jSe2OOhgAqU3XJsLZ3Lv3SXxYJ2jC7 + Y4zUoBLkQG6h4V8mMrm6B7YRLy8wi5GJjco5AtEMIhMmyXvCatT6BuN93ofosD+jzXLeRfPXeaZg + ZM8ztcNzw2a9UgaQGb2ERGp/WO+D8qEcuHNPBI+r+vm5rQJoOKqIPX65R5OmvRxFsbsGKUUWZZOy + zOJmEqMd9aifA5biQwiJLW2xrihDNJ/iOIaNfFSxmT0nj9SHsFUi0e6oBauMDUF99UGgf1LCifDl + 0YPd6LBtXxZW2/SYTcPN5eB2MkK8D48BmLXtsYX39srRwy1rDWoj04a00wF1AjeqvnJph7J9IiWS + eV2u5ld51cEc1BX2yJ5Ec9VVD1iUWoez7n7JZtLiBuqIxARe0DaiNJtiUBjAo6jCpF8MnusgPPUa + WlT1wKZQPolgyx4C9jD1wSjwHxFmE1BJSFszIr96C4nTUPclGEAcFc2BIsedsTG+94bgSl0Kh2ph + hBT2p2pTU+FBqKoOYdbHipYq75rf/qCY3xmMTz5SCN58bOJjFZv9xI2BrTzrNMDaxB+MoWZuA+kk + 6xTp7NwPpvcYwBJMBxzbh9ybkLt14VjBGw6Erqmmzy4lUNO4Mz0I6jdj4WWCStMRa91vQk+VE5fC + df9R421eslmy1EXuvP6Aj8Ll3c97481BgTt9sV18EZs/96qF+3HzpAcZBtUYbPMHnPq9RPW99qxm + et62sHFaDqOxjvs5fHQTpJ0KqAOMppq/78mUM+EoU41oDljsL3NhkmsRaYXLxRuufiSBYnYimu0R + MJb99nWGclY/sO98XsZ4rGIHPqd7ix2z7Vmft98YtDfVxdnuGxszBM0ZbuTth6Lp8PLm1s0GuNzj + N5K8rAdDWm1aWMxuRK3wG/YzZywiyPeLjS1m+T17kjlXmnnzIJvP181404sHkMvHJ+Ky96VnarjJ + wZCDgepk7nsyGbczzAQsU7t5i8bM3kkJJnPcUlPYlICNyzjAbVFe6aGwt95iKYYtb+i3od68uRtz + ibENPffzJe/N42gsNLUJBHmfIUFPjGwWeMqDMvmc0MDgppq/zjWFc/XSaOo6p2jKlSuRtdMdI4FZ + Qz++TkYCHSN3sRZFaibU16gA33boSZfgql+kYWvD5QB0fLy3kTF+twaBaz1iz7R4Y8znzIF5wyC2 + siT1hm1zqmHCbWZyCwa1Eh1h1GV0JRI9wE6PmJ55EoTg8SZZrubRHBp2Aq3tTcKH0sU940FbQOcr + etgfl7JnDEkldPI9R001K1Y9dBr4vPmYHs+7vqfjKzThzeIl+lzsql8e1HTBU6IuIkuG1/7X28Cb + 7DP2XC3L5lM4ySCF6ZNMxXSO5lNtFtAajlsi1tGeLWl25MHj0x2xP4Avm+7eEYLGt194v9H7aNk5 + WQF+9aqVowjm+q0NypA0C4F7TmGzozoldDpaYl2OWET7/aJDA88RAqueL9dTEirZ1cqwM9/tSCyp + XEB+6gKsfp8Z6IU2qKGhgAWB6GaB5R64MfR0wSeLey/ZzIlmIwe8l5NdMBTVxMIsgDTyBKwu6rGi + 1kJbIGrjEauWFxnzgTNdkJRSSg85arOZLkG8C1rTJMorHLxlVA4uPGthT63x8M5GKDkyKN5ciDXU + KdEw33YxMF58hiM+iI1lIZYKA20wqPduYD9c/UyCjdJW9FBJL2MON60MzkBpqDEln2juX5sHOH/a + AZ/J5QCGbyjzAJlLSo8oDyOa7ysEb4Dy+IjyJWKXXLPhLHI11nrU9vOAPiV0BN1HfMC/siny3iWM + aq3C8Yso2aCed7Yc5ZFFvdhvDKacxBTemK1iO2iEqCWGSH79gEynmlXkZE46INJjR7XoFXvsW/SD + PG6cG9bVrV6NOEk7uOo3NZzUqiZP02qFvD4M74syYazFgQk9eOip9hBa72slC1Ju5zyn/iluK+ay + hw60Wlqwn7FXvxzNQwtO+3qDta87GYt6T2XgP/yFWgUn97RrJQfmIdzTU7JTGVOnwIXkUmQYc+OO + 
DdX7nIJINDtyu8zU+FMfHyvT0I/32I3VInSr+ITV1LKiDhPlDH1pR1b9tKvtEt4h7LpgxM5FehvU + aBcbwkHQUIUQqMaGe03wrAU9tevXvWLZ0efAxh+dHy8w3ng0HdyiOybCU2fGcNviBljgXNDgOJhg + IEuvgkn2X0S6TRGYd7M0ye1Nd6lpFTQafnzIeZ1Hvf5MPNJ9XVFyufK08qBQkR/fXXRxpEe7Nnre + jioH3uXMInM+0570l0VW9HQmRLaDsmLXjwzBbve6UpxfpH64LGABhfdtMUqLj7c8v2Wq+K8uxki0 + nGx+PXY1XC57A7GX3/QL1xoEjPV2ovjUfaul0pC/OzQ1wp7fvNkkRYcOvif5hXFbGNkyolyEZAwl + 7PJ6Wi3c7hpDSXqkSJRiLWOfstVBqYo8tbpaZiS7tgnkmsseScXxaCxxXSDoPQyf2kKlGYKxSW1g + yr1ApMPT7OdzTnK4E9Ad2x3GxhwGsIanR26h3XfYZvTORwiGme/QmAk0mr/fSymNtTBhJElOtTwz + 3genfbOh1qydvZlsZRskfpmgenycPDYLQQo//azTWD8do3n0XBte4MGnjuRcso7fcfJvvxDhYCxR + 3zLWwbg79VSrM6ear1M9wd7cfKm+8stCckOHWCx0fHq1bbRcjdL8038AuO57cQtfNXx1j4XwsDuB + LUNSAayTtsVWtfPAPAyqr6j2vsWez2f9/IzfDtC/txDjypEq6m/2MnQ+0QbJ8esEZoH/8OCxSTpq + W6CJ5vT9DWCuZgDbmOZZ4V5F7vd+OAxON2O0dlMMNlzkUguLSzacecmGsH4/0dvjjJ49orsNkutw + o3FY5v3UBWKhOOb1hYAoCqBLocFBYstbvM+lDaBe+JTkpJpnfLhljjeQ6IWUHw/uNr3kLY4RBdAh + noW2welbzcPg+HB2RAu9q53HfnwJ85DbY80UPxnzCmSDlaeoJrYbb1CTuISY2U96GB+var4+bw7Q + +U9ExOJLGKufyhncKTYR4JJ7v5w9x5FXfqHm+Ji9SfDnQpmeYIu91tsbfG3GgfLQAh/vV7/YnWk/ + wDm5qFRX0ab/ugfRhF/FeGPzfnyxjsbfEAK1Nmki+iObVz2DPt/w1M+njTFupo4H0uRLCITl6LFK + BQXMla9Ok/28MCYkxIagsDfU/cyesYh410C6t+/UjIStsYivow+vc5xgJ7RlNo6JFMLG6Tgkm75r + dB9cLnBvD3t83x35rB26uAZczPt0j6otm22rCyBVj1dqxHTMlh9PrP8v2mqWwwSrO4qQx5saTR4X + GfMitQ/g7GeTmqcdBXNRZzEgxZBTPc8rMG8R5sCkdCbWii+Xtc1djGHlHiICL6rWf+8PA8n2IJXY + +lQ1Y/tvkSurnyasWxowvTx4BvoU3H76Es37JQ0gj+0jGcWUN9gm1XmlUW85VYcQGn3s7FwIddfG + 6PJFGe9pWgN/vCDhKO1HY+4lkE40pT+94bsZdjBT7jn+5Q3zRXJF2ToiDZtvPjFGYxOaUO3rEGNp + Xqq6ePohWPMLIuz3HRgB5R9wPjobfIV72WOOU8Ywyk8WEazbLZo/fqLDoZoYTU7aEA354y7Jly8i + 2FXLk8G64d7A1e+RzXs0PLb/tg+YhcsLG88Hz1Z/5yr81Ab0+AoHY773uwQa12pG59MOg/laig08 + 0HFPDV4vMuaNfKiYvSNT/H3PgKSHxVfMBSTUGN8fj42v1IY/P64RrQXz9HRSqJzED7WkdXUkeQOh + /uoE6q08NFVX4sOXac9UpcdjxuqxL+H9ORzWPEgDQuIffKC2DkUiFzo9E9qgUYZcvSBBkHw2410w + AXuQSzSf8Ictzf5xBitvksLd1dWAdn0O2Tv8oqnagei7SEX+hzcxGAxG0oPsw5fQMsJvWMKIU99S + SNNSRUsGTmCyLAjBt4CI5nf69ZbiQ0wAtHtGCNo31SQmVQonqhZI0oc6mtf6kk8xqqjq7dNsMvdz + oKTpJaFuvg+zdq3v3/rStX4A/eU3K0+v/kbzhJk8CVjrix7QPjZoxC+LwlJls/oxq2JNe1jA6T0+ + KJquMZjjepEAS+0Wq3E2ZDMTaxfmz7Kifu3r0fSsGxVw5a6m+6rfsM7UB2l3qJwex81VzbalfQ/g + CV9yXMZilQ3Z5qxC/W0m+ILFMJo7YZ/A0vIIdpL+ndGROiGg70GjrpK+PfZ4eQGcz7s7dTYLMcY7 + jXkoi6eUbNRS64W1PiF5vRk91taQMVfzAwhAsEG8VdBssiREwKI7b3wf+6HvNuWXg2v+RJH5aqI5 + 32gxsOpnSpGkYTDHy8v81TfVT84MpjydHjA0rzoS843AfnoKg1hXqD7XPevAizXw88mdH5/0DDUx + gu3bKrG58uuykKMOPz3TqX91uWruYquBN5m/Y/SZimibvLkWisc0JPwt2/YkrHYiWOufKLsjHw3L + VVFl4yVmRDlcDTZrrEVQf28othz/Vo233tDBmhdSrLReNtHTIP3JZ7YDuGWz2mcLyGLfpJYdhMbE + TUIJ7Re+r3kJYMsXphOQm+WJj6kQMjHtphiOyZKQ+vp5RVOTBjJ8T9KL7lX7AfigfvoyYMYdCXfb + Y5NelBy8Xa6MGn7IZwvXegQm6jaiWPtMxribpwl+kssTH+6RyOi4jAQKPv0gvmG6NweNyiu2ZFeo + evlNxXhQFBD1brbyRNsvKfQgHMEg4tVPV7QNlQJaVW1Q/9kH0XDokSR/0/0evVpL6wdx9yphMQ6U + mm9e8mbeDBOgeyHDXuKM/ThzOoLzZwuQ9HiCjA56Ev74mea1X2bTNGrBbs33iGgfcoP1t2ML+v7a + kV0aqkCYyZUAk9559Hb8XUUu28n98R09mmJoTA73EEEbTC3W89xgpAymAZ7e9IE1lH/6lU9E2X8r + AfbLx9Sv/R+BTRYPVAeu703JRwr+6O/Pr3W5GeRKOp5EInJh289HbimAZp/Q2q8wEKDkSMDeCCp2 + Vn5kPugecPUfdOVDT97sjzV0jIeL95995X2FwA+gSeo7/unTePZSHnTK0fsv/vXe6QPYm62K4J67 + g2X1PzC6ch71QXCq1n4I5d/7WplbR2w3DAm0i7ONT8s0ZdOaZ0AFyQ4RH+BurPlYDm3eA6hZ13Ox + AZpkURwWer0CPaIPXcuVUPm+yMoLxrqfoNJ53wN51y2r1jylA18EbHp0it5gsz2YcM378HEprWiK + n5UIHVX90CC13tm89keIxHOONSV7RvSUpQTarSIjZn3e2ZARJ4XoyaV0n1rPah493YaSdwzJLkdt + tOzjHsFj44pkXnl1POfkAQUnHYnoQBhRNi6xnJxD8Td/AIvQNBCu/oQMkKuysW4/LjyFTKV78bWr + mKWHPuzGIaDej8dk+4ngIUuPWNesJJv50KlBtPT7X/4ZCZLlLNA8RzwpBrPJhvLWFXI8UoSCkh8A + 
/ULHhEiMczLdFd5g22IpoOtzmGLvGGTrfEOFhyvssBd9QDU6p7Pz41Oy/RpdP/zq97O0B3zQ3yhj + +vFOgI6GGJ+Hm16Jh8kLoRokIZKxqDPRzk0O9oQdKQbyrlq+MJxg7nzv2PXB3DP7eR1gr3aUVJHF + Z/PeGDnIN5cG/dZvcs+3Eug3+Up2q99nv/welfKOfCPh0wt2LMgweg4t9e0djaajD3KQXMmNmh9p + 6lnTapMSVY6PzY8UVNPX8ju5SpYt9buNnvVGK5t/6pFf82B2Tf0FZp5ukCXyu2y6LGyCq7/Bh922 + 9OY2+8RAbC1ENf6hezPMhwKOx+ZKxG8SR3Me3NUfb5JNl7vVkkypDbuwCPAjewv9VL3zFNx79kK1 + i0UwihTZoB4S9Q/vb4vsVUKefBzEsbD1BsU8luBxv1Q/vQXj0+E4WIfWlYBchVG97l9wbZwr1o+B + YbDIRg8QaXuLWi+BRRN3AgQo7qZAi5iZPYs+ZgNpGDzWPH7w5mI5lIBvrg31LV5l24Ph1ODnR1b9 + 9db8lkDTXHaIxzaupshRbZCXdx1HfnjOxtiAE5RC+4bRpnf6Gu1vBMbCfaBaG20j+gY3Agd2ItSW + 4lc27JGWwpkZkEygGQF1nC4Gz/B6QPBYP6KFiyUTvsrhQsNGrz16GsgZtkf+QKajLFcDnRoCPTug + 2FBatZ+zizgBi9kcxSs/8nQiBIpJExH5cEr6rVcgEwBuKNH2CvRsW6PvAK2jr2GPcB9j9VMhPH+6 + gYA1r/tCQGK4P8kONr+NnonvqyoqU9YoZGsq16prSnmA+uxNhBvjgk2lVMeQfT8V2sQvfZ0XnaDy + yYM92XBP0/vxD7x29xN1g7dbbSXSErjmYTTejg9jzX+JvOZpq/95GtPuAQrghBsL2/ppzPo17wXp + CT2p/SL3iAWk66ARdyl1Purotb/5FPKL7eoHDz176NpDuZvlATvpZ6imRRR52IVlgPiuD9hcGHcE + yXd7ocatPFad+LIQfKLdm2yHserHIycXPz2mGKlTxVApLb9+R6pbOfZrfuZDl0ojxWv+SZZ8l4J1 + XkrV5mEB/rN3uD/zTAzkW7XcyGWCad3qOAieb6PLqzSEYfl8oWojE0BbxlooZ80Du9fErYbIG8uf + v0FbzluqGUqqpBxUdKTO0Bx7YVe79U7ThR4f1/nqsmAPQYOkW4ofhmss5fnh/PIdVKzzqF//B9z8 + ctBsivtIONxtFa75N9roj5Y1X/egQ8sSROobWMxGYXZTeJHo908eMUW3bIBpuBfJzTtf2JpnmPA3 + /9gXZ8Pjs3nkwfVjEMQ/wimbYDTpiqMCDh+W49cbnVPuyPJzl+BTnTl9K0Vaq6z++sfTBh8UuwWu + +R21Nwctmz9k+6d/ow2GW8ASmxehnLgAca2AvSmUb+LPnyDWLTaY1nmVAkTOwvrtW/Vs1TM5aG3z + x1tsSk2Bh8r3bSKR7O9gBl4gK1aS6tjY7QlY5M/lDPX4wf/mb4DFQJd+/Y56LzKyNR9w/+h5H90s + xj5loUL64GX849HFMbIQHhUQoR1NW8aOojrA7d1mdM2bM6FUWAoltD77gVn9/DaN5jePowjtm36R + 9d0Cnd3FpgfRurL54bIQ2Pd5h6SV50diFbHiv9qYnrfA9ZZdfBj+8J2lpOdq5QWi/P07FfCf//rr + r//zO2HQtPfHez0YMD7m8d//fVTg3+k9/TfPi/+m4p+TCGRIi8ff//zXIYS/v33bfMf/O7b14zP8 + /c9f4nb757zB32M7pu//8cW/1t/7z3/9PwAAAP//AwBjM9wq5SAAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7be49bdebeebced1-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Thu, 27 Apr 2023 05:19:08 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '86' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - 42e0557e070e90e99b138d49ebb07b3b + status: + code: 200 + message: OK +- request: + body: '{"input": ["[{''role'': ''user'', ''content'': ''Determine which next command + to use, and respond using the format specified above:''}, {''role'': ''assistant'', + ''content'': ''{\\n \"thoughts\": {\\n \"text\": \"I will use the + browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html + and find the price of the book.\",\\n \"reasoning\": \"The task requires + me to find the price of the book, and the browse_website command is the best + option to achieve this.\",\\n \"plan\": \"- Use the browse_website command + to visit http://books.toscrape.com/catalogue/meditations_33/index.html\\\\n- + Find the price of the book\\\\n- Write the price to a file named browse_website.txt\",\\n \"criticism\": + \"I need to ensure that I am using the correct URL and question to find the + price of the book.\",\\n \"speak\": \"I will use the browse_website command + to visit 
the website and find the price of the book.\"\\n },\\n \"command\": + {\\n \"name\": \"browse_website\",\\n \"args\": {\\n \"url\": + \"http://books.toscrape.com/catalogue/meditations_33/index.html\",\\n \"question\": + \"What is the price of the book?\"\\n }\\n }\\n}''}, {''role'': ''system'', + ''content'': ''Command browse_website returned: (\\''Answer gathered from website: + The price of the book \"Meditations\" is \u00a325.89. \\\\n \\\\n Links: [\\\\\\''Books + to Scrape (http://books.toscrape.com/index.html)\\\\\\'', \\\\\\''Home (http://books.toscrape.com/index.html)\\\\\\'', + \\\\\\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\\\\\'', + \\\\\\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\\\\\'', + \\\\\\'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\\\\\'']\\'', + )''}, + {''role'': ''user'', ''content'': ''Determine which next command to use, and + respond using the format specified above:''}, {''role'': ''assistant'', ''content'': + ''{\\n \"thoughts\": {\\n \"text\": \"I will use the write_to_file + command to write the price of the book to a file named browse_website.txt.\",\\n \"reasoning\": + \"The task requires me to write the price of the book to a file, and the write_to_file + command is the best option to achieve this.\",\\n \"plan\": \"- Use the + write_to_file command to write the price of the book to a file named browse_website.txt.\",\\n \"criticism\": + \"I need to ensure that I am using the correct filename and text to write the + price of the book to the file.\",\\n \"speak\": \"I will use the write_to_file + command to write the price of the book to a file named browse_website.txt.\"\\n },\\n \"command\": + {\\n \"name\": \"write_to_file\",\\n \"args\": {\\n \"filename\": + \"browse_website.txt\",\\n \"text\": \"The price of the book \\\\\"Meditations\\\\\" + is \u00a325.89.\"\\n }\\n }\\n}''}, {''role'': ''system'', ''content'': + ''Command write_to_file returned: File written to successfully.''}]"], "model": + "text-embedding-ada-002", "encoding_format": "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '3189' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SaWw+yzJat79ev+PLdulfkoNRk3SEgyqkKBRU7nR1RRECQUxVQnf7vO/qu9E7f + mKBEcVLM8Ywx67/+8ddff3+SIr0Pf//rr7/feT/8/X++7z1uw+3vf/31H//466+//vqv3+v/OjOt + kvTxyOvsd/rvw7x+pNPf//pL+J93/v9J3+/e0SMjyssw5Av9nFfBVMZEH7w2mQepX8FcE51O3k1F + 32MFar83WGAUJW/rLSr/HB/Rpen6hbzQlA6PPcG+1od8qYx7JKQrjbi7zcflcr424To+GNFexivh + l/4QA+6dE3Ona9v1pLJyxfZ3LcNxrSF5+SoAiIYfZBfxa8JP9z1AfS/f7BwJVkc3IwNkmmlOFSet + Eta2VxPy4XPFtJ0z90N3yxXKB7PFMpXLbr60BVUX3c4lmxtd8SGOXq0qYH9me93bFBOpwYOVQ07E + 2ApaJz+jnYZq3U+IeZdi3tdz8wF6NI/kvg+jZBDaT4wmV+vJTrpPyUxt8/z7PWKPN2TwHsYU1g/B + Y+Zt2BjzSlz36DTmJR2NW2PMUySt4Hn1z8QS2OzWw+sUqWP3BLqeli7izWWi8KJdjKd9cS2GlWtH + qJ68O7OkSEd8PvclZL3gk9Q6PPhkHU0TiuBtkLt6aEK2PS4quN6vNdFL3ePcGD8fCHfajfge091J + 2+MMFv55R+cm3XDR09VUqV+jy6I6JMWI2zKGXe3FLNWLUzJmw2YBUzBumDY1hI9Gd6nAV92IYE8O + ijleHnVQrMwhiWanLsfqQ4d4dcmYMddHl7d9pkGbBBE7FdKUcLwsFjAad59yV4CQv1OzRJtlidmZ + yR3nNX1oKJENgruwPndSlK4dxY6rkDltZ4fCzds5ay5ab7I5y1HB/VZtYfRtmyS389sdu4B/QFy+ + B0ZS10z4JfQ9iLexTKKkeIblxo0/aDRSn+isvoY8aLQW5KpXiX8aDd5fLlcBJCevifOOA3fGFycH + 712KzDs8Duhb3xL0z9Ejm89476pS0UDVP4HHHrEuJ59VFqxUwe4Ttr/GOZqMwQ8Q7ciT7Jv+ykWr + 
2USwteQ7ztRr5s7+9ZCq23FnY9RbUzK/TroHJ19zCfmcDU4vm5emRp+FiRX5xov3Eh8taGTPZ2aa + 2MUorY096lbalewVViSDLJi6OjUI0zU9GAnNtERZP/JOwnXSGaHQhXqpXs6JRCWIUt63p0O+rB/7 + iCWfeM852rgWLJv8QYw36w1+30cp1Mo6pusFlVz6WngrMLxzyCx91jkfH9cYKn5e01V+NTt+8N85 + kh+6hpXEFNDIh6BCo9+vyFEXajQfctGE5Bq4bHcXNy67LsZZ7abhzHbmueqmtLdm6LV6heXntePc + 5pcUHp/EwW9Znwv+aWRnPW3FHYnnISnG8FFSlUVPjxiLix6OLv/s0WYdukyXm5fR9kfNgtwuj2Rv + TzjhW23tIB0UnelrJBTTYWW2EI7BDsNxXXF2O8RYMZZCx7SlwNxpGyUa2LFW0lLLFGPKTl0KB+qr + eLESCl7e+V1Ci/E44bV83Lrj4FgSaEfBYVYYT+F4vn5GuFm+gdd3q3SnUjLP6seKN+QGiVgMnaos + IBXpgs6sXoftJazuysupD4TM52XBDuijoYOWKsSUFNOYxhEd4TLyDTN35JNMq9tSgMXmMBIsbcVk + no9sAc6Wf7B0UxdGk/VZD/iSP4mvBuCW3/oozeM20dXtcue8qLUzKkllMNd2lvzPehyuC4PZGluG + k/C53lHfqjohG/Fd8Pv+fodusuvv+muK8bF3MbDVK6RLc3d1R7I8UiCREDJPPdgJH1bHSn3v9gVL + d8s2nFlml+vPVjoS66YuXFrTRYAsmt/IRjblrk+3cFQOlofJk89RMV0DdwTFec7Ma6BPagsSD/C9 + N2l+2adoPEhnQLJojGz7OlrFTGeirWW0yvHH9Ytk8q5zrxrJXOI5i8qkXxavD2zvmw3R8xVz54m6 + FnS5IDBiLQuXE2SZAKMWsYulTGHnDWYF90oLyN4ytmiqTV9BcZMuKIonIZkU3a+Q7TdXQu4VLzhj + qQSSvVjh6fHYhaP0lBZwRMVEVfddJhMPiz26uYsLs/ZIKWa0PJ1B1OMrO/jSjPptN+WwbzYFlcMm + 6UZtNWGIC9YSR9uQTjrKxQ3pvS8QmxhKMuRLzVKFq9MT3DZl2BtmJ4HHxIx449UI570jaZAbB0K0 + NDqiOW9WGaSX2KWNOAjh1JV0RLdMe7It3YxoENosVlWSlnhezzGfcrxoURL3d+IV52s3vvODp0ay + VOGFdVDR7AjHFh5y2DPTC5yQdx/dQb9+1dy4U7RROu3VTFQTFvhy5g45THsoh9OOaOfViw/5x5HQ + t1/hKR+zbuxxT5Fr35546QxuMQqfuYfcfylYokfkfuJm8oDEEyI4r6EbdkR0gFfYwOsHXvPpeduc + f88LVtL5kYwxWnwggblgxmXYh8J8Ds9oaScLsjP9FtF+hSQ0mHNKdmdCwzkdjZv6oPuUBLmehPNK + nHo1PJ18on/Xz3Q93nrA4+wRU5n2yZcfMNgDttnmlCwL2oSqCVU66FhyeVlMT03cw70tauIqh7Gj + 0rs5wjI6r8hXz/l0qzfar5/Q17UyXOl6mO6g5guV7etCd0X/FEnAqHRmfj3XyYhOGK+zGF9wjSrK + B/lhzTBWizfRJO6H4zu/evBp0z3xDtHZZV/9VXfjqWJb8Mbk/ZhRBQ1iJZ3Ys+XTYyxb+PbfXz2L + gcqZqTZ3UWdWoOrJaDxW+k9vSPSWbwWNVdeC4xRaxMztozub2zFAcXNfEOtCTl2vXB6Ook17lVz3 + U+3OZ2ZXkODLgQqjK3LWaJDCvdIDyk1LMGYUvypoHvHE9g/LDadw/3DQYe3ITBMvcTKednYM7/fj + THzLZsl4OemKivWzQrfxoeRce9QRRHmzIZGklF+9uqcoiiJMRf7Yoh+/qOfVzsCSocTd6DhuDpqk + HNgmtD1j6mXLQWFnvehSiXeh0OOSQkWEjPn5p0Hz9VUDYIEJbJ/wA5+Xaz1FX36lgpgSY87q3QdQ + 3mfMbTrdHWV/ytTFKGV/eFZsG69F3/VLcOYNxQgNZJAbIWH62S75pO+zu7oGZ/njiYI/I6KjrXUY + 6KeIFwXzTVtRTgrJmNtymXN8bo7Q5YcthttJ5lO6DTWFV57BHrOlF1xA3RmRYZmx7ff/z4uqM8FP + pReejFzuGNoYFlyPtGObdesks2LpHqTWoDLvuLYQrfv6DM9PV7NNdRoSyrU8V7mQScyYUymZ3DQo + 4XHNP7g43og7V3snh3S5bOhK917FvPc9QFG/7imkHriD5jzuUO8WjE7qC3ezk1ip2oR7i5y3/myw + 7VEqQTCWO7pq+isaC7IWlGsnrij0eMknw7rmoBZ5j5VVeQi5ss0W6ktTTuR7f/gkHCIPrGIzEytf + bjn/bG9n9NP7Lx91wjtzjrAVj5TtFWaE/66Phh8YPdCBs3DujwiEomPmNFYFS8jmCObVitl3fSGu + PVgEJ8XPvnr4TMZ5mWoQJm3APCRGLlfD/I4kWL/Yg10Eg3+a5R6JG6llXqLsix//ottp+2Z6EubJ + tMhpui7nfMAwtBGazHYRrSWZAfOJMxmzy2CGy52usb6eY8RwPGL10AYntsnrErHIbirAYlgw+zB/ + 0CdXbzGoTaCx80P8JKNQFTO6EbT8o9dzqewXClhzTRwanF3pkKsW+vEIKi4Wko3x06I4Cq8Ex3XG + 5+niC/C7P+lOwh0P5z5Am9sipku5ifkUPo83dNt2Ee6WZYN4vXsd0e5+SJk/W3o3f27NCn35h4Rb + Ieuykzmn0C6jhByv8pD04ZN/0D5It8x0rb6bqJxZqp8KL2bJ4R3N9osf0bQNY7YZFMWgXRXd1SF/ + 5cQ1nAL102O//3O9fs+bHy/jn5/Ea2N8G4MtIAWksBGZmRTLhJ+1QVL+6C3ergxu8+cdwXt7I5s1 + r4wRbxoKu6POCen9j/tHb/W9O1DRvUhoFmVKQR2mC3Ne7a7g50tnIRFZZyqCIRSt83jc0BBhRoy1 + abvihs8WHFrJpCPflojvFDNQ5WDZEC/ytFBORFFTm/4QUrTb7I2Z5kEG52YOGGkV2s153ZQwhI1G + bnGz5RzetYL60/Rg/kL0OF/tHguYVnxktv7oOJfzyUINzgnZHbYjmtTajFC8WhP29GyNy93BU0C/ + TwWzwpdvcByvMChteWC3FF0NbhcZQEGRSPSnUxWDuzVjYORQUb7tSt7npYXh9DYQMeSJFfRG8gyS + MJOpLIWnYva68wrYVkN46Ui7ZAwUEJBXOSX56a3sTnkK6pxgYqE0C8fXZjyDawNm2uHuIXbShRF9 + /ShxmGghgVRWhrgrrQhZdlnRLPHNRFn1cvC4TzqXEYQtdJQrm2ysg2WI+lTsYZa6kM7hTUpGeU4j + tEZ1gNUuY5z/vp+UQUmFXWwa8t7revjyBtsGccx5HEhHgLO3Ijv/sk8kMZgX6J4FQFfErDtelNMN + 
+IK/KDjy0h13YAN6B3JGPJvZ4Ugi2wTnreyItrIqTt/38wqCywhkz/aUz8wSb5Bf0BYjT3dcNjyt + G6rHz0D0+Vl0M83jHHaXeUd0UszGjJaPCGz43Fj6flZ87l+LSnGHtPr6Mc/teOMuQBjmHH/60jQ4 + 23cWaIq4Ju7Nb8L5xxdCeI0xMpyCz9G2BJUzZUmh2n7cyX5ILfIugc7Cu2y5844kyh89MLecJ/Nh + PUdqc9lROpFjjSZ1YVfQJoLBYvH9RqMmtUfwoX6zrx6H88lINSjYo2SOfl90fe89RlDet54WWMyT + +SjeU0gehOPVVnFcfn2XJbrsigiLabEymD4sZ5ArFzOjqcZwDIVmAeWgSLSXunMxh+OiQo2/yJh1 + AAjZ3eliqMd2wPL8HozKNF4R5Jf1lunmbhOO6a2tkF3fRaZ365GP+zXSf36E+K2pcErnnYZ0f71h + hitA0mq2gJFqs4CK2NqGMtsXFthxGdKPf06MWV7tP3B5KU9aowqjeYEOsSqESUxXF/C7+e19YsDj + 6LHIC9qkby+fu7IsJpu4Q24loqCkK5TF3oUZuK+K6WwesOrTYKZUXbzCn/9SzXw60cnILwVXwzZF + t5uwptM3X5gY1zF8+Kb+Xu89GUs3BNg0ksI2m92n6KVAu6tt279JsBFXyag93xrMta+T+hmM7qS9 + blT5Pg9EU4MymZvBDuAmeiYJxbNWTL/n69SdF/R939vG5Nh5AJa66Kj6zTOoYjkeLO3rAsN4NRLB + 5Zmj1lWY0+LNepcLlapBZ+YbLLBN7P7RL7DGmqRJtEvGrEWa8qDUZ8ZV2/A52qEVuHDaM2M06nB+ + e9kNtev3jmyEzOOD5NQ3VBYfl/3yj6mUvAhOiTIw/XjMuv5yOUjq16+SHZnlZPz6ReDvNqLysX11 + 7HPbtwCjHlEk7kJjVj+zDqdXqxP3cCpC8RVGmeqxw8T0YCCI4Q+l6JenbWWXhrm5XQWgdHuNHUTY + cFHff+5KxaM1MQ9FUfDSPgfwwG/KdF3YIfGpqXvYetVISBaBO6DlogIWrh/MWWhWOLlb8wbkFOvs + pti6IXMizygangnZX2OdC4MSH2E9rSI8Q1sYU3QTchjw48Z+fk/Izl7/qxfZOHsnGe/Ozfn5I+YZ + CXVngyoLcGt2xC9+Zm6/A3vxxx86+NQW480jDqTVcWB+D1tXVpX0m3dOJsGNz8LpnkgRwqiqvv6t + /ZOXIe/ch8TuRRyOWSiOcMbiGf/yyx4dl0f0VGWPbc6yUDS27Ejw5VW2v5Kd8dMnNAnBlRg6nJNp + yNczNMgWiXk6eO4olpEE7ZMAI+10RFy2ewVliDKqOHVujOl9ytVvfXEV58xoPmydgjDDnkpf/ZgV + Ax2Rpshr2sbNm39u3m4PTxojPAufJBynh+bAHK8IOW6I7HLKhR5up92besKuQ+NFu1dwufdrdgIv + SHrGu7syXMFg0XUfJP1ZL3K4BRXG0h4pX797xcrLdTU8y83LlWqj/PaDaMQspH747ddn9KSLFqMp + wGjdVmoPx3wREkOL3U74dIIA26xdEStQ85AmfljBZyscifZcF90krZoVtGsjpvDNS+RMtyt02zYR + My5SaYw2x3hdFq2LR6MoEbXesgfm9bMjySf+oPm02VlI0UbpTx5KhVMgAepV90++Upblx/nDK+3m + YXZcPGYfSA/bPcHX6sZ/xyrV282f/L9rlV2FrvL5xfTskRn8tVRK0EexI7/8lQMMjqKHVs0sJneo + f23GSIV3Y2Pxm7/w0dxiCJBsYPTM8u6bb0Rrt7YLRm5RZnBNKEuwbUenyywCY5zPyRldatMk+4fV + hVxQ56P64sKC2d88/Ov/bkoGxxSP33rxV3m5/3iS6fmKGFw3VetPHmuUDgrHRFZ6eI9xQKxH3xeD + tHop6JefGJK34GwlBndIjuoeq3picaF0kwX6+jG8er5u4bQUfQmd3u+WbO6bOemqRGxB260D4le+ + VEy2wBXwjdOaWZuzj+bfE7jcpmey5VHrDufOBHT1X1sshfGU/OEp07znZHeOHXcuPCsF+xgG5Ouv + wh9Pq4tNOGJ0fwKf73uRKmi/NrG0eTbFuD2+I+Ct3REj3xTdnPhJCdDFj68fvKA5WAQeLFvLI/v8 + ahbf/n2E+l698SI72WG/spMWjNPjxazXTXRFseCVajn3kK4muun4Kg09OO0CmW26Kf/ytOOBYuUO + c5xaN758aYJu4i1dNum1+64vAQn5QaFQPE8J1/xRQOMuEbEkubvvfMHKADZGSFfRtunmtlUitH5I + HhaWaMvn62F9h+aypUxfPZ9do9+G1c9fY+VNonCwuvgI782VEHxP3y59DkIKJZCGfrZK6/bK2JzB + r5eMuBuKkfit3595i3G8MZeqB2GG477es+1bI8b886+lLTC8eOqM9wvFBDQ1lyduN1WWMI+FOfBN + 5X/nFbMxB7AJ1NI+aERv0hcfPst6AdkBRVg2z1YxWEfTgsUrJuwaPEkht6d2hNP9fifxjxfF4Erh + 0hk9Fup795uHOcoOCfg3r0FjZS1MZa2vQ7rKgw+iD6E5omZEDtstNguDfXlZ/fofCtPravT1/PpA + 2OYzXn71cBZxqcMkSHviP7yIz3Mn7JUAiQZLfzwxd7cVPBzrQteybScTWkoVWE4a4qNkjuGXP2Ng + i6CmBfpwxFUlxb98Ba/XZdxJSRkLqNUuCfO+vDsts2emaH5bMnu8JcY4bQYdCjd5YMmFLJxYGq6g + Rs/dv+eH0vt1VKZFYVIlRWujJcGjB9/+ACNT9gonVYsr9NVjvMq62J2OxRiohNMvr40no/duIcBp + uRCwhFItlPJmlUM/Oh+24WdijP27WcDX7zDSnB58YncTYGIWpxcf9wUrAsNCX39FTDFlxre/Kj89 + YCdlTFwOb6b8eIVtovu761PjfVZXDjeJxd9ROD+EJkDffAjzO3XCMUlpDssoWjH7gdeIUclt0fEx + D8Tb2VLBQneVQeHIFAuCfjP4N6+Bw/OSEBdJm2QshfMHLspDZYb2qsOvXjlgTp38nX/dk3kgkwOp + 9zxQfpcrox+UIFC3ovAg21sf8uk3H7jdpPWPTxJOjm4MbwHVZPflnV/eCujCK3xBrC3+5D3xChGa + DbPbyY0m3OE7n2XGAlvd9MsLV9etTJyaL90af6oeLr50obPke+4vn0Bl7Oa4ijqHj8zJRliMwcSI + uVu7w+bpzlCf5QPThQ9K5lJWdMBIk2lNUGj0weq9gt5SEBW2wIzpKIo97PniQdz7E9B8Pqkx+uod + lvh91X35NVUnQdiTe4UL1Nvc8kAtsp5csaklY3CY9+oVMotE3/4h5Orthr48Qb/3J+FlnqdoKHYH + 5n79lfDZBfpvvkan3TXhZRaKM3znc8SJM8edB7/11LfSIuLLQeZOJFiAMvtJxTwhqAu2LJQVAuW4 + 
JPY1OyTjY29gCC7EJw4kYjc9DdOEo1za7P7lYa709x5VsUoZRjEtpkYPFDis+4r99P7PPDIJcxkv + f+u/Ypcc8jgViFGGF2POg/IMK9VdMHPxDJB02hAL/erh+7KM2GnrUhAwmZmdx3JCTeN1Rr95o33Z + 5gZ/y8EIjYx9rPCChOJh5bV/+DXt0QXxZ7TTofEhY+YWiPEnb/n7tyvgv//x11//+dthUH0e6fu7 + MWBIp+Gf/7NV4J+3x+2fgiD9k0l/diLQ/palf//r35sQ/m66T9UM/3f4lGnd//2vvxCgP/sN/h4+ + w+39vz74x/f3/vsf/w8AAP//AwBCQNw65SAAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7be49be3dc34ced1-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Thu, 27 Apr 2023 05:19:09 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '76' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - 006edc24ffa2e03bf3534ac502e0e6e7 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are browse_website-GPT, an AI designed to use the browse_website command to + visit http://books.toscrape.com/catalogue/meditations_33/index.html, answer + the question ''What is the price of the book?'' and write the price to a file + named \"browse_website.txt\", and use the task_complete command to complete + the task.\nYour decisions must always be made independently without seeking + user assistance. Play to your strengths as an LLM and pursue simple strategies + with no legal complications.\n\nGOALS:\n\n1. Use the browse_website command + to visit http://books.toscrape.com/catalogue/meditations_33/index.html and answer + the question ''What is the price of the book?''\n2. Write the price of the book + to a file named \"browse_website.txt\".\n3. Use the task_complete command to + complete the task.\n4. Do not use any other commands.\n\n\nConstraints:\n1. + ~4000 word limit for short term memory. Your short term memory is short, so + immediately save important information to files.\n2. If you are unsure how you + previously did something or want to recall past events, thinking about similar + events will help you remember.\n3. No user assistance\n4. Exclusively use the + commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: + Append to file, args: \"filename\": \"\", \"text\": \"\"\n2. + delete_file: Delete file, args: \"filename\": \"\"\n3. read_file: + Read file, args: \"filename\": \"\"\n4. search_files: Search Files, + args: \"directory\": \"\"\n5. write_to_file: Write to file, args: + \"filename\": \"\", \"text\": \"\"\n6. browse_website: Browse + Website, args: \"url\": \"\", \"question\": \"\"\n7. + delete_agent: Delete GPT Agent, args: \"key\": \"\"\n8. get_hyperlinks: + Get text summary, args: \"url\": \"\"\n9. get_text_summary: Get text summary, + args: \"url\": \"\", \"question\": \"\"\n10. list_agents: List + GPT Agents, args: () -> str\n11. message_agent: Message GPT Agent, args: \"key\": + \"\", \"message\": \"\"\n12. start_agent: Start GPT Agent, args: + \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n13. + Task Complete (Shutdown): \"task_complete\", args: \"reason\": \"\"\n\nResources:\n1. + Internet access for searches and information gathering.\n2. Long Term memory + management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. 
File + output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your + actions to ensure you are performing to the best of your abilities.\n2. Constructively + self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions + and strategies to refine your approach.\n4. Every command has a cost, so be + smart and efficient. Aim to complete tasks in the least number of steps.\n5. + Write all code to a file.\n\nYou should only respond in JSON format as described + below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": + \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- + long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": + \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": + \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} + \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", + "content": "The current time and date is Tue Jan 01 00:00:00 2000"}, {"role": + "system", "content": "This reminds you of these events from your past:\n[''Assistant + Reply: {\\n \"thoughts\": {\\n \"text\": \"I will use the browse_website + command to visit http://books.toscrape.com/catalogue/meditations_33/index.html + and find the price of the book.\",\\n \"reasoning\": \"The task requires + me to find the price of the book, and the browse_website command is the best + option to achieve this.\",\\n \"plan\": \"- Use the browse_website command + to visit http://books.toscrape.com/catalogue/meditations_33/index.html\\\\n- + Find the price of the book\\\\n- Write the price to a file named browse_website.txt\",\\n \"criticism\": + \"I need to ensure that I am using the correct URL and question to find the + price of the book.\",\\n \"speak\": \"I will use the browse_website command + to visit the website and find the price of the book.\"\\n },\\n \"command\": + {\\n \"name\": \"browse_website\",\\n \"args\": {\\n \"url\": + \"http://books.toscrape.com/catalogue/meditations_33/index.html\",\\n \"question\": + \"What is the price of the book?\"\\n }\\n }\\n} \\nResult: Command + browse_website returned: (\\''Answer gathered from website: The price of the + book \"Meditations\" is \u00a325.89. 
\\\\n \\\\n Links: [\\\\\\''Books to Scrape + (http://books.toscrape.com/index.html)\\\\\\'', \\\\\\''Home (http://books.toscrape.com/index.html)\\\\\\'', + \\\\\\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\\\\\'', + \\\\\\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\\\\\'', + \\\\\\'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\\\\\'']\\'', + ) + \\nHuman Feedback: '', ''Assistant Reply: {\\n \"thoughts\": {\\n \"text\": + \"I will use the write_to_file command to write the price of the book to a file + named browse_website.txt.\",\\n \"reasoning\": \"The task requires me + to write the price of the book to a file, and the write_to_file command is the + best option to achieve this.\",\\n \"plan\": \"- Use the write_to_file + command to write the price of the book to a file named browse_website.txt.\",\\n \"criticism\": + \"I need to ensure that I am using the correct filename and text to write the + price of the book to the file.\",\\n \"speak\": \"I will use the write_to_file + command to write the price of the book to a file named browse_website.txt.\"\\n },\\n \"command\": + {\\n \"name\": \"write_to_file\",\\n \"args\": {\\n \"filename\": + \"browse_website.txt\",\\n \"text\": \"The price of the book \\\\\"Meditations\\\\\" + is \u00a325.89.\"\\n }\\n }\\n} \\nResult: Command write_to_file returned: + File written to successfully. \\nHuman Feedback: '']\n\n"}, {"role": "user", + "content": "Determine which next command to use, and respond using the format + specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I will use the browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html + and find the price of the book.\",\n \"reasoning\": \"The task requires + me to find the price of the book, and the browse_website command is the best + option to achieve this.\",\n \"plan\": \"- Use the browse_website command + to visit http://books.toscrape.com/catalogue/meditations_33/index.html\\n- Find + the price of the book\\n- Write the price to a file named browse_website.txt\",\n \"criticism\": + \"I need to ensure that I am using the correct URL and question to find the + price of the book.\",\n \"speak\": \"I will use the browse_website command + to visit the website and find the price of the book.\"\n },\n \"command\": + {\n \"name\": \"browse_website\",\n \"args\": {\n \"url\": + \"http://books.toscrape.com/catalogue/meditations_33/index.html\",\n \"question\": + \"What is the price of the book?\"\n }\n }\n}"}, {"role": "system", + "content": "Command browse_website returned: (''Answer gathered from website: + The price of the book \"Meditations\" is \u00a325.89. 
\\n \\n Links: [\\''Books + to Scrape (http://books.toscrape.com/index.html)\\'', \\''Home (http://books.toscrape.com/index.html)\\'', + \\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\'', + \\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\'', + \\'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\'']'', + )"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": + {\n \"text\": \"I will use the write_to_file command to write the price + of the book to a file named browse_website.txt.\",\n \"reasoning\": \"The + task requires me to write the price of the book to a file, and the write_to_file + command is the best option to achieve this.\",\n \"plan\": \"- Use the + write_to_file command to write the price of the book to a file named browse_website.txt.\",\n \"criticism\": + \"I need to ensure that I am using the correct filename and text to write the + price of the book to the file.\",\n \"speak\": \"I will use the write_to_file + command to write the price of the book to a file named browse_website.txt.\"\n },\n \"command\": + {\n \"name\": \"write_to_file\",\n \"args\": {\n \"filename\": + \"browse_website.txt\",\n \"text\": \"The price of the book \\\"Meditations\\\" + is \u00a325.89.\"\n }\n }\n}"}, {"role": "system", "content": "Command + write_to_file returned: File written to successfully."}, {"role": "user", "content": + "Determine which next command to use, and respond using the format specified + above:"}], "temperature": 0, "max_tokens": 1711}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '9732' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA5RSTY/TMBC98ytGc3arNkvb3Ry5wAqOLItEUOU408TU8UT2pF1U5b+jNOkHRQvi + 6nnzPub5gLbAFE2lxdSNm6we+ONqVX/w28X+af61XX3RzO/l0/L5+d0OFXL+g4yMG1PDdeNILHtU + aAJpoQLT+fI+WayS1WKusOaCHKZYNjK5my4m0oacJ7O72RwVtlGXhOkBm8B1I2vhLfmIaZIs7xVe + yM+D+SJRKCzaXbBvk1mn0FRsDUVMvx2wpngiDuwIU9Qx2ijaS2+TvZDvIxwyDwCQoVTclpXEDFMY + H8cBvUj/mOEj7K1z0EYCqQhEx+16NEhguK61L0AYzm8n1DRDdU0ZSEf21pcD7+cRBpWOkBP5M0MB + sTWGYty0zv1UcBR4VdrG4zCnKMBNf7TeDV3t3BppnPaDhwk8/SvW34hMsGKNjfXpUp5oXIpt6Im1 + wCPoGprAO1tYX4KGnXa2gOEasOHQS/ST11RiQ3r7f13cmB7YOnVqfUT+UbrXNQ1CvzHf+NGhvP0v + 1wVf2m2CNQS8Gfphvqr6umDYBytCx9p65MY6gjzwPtJ6T3m0QlN5kXOOY5YxUuY77BRurLexWg8G + MMUo3KBC6wt6wXTWfe/e/AIAAP//AwAYXmcn7wMAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7be49bed8f47ced1-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Thu, 27 Apr 2023 05:19:22 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '11504' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-reset-requests: + - 17ms + x-request-id: + - cd010e2aab478b33c13c4cd5be5292c8 + status: + code: 200 + message: OK +version: 1 diff --git 
a/tests/integration/goal_oriented/test_browse_website.py b/tests/integration/goal_oriented/test_browse_website.py new file mode 100644 index 00000000..86dc39ea --- /dev/null +++ b/tests/integration/goal_oriented/test_browse_website.py @@ -0,0 +1,27 @@ +import os + +import pytest + +from autogpt.commands.file_operations import read_file +from autogpt.config import Config +from tests.integration.agent_factory import create_browser_agent +from tests.integration.agent_utils import run_interaction_loop +from tests.utils import get_workspace_file_path, requires_api_key + +CFG = Config() + + +@requires_api_key("OPENAI_API_KEY") +@pytest.mark.vcr +def test_browse_website(workspace) -> None: + CFG.workspace_path = workspace.root + CFG.file_logger_path = os.path.join(workspace.root, "file_logger.txt") + + file_name = get_workspace_file_path(workspace, "browse_website.txt") + agent = create_browser_agent(workspace) + try: + run_interaction_loop(agent, 40) + # catch system exit exceptions + except SystemExit: # the agent returns an exception when it shuts down + content = read_file(file_name) + assert "£25.89" in content, f"Expected £25.89, got {content}" diff --git a/tests/integration/goal_oriented/test_write_file.py b/tests/integration/goal_oriented/test_write_file.py index 739b0f20..a2f2fa5a 100644 --- a/tests/integration/goal_oriented/test_write_file.py +++ b/tests/integration/goal_oriented/test_write_file.py @@ -1,14 +1,11 @@ -import concurrent import os -import unittest import pytest -from autogpt.agent import Agent -from autogpt.commands.command import CommandRegistry -from autogpt.commands.file_operations import delete_file, read_file -from autogpt.config import AIConfig, Config -from autogpt.memory import get_memory +from autogpt.commands.file_operations import read_file +from autogpt.config import Config +from tests.integration.agent_factory import create_writer_agent +from tests.integration.agent_utils import run_interaction_loop from tests.utils import requires_api_key CFG = Config() @@ -23,59 +20,8 @@ def test_write_file(workspace) -> None: file_name = str(workspace.get_path("hello_world.txt")) agent = create_writer_agent(workspace) try: - with concurrent.futures.ThreadPoolExecutor() as executor: - future = executor.submit(agent.start_interaction_loop) - try: - result = future.result(timeout=45) - except concurrent.futures.TimeoutError: - assert False, "The process took longer than 45 seconds to complete." 
+ run_interaction_loop(agent, 40) # catch system exit exceptions except SystemExit: # the agent returns an exception when it shuts down content = read_file(file_name) assert content == "Hello World", f"Expected 'Hello World', got {content}" - - -def create_writer_agent(workspace): - command_registry = CommandRegistry() - command_registry.import_commands("autogpt.commands.file_operations") - command_registry.import_commands("autogpt.app") - - ai_config = AIConfig( - ai_name="write_to_file-GPT", - ai_role="an AI designed to use the write_to_file command to write 'Hello World' into a file named \"hello_world.txt\" and then use the task_complete command to complete the task.", - ai_goals=[ - "Use the write_to_file command to write 'Hello World' into a file named \"hello_world.txt\".", - "Use the task_complete command to complete the task.", - "Do not use any other commands.", - ], - ) - ai_config.command_registry = command_registry - CFG.set_continuous_mode(True) - CFG.set_memory_backend("no_memory") - CFG.set_temperature(0) - memory = get_memory(CFG, init=True) - triggering_prompt = ( - "Determine which next command to use, and respond using the" - " format specified above:" - ) - system_prompt = ai_config.construct_full_prompt() - - agent = Agent( - ai_name="", - memory=memory, - full_message_history=[], - command_registry=command_registry, - config=ai_config, - next_action_count=0, - system_prompt=system_prompt, - triggering_prompt=triggering_prompt, - workspace_directory=workspace.root, - ) - - os.environ["TIKTOKEN_CACHE_DIR"] = "" - - return agent - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/utils.py b/tests/utils.py index e0bc7460..2a0d25d8 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -40,3 +40,7 @@ def skip_in_ci(test_function): os.environ.get("CI") == "true", reason="This test doesn't work on GitHub Actions.", )(test_function) + + +def get_workspace_file_path(workspace, file_name): + return str(workspace.get_path(file_name)) From 3d89ed17879a603695e629fc5cc8da774d4b8672 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Thu, 27 Apr 2023 19:16:56 +0200 Subject: [PATCH 174/222] Fix imports, type hints and fixtures for goal oriented tests (#3415) --- tests/conftest.py | 8 +++- tests/integration/agent_factory.py | 48 ++++++++++++------- tests/integration/agent_utils.py | 6 ++- .../goal_oriented/test_browse_website.py | 21 +++----- .../goal_oriented/test_write_file.py | 19 ++------ tests/vcr/__init__.py | 0 6 files changed, 54 insertions(+), 48 deletions(-) create mode 100644 tests/vcr/__init__.py diff --git a/tests/conftest.py b/tests/conftest.py index 20968706..c5814b0a 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -5,7 +5,8 @@ import pytest from autogpt.api_manager import ApiManager from autogpt.config import Config from autogpt.workspace import Workspace -from tests.vcr.openai_filter import before_record_request + +pytest_plugins = ["tests.integration.agent_factory"] @pytest.fixture() @@ -25,8 +26,13 @@ def config(workspace: Workspace) -> Config: # Do a little setup and teardown since the config object is a singleton old_ws_path = config.workspace_path + old_file_logger_path = config.file_logger_path + config.workspace_path = workspace.root + config.file_logger_path = workspace.get_path("file_logger.txt") yield config + + config.file_logger_path = old_file_logger_path config.workspace_path = old_ws_path diff --git a/tests/integration/agent_factory.py b/tests/integration/agent_factory.py index 2a88a7b1..1cda93eb 100644 --- 
a/tests/integration/agent_factory.py +++ b/tests/integration/agent_factory.py @@ -1,14 +1,36 @@ -import os +import pytest from autogpt.agent import Agent -from autogpt.app import CFG from autogpt.commands.command import CommandRegistry -from autogpt.config import AIConfig -from autogpt.memory import get_memory +from autogpt.config import AIConfig, Config +from autogpt.memory import NoMemory, get_memory from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT +from autogpt.workspace import Workspace -def create_browser_agent(workspace): +@pytest.fixture +def agent_test_config(config: Config): + was_continuous_mode = config.continuous_mode + was_temperature = config.temperature + config.set_continuous_mode(True) + config.set_temperature(0) + yield config + config.set_continuous_mode(was_continuous_mode) + config.set_temperature(was_temperature) + + +@pytest.fixture +def memory_none(agent_test_config: Config): + was_memory_backend = agent_test_config.memory_backend + + agent_test_config.set_memory_backend("no_memory") + yield get_memory(agent_test_config, init=True) + + agent_test_config.set_memory_backend(was_memory_backend) + + +@pytest.fixture +def browser_agent(agent_test_config, memory_none: NoMemory, workspace: Workspace): command_registry = CommandRegistry() command_registry.import_commands("autogpt.commands.file_operations") command_registry.import_commands("autogpt.commands.web_selenium") @@ -25,16 +47,12 @@ def create_browser_agent(workspace): ], ) ai_config.command_registry = command_registry - CFG.set_continuous_mode(True) - CFG.set_memory_backend("no_memory") - CFG.set_temperature(0) - memory = get_memory(CFG, init=True) system_prompt = ai_config.construct_full_prompt() agent = Agent( ai_name="", - memory=memory, + memory=memory_none, full_message_history=[], command_registry=command_registry, config=ai_config, @@ -47,7 +65,8 @@ def create_browser_agent(workspace): return agent -def create_writer_agent(workspace): +@pytest.fixture +def writer_agent(agent_test_config, memory_none: NoMemory, workspace: Workspace): command_registry = CommandRegistry() command_registry.import_commands("autogpt.commands.file_operations") command_registry.import_commands("autogpt.app") @@ -62,10 +81,7 @@ def create_writer_agent(workspace): ], ) ai_config.command_registry = command_registry - CFG.set_continuous_mode(True) - CFG.set_memory_backend("no_memory") - CFG.set_temperature(0) - memory = get_memory(CFG, init=True) + triggering_prompt = ( "Determine which next command to use, and respond using the" " format specified above:" @@ -74,7 +90,7 @@ def create_writer_agent(workspace): agent = Agent( ai_name="", - memory=memory, + memory=memory_none, full_message_history=[], command_registry=command_registry, config=ai_config, diff --git a/tests/integration/agent_utils.py b/tests/integration/agent_utils.py index 3d03ef5e..a899afcf 100644 --- a/tests/integration/agent_utils.py +++ b/tests/integration/agent_utils.py @@ -1,7 +1,9 @@ -import concurrent +import concurrent.futures + +from autogpt.agent.agent import Agent -def run_interaction_loop(agent, timeout): +def run_interaction_loop(agent: Agent, timeout: float | None): with concurrent.futures.ThreadPoolExecutor() as executor: future = executor.submit(agent.start_interaction_loop) try: diff --git a/tests/integration/goal_oriented/test_browse_website.py b/tests/integration/goal_oriented/test_browse_website.py index 86dc39ea..9591f2f9 100644 --- a/tests/integration/goal_oriented/test_browse_website.py +++ 
b/tests/integration/goal_oriented/test_browse_website.py @@ -1,27 +1,18 @@ -import os - import pytest +from autogpt.agent import Agent from autogpt.commands.file_operations import read_file -from autogpt.config import Config -from tests.integration.agent_factory import create_browser_agent from tests.integration.agent_utils import run_interaction_loop -from tests.utils import get_workspace_file_path, requires_api_key - -CFG = Config() +from tests.utils import requires_api_key @requires_api_key("OPENAI_API_KEY") @pytest.mark.vcr -def test_browse_website(workspace) -> None: - CFG.workspace_path = workspace.root - CFG.file_logger_path = os.path.join(workspace.root, "file_logger.txt") - - file_name = get_workspace_file_path(workspace, "browse_website.txt") - agent = create_browser_agent(workspace) +def test_browse_website(browser_agent: Agent) -> None: + file_path = browser_agent.workspace.get_path("browse_website.txt") try: - run_interaction_loop(agent, 40) + run_interaction_loop(browser_agent, 40) # catch system exit exceptions except SystemExit: # the agent returns an exception when it shuts down - content = read_file(file_name) + content = read_file(file_path) assert "£25.89" in content, f"Expected £25.89, got {content}" diff --git a/tests/integration/goal_oriented/test_write_file.py b/tests/integration/goal_oriented/test_write_file.py index a2f2fa5a..da67235a 100644 --- a/tests/integration/goal_oriented/test_write_file.py +++ b/tests/integration/goal_oriented/test_write_file.py @@ -1,27 +1,18 @@ -import os - import pytest +from autogpt.agent import Agent from autogpt.commands.file_operations import read_file -from autogpt.config import Config -from tests.integration.agent_factory import create_writer_agent from tests.integration.agent_utils import run_interaction_loop from tests.utils import requires_api_key -CFG = Config() - @requires_api_key("OPENAI_API_KEY") @pytest.mark.vcr -def test_write_file(workspace) -> None: - CFG.workspace_path = workspace.root - CFG.file_logger_path = os.path.join(workspace.root, "file_logger.txt") - - file_name = str(workspace.get_path("hello_world.txt")) - agent = create_writer_agent(workspace) +def test_write_file(writer_agent: Agent) -> None: + file_path = str(writer_agent.workspace.get_path("hello_world.txt")) try: - run_interaction_loop(agent, 40) + run_interaction_loop(writer_agent, 40) # catch system exit exceptions except SystemExit: # the agent returns an exception when it shuts down - content = read_file(file_name) + content = read_file(file_path) assert content == "Hello World", f"Expected 'Hello World', got {content}" diff --git a/tests/vcr/__init__.py b/tests/vcr/__init__.py new file mode 100644 index 00000000..e69de29b From 6b4ad1f933dee86dfc272ee6b7d1e43826c6cffb Mon Sep 17 00:00:00 2001 From: Irmius <67007713+Irmius@users.noreply.github.com> Date: Thu, 27 Apr 2023 20:32:31 +0300 Subject: [PATCH 175/222] Fix `browse_website` headless mode for Firefox (#2816) Co-authored-by: Reinier van der Leer --- autogpt/commands/web_selenium.py | 5 ++++- docs/setup.md | 4 ++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py index b435660c..739520ec 100644 --- a/autogpt/commands/web_selenium.py +++ b/autogpt/commands/web_selenium.py @@ -78,6 +78,9 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]: ) if CFG.selenium_web_browser == "firefox": + if CFG.selenium_headless: + options.headless = True + options.add_argument("--disable-gpu") driver = webdriver.Firefox( 
executable_path=GeckoDriverManager().install(), options=options ) @@ -92,7 +95,7 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]: options.add_argument("--no-sandbox") if CFG.selenium_headless: - options.add_argument("--headless") + options.add_argument("--headless=new") options.add_argument("--disable-gpu") chromium_driver_path = Path("/usr/bin/chromedriver") diff --git a/docs/setup.md b/docs/setup.md index 7cbab43d..d0917c8b 100644 --- a/docs/setup.md +++ b/docs/setup.md @@ -64,6 +64,10 @@ Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](htt templates in the [repository]. 6. Continue to [Run with Docker](#run-with-docker) +!!! note "Docker only supports headless browsing" + Auto-GPT uses a browser in headless mode by default: `HEADLESS_BROWSER=True`. + Please do not change this setting in combination with Docker, or Auto-GPT will crash. + [Docker Hub]: https://hub.docker.com/r/significantgravitas/auto-gpt [repository]: https://github.com/Significant-Gravitas/Auto-GPT From 4b54e3c6d88ef2a4c83edfb4f2c2b346421f3b2e Mon Sep 17 00:00:00 2001 From: Iliass Date: Thu, 27 Apr 2023 11:18:44 -0700 Subject: [PATCH 176/222] Update broken link (#3416) Co-authored-by: Richard Beales --- docs/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/index.md b/docs/index.md index 669577bc..b9487118 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,5 +1,5 @@ # Auto-GPT -Welcome to Auto-GPT. Please follow the [Installation](installation.md) guide to get started. +Welcome to Auto-GPT. Please follow the [Installation](https://significant-gravitas.github.io/Auto-GPT/setup/) guide to get started. It is recommended to use a virtual machine for tasks that require high security measures to prevent any potential harm to the main computer's system and data. From 427b8648ee676e39d084e83aa6981313a0591a84 Mon Sep 17 00:00:00 2001 From: Dhruv Awasthi Date: Thu, 27 Apr 2023 23:54:28 +0530 Subject: [PATCH 177/222] Fix README: remove redundant "Disclaimer" (#3391) Co-authored-by: Richard Beales --- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index c915418b..16f3d05a 100644 --- a/README.md +++ b/README.md @@ -125,7 +125,6 @@ This experiment aims to showcase the potential of GPT-4 but comes with some limi ## 🛡 Disclaimer -Disclaimer This project, Auto-GPT, is an experimental application and is provided "as-is" without any warranty, express or implied. By using this software, you agree to assume all risks associated with its use, including but not limited to data loss, system failure, or any other issues that may arise. The developers and contributors of this project do not accept any responsibility or liability for any losses, damages, or other consequences that may occur as a result of using this software. You are solely responsible for any decisions and actions taken based on the information provided by Auto-GPT. 
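Patch 175 above is easier to follow outside diff form. Below is a minimal, self-contained sketch of the per-browser headless setup that `autogpt/commands/web_selenium.py` ends up with after that patch; it is an illustration, not the project's actual module. It assumes `selenium` 4.x (old enough that `executable_path` is still accepted, matching the patched code) and `webdriver-manager` are installed; the `make_driver` name and the unconditional `ChromeDriverManager` fallback are this sketch's own simplifications.

```python
# Sketch of the headless-browser wiring completed by patch 175.
# Assumptions: selenium 4.x (pre-4.10, so executable_path is accepted)
# and webdriver-manager are installed; names here are illustrative only.
from selenium import webdriver
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.firefox import GeckoDriverManager


def make_driver(browser_name: str, headless: bool):
    """Build a Selenium WebDriver, applying per-browser headless flags."""
    if browser_name == "firefox":
        options = FirefoxOptions()
        if headless:
            # Firefox headless mode is a boolean attribute, not a CLI
            # argument; this is the branch the patch adds.
            options.headless = True
            options.add_argument("--disable-gpu")
        return webdriver.Firefox(
            executable_path=GeckoDriverManager().install(), options=options
        )

    options = ChromeOptions()
    options.add_argument("--no-sandbox")
    if headless:
        # Chrome's "new" headless mode, as switched to in the second hunk.
        options.add_argument("--headless=new")
        options.add_argument("--disable-gpu")
    return webdriver.Chrome(
        executable_path=ChromeDriverManager().install(), options=options
    )
```

The docs note added in the same patch follows from this setup: the Docker image ships no display server, so a non-headless browser cannot start inside the container, which is why `HEADLESS_BROWSER=True` must stay enabled there.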
From 94dc6f19aac62cf349f9557a8a1230eac14c9785 Mon Sep 17 00:00:00 2001
From: James Collins
Date: Thu, 27 Apr 2023 11:48:18 -0700
Subject: [PATCH 178/222] Add a regression test for the embedding (#3422)

---
 tests/integration/test_llm_utils.py | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)
 create mode 100644 tests/integration/test_llm_utils.py

diff --git a/tests/integration/test_llm_utils.py b/tests/integration/test_llm_utils.py
new file mode 100644
index 00000000..7f312bde
--- /dev/null
+++ b/tests/integration/test_llm_utils.py
@@ -0,0 +1,25 @@
+import string
+
+import pytest
+from numpy.random import RandomState
+
+from autogpt.llm_utils import get_ada_embedding
+from tests.utils import requires_api_key
+
+
+@pytest.fixture(scope="session")
+def random_large_string():
+    """Big string used to overwhelm token limits."""
+    seed = 42
+    n_characters = 30_000
+    random = RandomState(seed)
+    return "".join(random.choice(list(string.ascii_lowercase), size=n_characters))
+
+
+@pytest.mark.xfail(reason="We have no mechanism for embedding large strings.")
+@requires_api_key("OPENAI_API_KEY")
+def test_get_ada_embedding_large_context(random_large_string):
+    # This test should be able to mock the openai call after we have a fix. We don't need
+    # to hit the API to test the logic of the function (so not using vcr). This is a quick
+    # regression test to document the issue.
+    get_ada_embedding(random_large_string)

From 5ce6da95fcd02c1503661eafa2b358aad536268e Mon Sep 17 00:00:00 2001
From: Eddie Cohen <31940792+edcohen08@users.noreply.github.com>
Date: Thu, 27 Apr 2023 15:26:47 -0400
Subject: [PATCH 179/222] Make y/n configurable (#3178)

Co-authored-by: Reinier van der Leer
---
 .env.template              |  5 +++++
 autogpt/agent/agent.py     | 10 +++++-----
 autogpt/config/config.py   |  2 ++
 autogpt/plugins.py         |  4 ++--
 autogpt/prompts/prompt.py  |  4 ++--
 autogpt/utils.py           |  4 ++--
 tests/unit/test_plugins.py |  2 ++
 7 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/.env.template b/.env.template
index 3d2746c4..4d65c0b5 100644
--- a/.env.template
+++ b/.env.template
@@ -13,6 +13,11 @@
 ## AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml)
 # AI_SETTINGS_FILE=ai_settings.yaml
 
+## AUTHORISE COMMAND KEY - Key to authorise commands
+# AUTHORISE_COMMAND_KEY=y
+## EXIT_KEY - Key to exit AUTO-GPT
+# EXIT_KEY=n
+
 ################################################################################
 ### LLM PROVIDER
 ################################################################################

diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py
index 9b48d540..fd6a7cb6 100644
--- a/autogpt/agent/agent.py
+++ b/autogpt/agent/agent.py
@@ -153,7 +153,7 @@ class Agent:
                     console_input = clean_input(
                         Fore.MAGENTA + "Input:" + Style.RESET_ALL
                     )
-                    if console_input.lower().strip() == "y":
+                    if console_input.lower().strip() == cfg.authorise_key:
                         user_input = "GENERATE NEXT COMMAND JSON"
                         break
                     elif console_input.lower().strip() == "s":
@@ -171,7 +171,7 @@ class Agent:
                             Fore.YELLOW,
                             "",
                         )
-                        if self_feedback_resp[0].lower().strip() == "y":
+                        if self_feedback_resp[0].lower().strip() == cfg.authorise_key:
                             user_input = "GENERATE NEXT COMMAND JSON"
                         else:
                             user_input = self_feedback_resp
@@ -179,7 +179,7 @@ class Agent:
                     elif console_input.lower().strip() == "":
                         print("Invalid input format.")
                         continue
-                    elif console_input.lower().startswith("y -"):
+                    elif console_input.lower().startswith(f"{cfg.authorise_key} -"):
                         try:
                             self.next_action_count = abs(
                                 int(console_input.split(" ")[1])
@@ -187,12 +187,12 @@ class Agent:
                             user_input = "GENERATE NEXT COMMAND JSON"
                         except ValueError:
                             print(
-                                "Invalid input format. Please enter 'y -n' where n is"
+                                f"Invalid input format. Please enter '{cfg.authorise_key} -N' where N is"
                                 " the number of continuous tasks."
                             )
                             continue
                         break
-                    elif console_input.lower() == "n":
+                    elif console_input.lower() == cfg.exit_key:
                         user_input = "EXIT"
                         break
                     else:
diff --git a/autogpt/config/config.py b/autogpt/config/config.py
index c0afaeb1..67e735c9 100644
--- a/autogpt/config/config.py
+++ b/autogpt/config/config.py
@@ -28,6 +28,8 @@ class Config(metaclass=Singleton):
         self.allow_downloads = False
         self.skip_news = False
 
+        self.authorise_key = os.getenv("AUTHORISE_COMMAND_KEY", "y")
+        self.exit_key = os.getenv("EXIT_KEY", "n")
         self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml")
         self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
         self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
diff --git a/autogpt/plugins.py b/autogpt/plugins.py
index 57045bb8..d03f0037 100644
--- a/autogpt/plugins.py
+++ b/autogpt/plugins.py
@@ -262,6 +262,6 @@ def denylist_allowlist_check(plugin_name: str, cfg: Config) -> bool:
         return True
     ack = input(
         f"WARNING: Plugin {plugin_name} found. But not in the"
-        " allowlist... Load? (y/n): "
+        f" allowlist... Load? ({cfg.authorise_key}/{cfg.exit_key}): "
     )
-    return ack.lower() == "y"
+    return ack.lower() == cfg.authorise_key
diff --git a/autogpt/prompts/prompt.py b/autogpt/prompts/prompt.py
index c1240157..cbfcfca9 100644
--- a/autogpt/prompts/prompt.py
+++ b/autogpt/prompts/prompt.py
@@ -108,9 +108,9 @@ Name: {config.ai_name}
 Role: {config.ai_role}
 Goals: {config.ai_goals}
 API Budget: {"infinite" if config.api_budget <= 0 else f"${config.api_budget}"}
-Continue (y/n): """
+Continue ({CFG.authorise_key}/{CFG.exit_key}): """
         )
-        if should_continue.lower() == "n":
+        if should_continue.lower() == CFG.exit_key:
             config = AIConfig()
 
     if not config.ai_name:
diff --git a/autogpt/utils.py b/autogpt/utils.py
index 17be611a..6efaceec 100644
--- a/autogpt/utils.py
+++ b/autogpt/utils.py
@@ -47,14 +47,14 @@ def clean_input(prompt: str = "", talk=False):
                 "sure",
                 "alright",
             ]:
-                return "y"
+                return cfg.authorise_key
             elif plugin_response.lower() in [
                 "no",
                 "nope",
                 "n",
                 "negative",
             ]:
-                return "n"
+                return cfg.exit_key
             return plugin_response
 
         # ask for input, default when just pressing Enter is y
diff --git a/tests/unit/test_plugins.py b/tests/unit/test_plugins.py
index de1653e4..08c9114c 100644
--- a/tests/unit/test_plugins.py
+++ b/tests/unit/test_plugins.py
@@ -25,6 +25,8 @@ def mock_config_denylist_allowlist_check():
         plugins_denylist = ["BadPlugin"]
         plugins_allowlist = ["GoodPlugin"]
+        authorise_key = "y"
+        exit_key = "n"
 
     return MockConfig()

From 1772a01d0440f7a8f98d2b8b7bc16fda5833ffe8 Mon Sep 17 00:00:00 2001
From: Johnny C <3631074+gantit@users.noreply.github.com>
Date: Thu, 27 Apr 2023 23:43:56 +0200
Subject: [PATCH 180/222] Fix URL to docs in API throttling message (#3201)

Co-authored-by: Reinier van der Leer
---
 autogpt/llm_utils.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py
index a98b12a3..11f8da3a 100644
--- a/autogpt/llm_utils.py
+++ b/autogpt/llm_utils.py
@@ -30,7 +30,7 @@ def retry_openai_api(
     api_key_error_msg = (
         f"Please double check that you have setup a "
         f"{Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account.
You can " - f"read more here: {Fore.CYAN}https://github.com/Significant-Gravitas/Auto-GPT#openai-api-keys-configuration{Fore.RESET}" + f"read more here: {Fore.CYAN}https://significant-gravitas.github.io/Auto-GPT/setup/#getting-an-api-key{Fore.RESET}" ) backoff_msg = ( f"{Fore.RED}Error: API Bad gateway. Waiting {{backoff}} seconds...{Fore.RESET}" @@ -176,7 +176,7 @@ def create_chat_completion( if not warned_user: logger.double_check( f"Please double check that you have setup a {Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. " - + f"You can read more here: {Fore.CYAN}https://github.com/Significant-Gravitas/Auto-GPT#openai-api-keys-configuration{Fore.RESET}" + + f"You can read more here: {Fore.CYAN}https://significant-gravitas.github.io/Auto-GPT/setup/#getting-an-api-key{Fore.RESET}" ) warned_user = True except (APIError, Timeout) as e: From 20ef130341981e7500039e31d87e566e432acdd7 Mon Sep 17 00:00:00 2001 From: rickythefox Date: Fri, 28 Apr 2023 14:51:29 +0200 Subject: [PATCH 181/222] Add tests for code/shell execution & improve config fixture (#1268) Co-authored-by: Reinier van der Leer --- autogpt/commands/execute_code.py | 11 ++---- tests/conftest.py | 18 +++++----- tests/integration/test_execute_code.py | 50 ++++++++++++++++++++++++++ 3 files changed, 61 insertions(+), 18 deletions(-) create mode 100644 tests/integration/test_execute_code.py diff --git a/autogpt/commands/execute_code.py b/autogpt/commands/execute_code.py index 590f95cc..1bcdede2 100644 --- a/autogpt/commands/execute_code.py +++ b/autogpt/commands/execute_code.py @@ -113,15 +113,9 @@ def execute_shell(command_line: str) -> str: str: The output of the command """ - if not CFG.execute_local_commands: - return ( - "You are not allowed to run local shell commands. To execute" - " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' " - "in your config. Do not attempt to bypass the restriction." 
- ) - current_dir = os.getcwd() + current_dir = Path.cwd() # Change dir into workspace if necessary - if CFG.workspace_path not in current_dir: + if not current_dir.is_relative_to(CFG.workspace_path): os.chdir(CFG.workspace_path) print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'") @@ -154,6 +148,7 @@ def execute_shell_popen(command_line) -> str: Returns: str: Description of the fact that the process started and its id """ + current_dir = os.getcwd() # Change dir into workspace if necessary if CFG.workspace_path not in current_dir: diff --git a/tests/conftest.py b/tests/conftest.py index c5814b0a..e3c590d2 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,6 +1,7 @@ from pathlib import Path import pytest +from pytest_mock import MockerFixture from autogpt.api_manager import ApiManager from autogpt.config import Config @@ -10,7 +11,7 @@ pytest_plugins = ["tests.integration.agent_factory"] @pytest.fixture() -def workspace_root(tmp_path) -> Path: +def workspace_root(tmp_path: Path) -> Path: return tmp_path / "home/users/monty/auto_gpt_workspace" @@ -21,20 +22,17 @@ def workspace(workspace_root: Path) -> Workspace: @pytest.fixture() -def config(workspace: Workspace) -> Config: +def config(mocker: MockerFixture, workspace: Workspace) -> Config: config = Config() # Do a little setup and teardown since the config object is a singleton - old_ws_path = config.workspace_path - old_file_logger_path = config.file_logger_path - - config.workspace_path = workspace.root - config.file_logger_path = workspace.get_path("file_logger.txt") + mocker.patch.multiple( + config, + workspace_path=workspace.root, + file_logger_path=workspace.get_path("file_logger.txt"), + ) yield config - config.file_logger_path = old_file_logger_path - config.workspace_path = old_ws_path - @pytest.fixture() def api_manager() -> ApiManager: diff --git a/tests/integration/test_execute_code.py b/tests/integration/test_execute_code.py new file mode 100644 index 00000000..2d4e0293 --- /dev/null +++ b/tests/integration/test_execute_code.py @@ -0,0 +1,50 @@ +import random +import string +import tempfile + +import pytest +from pytest_mock import MockerFixture + +import autogpt.commands.execute_code as sut # system under testing +from autogpt.config import Config + + +@pytest.fixture +def config_allow_execute(config: Config, mocker: MockerFixture): + yield mocker.patch.object(config, "execute_local_commands", True) + + +@pytest.fixture +def python_test_file(config: Config, random_string): + temp_file = tempfile.NamedTemporaryFile(dir=config.workspace_path, suffix=".py") + temp_file.write(str.encode(f"print('Hello {random_string}!')")) + temp_file.flush() + + yield temp_file.name + temp_file.close() + + +@pytest.fixture +def random_string(): + return "".join(random.choice(string.ascii_lowercase) for _ in range(10)) + + +def test_execute_python_file(python_test_file: str, random_string: str): + result = sut.execute_python_file(python_test_file) + assert result == f"Hello {random_string}!\n" + + +def test_execute_python_file_invalid(): + assert all( + s in sut.execute_python_file("not_python").lower() + for s in ["error:", "invalid", ".py"] + ) + assert all( + s in sut.execute_python_file("notexist.py").lower() + for s in ["error:", "does not exist"] + ) + + +def test_execute_shell(config_allow_execute, random_string): + result = sut.execute_shell(f"echo 'Hello {random_string}!'") + assert f"Hello {random_string}!" 
in result From cf5fdabdfc320c78a90223aadf22f7cb7d612000 Mon Sep 17 00:00:00 2001 From: Media <12145726+rihp@users.noreply.github.com> Date: Fri, 28 Apr 2023 18:32:11 +0200 Subject: [PATCH 182/222] Removing unitest in favor of pytest from test_config.py (#3417) * removing unitest in favor of pytest * remove singleton test and unnecessary fixture --------- Co-authored-by: James Collins --- tests/test_config.py | 185 +++++++++++++++++++++---------------------- 1 file changed, 89 insertions(+), 96 deletions(-) diff --git a/tests/test_config.py b/tests/test_config.py index 5dd9e9c6..e1fc6745 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -1,126 +1,119 @@ -from unittest import TestCase +""" +Test cases for the Config class, which handles the configuration settings +for the AI and ensures it behaves as a singleton. +""" + +import pytest from autogpt.config import Config -class TestConfig(TestCase): +def test_initial_values(config): """ - Test cases for the Config class, which handles the configuration settings - for the AI and ensures it behaves as a singleton. + Test if the initial values of the Config class attributes are set correctly. """ + assert config.debug_mode == False + assert config.continuous_mode == False + assert config.speak_mode == False + assert config.fast_llm_model == "gpt-3.5-turbo" + assert config.smart_llm_model == "gpt-4" + assert config.fast_token_limit == 4000 + assert config.smart_token_limit == 8000 - def setUp(self): - """ - Set up the test environment by creating an instance of the Config class. - """ - self.config = Config() - def test_singleton(self): - """ - Test if the Config class behaves as a singleton by ensuring that two instances are the same. - """ - config2 = Config() - self.assertIs(self.config, config2) +def test_set_continuous_mode(config): + """ + Test if the set_continuous_mode() method updates the continuous_mode attribute. + """ + # Store continuous mode to reset it after the test + continuous_mode = config.continuous_mode - def test_initial_values(self): - """ - Test if the initial values of the Config class attributes are set correctly. - """ - self.assertFalse(self.config.debug_mode) - self.assertFalse(self.config.continuous_mode) - self.assertFalse(self.config.speak_mode) - self.assertEqual(self.config.fast_llm_model, "gpt-3.5-turbo") - self.assertEqual(self.config.smart_llm_model, "gpt-4") - self.assertEqual(self.config.fast_token_limit, 4000) - self.assertEqual(self.config.smart_token_limit, 8000) + config.set_continuous_mode(True) + assert config.continuous_mode == True - def test_set_continuous_mode(self): - """ - Test if the set_continuous_mode() method updates the continuous_mode attribute. - """ - # Store continuous mode to reset it after the test - continuous_mode = self.config.continuous_mode + # Reset continuous mode + config.set_continuous_mode(continuous_mode) - self.config.set_continuous_mode(True) - self.assertTrue(self.config.continuous_mode) - # Reset continuous mode - self.config.set_continuous_mode(continuous_mode) +def test_set_speak_mode(config): + """ + Test if the set_speak_mode() method updates the speak_mode attribute. + """ + # Store speak mode to reset it after the test + speak_mode = config.speak_mode - def test_set_speak_mode(self): - """ - Test if the set_speak_mode() method updates the speak_mode attribute. 
- """ - # Store speak mode to reset it after the test - speak_mode = self.config.speak_mode + config.set_speak_mode(True) + assert config.speak_mode == True - self.config.set_speak_mode(True) - self.assertTrue(self.config.speak_mode) + # Reset speak mode + config.set_speak_mode(speak_mode) - # Reset speak mode - self.config.set_speak_mode(speak_mode) - def test_set_fast_llm_model(self): - """ - Test if the set_fast_llm_model() method updates the fast_llm_model attribute. - """ - # Store model name to reset it after the test - fast_llm_model = self.config.fast_llm_model +def test_set_fast_llm_model(config): + """ + Test if the set_fast_llm_model() method updates the fast_llm_model attribute. + """ + # Store model name to reset it after the test + fast_llm_model = config.fast_llm_model - self.config.set_fast_llm_model("gpt-3.5-turbo-test") - self.assertEqual(self.config.fast_llm_model, "gpt-3.5-turbo-test") + config.set_fast_llm_model("gpt-3.5-turbo-test") + assert config.fast_llm_model == "gpt-3.5-turbo-test" - # Reset model name - self.config.set_fast_llm_model(fast_llm_model) + # Reset model name + config.set_fast_llm_model(fast_llm_model) - def test_set_smart_llm_model(self): - """ - Test if the set_smart_llm_model() method updates the smart_llm_model attribute. - """ - # Store model name to reset it after the test - smart_llm_model = self.config.smart_llm_model - self.config.set_smart_llm_model("gpt-4-test") - self.assertEqual(self.config.smart_llm_model, "gpt-4-test") +def test_set_smart_llm_model(config): + """ + Test if the set_smart_llm_model() method updates the smart_llm_model attribute. + """ + # Store model name to reset it after the test + smart_llm_model = config.smart_llm_model - # Reset model name - self.config.set_smart_llm_model(smart_llm_model) + config.set_smart_llm_model("gpt-4-test") + assert config.smart_llm_model == "gpt-4-test" - def test_set_fast_token_limit(self): - """ - Test if the set_fast_token_limit() method updates the fast_token_limit attribute. - """ - # Store token limit to reset it after the test - fast_token_limit = self.config.fast_token_limit + # Reset model name + config.set_smart_llm_model(smart_llm_model) - self.config.set_fast_token_limit(5000) - self.assertEqual(self.config.fast_token_limit, 5000) - # Reset token limit - self.config.set_fast_token_limit(fast_token_limit) +def test_set_fast_token_limit(config): + """ + Test if the set_fast_token_limit() method updates the fast_token_limit attribute. + """ + # Store token limit to reset it after the test + fast_token_limit = config.fast_token_limit - def test_set_smart_token_limit(self): - """ - Test if the set_smart_token_limit() method updates the smart_token_limit attribute. - """ - # Store token limit to reset it after the test - smart_token_limit = self.config.smart_token_limit + config.set_fast_token_limit(5000) + assert config.fast_token_limit == 5000 - self.config.set_smart_token_limit(9000) - self.assertEqual(self.config.smart_token_limit, 9000) + # Reset token limit + config.set_fast_token_limit(fast_token_limit) - # Reset token limit - self.config.set_smart_token_limit(smart_token_limit) - def test_set_debug_mode(self): - """ - Test if the set_debug_mode() method updates the debug_mode attribute. - """ - # Store debug mode to reset it after the test - debug_mode = self.config.debug_mode +def test_set_smart_token_limit(config): + """ + Test if the set_smart_token_limit() method updates the smart_token_limit attribute. 
+ """ + # Store token limit to reset it after the test + smart_token_limit = config.smart_token_limit - self.config.set_debug_mode(True) - self.assertTrue(self.config.debug_mode) + config.set_smart_token_limit(9000) + assert config.smart_token_limit == 9000 - # Reset debug mode - self.config.set_debug_mode(debug_mode) + # Reset token limit + config.set_smart_token_limit(smart_token_limit) + + +def test_set_debug_mode(config): + """ + Test if the set_debug_mode() method updates the debug_mode attribute. + """ + # Store debug mode to reset it after the test + debug_mode = config.debug_mode + + config.set_debug_mode(True) + assert config.debug_mode == True + + # Reset debug mode + config.set_debug_mode(debug_mode) From aebe89148963c53f9e1a81021d997d54c5570729 Mon Sep 17 00:00:00 2001 From: Media <12145726+rihp@users.noreply.github.com> Date: Fri, 28 Apr 2023 18:48:30 +0200 Subject: [PATCH 183/222] Remove unittest in favor of pytest in the test_token_counter module (#3453) * init remove unittest for pytest * docstrings * black --------- Co-authored-by: James Collins --- tests/test_token_counter.py | 131 ++++++++++++++++++------------------ 1 file changed, 66 insertions(+), 65 deletions(-) diff --git a/tests/test_token_counter.py b/tests/test_token_counter.py index f7c84672..ac25796d 100644 --- a/tests/test_token_counter.py +++ b/tests/test_token_counter.py @@ -1,72 +1,73 @@ -import unittest +import pytest import tests.context from autogpt.token_counter import count_message_tokens, count_string_tokens -class TestTokenCounter(unittest.TestCase): - def test_count_message_tokens(self): - messages = [ - {"role": "user", "content": "Hello"}, - {"role": "assistant", "content": "Hi there!"}, - ] - self.assertEqual(count_message_tokens(messages), 17) - - def test_count_message_tokens_with_name(self): - messages = [ - {"role": "user", "content": "Hello", "name": "John"}, - {"role": "assistant", "content": "Hi there!"}, - ] - self.assertEqual(count_message_tokens(messages), 17) - - def test_count_message_tokens_empty_input(self): - # Empty input should return 3 tokens - self.assertEqual(count_message_tokens([]), 3) - - def test_count_message_tokens_invalid_model(self): - # Invalid model should raise a KeyError - messages = [ - {"role": "user", "content": "Hello"}, - {"role": "assistant", "content": "Hi there!"}, - ] - with self.assertRaises(KeyError): - count_message_tokens(messages, model="invalid_model") - - def test_count_message_tokens_gpt_4(self): - messages = [ - {"role": "user", "content": "Hello"}, - {"role": "assistant", "content": "Hi there!"}, - ] - self.assertEqual(count_message_tokens(messages, model="gpt-4-0314"), 15) - - def test_count_string_tokens(self): - """Test that the string tokens are counted correctly.""" - - string = "Hello, world!" - self.assertEqual( - count_string_tokens(string, model_name="gpt-3.5-turbo-0301"), 4 - ) - - def test_count_string_tokens_empty_input(self): - """Test that the string tokens are counted correctly.""" - - self.assertEqual(count_string_tokens("", model_name="gpt-3.5-turbo-0301"), 0) - - def test_count_message_tokens_invalid_model(self): - # Invalid model should raise a NotImplementedError - messages = [ - {"role": "user", "content": "Hello"}, - {"role": "assistant", "content": "Hi there!"}, - ] - with self.assertRaises(NotImplementedError): - count_message_tokens(messages, model="invalid_model") - - def test_count_string_tokens_gpt_4(self): - """Test that the string tokens are counted correctly.""" - - string = "Hello, world!" 
- self.assertEqual(count_string_tokens(string, model_name="gpt-4-0314"), 4) +def test_count_message_tokens(): + messages = [ + {"role": "user", "content": "Hello"}, + {"role": "assistant", "content": "Hi there!"}, + ] + assert count_message_tokens(messages) == 17 -if __name__ == "__main__": - unittest.main() +def test_count_message_tokens_with_name(): + messages = [ + {"role": "user", "content": "Hello", "name": "John"}, + {"role": "assistant", "content": "Hi there!"}, + ] + assert count_message_tokens(messages) == 17 + + +def test_count_message_tokens_empty_input(): + """Empty input should return 3 tokens""" + assert count_message_tokens([]) == 3 + + +def test_count_message_tokens_invalid_model(): + """Invalid model should raise a KeyError""" + messages = [ + {"role": "user", "content": "Hello"}, + {"role": "assistant", "content": "Hi there!"}, + ] + with pytest.raises(KeyError): + count_message_tokens(messages, model="invalid_model") + + +def test_count_message_tokens_gpt_4(): + messages = [ + {"role": "user", "content": "Hello"}, + {"role": "assistant", "content": "Hi there!"}, + ] + assert count_message_tokens(messages, model="gpt-4-0314") == 15 + + +def test_count_string_tokens(): + """Test that the string tokens are counted correctly.""" + + string = "Hello, world!" + assert count_string_tokens(string, model_name="gpt-3.5-turbo-0301") == 4 + + +def test_count_string_tokens_empty_input(): + """Test that the string tokens are counted correctly.""" + + assert count_string_tokens("", model_name="gpt-3.5-turbo-0301") == 0 + + +def test_count_message_tokens_invalid_model(): + """Invalid model should raise a NotImplementedError""" + messages = [ + {"role": "user", "content": "Hello"}, + {"role": "assistant", "content": "Hi there!"}, + ] + with pytest.raises(NotImplementedError): + count_message_tokens(messages, model="invalid_model") + + +def test_count_string_tokens_gpt_4(): + """Test that the string tokens are counted correctly.""" + + string = "Hello, world!" + assert count_string_tokens(string, model_name="gpt-4-0314") == 4 From c1f1da27e7056595cd2bfbf817c30bb9e4803b8b Mon Sep 17 00:00:00 2001 From: k-boikov <64261260+k-boikov@users.noreply.github.com> Date: Fri, 28 Apr 2023 21:13:30 +0300 Subject: [PATCH 184/222] move remove_color_codes to utils and add tests (#3260) * move remove_color_codes to utils and add tests * Fix for ai_settings goals loaded as list(dict) Some ai_settings formats can cause goals to load as list(dict) not list(str) Refactor code in utils.py to explicitly convert input type to string in remove_color_codes() function. - Updated remove_color_codes function to convert input argument to string type explicitly to avoid unexpected type errors. - Test case added to check conversion of dict to string in remove_color_codes function. 
* Update tests/test_utils.py Co-authored-by: James Collins * move remove_color_codes fn and tests to proper files --------- Co-authored-by: Luke Kyohere Co-authored-by: James Collins --- tests/test_logs.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 tests/test_logs.py diff --git a/tests/test_logs.py b/tests/test_logs.py new file mode 100644 index 00000000..0e8660b6 --- /dev/null +++ b/tests/test_logs.py @@ -0,0 +1,28 @@ +import pytest + +from autogpt.logs import remove_color_codes + + +@pytest.mark.parametrize( + "raw_text, clean_text", + [ + ( + "COMMAND = \x1b[36mbrowse_website\x1b[0m ARGUMENTS = \x1b[36m{'url': 'https://www.google.com', 'question': 'What is the capital of France?'}\x1b[0m", + "COMMAND = browse_website ARGUMENTS = {'url': 'https://www.google.com', 'question': 'What is the capital of France?'}", + ), + ( + "{'Schaue dir meine Projekte auf github () an, als auch meine Webseiten': 'https://github.com/Significant-Gravitas/Auto-GPT, https://discord.gg/autogpt und https://twitter.com/SigGravitas'}", + "{'Schaue dir meine Projekte auf github () an, als auch meine Webseiten': 'https://github.com/Significant-Gravitas/Auto-GPT, https://discord.gg/autogpt und https://twitter.com/SigGravitas'}", + ), + ("", ""), + ("hello", "hello"), + ("hello\x1B[31m world", "hello world"), + ("\x1B[36mHello,\x1B[32m World!", "Hello, World!"), + ( + "\x1B[1m\x1B[31mError:\x1B[0m\x1B[31m file not found", + "Error: file not found", + ), + ], +) +def test_remove_color_codes(raw_text, clean_text): + assert remove_color_codes(raw_text) == clean_text From ee4043ae19129eece625adb84ac06b24aa3efddd Mon Sep 17 00:00:00 2001 From: Media <12145726+rihp@users.noreply.github.com> Date: Fri, 28 Apr 2023 20:27:52 +0200 Subject: [PATCH 185/222] Refactor test_chat to use pytest instead of unittest (#3484) * refactor_for_pytest * formatting --------- Co-authored-by: James Collins --- tests/unit/test_chat.py | 140 +++++++++++++++++++--------------------- 1 file changed, 67 insertions(+), 73 deletions(-) diff --git a/tests/unit/test_chat.py b/tests/unit/test_chat.py index 7b656d73..61ea773c 100644 --- a/tests/unit/test_chat.py +++ b/tests/unit/test_chat.py @@ -1,88 +1,82 @@ # Generated by CodiumAI import time -import unittest from unittest.mock import patch from autogpt.chat import create_chat_message, generate_context -class TestChat(unittest.TestCase): - """Test the chat module functions.""" +def test_happy_path_role_content(): + """Test that the function returns a dictionary with the correct keys and values when valid strings are provided for role and content.""" + result = create_chat_message("system", "Hello, world!") + assert result == {"role": "system", "content": "Hello, world!"} - def test_happy_path_role_content(self): - """Test that the function returns a dictionary with the correct keys and values when valid strings are provided for role and content.""" - result = create_chat_message("system", "Hello, world!") - self.assertEqual(result, {"role": "system", "content": "Hello, world!"}) - def test_empty_role_content(self): - """Test that the function returns a dictionary with the correct keys and values when empty strings are provided for role and content.""" - result = create_chat_message("", "") - self.assertEqual(result, {"role": "", "content": ""}) +def test_empty_role_content(): + """Test that the function returns a dictionary with the correct keys and values when empty strings are provided for role and content.""" + result = create_chat_message("", "") + assert 
result == {"role": "", "content": ""} - @patch("time.strftime") - def test_generate_context_empty_inputs(self, mock_strftime): - """Test the behavior of the generate_context function when all input parameters are empty.""" - # Mock the time.strftime function to return a fixed value - mock_strftime.return_value = "Sat Apr 15 00:00:00 2023" - # Arrange - prompt = "" - relevant_memory = "" - full_message_history = [] - model = "gpt-3.5-turbo-0301" - # Act - result = generate_context(prompt, relevant_memory, full_message_history, model) +def test_generate_context_empty_inputs(mocker): + """Test the behavior of the generate_context function when all input parameters are empty.""" + # Mock the time.strftime function to return a fixed value + mocker.patch("time.strftime", return_value="Sat Apr 15 00:00:00 2023") + # Arrange + prompt = "" + relevant_memory = "" + full_message_history = [] + model = "gpt-3.5-turbo-0301" - # Assert - expected_result = ( - -1, - 47, - 3, - [ - {"role": "system", "content": ""}, - { - "role": "system", - "content": f"The current time and date is {time.strftime('%c')}", - }, - { - "role": "system", - "content": f"This reminds you of these events from your past:\n\n\n", - }, - ], - ) - self.assertEqual(result, expected_result) + # Act + result = generate_context(prompt, relevant_memory, full_message_history, model) - def test_generate_context_valid_inputs(self): - """Test that the function successfully generates a current_context given valid inputs.""" - # Given - prompt = "What is your favorite color?" - relevant_memory = "You once painted your room blue." - full_message_history = [ - create_chat_message("user", "Hi there!"), - create_chat_message("assistant", "Hello! How can I assist you today?"), - create_chat_message("user", "Can you tell me a joke?"), - create_chat_message( - "assistant", - "Why did the tomato turn red? Because it saw the salad dressing!", - ), - create_chat_message("user", "Haha, that's funny."), - ] - model = "gpt-3.5-turbo-0301" + # Assert + expected_result = ( + -1, + 47, + 3, + [ + {"role": "system", "content": ""}, + { + "role": "system", + "content": f"The current time and date is {time.strftime('%c')}", + }, + { + "role": "system", + "content": f"This reminds you of these events from your past:\n\n\n", + }, + ], + ) + assert result == expected_result - # When - result = generate_context(prompt, relevant_memory, full_message_history, model) - # Then - self.assertIsInstance(result[0], int) - self.assertIsInstance(result[1], int) - self.assertIsInstance(result[2], int) - self.assertIsInstance(result[3], list) - self.assertGreaterEqual(result[0], 0) - self.assertGreaterEqual(result[1], 0) - self.assertGreaterEqual(result[2], 0) - self.assertGreaterEqual( - len(result[3]), 3 - ) # current_context should have at least 3 messages - self.assertLessEqual( - result[1], 2048 - ) # token limit for GPT-3.5-turbo-0301 is 2048 tokens +def test_generate_context_valid_inputs(): + """Test that the function successfully generates a current_context given valid inputs.""" + # Given + prompt = "What is your favorite color?" + relevant_memory = "You once painted your room blue." + full_message_history = [ + create_chat_message("user", "Hi there!"), + create_chat_message("assistant", "Hello! How can I assist you today?"), + create_chat_message("user", "Can you tell me a joke?"), + create_chat_message( + "assistant", + "Why did the tomato turn red? 
Because it saw the salad dressing!", + ), + create_chat_message("user", "Haha, that's funny."), + ] + model = "gpt-3.5-turbo-0301" + + # When + result = generate_context(prompt, relevant_memory, full_message_history, model) + + # Then + assert isinstance(result[0], int) + assert isinstance(result[1], int) + assert isinstance(result[2], int) + assert isinstance(result[3], list) + assert result[0] >= 0 + assert result[2] >= 0 + assert result[1] >= 0 + assert len(result[3]) >= 3 # current_context should have at least 3 messages + assert result[1] <= 2048 # token limit for GPT-3.5-turbo-0301 is 2048 tokens From 3b74d2150e26ec9387e40cf65a75d8a995d8c9f3 Mon Sep 17 00:00:00 2001 From: James Collins Date: Fri, 28 Apr 2023 12:00:54 -0700 Subject: [PATCH 186/222] Organize all the llm stuff into a subpackage (#3436) --- autogpt/agent/agent.py | 3 +-- autogpt/agent/agent_manager.py | 2 +- autogpt/commands/analyze_code.py | 2 +- autogpt/commands/improve_code.py | 2 +- autogpt/commands/write_tests.py | 2 +- autogpt/json_utils/json_fix_llm.py | 2 +- autogpt/llm/__init__.py | 22 ++++++++++++++++++++++ autogpt/{ => llm}/api_manager.py | 2 +- autogpt/{ => llm}/chat.py | 16 +++++++--------- autogpt/{ => llm}/llm_utils.py | 2 +- autogpt/{ => llm}/modelsinfo.py | 0 autogpt/{ => llm}/token_counter.py | 0 autogpt/logs.py | 2 -- autogpt/memory/local.py | 2 +- autogpt/memory/milvus.py | 2 +- autogpt/memory/pinecone.py | 2 +- autogpt/memory/redismem.py | 2 +- autogpt/memory/weaviate.py | 2 +- autogpt/processing/text.py | 13 ++++--------- autogpt/prompts/prompt.py | 2 +- autogpt/setup.py | 2 +- tests/conftest.py | 2 +- tests/integration/test_llm_utils.py | 2 +- tests/integration/weaviate_memory_tests.py | 2 +- tests/test_agent.py | 3 --- tests/test_agent_manager.py | 2 +- tests/test_api_manager.py | 2 +- tests/test_llm_utils.py | 6 +++--- tests/test_token_counter.py | 3 +-- tests/unit/test_chat.py | 2 +- tests/unit/test_get_self_feedback.py | 2 +- 31 files changed, 59 insertions(+), 51 deletions(-) create mode 100644 autogpt/llm/__init__.py rename autogpt/{ => llm}/api_manager.py (98%) rename autogpt/{ => llm}/chat.py (94%) rename autogpt/{ => llm}/llm_utils.py (99%) rename autogpt/{ => llm}/modelsinfo.py (100%) rename autogpt/{ => llm}/token_counter.py (100%) diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index fd6a7cb6..9e0f54e3 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -1,11 +1,10 @@ from colorama import Fore, Style from autogpt.app import execute_command, get_command -from autogpt.chat import chat_with_ai, create_chat_message from autogpt.config import Config from autogpt.json_utils.json_fix_llm import fix_json_using_multiple_techniques from autogpt.json_utils.utilities import validate_json -from autogpt.llm_utils import create_chat_completion +from autogpt.llm import chat_with_ai, create_chat_completion, create_chat_message from autogpt.logs import logger, print_assistant_thoughts from autogpt.speech import say_text from autogpt.spinner import Spinner diff --git a/autogpt/agent/agent_manager.py b/autogpt/agent/agent_manager.py index 257127a3..1283fdae 100644 --- a/autogpt/agent/agent_manager.py +++ b/autogpt/agent/agent_manager.py @@ -4,7 +4,7 @@ from __future__ import annotations from typing import List from autogpt.config.config import Config -from autogpt.llm_utils import create_chat_completion +from autogpt.llm import create_chat_completion from autogpt.singleton import Singleton from autogpt.types.openai import Message diff --git a/autogpt/commands/analyze_code.py 
b/autogpt/commands/analyze_code.py index 47cfc1e7..cff8a737 100644 --- a/autogpt/commands/analyze_code.py +++ b/autogpt/commands/analyze_code.py @@ -2,7 +2,7 @@ from __future__ import annotations from autogpt.commands.command import command -from autogpt.llm_utils import call_ai_function +from autogpt.llm import call_ai_function @command( diff --git a/autogpt/commands/improve_code.py b/autogpt/commands/improve_code.py index f953cf2b..ff4cf48c 100644 --- a/autogpt/commands/improve_code.py +++ b/autogpt/commands/improve_code.py @@ -3,7 +3,7 @@ from __future__ import annotations import json from autogpt.commands.command import command -from autogpt.llm_utils import call_ai_function +from autogpt.llm import call_ai_function @command( diff --git a/autogpt/commands/write_tests.py b/autogpt/commands/write_tests.py index 91cd9304..c6a03324 100644 --- a/autogpt/commands/write_tests.py +++ b/autogpt/commands/write_tests.py @@ -4,7 +4,7 @@ from __future__ import annotations import json from autogpt.commands.command import command -from autogpt.llm_utils import call_ai_function +from autogpt.llm import call_ai_function @command( diff --git a/autogpt/json_utils/json_fix_llm.py b/autogpt/json_utils/json_fix_llm.py index 10317acc..73f76648 100644 --- a/autogpt/json_utils/json_fix_llm.py +++ b/autogpt/json_utils/json_fix_llm.py @@ -11,7 +11,7 @@ from regex import regex from autogpt.config import Config from autogpt.json_utils.json_fix_general import correct_json -from autogpt.llm_utils import call_ai_function +from autogpt.llm import call_ai_function from autogpt.logs import logger from autogpt.speech import say_text diff --git a/autogpt/llm/__init__.py b/autogpt/llm/__init__.py new file mode 100644 index 00000000..3a958285 --- /dev/null +++ b/autogpt/llm/__init__.py @@ -0,0 +1,22 @@ +from autogpt.llm.api_manager import ApiManager +from autogpt.llm.chat import chat_with_ai, create_chat_message, generate_context +from autogpt.llm.llm_utils import ( + call_ai_function, + create_chat_completion, + get_ada_embedding, +) +from autogpt.llm.modelsinfo import COSTS +from autogpt.llm.token_counter import count_message_tokens, count_string_tokens + +__all__ = [ + "ApiManager", + "create_chat_message", + "generate_context", + "chat_with_ai", + "call_ai_function", + "create_chat_completion", + "get_ada_embedding", + "COSTS", + "count_message_tokens", + "count_string_tokens", +] diff --git a/autogpt/api_manager.py b/autogpt/llm/api_manager.py similarity index 98% rename from autogpt/api_manager.py rename to autogpt/llm/api_manager.py index b16f3af7..9143389e 100644 --- a/autogpt/api_manager.py +++ b/autogpt/llm/api_manager.py @@ -3,8 +3,8 @@ from __future__ import annotations import openai from autogpt.config import Config +from autogpt.llm.modelsinfo import COSTS from autogpt.logs import logger -from autogpt.modelsinfo import COSTS from autogpt.singleton import Singleton diff --git a/autogpt/chat.py b/autogpt/llm/chat.py similarity index 94% rename from autogpt/chat.py rename to autogpt/llm/chat.py index 469ec9bd..9bfd37e0 100644 --- a/autogpt/chat.py +++ b/autogpt/llm/chat.py @@ -3,10 +3,10 @@ from random import shuffle from openai.error import RateLimitError -from autogpt import token_counter -from autogpt.api_manager import ApiManager from autogpt.config import Config -from autogpt.llm_utils import create_chat_completion +from autogpt.llm.api_manager import ApiManager +from autogpt.llm.llm_utils import create_chat_completion +from autogpt.llm.token_counter import count_message_tokens from autogpt.logs import 
logger from autogpt.types.openai import Message @@ -43,7 +43,7 @@ def generate_context(prompt, relevant_memory, full_message_history, model): next_message_to_add_index = len(full_message_history) - 1 insertion_index = len(current_context) # Count the currently used tokens - current_tokens_used = token_counter.count_message_tokens(current_context, model) + current_tokens_used = count_message_tokens(current_context, model) return ( next_message_to_add_index, current_tokens_used, @@ -114,7 +114,7 @@ def chat_with_ai( prompt, relevant_memory, full_message_history, model ) - current_tokens_used += token_counter.count_message_tokens( + current_tokens_used += count_message_tokens( [create_chat_message("user", user_input)], model ) # Account for user input (appended later) @@ -122,9 +122,7 @@ def chat_with_ai( # print (f"CURRENT TOKENS USED: {current_tokens_used}") message_to_add = full_message_history[next_message_to_add_index] - tokens_to_add = token_counter.count_message_tokens( - [message_to_add], model - ) + tokens_to_add = count_message_tokens([message_to_add], model) if current_tokens_used + tokens_to_add > send_token_limit: break @@ -175,7 +173,7 @@ def chat_with_ai( ) if not plugin_response or plugin_response == "": continue - tokens_to_add = token_counter.count_message_tokens( + tokens_to_add = count_message_tokens( [create_chat_message("system", plugin_response)], model ) if current_tokens_used + tokens_to_add > send_token_limit: diff --git a/autogpt/llm_utils.py b/autogpt/llm/llm_utils.py similarity index 99% rename from autogpt/llm_utils.py rename to autogpt/llm/llm_utils.py index 11f8da3a..c1ba5fa5 100644 --- a/autogpt/llm_utils.py +++ b/autogpt/llm/llm_utils.py @@ -8,8 +8,8 @@ import openai from colorama import Fore, Style from openai.error import APIError, RateLimitError, Timeout -from autogpt.api_manager import ApiManager from autogpt.config import Config +from autogpt.llm.api_manager import ApiManager from autogpt.logs import logger from autogpt.types.openai import Message diff --git a/autogpt/modelsinfo.py b/autogpt/llm/modelsinfo.py similarity index 100% rename from autogpt/modelsinfo.py rename to autogpt/llm/modelsinfo.py diff --git a/autogpt/token_counter.py b/autogpt/llm/token_counter.py similarity index 100% rename from autogpt/token_counter.py rename to autogpt/llm/token_counter.py diff --git a/autogpt/logs.py b/autogpt/logs.py index fc529f25..c1ac126c 100644 --- a/autogpt/logs.py +++ b/autogpt/logs.py @@ -1,11 +1,9 @@ """Logging module for Auto-GPT.""" -import json import logging import os import random import re import time -import traceback from logging import LogRecord from colorama import Fore, Style diff --git a/autogpt/memory/local.py b/autogpt/memory/local.py index 87d83cc5..18b7255c 100644 --- a/autogpt/memory/local.py +++ b/autogpt/memory/local.py @@ -7,7 +7,7 @@ from typing import Any, List import numpy as np import orjson -from autogpt.llm_utils import get_ada_embedding +from autogpt.llm import get_ada_embedding from autogpt.memory.base import MemoryProviderSingleton EMBED_DIM = 1536 diff --git a/autogpt/memory/milvus.py b/autogpt/memory/milvus.py index 41f9ed13..40694df5 100644 --- a/autogpt/memory/milvus.py +++ b/autogpt/memory/milvus.py @@ -4,7 +4,7 @@ import re from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, connections from autogpt.config import Config -from autogpt.llm_utils import get_ada_embedding +from autogpt.llm import get_ada_embedding from autogpt.memory.base import MemoryProviderSingleton diff --git 
a/autogpt/memory/pinecone.py b/autogpt/memory/pinecone.py index 3aa78f9e..a0bd162a 100644 --- a/autogpt/memory/pinecone.py +++ b/autogpt/memory/pinecone.py @@ -1,7 +1,7 @@ import pinecone from colorama import Fore, Style -from autogpt.llm_utils import get_ada_embedding +from autogpt.llm import get_ada_embedding from autogpt.logs import logger from autogpt.memory.base import MemoryProviderSingleton diff --git a/autogpt/memory/redismem.py b/autogpt/memory/redismem.py index 5618bb37..6b512778 100644 --- a/autogpt/memory/redismem.py +++ b/autogpt/memory/redismem.py @@ -10,7 +10,7 @@ from redis.commands.search.field import TextField, VectorField from redis.commands.search.indexDefinition import IndexDefinition, IndexType from redis.commands.search.query import Query -from autogpt.llm_utils import get_ada_embedding +from autogpt.llm import get_ada_embedding from autogpt.logs import logger from autogpt.memory.base import MemoryProviderSingleton diff --git a/autogpt/memory/weaviate.py b/autogpt/memory/weaviate.py index fbebbfd7..be29e1b9 100644 --- a/autogpt/memory/weaviate.py +++ b/autogpt/memory/weaviate.py @@ -3,7 +3,7 @@ from weaviate import Client from weaviate.embedded import EmbeddedOptions from weaviate.util import generate_uuid5 -from autogpt.llm_utils import get_ada_embedding +from autogpt.llm import get_ada_embedding from autogpt.memory.base import MemoryProviderSingleton diff --git a/autogpt/processing/text.py b/autogpt/processing/text.py index 9946951a..372e331d 100644 --- a/autogpt/processing/text.py +++ b/autogpt/processing/text.py @@ -4,9 +4,8 @@ from typing import Dict, Generator, Optional import spacy from selenium.webdriver.remote.webdriver import WebDriver -from autogpt import token_counter from autogpt.config import Config -from autogpt.llm_utils import create_chat_completion +from autogpt.llm import count_message_tokens, create_chat_completion from autogpt.memory import get_memory CFG = Config() @@ -44,7 +43,7 @@ def split_text( ] expected_token_usage = ( - token_usage_of_chunk(messages=message_with_additional_sentence, model=model) + count_message_tokens(messages=message_with_additional_sentence, model=model) + 1 ) if expected_token_usage <= max_length: @@ -56,7 +55,7 @@ def split_text( create_message(" ".join(current_chunk), question) ] expected_token_usage = ( - token_usage_of_chunk(messages=message_this_sentence_only, model=model) + count_message_tokens(messages=message_this_sentence_only, model=model) + 1 ) if expected_token_usage > max_length: @@ -68,10 +67,6 @@ def split_text( yield " ".join(current_chunk) -def token_usage_of_chunk(messages, model): - return token_counter.count_message_tokens(messages, model) - - def summarize_text( url: str, text: str, question: str, driver: Optional[WebDriver] = None ) -> str: @@ -112,7 +107,7 @@ def summarize_text( memory.add(memory_to_add) messages = [create_message(chunk, question)] - tokens_for_chunk = token_counter.count_message_tokens(messages, model) + tokens_for_chunk = count_message_tokens(messages, model) print( f"Summarizing chunk {i + 1} / {len(chunks)} of length {len(chunk)} characters, or {tokens_for_chunk} tokens" ) diff --git a/autogpt/prompts/prompt.py b/autogpt/prompts/prompt.py index cbfcfca9..7a53f603 100644 --- a/autogpt/prompts/prompt.py +++ b/autogpt/prompts/prompt.py @@ -1,8 +1,8 @@ from colorama import Fore -from autogpt.api_manager import ApiManager from autogpt.config.ai_config import AIConfig from autogpt.config.config import Config +from autogpt.llm import ApiManager from autogpt.logs import logger from 
autogpt.prompts.generator import PromptGenerator from autogpt.setup import prompt_user diff --git a/autogpt/setup.py b/autogpt/setup.py index e3c251ec..ddc7c6bf 100644 --- a/autogpt/setup.py +++ b/autogpt/setup.py @@ -6,7 +6,7 @@ from colorama import Fore, Style from autogpt import utils from autogpt.config import Config from autogpt.config.ai_config import AIConfig -from autogpt.llm_utils import create_chat_completion +from autogpt.llm import create_chat_completion from autogpt.logs import logger CFG = Config() diff --git a/tests/conftest.py b/tests/conftest.py index e3c590d2..da00058b 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -3,8 +3,8 @@ from pathlib import Path import pytest from pytest_mock import MockerFixture -from autogpt.api_manager import ApiManager from autogpt.config import Config +from autogpt.llm import ApiManager from autogpt.workspace import Workspace pytest_plugins = ["tests.integration.agent_factory"] diff --git a/tests/integration/test_llm_utils.py b/tests/integration/test_llm_utils.py index 7f312bde..74b86016 100644 --- a/tests/integration/test_llm_utils.py +++ b/tests/integration/test_llm_utils.py @@ -3,7 +3,7 @@ import string import pytest from numpy.random import RandomState -from autogpt.llm_utils import get_ada_embedding +from autogpt.llm.llm_utils import get_ada_embedding from tests.utils import requires_api_key diff --git a/tests/integration/weaviate_memory_tests.py b/tests/integration/weaviate_memory_tests.py index f4085775..2cea871c 100644 --- a/tests/integration/weaviate_memory_tests.py +++ b/tests/integration/weaviate_memory_tests.py @@ -5,7 +5,7 @@ from weaviate import Client from weaviate.util import get_valid_uuid from autogpt.config import Config -from autogpt.llm_utils import get_ada_embedding +from autogpt.llm import get_ada_embedding from autogpt.memory.weaviate import WeaviateMemory diff --git a/tests/test_agent.py b/tests/test_agent.py index 9c67f7e4..e8596731 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -3,10 +3,7 @@ from unittest.mock import MagicMock import pytest from autogpt.agent import Agent -from autogpt.chat import chat_with_ai from autogpt.config import Config -from autogpt.speech import say_text -from autogpt.utils import clean_input @pytest.fixture diff --git a/tests/test_agent_manager.py b/tests/test_agent_manager.py index d1bb3d8b..e7e696ae 100644 --- a/tests/test_agent_manager.py +++ b/tests/test_agent_manager.py @@ -1,7 +1,7 @@ import pytest from autogpt.agent.agent_manager import AgentManager -from autogpt.llm_utils import create_chat_completion +from autogpt.llm import create_chat_completion @pytest.fixture diff --git a/tests/test_api_manager.py b/tests/test_api_manager.py index 71aa6d63..ba64a72f 100644 --- a/tests/test_api_manager.py +++ b/tests/test_api_manager.py @@ -2,7 +2,7 @@ from unittest.mock import MagicMock, patch import pytest -from autogpt.api_manager import COSTS, ApiManager +from autogpt.llm import COSTS, ApiManager api_manager = ApiManager() diff --git a/tests/test_llm_utils.py b/tests/test_llm_utils.py index 83cac0de..27c9ff88 100644 --- a/tests/test_llm_utils.py +++ b/tests/test_llm_utils.py @@ -1,8 +1,8 @@ import pytest from openai.error import APIError, RateLimitError -from autogpt.llm_utils import get_ada_embedding, retry_openai_api -from autogpt.modelsinfo import COSTS +from autogpt.llm import COSTS, get_ada_embedding +from autogpt.llm.llm_utils import retry_openai_api @pytest.fixture(params=[RateLimitError, APIError]) @@ -19,7 +19,7 @@ def mock_create_embedding(mocker): 
mock_response.usage.prompt_tokens = 5 mock_response.__getitem__.side_effect = lambda key: [{"embedding": [0.1, 0.2, 0.3]}] return mocker.patch( - "autogpt.llm_utils.create_embedding", return_value=mock_response + "autogpt.llm.llm_utils.create_embedding", return_value=mock_response ) diff --git a/tests/test_token_counter.py b/tests/test_token_counter.py index ac25796d..966a0aff 100644 --- a/tests/test_token_counter.py +++ b/tests/test_token_counter.py @@ -1,7 +1,6 @@ import pytest -import tests.context -from autogpt.token_counter import count_message_tokens, count_string_tokens +from autogpt.llm import count_message_tokens, count_string_tokens def test_count_message_tokens(): diff --git a/tests/unit/test_chat.py b/tests/unit/test_chat.py index 61ea773c..9cb7c625 100644 --- a/tests/unit/test_chat.py +++ b/tests/unit/test_chat.py @@ -2,7 +2,7 @@ import time from unittest.mock import patch -from autogpt.chat import create_chat_message, generate_context +from autogpt.llm import create_chat_message, generate_context def test_happy_path_role_content(): diff --git a/tests/unit/test_get_self_feedback.py b/tests/unit/test_get_self_feedback.py index 54641f91..09707270 100644 --- a/tests/unit/test_get_self_feedback.py +++ b/tests/unit/test_get_self_feedback.py @@ -1,6 +1,6 @@ from autogpt.agent.agent import Agent from autogpt.config import AIConfig -from autogpt.llm_utils import create_chat_completion +from autogpt.llm import create_chat_completion def test_get_self_feedback(mocker): From aa3e37ac149cb9789d2331016a30b13bf89fcd75 Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Fri, 28 Apr 2023 13:07:49 -0700 Subject: [PATCH 187/222] Fix memory by adding it only when context window full (#3469) * Fix memory by adding it only when context window full * clean json utils --- autogpt/agent/agent.py | 11 +- autogpt/json_utils/utilities.py | 32 +- autogpt/llm/chat.py | 8 + autogpt/memory_management/store_memory.py | 33 ++ ...ve_memory_trimmed_from_context_window.yaml | 335 ++++++++++++++++++ tests/integration/test_memory_management.py | 64 ++++ 6 files changed, 471 insertions(+), 12 deletions(-) create mode 100644 autogpt/memory_management/store_memory.py create mode 100644 tests/integration/cassettes/test_memory_management/test_save_memory_trimmed_from_context_window.yaml create mode 100644 tests/integration/test_memory_management.py diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index 9e0f54e3..a049b9d2 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -3,7 +3,7 @@ from colorama import Fore, Style from autogpt.app import execute_command, get_command from autogpt.config import Config from autogpt.json_utils.json_fix_llm import fix_json_using_multiple_techniques -from autogpt.json_utils.utilities import validate_json +from autogpt.json_utils.utilities import LLM_DEFAULT_RESPONSE_FORMAT, validate_json from autogpt.llm import chat_with_ai, create_chat_completion, create_chat_message from autogpt.logs import logger, print_assistant_thoughts from autogpt.speech import say_text @@ -107,7 +107,7 @@ class Agent: # Print Assistant thoughts if assistant_reply_json != {}: - validate_json(assistant_reply_json, "llm_response_format_1") + validate_json(assistant_reply_json, LLM_DEFAULT_RESPONSE_FORMAT) # Get command name and arguments try: print_assistant_thoughts( @@ -251,13 +251,6 @@ class Agent: result = plugin.post_command(command_name, result) if self.next_action_count > 0: self.next_action_count -= 1 - memory_to_add = ( - f"Assistant Reply: {assistant_reply} " - f"\nResult: 
{result} "
-            f"\nHuman Feedback: {user_input} "
-        )
-
-        self.memory.add(memory_to_add)
 
             # Check if there's a result from the command append it to the message
             # history
diff --git a/autogpt/json_utils/utilities.py b/autogpt/json_utils/utilities.py
index eb9bb687..fa604a42 100644
--- a/autogpt/json_utils/utilities.py
+++ b/autogpt/json_utils/utilities.py
@@ -8,6 +8,7 @@ from autogpt.config import Config
 from autogpt.logs import logger
 
 CFG = Config()
+LLM_DEFAULT_RESPONSE_FORMAT = "llm_response_format_1"
 
 
 def extract_char_position(error_message: str) -> int:
@@ -28,10 +29,10 @@ def extract_char_position(error_message: str) -> int:
         raise ValueError("Character position not found in the error message.")
 
 
-def validate_json(json_object: object, schema_name: object) -> object:
+def validate_json(json_object: object, schema_name: str) -> dict | None:
     """
     :type schema_name: object
-    :param schema_name:
+    :param schema_name: str
     :type json_object: object
     """
     with open(f"autogpt/json_utils/{schema_name}.json", "r") as f:
@@ -48,7 +49,32 @@ def validate_json(json_object: object, schema_name: object) -> object:
         for error in errors:
             logger.error(f"Error: {error.message}")
 
-    elif CFG.debug_mode:
+        return None
+    if CFG.debug_mode:
         print("The JSON object is valid.")
 
     return json_object
+
+
+def validate_json_string(json_string: str, schema_name: str) -> dict | None:
+    """
+    :type schema_name: object
+    :param schema_name: str
+    :type json_object: object
+    """
+
+    try:
+        json_loaded = json.loads(json_string)
+        return validate_json(json_loaded, schema_name)
+    except:
+        return None
+
+
+def is_string_valid_json(json_string: str, schema_name: str) -> bool:
+    """
+    :type schema_name: object
+    :param schema_name: str
+    :type json_object: object
+    """
+
+    return validate_json_string(json_string, schema_name) is not None
diff --git a/autogpt/llm/chat.py b/autogpt/llm/chat.py
index 9bfd37e0..119468c3 100644
--- a/autogpt/llm/chat.py
+++ b/autogpt/llm/chat.py
@@ -8,6 +8,9 @@ from autogpt.llm.api_manager import ApiManager
 from autogpt.llm.llm_utils import create_chat_completion
 from autogpt.llm.token_counter import count_message_tokens
 from autogpt.logs import logger
+from autogpt.memory_management.store_memory import (
+    save_memory_trimmed_from_context_window,
+)
 from autogpt.types.openai import Message
 
 cfg = Config()
@@ -124,6 +127,11 @@ def chat_with_ai(
                 tokens_to_add = count_message_tokens([message_to_add], model)
                 if current_tokens_used + tokens_to_add > send_token_limit:
+                    save_memory_trimmed_from_context_window(
+                        full_message_history,
+                        next_message_to_add_index,
+                        permanent_memory,
+                    )
                     break
 
         # Add the most recent message to the start of the current context,
diff --git a/autogpt/memory_management/store_memory.py b/autogpt/memory_management/store_memory.py
new file mode 100644
index 00000000..e37434ef
--- /dev/null
+++ b/autogpt/memory_management/store_memory.py
@@ -0,0 +1,33 @@
+from autogpt.json_utils.utilities import (
+    LLM_DEFAULT_RESPONSE_FORMAT,
+    is_string_valid_json,
+)
+from autogpt.logs import logger
+
+
+def format_memory(assistant_reply, next_message_content):
+    # the next_message_content is a variable to stores either the user_input or the command following the assistant_reply
+    result = (
+        "None" if next_message_content.startswith("Command") else next_message_content
+    )
+    user_input = (
+        "None"
+        if next_message_content.startswith("Human feedback")
+        else next_message_content
+    )
+
+    return f"Assistant Reply: {assistant_reply}\nResult: {result}\nHuman Feedback:{user_input}"
+
+
+def save_memory_trimmed_from_context_window(
+    full_message_history, next_message_to_add_index, permanent_memory
+):
+    while next_message_to_add_index >= 0:
+        message_content = full_message_history[next_message_to_add_index]["content"]
+        if is_string_valid_json(message_content, LLM_DEFAULT_RESPONSE_FORMAT):
+            next_message = full_message_history[next_message_to_add_index + 1]
+            memory_to_add = format_memory(message_content, next_message["content"])
+            logger.debug(f"Storing the following memory: {memory_to_add}")
+            permanent_memory.add(memory_to_add)
+
+        next_message_to_add_index -= 1
diff --git a/tests/integration/cassettes/test_memory_management/test_save_memory_trimmed_from_context_window.yaml b/tests/integration/cassettes/test_memory_management/test_save_memory_trimmed_from_context_window.yaml
new file mode 100644
index 00000000..1ad25810
--- /dev/null
+++ b/tests/integration/cassettes/test_memory_management/test_save_memory_trimmed_from_context_window.yaml
@@ -0,0 +1,335 @@
+interactions:
+- request:
+    body: '{"input": ["Assistant Reply: { \"thoughts\": { \"text\": Result:
+      None Human Feedback: Command Result: Important Information. "], "model": "text-embedding-ada-002",
+      "encoding_format": "base64"}'
+    headers:
+      Accept:
+      - '*/*'
+      Accept-Encoding:
+      - gzip, deflate
+      Connection:
+      - keep-alive
+      Content-Length:
+      - '207'
+      Content-Type:
+      - application/json
+    method: POST
+    uri: https://api.openai.com/v1/embeddings
+  response:
+    body:
+      string: !!binary |
+
RjdLjzmHcFhhRoRsx+dV0UpIafucuf7a9sa8e1SKSw5buhzJEk2fGw4Ryt8fhi+vB5/PSoGVJfYs + gi39Hs9jK0RwiMAk3lNbIRoMmg6Nzs90dcJZPMessZFjRz1z70SKB+IWVKFZw4iDpb6bhW0v/eqJ + WEXieKJFShmtvZwTo9KQ90n9agEf11fxymQPj3/rE+Y4CIiWu9jg76oE5RwLCYkbeW8MO1oUgIoi + Yno0XYzxLvkNIor8YtqjvXnczy8SqOYmJLt6cYtnXSsFVO0hJffTKzb6d6EcYP7AQJKXyNAc9qiW + o6XrMH0cNETvy0lW1KwdiX6uMm80S9cE+gpstm9In7HjHlmoGxYlM0XP7phZihRexzwjfj9fu0nd + GylsjXKHEd2cDL56r0fYHQSdBfml6egt8dK1OhsaUWsX+DQw+4Q+19Qm20Z681G+LEcYL4ZOPCyW + WfMuVgd4yM2NELcQ+Bi84hk2UXYnQVZEcXPWuyccfFxi/j74nAYt0cFf3jdEy5XR+9hWVSnX/lDQ + 8eZF3exfb09UvZcpPurKAY3zi9eQdiaw7WZsjAElyEWiFHWMUP3RDd0WBKjsRCMJe+4QOxhDtVnc + /CuJFlFofKrm3MBT/dyJ/YESTRv2roGvtR0VNPfR/c4vcqqPwsxyMI2RVb0O7OhMxHscdrwXgtYG + KXEUYhXR0eAD5xUapvMaC9JNRr1qQQHn+hOS3ddPTL/+Vu6oRDfx9CxnJwpvymPnXolDD6+YYqs1 + Qbs/Nbbb4iufPtE8y/aj9DGKP694jEU7Re05Sonz1j9GR66fG+TocyYB5FlW3Qq1hVLSJ+LHes6n + 73psvv2L3aqpNXrFME5wCVcZXeqZWDK6Supff6BFUSWch40AcKlm4+c3u24SCxs2SFkztzz2MYtF + RYf5NL4YEY4xYpf12lbOBDHiPMnbG8JK69E5bEP261fUuZYLxfG2FTPXXVKOWygtxdt4Kl1994t2 + 4rP5+QWm3ry5nEmc1vCpX3sWOKgy5tSmB4Ty14cF9YzLyYq2TzSr6ZFoprov5zVqZAT+5sn8hB4N + plgbADXbF3SeihLxxlUK2Y/YheyEVYNGYZlX0O0Cg93UxojXsN4lkOJTxrbN9oU+/f0oopWQpVhW + toPRf1xbgvb0kJnaORYa8tV4g5W5fTDbX25jQVjmNYgVi+n01Td6S4wUZK7tWWx2Yta35UMC7Fod + hfNqgUYnGSmUiWVTJeVmRpdwrJG7jCjD1dR6f/z+7dmlTD9XyJvWfmJBVy4r5ji+0HH/cpegvG1t + Kn79AttvQ105Facz0e2EG93HjwrYqSjF3SoZMz7nGyp7eXonznITd7PixLViUxrj6WpUHrXyQwKr + KYgYeShl3F/33JW/vECnr/7+9gvEvYfw4ls/LBCuEnLssCdGExjGdPaOKZpCKyeutLwYbMs/GO5k + cvFkyEo23iWzhQoVM1ZWp50xW/khRetjmtMuPPFyymcxR6HZWMyhaBnTtb2/wcJstvTIvQ0a64gJ + sI78AyPLweCrRer2P/9F58vpnU3v/oOBtPTKXE9S+Pxc9jny7PWO8jeWeOuufYAyfYzMaZ6bbGRV + pSLJXK6ofHu2cS82hb0Z2fhkd1nrEL/aUwThbkd/fsUYV2nayqupen77idYNxG0oJIIls2C5enhD + t7uIqH56L+afVzc+mEMIimEVJsmrzS6eh0/Vg0Q3129/WHlcjiJJxuVewMq7M7u16lg2vGE8kUB2 + HGO6C1iFHukK2ebqu+sd3/YhXokx0Q/na8ytyzzLS1yccBd/XlmfRmoF3DousBy0rdHrFjPRx8Uq + +fHaMAzd6Y9+kbR7G2yslfaP3uKiO3lN+zRlEG99RIVaFTMmm0ECWE4qdupuijHkSwrwWqYGTSsU + dnytK4L89StYLvS0m8iD9OiwOR2I+6JzOQme+lQe4bGm8vbloTHdJia869T6Lz1TXKmCquIR3iw3 + cTm1gzQij5EzHheHTTYuxeCEzmETMv27v199bNHjWgyUZ4R2zbc+IbxeF3h6jLNBrb0qwbc/EL/z + N9noznuq+J/bhu7yV5LNi8e9ApX5NjlmmhJPZ//rZxQ9JNbL84zZMGwBDQsJM+9IPuU0yjgBZ7fJ + 8VIWAj4UhlRDnPRP/JEvZTnexXeL8LUxWJaiRzmFEPTy5k5O5NufumkzPyo4x3f7x3OcBuLyCev9 + 2mT+LQ2MSeudBD0HTljQ3Cy+0u6JhNwjPRHiqBUffnyNCkcnPz/cB7U0A7bKGCvLz8WYbdMF0Hvi + Md9nLZ8OXiugL6/h2esqNF24J0H3cHssn0+6sVpVj6eSDOcL0wZUZVTFnxmux7dKjEcjoBHl0whL + 7FhEn3cKmtnNm5H66CZi7s73kkfN+wm//rTLtrY3qflNQubndSPOpta68dcf0WWeKLBEy6ZF6lKY + nuiAl6G1MXrUBCESKleit1jlJY3iwIZWlV367hQ75mZqY7gm3P7jrwTHt/GfftHJ20U2xa99KL86 + sWM757BHk3/oW7SpDzuSFO4aNdfw5qJqcW6JecGtMe/ajwTgDCldCYplTI/rpYb5lB2J70HT9S+w + Z8hvskiMcDx0k+9cXFiidocXEruVX162QEObiKVBdfovXm0PVkTsgxd0Y+egGxQnsWJ29HIy/nj6 + ItyXAyfq64r51Jln8c96BlltxeNL7kZ0ZHsFiy9p4w3Y8egfvkZGsvRaqbjegK7qhO3cHee9n3Qh + LLtwTTCCPBvv/eQqoFQdFW67wpu+5xlwjkq2C2tsTJ15F0Am0p3pU1Hy5hqHtvLjqa+/K8fstrHg + y6+YNzvi8bC9gDS0Sv3l9cqbBJAXYF3HhumLaPSolkUR6FHL6QPfu24owocJK5pcqCiKQ8aVbQ7o + 2p8K4ntOXQ4HdysCf+g7hiW76vicHXo5j3JCiCwEiF+o3cLxEiokcHRsTHhHeziJ9M2cpbUvx9d2 + fUJmmJ4YOfZXg9/lW4ic/X3FjOOnLefj4UDRr7/RH08HXaXCXrqpzJrcY8zLRJWU2/26Z66yKL2f + Psrbs3n/w4PC7Wq1m+2wu9P38qBz3gBbwHqxEOgo0VXc9KcOYBlwkcpfvpix9bSg3B0b8tX3jKMr + TUEZpIrF3/PySS6tBJ/NdGE7muvdKrtcc5Bm0SJWeY+QsEqUBkIhuBOKALL2x4dfvSee/lI82gRn + Wz7Uro8Xl9cDMfUcN2irHpaM6I+2Y8kjseH7e9nXX6O+6k8hfPmESsmCdbOVrlMYut5i+LxxOkFq + UooO96Sjs53EHvd8lqKvPpIjWR09LrbTDHM/vbBcHv2sPR43PdK7NWLagle/PCOEQ237NFtOYsYI + iRcQprGIPyfzwL95kgrokj2wcDgK3tOPG1X+5hEYoiNGfXmIQpjuZ4QfsvrsGPWWLVwvdsIMudtl + 
nXG2MFzcu4tlsbe4wN9hpXS7OsbfftB1YjuN8M1bmC+5AZpT1ziAcpqEP35zbsTRgtHQdaKZnHcf + KscCNBd5R7/+zBtBUGbEBUyo3D4O5eAhr5apNBf4/uXDyjr5Ljw1lbHwVepcyG4bE4y8NAm+JQYX + O7Ft5T7Y+Gwn6DXvjdOlRtuzdWfax9Yz0Ql5/uM33LNlFo/e0vXRfJpfTBflJepfLIxgp3xkhl+V + b8xDp/WQN8L9myacDK4vXRe+eS8J3q+w+/JKpQjvo8rMeSuUvE+jGibf41S2ogsaut1egK3gh3SM + lm32cg6yBd98FJe7RWN8fnmZ9jzJFK2lwuvly3pGfh1NLHjf824mcVTDj7/T3RahYdfRAl2qhBNr + Cj8ZX+V9BNJ8E5mavGhHl5Nuoud7JMz4vJfGZFmz/uN9XH7eS29+rYoC6tdxoD89WX/zN8DiymdY + PEk/v2jD+1Nhsv/YejxbpJPRKZ8XmOfI+uPf0Fe/yI83RUBYRcdPE7DcA7ucpItao0EolxRu6WCM + Yd0dYNuGb/whHe3o2U1mKPjiSAWpwkhYpWmDog40Yi1sp2PBSpZBJvKdKo+l36F31QGMF00nVn/d + Zf18piYkQu4yS4UxY57/TpEyyNU3v3nw1omSm7xrdJc57rbtJsGzn0jdhgUjsjAgqmpTBNg1OypL + UWeMn2P+zTefNVHPUpFNzkE2YSG/KCG8eZXsl6/tUSjRaRUUf/In5DbyjRlNUHrzOiILFN+GJ/GD + Z8/n5KyFymUNHtvSW8fnfl/0yNjsX0w/KZqx+uotfOrFg/iKUfIxyf0n0pdZ/Du/5eA/ri5aHUSN + qBI9ZnMwOPqPf5gu9b3H/RQk+R4pMdOtrcon7Hg92tyDE14+FCMWa6LVEPQLmX55lU/WbaHDdfX4 + YIgyHc14Mz1hfyvOFBRxi9bffgXaxkp/eXg5XPWHuqlXvcKsi9Xz8ZuvozXRPTxf3GVMm+DuguDW + d7JLl4IxDEN5gO95Yv6t0IyVtwMXOlNHxIoei5KmXnOAb35OzC8/D1/9Rs1gEbrM914sLD4HAe1c + +cJ29WIRV6/t+gBpsPTIL7+mXIIDyB/RpihrWMn8FGRkuUFInKYJstE6mS5qVa+lr5w7ZVv4z5uy + fLM73dSwMr5+I1LGPHD/5MnzBSkjSgb6IW4fq0iYXKUCFh02TJ2egzcH4rqQj7o0EB8UwoVjvl+g + 5lKsma+ME591AUXw/f14JZetJwKbn4oQTw1Rv/OSD1YkG4XpXqSif1A7xvCmAeNyAlqoD8ujjVo3 + KBE385en1Yw3CxNQOKVvXM+GlwnaQ36Cpws73ErLi9f/ztuX13HBwEajn9ohZHQR0IXEFmX/2lgj + WDvDZsHyszF6k8kV2JOc47WTvvisemxGfJ2cyVZwTW/lxFcLnoFYE7X/rL2Z3ZsZoki9kMP9AR73 + dEmGItV7upBWc0k39LmAe2saGLpoyPrLWn9C2kQmCUxH8ppfXlPoG2A//8BcoTyBv7xu6Fw1Rrn+ + 5ZPK2TGJ7V00r9dHVVf0WDeIPh0y9Du/MBwWNds6T8NYBezZoN/8CFt2nM1zMZ4UHG57Yk8Iexy1 + Jxm+/p6ch0/RTbI7NNDvt8dvnmWX/WklLxAuO5M5wtCVr2sgPH/zJubvjKj8w5e/+YHbBz4fOzRI + yLnF/Zffq4xKxTVH9EVs5myKoRujyyuBSNtv6NItvmTx9cP68d5hsd/fs/HLm6i/RTn7+ec1w5sW + vvv5Jz8Qg3anwq8eH6b16ubOlDDKnu6JBbzFfP7p93d+hqvicIv5HDoY9jJqiB7sHe+Pn/7qMwuq + aIFo6hUH2I6txLYmexjjRmx8pGbNSKxg9+ymqrs0YDrN6jdfMfqs9SvoA+QzYzPv48mDpoafP8O3 + pcJ59nApfOclzE+Ej8FB1tIfj2B00r1y7kOjV65zP9FZuVPj689k5ayMKju8Yjubn0K4AGSkD2bf + 2nc558r2pJDDKSDubos4v9qbEH3zTKJ2Ts05XYW18u3/vzwYUVGqZbCuc0Ms9CHxWO4eM9Tb8cDw + 2pHjsXP4DR1P1oquY96h/i6yBl5z5ZPd44YzXp5KCYTWkegCUN1NXz5C7rx9M2vL5njce3cJnWMx + IVZA83ICPtm//PpP/Q4uzUwQ3n1IjFiZ42YLnQVjX6cYeq8s+WQkPZCg7pndshdiYXtZyMFlnqmw + O9+733xMth4njWkrLSlHJ5F6UC4K4I9uFRm/M3dEW34diaEnpiGuUSHBd/5K7JcR8Dk59hEs9/2d + 5f2WlUMuZDc4QJEzA1/1uDNn3//l5Rg1deUNVvwskIMuV6KfR8MTv/O8zd47ELbLuz4bFzftpIi3 + o47F7aqOx3E6POXvvIqEr3PK2+SZj3CyUkoC93nz6LQPRDiVxZ0uLpcLGpf6SoIv/9P5ez7nxflh + wdBRi5k3ARv0PlUp1PYiJ1vnWXp85LWtvEl8xHM0Xbyh2woC2r9ZQZVVGHvz2U1GtFwIhHkdf/DB + 3pIa+gZvMcu9dTYt1TyEn17+8qEJK5K7iZ8R+tPfuKfFMvz0fOvkzODtXKnyenkyv37hU/YfV5WV + qKum7zyDGONGPc1IuSyBGTmq0Ri/Zwn2Uq4SbZneuj96uj4aPp3PY+kxK7mOaDhA/csb4vG5yTHo + fZ3he77vYr5V7hSybNyy++l+99jFfPfIyw3CtHfuev2cFpby9+9WwH/+66+//tfvhkHdXG+v78WA + 4TYN//7vqwL/Tq/pvwVB/DcT/9xEoH1a3P7+578uIfz96Zr6M/zvoalu7/7vf/4SlT/XDf4emiF9 + /b/P//X9a//5r/8DAAD//wMAaBDGhuMgAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bf024783c8096de-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 28 Apr 2023 14:54:47 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '325' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + 
x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - f59ce3c6f76fb37599125046aec3d2bb + status: + code: 200 + message: OK +- request: + body: '{"input": ["Important Information"], "model": "text-embedding-ada-002", + "encoding_format": "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '100' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SaW8+6Srfl799PsbJu7TciCjVZdwjI2SoEj51OR0QREDlWAbWzv3sH/zu7u2+e + RCB5lJo15viNWf/xr7/++ruK8+ej//ufv/7+ZF3/9/+YryX3/v73P3/9z3/99ddff/3H7+//9+Sz + jJ9Jkn3T3+O/m9k3eY5///OX8N9X/u9D//z1N7ldGbNR1kfT59Co6Mw2K/aiVIi5YisGtOPUMVc/ + L9re624DJJoVM7PigS5k25cPVXN7MDsSyvbbeMOAFt0tZSRIHC4+lqELC7w4EyLkQtT7lXUGOVv4 + xP6aWj52jb5AVWAsmZY3H0Sr5pvJ3tn4sEc2Bfm0J7cMHo67IOq27vK+rnMT1MzcMUfRtvqg6h8N + neTdjm3dzTmezPutAtE+a8zpTl0+tXmwQGccXom7e9hxzz/GhBJtFxP7GR/ygdlYle8eUunickmi + US+7DkY/14n9El2PVpWkwUtRLlR+oi8atsc7hr47fYjrGEU+7dKwBOXerPD4dMN4bJMaQ4iMB9mb + m1gf40wI0CpbypSHFxSxaJpMFGYhJkTCu0jAbizA/dgTsn1POGdT4z7lNQ1Vtg9jFQ1ws314R+iM + uTUE8/pkmpwddJPpWvlup6phKZhH+0FcY0v4dE2eMvpe7IxoFwYxvSZnGazE3ODNEK50Zp8WGjpM + tkp2+15DvEpQAXW9jZkG6UYf3ru6QivPD/ellBr5NGy3AuyJR+jXMuS2t95+Awd4Jsw5Xl95Hwbd + E+KbOhDte3t443FRl0jp+RsPxKxaOhYhheubMyqHkhEJweK2kXUVUmYa/a4dJyTL0ETai5DqHSD+ + 3ZtX2DzXIj6v73c+JVluAnMblxk1nWJaDFYJJ1LdmOYPe1R56duACR0CYl2FQqeNaptw2eYRMQPm + eP3tET6gv9CG+dpRi1dppT7A/cCFHCIvb/ssUUxwk8/AtmdZ4s1d+GwA1+Gb7EdRjrg8ltqmzuQD + wevzHo2raAuKAlHI3FAq4u69rBr0FT4dHnl94PxjdqUUjvsvs7SQ5DxwVxu0DvYbgje3jLPLUung + IUU+cfinQM3xbT/lOKlbtr8GKh+Cbz7B5p0hsideFY+Lz2GA18dzsNBFEh9Ph/1Rvt+vI9ubG+SN + py96wuNSMaa3LEV0HW4GWJ7vBXFegeDxvD4WcAl0g+gtU9E0NFEF922ZM+sqGLpw3SkplKtsR0hr + brzWcqJwabx9YNawafVRJ4tSlkf0wTA9gmgKe/cI3mPPiLVqRL1TFqSAOy0NujwsIZqu1DeQpNgx + c8rUb4fruaGwWBFGl1k3oMnU8xT1eBMTs8F2PqRPHKCPVn6Y+0jktutWzhXOn+LK7Msj493KIRNc + hQdnvtMeeZ/trk+IPm+baFKwzId8lYToGB835IyzT9SwvU7RVf4qxDT6Tz6tjyyA62qpsX3XeS23 + nChQxhe2cO+Xb70+HXZH8PNWwZNK3Zhd3UxFn1oumJExveW7zgvRtlXfzPhKYTRsW/sMZ+E90pW1 + 0bjItxlF5095pfLJtHU+7y/42IwRw1/e42F5OjcgnO4ZbSrrHo/nnf6EQBGfeFiIIR9MbROgJk1q + slOrJh8DfnyApDtXKms+4z0JvALlW+dIPC++tUMtpCkI09Wg/C2/9eElr46gYW+PJ2uRtVO0LFXI + /exKM3v34GyuX1gsjQLTy0WJu82zCmCR7XSCfdXzhn5bYPCmbUpX59eej6iRRDAcGfDiIUhtt0mf + A0Juz9m2GpOIBmWVwZCWDtlnuzQfF/ryjnAgIaY9NIcPr+pWwS29Ueb6XEYsV90BZVp+Yq7mE0S1 + 10gRvS8LZjfJSp8KczVA0Dp3grvnLZ5ulWfDnRYG0ZWTnHfiDQAVOpTMO6EmZ4m6TNGvHsz1XUYs + pdCgyNVXmN3Duh2wRAtA7aaiIB0/Oj0HYMPwuK+IJpgD4mW9esDyYZjsmLG8HdtLGkKgr2MMpn5t + xzJ8GMgf6hfzkjDxBmEfYpg/01zBaV7fuF0isxovTE9I4E3L1klhlaUhUW1pp493Kroo4EqNX30U + o+5rtnd4UKDEw+2kj62JTEi8RUB52fke1z/1A71Xpkds32/y7iahAlVFGLJ9/Yh03l6jFH56uniS + UJ/S1beCp4QxFazs6fGuWWOE5PsNL16NO/+eKgD11j/xNymZ3n3y8AHlVz0yw8qe+mSVcIU4aVvi + qcY3pqbeZnK08X22FyWid3EvdZB98I3tGrTJ668f2UjvF1ti9QHmfBHVRyBJoeH+cPX1FV4+j+iz + uZ7I+fY+elz/vgB9CqIxXVvsou7LFAzj6RRieLnHaBTYqP72G/Of1qsdy9r1kYmeMZ7ls6XW8drB + oR5vVHqv4pYdO8mWvH14ZqYXdmioE/kK1/fI6DrnUzusrFsg2zFdz/p11YfTQxvkJn3VFJ1Q0/LI + SQPIHleXaIj6fJA/moEGJ3DwtOdZPqDX1lXCJbsQkrNPTvfkkKIa0RcxxaDTh8RzOoDUd9lW6Ew0 + ruqNAZ0aJMSmuzHm46BfYdtlAzF0/uX9dHyVUrnFBc7rA8/H5CzfwXkqI/Eno8trIiVXpFaEEGsn + t5wNn4uNziE6EsLTZVzb7bBBK0k8UGm01jnjhZNBv9RqvGg76jVHqTeRVxRnZl6OQzRY33UDy1Z/ + 4Y15cVv6tc4VqjhpmWc/cT6cl3EFxeqr4+ztb9rxfbgfkbdmGtk367alk5U3EIXXB934vpsPheqU + yHiahBjudM65p8gYVT6EFJDURZ3TNQCU2Wu2N8ftXE/CHbbSUsZrU0lbdqpoJ5dVllPhK4Vxj89+ + 
hrZvp6PA1jbns/+BJlW3xAlqJxqWt7cJOC+WeOEqojct1t4d1Bt7/tH7qVdTgM2VdlSe/dv49WMb + 9B62dBLhzbueqU9Y7y2PkfX23Q58cH2lDByF6D0U+a/e0T3fq4wU0rGd0sTTYMdEmTn9K47YLj6b + 8mN/PJD7YyeiuloVsLkl0ZauBTvPh1d1qFB2phVT0UuN+HePr+hCM4E5/GPwLhGiFOonVYjxsr7R + oLdvX7m+jxad3v4mn/tHAGa3aDCkeyeiZbGnyLmKXzzaEfM+psI7cEwnZ9aGJPm0XVwCOPjjmu0l + M+TjSjsOKH6LSwxJ9eZjZd59cBfTB1eWlHjTJU7vSno6XJiviwKf0lxfoHOzj+b9zHReX+QKXbbv + CI+hjdEgHx8bqEVVISTJNI9P6qaBfXgAph/2fitW1agpg15+mUPliI+R0RRyAsaT2fmzjH96qFhv + JtBi7dF41LpPhdh74RKzrd9e9+snd7btiJZ9R31Q77ErF6410PuVFjE/ZZ8JnWRrR1SqFdEwaHsT + tEezoAthLFu+aX0fihRsYteRGfVDkcsg5HKEkS1O0RTTboH4JzwSp7++o2nYOiJszG2Fryx1+MwD + qryVFJnsrsc9GhVNlSH+pm8Ky3fV8gPFE5rvM9WvUNRl24sP6pT3mFlqn3f4vQXw72uKFztbjBls + iAiyaG6ouOwrbzpmIvzhGesyMn3qzHqD1q+BMM31ypiPplSg5pAGLBRhy0U7MGf9ubt4LfcuYuHh + 0iHZ3wlMpc4zoq8gDSAdhoY46/1bH/PmIsA9JypdJvXD49HDA7RNO4Ml9qLg43HxLiEZhy27D1M7 + r8cdg9FvK6I7ytHjr10mgOxVPjvoktUO8/5Avief6JqtRH26GlMA8/rhNM94PK4t20TblBrMmrQb + mkZkC+j9JJyWSFvrU4JIB7MeES0jfTyM4RT+4QkhfrUehRt6gG29C+JH9oD6IqvucBztGp/UoORs + We5UkGvrzFSFL3R68BjIR1H08MB97nWF6abyn/tXb+v14qevULuWRDqeZQl1zDZVGFnL2N5CjHcy + 2vigLaIY97pTIOavxxDiznkQLwkVr4eUPtDiMnyIPrYyZ61Fz2iuJ7wUvKBdCUxSYeYb4kSlEFXF + lWK0l+yImZ/8ko/CzZWlLXljTNd0bMf29aKotY4q274n2vawfpSA3oXHduYKe9PJuN3hoqgfsosP + G0TjiyhCPa4bog+OHXNlCO7KOJ0FukFu3fJFtwcYz+s3HqtGj1Z3KtoAKXbxcB+qeHgv0wp++rmQ + F67O0/NVhZkPiHU/i+10U5mIlkqXE3xwJU4TZHUw+x+m7z9DNLyvuyNcveUCS+v92xu87FLCay3u + mf1WJX2qfWeBYtyF5CUPa94eywH/eItyrXznvEfFXdHFISBmLxb8j16t72bPvMTTvb4BPEFcqiEj + lbPXB93Yb2C9DV7E21dKTNXYvMJrv3KIZ3V5PChYEJGTezs8xHanD4/vZoK5n7AQ0Y53wbedUPHd + hcSr4IGmMepT6TB9KmamNxsNK7HT0MwvONtuJH36UmmBzhfjyw6xgtt0M+oLcKWdPvNTihhlWQaf + w3uBefqq2mFd1BiwHy7xr3/R8JG4SPUZ0HhfJdEYxUGn5N5rZFqSaFFHj2QDi7QUCDa91pv5vkCx + my3p9A01T5z7pVRZwYAX21vqMVs5nEEZHiVxQcB8rK+KLOtrssECWbUx3ZHPFcrjKZj7o5rz/LyR + YfX19sStVDUW0iygytspNsw4NF48rWwjBbeMl1h4ZKq+6hoPUBU/dBYFKxuJx0xcQHnXJuIWV8Ob + uuSq/eGR4RrG3rRb2xTabfUkqu4t9dqd7p1sF28Ty/SocPZGlzvk2dn58VPbVOjYId0LFGbNfqqL + +5EqdkVvVJQ2VTw92V4ABy0jLLyPnjfRfBSVU7m4MYtGeTsZ3fEB5LR/sO1Hs7xBSwFAq6uUPuZ+ + xW3ldoSzkI905X0lnSc0miDvqhLLkbqIG6HgBXwfokGs+ObF44UmIdD87M1+q+SjVOIMITf0qGiS + qz6GkhiC8bkS4pNUaKnfrw2Y9ZfM68+76HJ5wHUnCmy3aTNv+JrtFe2dV0/FXiwQcx8XAYXIfBCr + eupoBKwN6JneVeIYdInYKlphmPsTc78n5lGWbDY/nmZG0mHU5Y+hgdm/MVP5PvP+Xq18+WC7wIyq + 0FvuHdMrcD7tmFWZ6o8PMjk3RZE5XKnayTc+AWgb+8Cs+NZGzVuNFz++xdJoXdpJ1d4DDHrxJVsv + uHtzPWRw7/OECvg+ofGzMky4VOuQWR5r8zrIvgGUvfdk7uxfum1rH2HWbzyd9w4SnuY6AB2kO2nZ + vfFYQqMBdpva+cOn/CtnFeTZ0cErmMAbGssPkb32XdwImo6GsxRSCBThySKzR3ldq7dA5hIizFPq + MZ5A359hcZk+7Od/uvNheYePVnyocH+I8fy+RaRt3MPMRx+PTY32BMsIG+I3Wq1zVtcF/NZHlvyp + ZU8vn2BnZwhPy7PjibW/BVQ+/R3ZSuKprV/UuUJ9NzWirraR1/OPP6BfHrXzvpLHn5KqobBEzpy3 + xREdkS2i84YZdPxolj7u9VRUbk2YU7ToSm9wlav9y6+Yx+6NzkC8HaHSrYR23+Sds09ypzAs9JA2 + J7PSR8tONXhmpzWun+YuHtTRCeDZ4Y4RxLt8+L3v1fO8+NWP3rBkIwNnxci0tR7zYWdLGFL/ZTPC + Rkfng2gclc+JHX5+PG+QmGVKW3B31vMgHl/JA8OSX01GQInbMamfBvQC9an02R29YTHtO/SrD4YD + 0xuwVJZAklLDq8Vmlw8ftDTATVwbr+tqatvK2j/hdnieyH4NOeLSavNQ5u9P22ql8dk/iIrImi2Z + 81jeXy7MhmMt1n/8MLfu9hMu99ObSqHx5vyzWp7Rn3obxE00ys7rgcwOGmZMqRJToeAlzPxOl0Zs + xqu9Xglw4+aEJzk6RjRPWhnoUbgztZxaTuNd7KJfPnj5xpjzilaDfJXdEx6er2fLr0G2gPxVG8x4 + 1HedG68qQ3O+wfZe7utsznfQzL/M3Qq+PnT70gRnJTjMCY0tGossvUK/VGumjo3aTrOeIxdXlz96 + 9nu/cmryEiN2d/X1S1AKFAuZzozVsEIcRfYTwtfXIW7mqfHgpbUJb7Q/EUtR8pifvXUDki3IVNlS + P5aukpPCz496hXtAvCmSK/z0wi0Giw9OlwEcNgeXWUlJ9EkdIQXxEsWM7D4rb/b7GHnKtSCE3I9o + 2rziTPrlu1a4nPO7c9P9yRdMk2w8pmhgAz5/E2aYapvToEwzFBurhM37PRrO1SeVnsampxs1a9Dg + KoEL+en8xYvPq8mHOpmuyp4/RzbnLy3DxtEA7AdLpr+EdcS7ZukjZJQ1Md/COxqtcRTQuiqedPFe + 
02hkt3UlN3jlUUrBbMtHVahgN7giZv/cesLsH1G7fmKmwqHNR9tICtSk2nb+fnL8vXxvBhikfLKZ + j1H3W785r6Sw6ireA3YH6ee3z8KKt1xcY/PHl2wH00MfJP3bIWNJO2Y+o6XXB8aWQi4qBtlTq/DG + OLRMAH4U8UCdRdxvnccEiQcBe34sk08yIRg+rDTpMn5co6EfREAG36h0uHpvj4XVToT5M9tZm+yX + dzUgJa8VnlhXx9ORdzbMfoGYsPP1OX8Mf3nIH32lP17xtlKIr29V8liirlPwQc3J67B8xMNhuqpw + uj4rPG5iRR82a0NGO219w4PSFfog3oQFyk1BJPsvITHHJyeA2Z/RrlgK8bRITwHsk1bFt9CL+QrS + 8oEKp7Lx5bX95tw7VncpfH0ctrWc1BPn+lB+ftDbvLR8/eONHx9aO9lDnX4X1D952dZfpPr4zkUN + YJ/vmKHHhjfUQpWhJcMTMXgeeoMA3QIam27xuhC/7Zw/qvBw7AUzlp9VO/7y9tl/4XSJxfaXl8BH + 3JjsNb/vds+pgHJkX8iuoCuPXpPnBhG09Sma+0198z4hfC9uRox4jdH67fsNgKRdiSO6fjwx8ZT9 + /B1u2d31KH4rGH58uqVrE/HooS8A27uObDdHXec381JCsUQpXTyEW56el3ED54v5ZWQZV3wYfemI + JlX4ULR/5zFr2baBaeWaDHdPKR77/XGD6p3MmNdOez7MeTpKcrbB1zoqo498NxpIxYdPwuW59qbf + PISJnx1RM1VFYvn0B/C2KGT6ejTi8WHu8M8fUtgcgmhM6rMB67VBmOebDRqQHWvQd5cPXlpq3xY7 + LNuArcbA35Nv8U7ajhjCkXz3Y9XkcbNIkwAitvqQbfxxOd3Y/V2+YR5gYdaDPD1f5/V878g+uZN8 + Yi92R7Mek33Xta3wke0CurcsU6nqh5gvU9+UdHTz8E1gEZ8MZVsBeZkHvMRBqU/T8zAphb4oiZdf + zXy6uNxHRSgtmeVPaVvN8zY49bt8zm/vfDq69+PPz/+ZH7GZn8EZK5GOikm9eptqG8iMbUGXPH3F + /aotS1iQ24LC4sn59FpyDVxjzBieWuBjJ0su2Jpcs5+/44cuCJSZj8g8r2rpiFRRnvMndpFPX53R + 5+oM83yGuYyd417r+gZNqvghhlB+9cnv1AIZq/Wa4PboRvzS3zVY7ssTs1bNWW8gmjKQR+lDLNti + +Z/5BGflyHb8tPyT78MCw5kO3ZLk5W0xBsrn1B+IaomDN5DHOoWX7XyxpIPQsrm/ovG8etPJOJd5 + 1e2pCdMAnC610yMf7vn+AY9H1ODpkG3mPHIwlHVcwG//6axf5U9l5kc8Zd/Df+UJzlX4Mlcm32h8 + UeeOnoS4mC+GyBO3LSpg3LwWtFfGnI9NnrkQ8HRL/PvViadyV9lgriObbKPcigRfl1UQ7aNGdqja + 6/1vXvCM7s953lujztSGUDGu154d9knC26/w7v7kkzMPRTMPCiAZ9Zfs/fLtDfHFK9C1rQqizvwy + 0qvjgqYd3wTjTxiPhz7O0C2NKW1+vCMTywc/rxX2mz90Uqtryno955XDouEUDXUJ5JVLlItnDw3u + CUQUOcWe4Xl/T2GvnZHqh3/y1qjs1QqUMvAUKvcu8djv+87+gWCHW9GUrlgjK8H5Suf5V8yPu5MI + VmJssKQ4yKPe+3FEqtirDL9CEwnrPTLR1rkbVFk9xXyQdEZRHolXzKWFrQvCPvShacX+N++Ixpf/ + 1UCBQ8hILi7z7rlTB3mJtwqz1jvajl10EgAHz4j8eORPnrclOaZLt6p5mSf5RgaiH3Gd0G0+zjyA + +mZ1xlKEh3zgjROCWfEL2w9HC63G0axgKFlEIcidfHTIVoS/f6cC/vNff/31v34nDMoqeX7mgwH9 + c+z//d9HBf59T+7/FgTx30z8cxKBdvf0+fc//3UI4e+6rcq6/999VTy/3d///CX+OW3wd1/198// + c/lf8//6z3/9HwAAAP//AwBim/ij4SAAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bf0247aaeea96de-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 28 Apr 2023 14:54:47 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '63' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - 974557ca996688f2b7e568f6d601ad88 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integration/test_memory_management.py b/tests/integration/test_memory_management.py new file mode 100644 index 00000000..c9ab9fc9 --- /dev/null +++ b/tests/integration/test_memory_management.py @@ -0,0 +1,64 @@ +import json + +import pytest + +from autogpt.config import Config +from autogpt.memory import get_memory +from autogpt.memory_management.store_memory import ( + save_memory_trimmed_from_context_window, +) +from tests.utils import requires_api_key + + +@pytest.fixture +def message_history_fixture(): + assistant_reply = { + "thoughts": { + "text": "thoughts", + "reasoning": "reasoning", + "plan": "plan", + "criticism": "criticism", + "speak": "speak", + }, + "command": 
{"name": "google", "args": {"query": "google_query"}}, + } + return [ + {"content": json.dumps(assistant_reply, indent=4)}, + {"content": "Command Result: Important Information."}, + ] + + +@pytest.fixture +def expected_permanent_memory() -> str: + return """Assistant Reply: { + "thoughts": { + "text": "thoughts", + "reasoning": "reasoning", + "plan": "plan", + "criticism": "criticism", + "speak": "speak" + }, + "command": { + "name": "google", + "args": { + "query": "google_query" + } + } +} +Result: None +Human Feedback:Command Result: Important Information.""" + + +@requires_api_key("OPENAI_API_KEY") +@pytest.mark.vcr +def test_save_memory_trimmed_from_context_window( + message_history_fixture, expected_permanent_memory, config: Config +): + next_message_to_add_index = len(message_history_fixture) - 1 + memory = get_memory(config, init=True) + save_memory_trimmed_from_context_window( + message_history_fixture, next_message_to_add_index, memory + ) + + memory_found = memory.get_relevant("Important Information", 5) + assert memory_found[0] == expected_permanent_memory From 92009ceb32ecfe10c284f2ea7c6a96f95bb14c0a Mon Sep 17 00:00:00 2001 From: James Collins Date: Fri, 28 Apr 2023 14:12:47 -0700 Subject: [PATCH 188/222] More graceful browsing error handling (#3494) --- autogpt/commands/web_selenium.py | 10 +++++++++- tests/unit/test_web_selenium.py | 11 +++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) create mode 100644 tests/unit/test_web_selenium.py diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py index 739520ec..79a8ffd9 100644 --- a/autogpt/commands/web_selenium.py +++ b/autogpt/commands/web_selenium.py @@ -7,6 +7,7 @@ from sys import platform from bs4 import BeautifulSoup from selenium import webdriver +from selenium.common.exceptions import WebDriverException from selenium.webdriver.chrome.options import Options as ChromeOptions from selenium.webdriver.common.by import By from selenium.webdriver.firefox.options import Options as FirefoxOptions @@ -43,7 +44,14 @@ def browse_website(url: str, question: str) -> tuple[str, WebDriver]: Returns: Tuple[str, WebDriver]: The answer and links to the user and the webdriver """ - driver, text = scrape_text_with_selenium(url) + try: + driver, text = scrape_text_with_selenium(url) + except WebDriverException as e: + # These errors are often quite long and include lots of context. + # Just grab the first line. 
+ msg = e.msg.split("\n")[0] + return f"Error: {msg}", None + add_header(driver) summary_text = summary.summarize_text(url, text, question, driver) links = scrape_links_with_selenium(driver, url) diff --git a/tests/unit/test_web_selenium.py b/tests/unit/test_web_selenium.py new file mode 100644 index 00000000..bf13a8d9 --- /dev/null +++ b/tests/unit/test_web_selenium.py @@ -0,0 +1,11 @@ +from autogpt.commands.web_selenium import browse_website + + +def test_browse_website(): + url = "https://barrel-roll.com" + question = "How to execute a barrel roll" + + response, _ = browse_website(url, question) + assert "Error" in response + # Sanity check that the response is not too long + assert len(response) < 200 From cfc7817869f1e0e62ac98a903b412c42a987840a Mon Sep 17 00:00:00 2001 From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com> Date: Fri, 28 Apr 2023 16:25:41 -0500 Subject: [PATCH 189/222] update pyproject (#2757) * update pyproject * python bump --------- Co-authored-by: Richard Beales Co-authored-by: Reinier van der Leer Co-authored-by: Nicholas Tindle Co-authored-by: James Collins --- pyproject.toml | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index fdb43d66..64926046 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,8 +1,25 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + [project] -name = "auto-gpt" -version = "0.1.0" -description = "A GPT based ai agent" +name = "agpt" +version = "0.2.2" +authors = [ + { name="Torantulino", email="support@agpt.co" }, +] readme = "README.md" +requires-python = ">=3.10" +classifiers = [ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", +] +description = "An open-source attempt to make GPT-4 autonomous" + +[project.urls] +"Homepage" = "https://github.com/Significant-Gravitas/Auto-GPT" +"Bug Tracker" = "https://github.com/Significant-Gravitas/Auto-GPT" [tool.black] line-length = 88 From c7d75643d396091ef353fbd9117fcf566869f5c4 Mon Sep 17 00:00:00 2001 From: Deso Date: Fri, 28 Apr 2023 17:39:52 -0400 Subject: [PATCH 190/222] =?UTF-8?q?Architecture-agnostic=20dev-container?= =?UTF-8?q?=20patch,=20now=20with=20Redis=20=F0=9F=98=8D=20(#3102)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Reinier van der Leer Co-authored-by: Nicholas Tindle Co-authored-by: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com> --- .devcontainer/Dockerfile | 31 +++++++++---------------------- .devcontainer/devcontainer.json | 12 ++++++------ .devcontainer/docker-compose.yml | 19 +++++++++++++++++++ 3 files changed, 34 insertions(+), 28 deletions(-) create mode 100644 .devcontainer/docker-compose.yml diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 87ca39d2..f31cdeba 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,26 +1,13 @@ -# [Choice] Python version (use -bullseye variants on local arm64/Apple Silicon): 3, 3.10, 3-bullseye, 3.10-bullseye, 3-buster, 3.10-buster -ARG VARIANT=3-bullseye +# Use an official Python base image from the Docker Hub FROM python:3.10 -RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ - # Remove imagemagick due to https://security-tracker.debian.org/tracker/CVE-2019-10131 - && apt-get purge -y imagemagick imagemagick-6-common +# Install browsers +RUN apt-get update && apt-get install -y \ + chromium-driver firefox-esr 
\ + ca-certificates -# Temporary: Upgrade python packages due to https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-40897 -# They are installed by the base image (python) which does not have the patch. -RUN python3 -m pip install --upgrade setuptools +# Install utilities +RUN apt-get install -y curl jq wget git -# Install Chromium for web browsing -RUN apt-get install -y chromium-driver - -# [Optional] If your pip requirements rarely change, uncomment this section to add them to the image. -# COPY requirements.txt /tmp/pip-tmp/ -# RUN pip3 --disable-pip-version-check --no-cache-dir install -r /tmp/pip-tmp/requirements.txt \ -# && rm -rf /tmp/pip-tmp - -# [Optional] Uncomment this section to install additional OS packages. -# RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ -# && apt-get -y install --no-install-recommends - -# [Optional] Uncomment this line to install global node packages. -# RUN su vscode -c "source /usr/local/share/nvm/nvm.sh && npm install -g " 2>&1 +# Declare working directory +WORKDIR /workspace/Auto-GPT diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index f26810fb..5d50e280 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,14 +1,14 @@ { - "build": { - "dockerfile": "./Dockerfile", - "context": "." - }, + "dockerComposeFile": "./docker-compose.yml", + "service": "auto-gpt", + "workspaceFolder": "/workspace/Auto-GPT", + "shutdownAction": "stopCompose", "features": { "ghcr.io/devcontainers/features/common-utils:2": { "installZsh": "true", "username": "vscode", - "userUid": "1000", - "userGid": "1000", + "userUid": "6942", + "userGid": "6942", "upgradePackages": "true" }, "ghcr.io/devcontainers/features/desktop-lite:1": {}, diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml new file mode 100644 index 00000000..90d8c116 --- /dev/null +++ b/.devcontainer/docker-compose.yml @@ -0,0 +1,19 @@ +# To boot the app run the following: +# docker-compose run auto-gpt +version: '3.9' + +services: + auto-gpt: + depends_on: + - redis + build: + dockerfile: .devcontainer/Dockerfile + context: ../ + tty: true + environment: + MEMORY_BACKEND: ${MEMORY_BACKEND:-redis} + REDIS_HOST: ${REDIS_HOST:-redis} + volumes: + - ../:/workspace/Auto-GPT + redis: + image: 'redis/redis-stack-server:latest' From b8478a96aea94f9f9f34cf966a0f7ef27c6d04c4 Mon Sep 17 00:00:00 2001 From: James Collins Date: Fri, 28 Apr 2023 15:04:31 -0700 Subject: [PATCH 191/222] Feature/llm data structs (#3486) * Organize all the llm stuff into a subpackage * Add structs for interacting with llms --- autogpt/agent/agent_manager.py | 3 +- autogpt/llm/__init__.py | 16 ++++++++ autogpt/llm/base.py | 65 +++++++++++++++++++++++++++++++ autogpt/llm/chat.py | 2 +- autogpt/llm/llm_utils.py | 2 +- autogpt/llm/providers/__init__.py | 0 autogpt/llm/providers/openai.py | 37 ++++++++++++++++++ autogpt/llm/token_counter.py | 2 +- autogpt/types/openai.py | 9 ----- 9 files changed, 122 insertions(+), 14 deletions(-) create mode 100644 autogpt/llm/base.py create mode 100644 autogpt/llm/providers/__init__.py create mode 100644 autogpt/llm/providers/openai.py delete mode 100644 autogpt/types/openai.py diff --git a/autogpt/agent/agent_manager.py b/autogpt/agent/agent_manager.py index 1283fdae..17fb35d8 100644 --- a/autogpt/agent/agent_manager.py +++ b/autogpt/agent/agent_manager.py @@ -4,9 +4,8 @@ from __future__ import annotations from typing import List from autogpt.config.config import Config -from autogpt.llm import 
create_chat_completion +from autogpt.llm import Message, create_chat_completion from autogpt.singleton import Singleton -from autogpt.types.openai import Message class AgentManager(metaclass=Singleton): diff --git a/autogpt/llm/__init__.py b/autogpt/llm/__init__.py index 3a958285..2a6f0b8f 100644 --- a/autogpt/llm/__init__.py +++ b/autogpt/llm/__init__.py @@ -1,4 +1,13 @@ from autogpt.llm.api_manager import ApiManager +from autogpt.llm.base import ( + ChatModelInfo, + ChatModelResponse, + EmbeddingModelInfo, + EmbeddingModelResponse, + LLMResponse, + Message, + ModelInfo, +) from autogpt.llm.chat import chat_with_ai, create_chat_message, generate_context from autogpt.llm.llm_utils import ( call_ai_function, @@ -10,6 +19,13 @@ from autogpt.llm.token_counter import count_message_tokens, count_string_tokens __all__ = [ "ApiManager", + "Message", + "ModelInfo", + "ChatModelInfo", + "EmbeddingModelInfo", + "LLMResponse", + "ChatModelResponse", + "EmbeddingModelResponse", "create_chat_message", "generate_context", "chat_with_ai", diff --git a/autogpt/llm/base.py b/autogpt/llm/base.py new file mode 100644 index 00000000..722e0f0f --- /dev/null +++ b/autogpt/llm/base.py @@ -0,0 +1,65 @@ +from dataclasses import dataclass, field +from typing import List, TypedDict + + +class Message(TypedDict): + """OpenAI Message object containing a role and the message content""" + + role: str + content: str + + +@dataclass +class ModelInfo: + """Struct for model information. + + Would be lovely to eventually get this directly from APIs, but needs to be scraped from + websites for now. + + """ + + name: str + prompt_token_cost: float + completion_token_cost: float + max_tokens: int + + +@dataclass +class ChatModelInfo(ModelInfo): + """Struct for chat model information.""" + + pass + + +@dataclass +class EmbeddingModelInfo(ModelInfo): + """Struct for embedding model information.""" + + embedding_dimensions: int + + +@dataclass +class LLMResponse: + """Standard response struct for a response from an LLM model.""" + + model_info: ModelInfo + prompt_tokens_used: int = 0 + completion_tokens_used: int = 0 + + +@dataclass +class EmbeddingModelResponse(LLMResponse): + """Standard response struct for a response from an embedding model.""" + + embedding: List[float] = field(default_factory=list) + + def __post_init__(self): + if self.completion_tokens_used: + raise ValueError("Embeddings should not have completion tokens used.") + + +@dataclass +class ChatModelResponse(LLMResponse): + """Standard response struct for a response from an LLM model.""" + + content: str = None diff --git a/autogpt/llm/chat.py b/autogpt/llm/chat.py index 119468c3..e0f0226d 100644 --- a/autogpt/llm/chat.py +++ b/autogpt/llm/chat.py @@ -5,13 +5,13 @@ from openai.error import RateLimitError from autogpt.config import Config from autogpt.llm.api_manager import ApiManager +from autogpt.llm.base import Message from autogpt.llm.llm_utils import create_chat_completion from autogpt.llm.token_counter import count_message_tokens from autogpt.logs import logger from autogpt.memory_management.store_memory import ( save_memory_trimmed_from_context_window, ) -from autogpt.types.openai import Message cfg = Config() diff --git a/autogpt/llm/llm_utils.py b/autogpt/llm/llm_utils.py index c1ba5fa5..9a2400c7 100644 --- a/autogpt/llm/llm_utils.py +++ b/autogpt/llm/llm_utils.py @@ -10,8 +10,8 @@ from openai.error import APIError, RateLimitError, Timeout from autogpt.config import Config from autogpt.llm.api_manager import ApiManager +from autogpt.llm.base import 
Message from autogpt.logs import logger -from autogpt.types.openai import Message def retry_openai_api( diff --git a/autogpt/llm/providers/__init__.py b/autogpt/llm/providers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/autogpt/llm/providers/openai.py b/autogpt/llm/providers/openai.py new file mode 100644 index 00000000..188d5cf7 --- /dev/null +++ b/autogpt/llm/providers/openai.py @@ -0,0 +1,37 @@ +from autogpt.llm.base import ChatModelInfo, EmbeddingModelInfo + +OPEN_AI_CHAT_MODELS = { + "gpt-3.5-turbo": ChatModelInfo( + name="gpt-3.5-turbo", + prompt_token_cost=0.002, + completion_token_cost=0.002, + max_tokens=4096, + ), + "gpt-4": ChatModelInfo( + name="gpt-4", + prompt_token_cost=0.03, + completion_token_cost=0.06, + max_tokens=8192, + ), + "gpt-4-32k": ChatModelInfo( + name="gpt-4-32k", + prompt_token_cost=0.06, + completion_token_cost=0.12, + max_tokens=32768, + ), +} + +OPEN_AI_EMBEDDING_MODELS = { + "text-embedding-ada-002": EmbeddingModelInfo( + name="text-embedding-ada-002", + prompt_token_cost=0.0004, + completion_token_cost=0.0, + max_tokens=8191, + embedding_dimensions=1536, + ), +} + +OPEN_AI_MODELS = { + **OPEN_AI_CHAT_MODELS, + **OPEN_AI_EMBEDDING_MODELS, +} diff --git a/autogpt/llm/token_counter.py b/autogpt/llm/token_counter.py index 2d50547b..5e13920e 100644 --- a/autogpt/llm/token_counter.py +++ b/autogpt/llm/token_counter.py @@ -5,8 +5,8 @@ from typing import List import tiktoken +from autogpt.llm.base import Message from autogpt.logs import logger -from autogpt.types.openai import Message def count_message_tokens( diff --git a/autogpt/types/openai.py b/autogpt/types/openai.py deleted file mode 100644 index 2af85785..00000000 --- a/autogpt/types/openai.py +++ /dev/null @@ -1,9 +0,0 @@ -"""Type helpers for working with the OpenAI library""" -from typing import TypedDict - - -class Message(TypedDict): - """OpenAI Message object containing a role and the message content""" - - role: str - content: str From 9c6494aca7d7f000d5a4ba597ce927c43e3b94d1 Mon Sep 17 00:00:00 2001 From: Steven Baumann Date: Sat, 29 Apr 2023 08:57:48 -0400 Subject: [PATCH 192/222] Fix `clone_repository` to conform to URL validation (#3150) Co-authored-by: Reinier van der Leer --- autogpt/commands/git_operations.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/autogpt/commands/git_operations.py b/autogpt/commands/git_operations.py index fb74374c..22233108 100644 --- a/autogpt/commands/git_operations.py +++ b/autogpt/commands/git_operations.py @@ -11,25 +11,25 @@ CFG = Config() @command( "clone_repository", "Clone Repository", - '"repository_url": "", "clone_path": ""', + '"url": "", "clone_path": ""', CFG.github_username and CFG.github_api_key, "Configure github_username and github_api_key.", ) @validate_url -def clone_repository(repository_url: str, clone_path: str) -> str: +def clone_repository(url: str, clone_path: str) -> str: """Clone a GitHub repository locally. Args: - repository_url (str): The URL of the repository to clone. + url (str): The URL of the repository to clone. clone_path (str): The path to clone the repository to. Returns: str: The result of the clone operation. 
""" - split_url = repository_url.split("//") + split_url = url.split("//") auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url) try: - Repo.clone_from(auth_repo_url, clone_path) - return f"""Cloned {repository_url} to {clone_path}""" + Repo.clone_from(url=auth_repo_url, to_path=clone_path) + return f"""Cloned {url} to {clone_path}""" except Exception as e: return f"Error: {str(e)}" From 2d058feaf82debe93aa44822e22c230eb983c78b Mon Sep 17 00:00:00 2001 From: k-boikov <64261260+k-boikov@users.noreply.github.com> Date: Sat, 29 Apr 2023 17:55:47 +0300 Subject: [PATCH 193/222] Extend & improve file operations tests (#3404) Co-authored-by: Reinier van der Leer --- autogpt/commands/file_operations.py | 2 +- tests/unit/test_file_operations.py | 264 ++++++++++++++++------------ 2 files changed, 152 insertions(+), 114 deletions(-) diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py index 05f06088..e5181691 100644 --- a/autogpt/commands/file_operations.py +++ b/autogpt/commands/file_operations.py @@ -264,7 +264,7 @@ def download_file(url, filename): progress = f"{readable_file_size(downloaded_size)} / {readable_file_size(total_size)}" spinner.update_message(f"{message} {progress}") - return f'Successfully downloaded and locally stored file: "{filename}"! (Size: {readable_file_size(total_size)})' + return f'Successfully downloaded and locally stored file: "{filename}"! (Size: {readable_file_size(downloaded_size)})' except requests.HTTPError as e: return f"Got an HTTP Error whilst trying to download file: {e}" except Exception as e: diff --git a/tests/unit/test_file_operations.py b/tests/unit/test_file_operations.py index b2760671..f324193a 100644 --- a/tests/unit/test_file_operations.py +++ b/tests/unit/test_file_operations.py @@ -1,12 +1,18 @@ +""" +This set of unit tests is designed to test the file operations that autoGPT has access to. +""" + import os -import shutil -import unittest from pathlib import Path +from tempfile import gettempdir + +import pytest from autogpt.commands.file_operations import ( append_to_file, check_duplicate_operation, delete_file, + download_file, log_operation, read_file, search_files, @@ -14,118 +20,150 @@ from autogpt.commands.file_operations import ( write_to_file, ) from autogpt.config import Config -from autogpt.workspace import Workspace +from autogpt.utils import readable_file_size -class TestFileOperations(unittest.TestCase): - """ - This set of unit tests is designed to test the file operations that autoGPT has access to. 
- """ - - def setUp(self): - self.config = Config() - workspace_path = os.path.join(os.path.dirname(__file__), "workspace") - self.workspace_path = Workspace.make_workspace(workspace_path) - self.config.workspace_path = workspace_path - self.config.file_logger_path = os.path.join(workspace_path, "file_logger.txt") - self.workspace = Workspace(workspace_path, restrict_to_workspace=True) - - self.test_file = str(self.workspace.get_path("test_file.txt")) - self.test_file2 = "test_file2.txt" - self.test_directory = str(self.workspace.get_path("test_directory")) - self.test_nested_file = str(self.workspace.get_path("nested/test_file.txt")) - self.file_content = "This is a test file.\n" - self.file_logger_logs = "file_logger.txt" - - with open(self.test_file, "w") as f: - f.write(self.file_content) - - def tearDown(self) -> None: - shutil.rmtree(self.workspace_path) - - def test_check_duplicate_operation(self): - log_operation("write", self.test_file) - self.assertTrue(check_duplicate_operation("write", self.test_file)) - - # Test logging a file operation - def test_log_operation(self): - if os.path.exists(self.file_logger_logs): - os.remove(self.file_logger_logs) - - log_operation("log_test", self.test_file) - with open(self.config.file_logger_path, "r") as f: - content = f.read() - self.assertIn(f"log_test: {self.test_file}", content) - - # Test splitting a file into chunks - def test_split_file(self): - content = "abcdefghij" - chunks = list(split_file(content, max_length=4, overlap=1)) - expected = ["abcd", "defg", "ghij"] - self.assertEqual(chunks, expected) - - def test_read_file(self): - content = read_file(self.test_file) - self.assertEqual(content, self.file_content) - - def test_write_to_file(self): - new_content = "This is new content.\n" - write_to_file(self.test_nested_file, new_content) - with open(self.test_nested_file, "r") as f: - content = f.read() - self.assertEqual(content, new_content) - - def test_append_to_file(self): - append_text = "This is appended text.\n" - append_to_file(self.test_nested_file, append_text) - with open(self.test_nested_file, "r") as f: - content = f.read() - - append_to_file(self.test_nested_file, append_text) - - with open(self.test_nested_file, "r") as f: - content_after = f.read() - - self.assertEqual(content_after, append_text + append_text) - - def test_delete_file(self): - delete_file(self.test_file) - self.assertFalse(os.path.exists(self.test_file)) - - def test_search_files(self): - # Case 1: Create files A and B, search for A, and ensure we don't return A and B - file_a = self.workspace.get_path("file_a.txt") - file_b = self.workspace.get_path("file_b.txt") - - with open(file_a, "w") as f: - f.write("This is file A.") - - with open(file_b, "w") as f: - f.write("This is file B.") - - # Create a subdirectory and place a copy of file_a in it - if not os.path.exists(self.test_directory): - os.makedirs(self.test_directory) - - with open(os.path.join(self.test_directory, file_a.name), "w") as f: - f.write("This is file A in the subdirectory.") - - files = search_files(str(self.workspace.root)) - self.assertIn(file_a.name, files) - self.assertIn(file_b.name, files) - self.assertIn(os.path.join(Path(self.test_directory).name, file_a.name), files) - - # Clean up - os.remove(file_a) - os.remove(file_b) - os.remove(os.path.join(self.test_directory, file_a.name)) - os.rmdir(self.test_directory) - - # Case 2: Search for a file that does not exist and make sure we don't throw - non_existent_file = "non_existent_file.txt" - files = search_files("") - 
self.assertNotIn(non_existent_file, files) +@pytest.fixture() +def file_content(): + return "This is a test file.\n" -if __name__ == "__main__": - unittest.main() +@pytest.fixture() +def test_file(workspace, file_content): + test_file = str(workspace.get_path("test_file.txt")) + with open(test_file, "w") as f: + f.write(file_content) + return test_file + + +@pytest.fixture() +def test_directory(workspace): + return str(workspace.get_path("test_directory")) + + +@pytest.fixture() +def test_nested_file(workspace): + return str(workspace.get_path("nested/test_file.txt")) + + +def test_check_duplicate_operation(config, test_file): + log_operation("write", test_file) + assert check_duplicate_operation("write", test_file) is True + + +# Test logging a file operation +def test_log_operation(test_file, config): + file_logger_name = config.file_logger_path + if os.path.exists(file_logger_name): + os.remove(file_logger_name) + + log_operation("log_test", test_file) + with open(config.file_logger_path, "r") as f: + content = f.read() + assert f"log_test: {test_file}" in content + + +# Test splitting a file into chunks +def test_split_file(): + content = "abcdefghij" + chunks = list(split_file(content, max_length=4, overlap=1)) + expected = ["abcd", "defg", "ghij"] + assert chunks == expected + + +def test_read_file(test_file, file_content): + content = read_file(test_file) + assert content == file_content + + +def test_write_to_file(config, test_nested_file): + new_content = "This is new content.\n" + write_to_file(test_nested_file, new_content) + with open(test_nested_file, "r") as f: + content = f.read() + assert content == new_content + + +def test_append_to_file(test_nested_file): + append_text = "This is appended text.\n" + write_to_file(test_nested_file, append_text) + + append_to_file(test_nested_file, append_text) + + with open(test_nested_file, "r") as f: + content_after = f.read() + + assert content_after == append_text + append_text + + +def test_delete_file(config, test_file): + delete_file(test_file) + assert os.path.exists(test_file) is False + assert delete_file(test_file) == "Error: File has already been deleted." 
+ + +def test_delete_missing_file(test_file): + os.remove(test_file) + try: + os.remove(test_file) + except FileNotFoundError as e: + error_string = str(e) + assert error_string in delete_file(test_file) + return + assert True, "Failed to test delete_file" + + +def test_search_files(config, workspace, test_directory): + # Case 1: Create files A and B, search for A, and ensure we don't return A and B + file_a = workspace.get_path("file_a.txt") + file_b = workspace.get_path("file_b.txt") + + with open(file_a, "w") as f: + f.write("This is file A.") + + with open(file_b, "w") as f: + f.write("This is file B.") + + # Create a subdirectory and place a copy of file_a in it + if not os.path.exists(test_directory): + os.makedirs(test_directory) + + with open(os.path.join(test_directory, file_a.name), "w") as f: + f.write("This is file A in the subdirectory.") + + files = search_files(str(workspace.root)) + assert file_a.name in files + assert file_b.name in files + assert os.path.join(Path(test_directory).name, file_a.name) in files + + # Clean up + os.remove(file_a) + os.remove(file_b) + os.remove(os.path.join(test_directory, file_a.name)) + os.rmdir(test_directory) + + # Case 2: Search for a file that does not exist and make sure we don't throw + non_existent_file = "non_existent_file.txt" + files = search_files("") + assert non_existent_file not in files + + +def test_download_file(): + url = "https://github.com/Significant-Gravitas/Auto-GPT/archive/refs/tags/v0.2.2.tar.gz" + local_name = os.path.join(gettempdir(), "auto-gpt.tar.gz") + size = 365023 + readable_size = readable_file_size(size) + assert ( + download_file(url, local_name) + == f'Successfully downloaded and locally stored file: "{local_name}"! (Size: {readable_size})' + ) + assert os.path.isfile(local_name) is True + assert os.path.getsize(local_name) == size + + url = "https://github.com/Significant-Gravitas/Auto-GPT/archive/refs/tags/v0.0.0.tar.gz" + assert "Got an HTTP Error whilst trying to download file" in download_file( + url, local_name + ) + + url = "https://thiswebsiteiswrong.hmm/v0.0.0.tar.gz" + assert "Failed to establish a new connection:" in download_file(url, local_name) From f77c3604ce8044ecb493c2736675470e554e5caa Mon Sep 17 00:00:00 2001 From: Ikko Eltociear Ashimine Date: Sun, 30 Apr 2023 02:05:43 +0900 Subject: [PATCH 194/222] fix typo in testing.md (#3537) Runing -> Running --- docs/testing.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/testing.md b/docs/testing.md index 47cbecaf..9a173596 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -28,7 +28,7 @@ python -m pytest :::shell pytest --cov=autogpt --without-integration --without-slow-integration -## Runing the linter +## Running the linter This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. We currently use the following rules: `E303,W293,W291,W292,E305,E231,E302`. 
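For reference, with the rule set listed above, a local lint pass could be invoked roughly as:

    flake8 autogpt tests --select=E303,W293,W291,W292,E305,E231,E302

The exact paths and flags used in CI are not shown in this patch series, so treat the command as an assumption rather than the project's canonical invocation.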
From 095883ca547f0d51522c1758abbc6fc21f638d26 Mon Sep 17 00:00:00 2001 From: Media <12145726+rihp@users.noreply.github.com> Date: Sat, 29 Apr 2023 19:16:16 +0200 Subject: [PATCH 195/222] Removing duplicate tests browse_tests (#3535) Co-authored-by: Nicholas Tindle --- tests/browse_tests.py | 29 ----------------------------- 1 file changed, 29 deletions(-) delete mode 100644 tests/browse_tests.py diff --git a/tests/browse_tests.py b/tests/browse_tests.py deleted file mode 100644 index 90692d88..00000000 --- a/tests/browse_tests.py +++ /dev/null @@ -1,29 +0,0 @@ -import os -import sys -import unittest - -from bs4 import BeautifulSoup - -sys.path.append(os.path.abspath("../scripts")) - -from browse import extract_hyperlinks - - -class TestBrowseLinks(unittest.TestCase): - """Unit tests for the browse module functions that extract hyperlinks.""" - - def test_extract_hyperlinks(self): - """Test the extract_hyperlinks function with a simple HTML body.""" - body = """ - -
<a href="https://google.com">Google</a>
-        <a href="foo.html">Foo</a>
-        <div>Some other crap</div>
- - """ - soup = BeautifulSoup(body, "html.parser") - links = extract_hyperlinks(soup, "http://example.com") - self.assertEqual( - links, - [("Google", "https://google.com"), ("Foo", "http://example.com/foo.html")], - ) From 4f72ee7815e30dda6a3fe3371d43a79c1f71df96 Mon Sep 17 00:00:00 2001 From: Media <12145726+rihp@users.noreply.github.com> Date: Sat, 29 Apr 2023 19:40:32 +0200 Subject: [PATCH 196/222] Refactor test_spiunner to deprecate unittest in favor of pytest (#3532) Co-authored-by: Nicholas Tindle --- tests/unit/test_spinner.py | 70 ++++++++++++++++++++------------------ 1 file changed, 36 insertions(+), 34 deletions(-) diff --git a/tests/unit/test_spinner.py b/tests/unit/test_spinner.py index 16106f42..1c5c3ac0 100644 --- a/tests/unit/test_spinner.py +++ b/tests/unit/test_spinner.py @@ -1,6 +1,5 @@ # Generated by CodiumAI import time -import unittest from autogpt.spinner import Spinner @@ -29,40 +28,43 @@ ALMOST_DONE_MESSAGE = "Almost done..." PLEASE_WAIT = "Please wait..." -class TestSpinner(unittest.TestCase): - def test_spinner_initializes_with_default_values(self): - """Tests that the spinner initializes with default values.""" - with Spinner() as spinner: - self.assertEqual(spinner.message, "Loading...") - self.assertEqual(spinner.delay, 0.1) +def test_spinner_initializes_with_default_values(): + """Tests that the spinner initializes with default values.""" + with Spinner() as spinner: + assert spinner.message == "Loading..." + assert spinner.delay == 0.1 - def test_spinner_initializes_with_custom_values(self): - """Tests that the spinner initializes with custom message and delay values.""" - with Spinner(message=PLEASE_WAIT, delay=0.2) as spinner: - self.assertEqual(spinner.message, PLEASE_WAIT) - self.assertEqual(spinner.delay, 0.2) - # - def test_spinner_stops_spinning(self): - """Tests that the spinner starts spinning and stops spinning without errors.""" - with Spinner() as spinner: - time.sleep(1) - spinner.update_message(ALMOST_DONE_MESSAGE) - time.sleep(1) - self.assertFalse(spinner.running) +def test_spinner_initializes_with_custom_values(): + """Tests that the spinner initializes with custom message and delay values.""" + with Spinner(message=PLEASE_WAIT, delay=0.2) as spinner: + assert spinner.message == PLEASE_WAIT + assert spinner.delay == 0.2 - def test_spinner_updates_message_and_still_spins(self): - """Tests that the spinner message can be updated while the spinner is running and the spinner continues spinning.""" - with Spinner() as spinner: - self.assertTrue(spinner.running) - time.sleep(1) - spinner.update_message(ALMOST_DONE_MESSAGE) - time.sleep(1) - self.assertEqual(spinner.message, ALMOST_DONE_MESSAGE) - self.assertFalse(spinner.running) - def test_spinner_can_be_used_as_context_manager(self): - """Tests that the spinner can be used as a context manager.""" - with Spinner() as spinner: - self.assertTrue(spinner.running) - self.assertFalse(spinner.running) +# +def test_spinner_stops_spinning(): + """Tests that the spinner starts spinning and stops spinning without errors.""" + with Spinner() as spinner: + time.sleep(1) + spinner.update_message(ALMOST_DONE_MESSAGE) + time.sleep(1) + assert spinner.running == False + + +def test_spinner_updates_message_and_still_spins(): + """Tests that the spinner message can be updated while the spinner is running and the spinner continues spinning.""" + with Spinner() as spinner: + assert spinner.running == True + time.sleep(1) + spinner.update_message(ALMOST_DONE_MESSAGE) + time.sleep(1) + assert spinner.message == 
ALMOST_DONE_MESSAGE + assert spinner.running == False + + +def test_spinner_can_be_used_as_context_manager(): + """Tests that the spinner can be used as a context manager.""" + with Spinner() as spinner: + assert spinner.running == True + assert spinner.running == False From cdd91f7ea38c468a1f090a65d1ca1292e2bbedcb Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Sat, 29 Apr 2023 13:09:58 -0700 Subject: [PATCH 197/222] Feature/challenge memory management (#3425) Co-authored-by: JS <38794445+jonathansheets517@users.noreply.github.com> Co-authored-by: Richard Beales --- CONTRIBUTING.md | 19 + tests/integration/agent_factory.py | 47 +- tests/integration/challenges/__init__.py | 0 tests/integration/challenges/conftest.py | 17 + .../integration/challenges/memory/__init__.py | 0 .../test_memory_challenge_a.yaml | 1454 +++++++++++++++++ .../memory/test_memory_challenge_a.py | 79 + tests/integration/challenges/utils.py | 34 + tests/integration/goal_oriented/__init__.py | 0 9 files changed, 1649 insertions(+), 1 deletion(-) create mode 100644 tests/integration/challenges/__init__.py create mode 100644 tests/integration/challenges/conftest.py create mode 100644 tests/integration/challenges/memory/__init__.py create mode 100644 tests/integration/challenges/memory/cassettes/test_memory_challenge_a/test_memory_challenge_a.yaml create mode 100644 tests/integration/challenges/memory/test_memory_challenge_a.py create mode 100644 tests/integration/challenges/utils.py create mode 100644 tests/integration/goal_oriented/__init__.py diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e736a036..9af38b29 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -127,3 +127,22 @@ When you run Pytest locally: - Or: The test might be poorly written. In that case, you can make suggestions to change the test. In our CI pipeline, Pytest will use the cassettes and not call paid API providers, so we need your help to record the replays that you break. + + +### Community Challenges +Challenges are goals we need Auto-GPT to achieve. +To pick the challenge you like, go to the tests/integration/challenges folder and select the areas you would like to work on. +- a challenge is new if level_currently_beaten is None +- a challenge is in progress if level_currently_beaten is greater or equal to 1 +- a challenge is beaten if level_currently_beaten = max_level + +Here is an example of how to run the memory challenge A and attempt to beat level 3. + +pytest -s tests/integration/challenges/memory/test_memory_challenge_a.py --level=3 + +To beat a challenge, you're not allowed to change anything in the tests folder, you have to add code in the autogpt folder + +Challenges use cassettes. Cassettes allow us to replay your runs in our CI pipeline. +Don't hesitate to delete the cassettes associated to the challenge you're working on if you need to. Otherwise it will keep replaying the last run. + +Once you've beaten a new level of a challenge, please create a pull request and we will analyze how you changed Auto-GPT to beat the challenge. 
diff --git a/tests/integration/agent_factory.py b/tests/integration/agent_factory.py index 1cda93eb..8cb622ec 100644 --- a/tests/integration/agent_factory.py +++ b/tests/integration/agent_factory.py @@ -3,7 +3,7 @@ import pytest from autogpt.agent import Agent from autogpt.commands.command import CommandRegistry from autogpt.config import AIConfig, Config -from autogpt.memory import NoMemory, get_memory +from autogpt.memory import LocalCache, NoMemory, get_memory from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT from autogpt.workspace import Workspace @@ -19,6 +19,16 @@ def agent_test_config(config: Config): config.set_temperature(was_temperature) +@pytest.fixture +def memory_local_cache(agent_test_config: Config): + was_memory_backend = agent_test_config.memory_backend + + agent_test_config.set_memory_backend("local_cache") + yield get_memory(agent_test_config, init=True) + + agent_test_config.set_memory_backend(was_memory_backend) + + @pytest.fixture def memory_none(agent_test_config: Config): was_memory_backend = agent_test_config.memory_backend @@ -101,3 +111,38 @@ def writer_agent(agent_test_config, memory_none: NoMemory, workspace: Workspace) ) return agent + + +@pytest.fixture +def memory_management_agent( + agent_test_config, memory_local_cache, workspace: Workspace +): + command_registry = CommandRegistry() + command_registry.import_commands("autogpt.commands.file_operations") + command_registry.import_commands("autogpt.app") + + ai_config = AIConfig( + ai_name="Follow-Instructions-GPT", + ai_role="an AI designed to read the instructions_1.txt file using the read_file method and follow the instructions in the file.", + ai_goals=[ + "Use the command read_file to read the instructions_1.txt file", + "Follow the instructions in the instructions_1.txt file", + ], + ) + ai_config.command_registry = command_registry + + system_prompt = ai_config.construct_full_prompt() + + agent = Agent( + ai_name="", + memory=memory_local_cache, + full_message_history=[], + command_registry=command_registry, + config=ai_config, + next_action_count=0, + system_prompt=system_prompt, + triggering_prompt=DEFAULT_TRIGGERING_PROMPT, + workspace_directory=workspace.root, + ) + + return agent diff --git a/tests/integration/challenges/__init__.py b/tests/integration/challenges/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/integration/challenges/conftest.py b/tests/integration/challenges/conftest.py new file mode 100644 index 00000000..dce4518d --- /dev/null +++ b/tests/integration/challenges/conftest.py @@ -0,0 +1,17 @@ +import pytest + + +def pytest_addoption(parser): + parser.addoption( + "--level", action="store", default=None, type=int, help="Specify test level" + ) + + +def pytest_configure(config): + config.option.level = config.getoption("--level") + + +@pytest.fixture +def user_selected_level(request) -> int: + ## used for challenges in the goal oriented tests + return request.config.option.level diff --git a/tests/integration/challenges/memory/__init__.py b/tests/integration/challenges/memory/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/integration/challenges/memory/cassettes/test_memory_challenge_a/test_memory_challenge_a.yaml b/tests/integration/challenges/memory/cassettes/test_memory_challenge_a/test_memory_challenge_a.yaml new file mode 100644 index 00000000..dbd63533 --- /dev/null +++ b/tests/integration/challenges/memory/cassettes/test_memory_challenge_a/test_memory_challenge_a.yaml @@ -0,0 +1,1454 @@ +interactions: +- 
request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: + \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": + \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", + \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. + get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: + Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. + list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT + Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: + Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. 
Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past:\n\n\n"}, {"role": "user", "content": + "Determine which next command to use, and respond using the format specified + above:"}], "temperature": 0, "max_tokens": 3251}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '3383' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA5yST4/aMBDF7/0U1pwdRIAFNjcuVVftZaVWldpUyDhD4l17HNmTLhTlu1chAVZQ + tdVe5897vzeaA5gCMtCVYu1qmyxWT3biH+vV5MHvF7Pdz4/mw1f3abN4xG/vQYLfPKHmYWOkvast + svEEEnRAxVhAls6Xk8X99G6ylOB8gRYyKGtOpqO7hJuw8cl4Ok5BQhNViZAdoA7e1bxm/4wUIVvM + 5hIu2ud6Ol9KYM/Knkv36ayVoCtvNEbIvh/AYTzJBm8RMlAxmsiKuIP0xEhdgENOQgiRA1e+KSuO + OWRiKA4N3HFXzOFBEGIh2IuAqhBcoTAUOTS644vrdMQ7FltjsZspkDE4Q3gcJNyx0N45RUeFJuIo + B/naKKCKngyVvdvnv8h3/MpQvGEQXCkWF9Ctt9a/XDvVVlFvkogvsQfsIq0H8TPlv3KO8pwSsSJl + 97/wDzBvuIIOho020V3fHCk2AU8BlRuyGSpvjbUPATXbveiMcLs12iCx3V+7xRrV88npxVjbIb39 + Hke7/4ncM7Ty9H3DwM3zkXLY4515rhKoUF7/bN/oZi/rt8AniCPIwJNTC62ErSETq3X/kJBBZF+D + BEMF7iAbtz/ad78BAAD//wMAZaybHTIEAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bf9a9ff2d0d6428-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 29 Apr 2023 18:38:57 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '9047' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '85995' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.67s + x-request-id: + - 4516accaac0d04bccb540241a7538089 + status: + code: 200 + message: OK +- request: + body: '{"input": ["[{''role'': ''system'', ''content'': ''Command read_file returned: + This task_id is 2314\\nRead the file instructions_2.txt''}, {''role'': ''user'', + ''content'': ''Determine which next command to use, and respond using the format + specified above:''}, {''role'': ''assistant'', ''content'': ''{\\n \"thoughts\": + {\\n \"text\": \"I need to read the instructions_1.txt file to determine + the next command to use.\",\\n \"reasoning\": \"The instructions_1.txt + file contains the instructions that I need to follow.\",\\n \"plan\": + \"- Use the read_file command to read the instructions_1.txt file.\\\\n- Analyze + the instructions to determine the next command to use.\",\\n \"criticism\": + \"I need to ensure that I am following the 
instructions correctly and efficiently.\",\\n \"speak\": + \"I will use the read_file command to read the instructions_1.txt file and determine + the next command to use.\"\\n },\\n \"command\": {\\n \"name\": + \"read_file\",\\n \"args\": {\\n \"filename\": \"instructions_1.txt\"\\n }\\n }\\n}''}]"], + "model": "text-embedding-ada-002", "encoding_format": "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '1151' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1R6Ww+6vJrv/foUb95b9orIqWXdISAiYKuAipPJDiAgKHJsgU7mu+/gf2V25sZE + IRTb5/md2v/6x19//d0kVZaOf//rr78/5TD+/X/W357xGP/9r7/+4x9//fXXX//1+/xfd2Z1kj2f + 5bf43f67WH6f2fz3v/7i/+eX/3/Tv/76e/ucYnq+jTVb7DzkoCG8cqzxm6mfWUBSmG/yFPllXybM + xmoNT99UpwHc8qxz66qA4iFv6L5LCqPrghnBtog1vL+dHwF7FY0HoqWJsd32WrJlB8eDp3uDcXiR + mMtSLDVwDHZfuhvQYozNd5vCunUT6unj0LP+pAjgfere2Atkvp/EtqoBEdKBnr+C2y/qexuDszg+ + CFDMfcK28KtBl8cOGcOQC8bj6BOYjB7GRq99Epb4TFGfQijg/TXZG6wzXrpqVFFCnW53ZdONjQP8 + jpTgI1asXpTRIVNGzVcR976PbKDmOQZKoZzxvntZ/eIxvMi1xHZYcwIMpvxqSoAr1TM+gixjU3a/ + 32B/uFVUz8i+Ylof2DBkN5NIJy01Ftl/e8CaCoJ3Uj8EXWXnIcwHspAJObVLEOkURVL7O9pm5pB8 + I3e8gc/1Dsl8kj/uUlnqAL9d3WHNObUBFeRqgs4zZTi3hrifOEw7OMVnFx9qmwSTny8SjJbshKbm + 86gIvBeh2goXQt052wV89Yg8BXYng57V8pKQw9VwwHvTxPRq+kXCLCusoTubFoLj4xSw4aRrcren + B3z0hjsY/XNQwthqLHzxyyFh5Nbr8LDNE7q7h89guC2mCaGcYnor06Wfu+pcQ3lzZkh4jSYTsmbW + 4VvbCPQQyobBgrFoIIuOPnYi7t4L1v36hq+t9aG7bGNV4uFy9yB3jhjZzFGSjAlLfdgu8IlvddCA + +VYOA3gZJqXO5iAmfdUdPagmjY/vtbJJKC2goATn4IEAvr4S8ux5HxKhkbGD208/GK9pgKU5uPhu + QNdlWtfd4IuXIrpL7HPCOl3yIH9cCHZabLLp/L5P8HM9UmppZ9qPp00vQaZUJfVk4dtP6PQaIK6A + gneJG1RCE2UmSMYoRfdn/nUnq5UssB1jDdve/pUs3cs6g5f0EPBhyXV3mhOUwvb8PqEFFaBf63uQ + DKHKicSOgC26mbxh9uV8rE9DBqbpbAhqFpUyUdKl65dnb6TK59JdsF72eiAouIBq7zy/BMzZK2HZ + 49JtPu+vTc3+HVVTx19qdbvhnthm3sZY+gwI0BZuBsU7dQBUZ3EBeQ40FBe+zEaphBF81pVB9Ruy + XXrXSvJ7HyKAwADC9fBQoPIynzg+R6xnWV0PUC/dB1FcOrhzdT8p8MP4EAmmLAAm66kO7NDR6G6w + 3WpO5+M6fnGmh3ZjVvx9KjO4hyGP43RxqkVwr50KPv4N66gA1fD+GB34du+O7qV8w5pYnDWwlIKL + jds262fzGUBYXPKALLdLH0zmONjy/PoeyXLqv8FinA86uHvWmcAVj5bGPWrwEfJPugdvkwlDMtXQ + FOMDIXL46aen2ytKsHEcfNBImhDvs1N+60WmH776GrSVx94TsaNc62Q6bKtS1b3GxNfry62Wrrhm + UPDtF1I682HQ/C7p4GrlNnbPJg6G63eXgvvr+8E2aOVk3MDUAd62e1D99CnYWGdhCnNouzgZv727 + YAkSWCDSYG98nBKSCYIEB1cSiHQ08qB/EWKBnZvoaJmGjM1IvWQ/vKThlbbBJC/JArPvsqc7CwE2 + STzxlKXSIgSiUahYUR55uP4fdKUHp5rPllJCCFlBRLC33HmjbheoXFqTGjfNSaawDRR1xSfq75V7 + tcTRLgPOM2MY54Xdj1EmNfBQDjPV2Vyxjs6qAMp7tsXHT9UayzM/a2pqHDhq9FIK5vOhD+HzfhGJ + xM07sPJBBK1xMmh4HSyXdIGMoGSTDnFulfcDX7BIlfrgQL4lCsHMDroHo+CT4EMnQpfwO9hB2nSU + HuvomczDYE/wCMyKXvrcdJe35SmgWfZbMku9l4yElzglDMUBSd5+lwgzuAlAzyNCLXA7GKJXbmq4 + 1h9aYsWvhvPtosCNjlIk9PsHmI1MrcHjqnfU0k5esODhnKnTk7XUjTqWDMGzyOB2A5/0+FoeySwX + Lyg3+d359aO7vLhTCq6PZIOYptiAbemFUy+3JcL6XtcMdnlLGcyfwQM1zIqqAcplCNd+x/gtPcGs + 5Fqoxo77JiXjvysfDR5wX35ARPESuJ9hLCM1Ch4IH7Q578fXTk5/843g9lQk5JlHGjyL9IFgmzMw + PSy6/PCf5lsPBfMo8gi8kcaRbnO4J9Nav6qcShXNnWcXDGWzGRQSnlvqNpzBlhMNCHxtz8s63qan + 0JYX2FYgI68B+cYUwasErdelxGhb+P34hGb2p751kWg9TYL3AESDc/BJf9iBcL3mk/J5pSY+giTp + 5+qoaWptMZf++ol/nRsdKpdMxE5KD8ncxEUM1+uElfErYNtJX9S+LWqcuFlvDIOLBwjlDGPjqn5d + RsoshQO5WHjFo/5rbE4cvD4em5Xv1KDJjo8UPk2Hkf2T+7Bp9xkm+N6cNfycRcUg6/opK7/ieJQG + YxL6kwdjSwio7t8vgP+8fQHe5yQizDfGflLouVRKuZF+fJ0wxz0PMLzcEOE3S1nNzFcbYL2CElsg + MNgcxJ8QjmV/wnpj3KvRkZEN/aSQ6K6ERsAseToDrtAfVMtvxJg4Pa7hnAkZ3ovTBQz3qUvh9Jxb + 
6nivZzB4l+cCHvvPBYdm9UrGs3Uk4HylFT4hoezZXeuIYrm1hoTuVCaD2CgTWPsZ31+Pol+sK+PA + nPEZNVb92GBPs9Tt5PT4N1+TMdEYnHVOJvA+OYyN0TaVu2uh0J/+nO8v14SdWExIvPJPsKz4q5Z3 + v0GDp2fVZLOL9Zt//LC1MZg6/lHD3XOTU1PkaL8Ui6+r8unpUKssZXc0nwkHc8e+0uOnHpKVP2sQ + P+KJcIN6Yfy+fwzyuzjpFM9XyR1WPQXg/ezRHMUHlzzqw03xXdnHO1iW1fT+GM2vfumeOiewiCHX + wDSPfaoz/uBuK6ftFJ6jCbbEfGLzt81NOMEuoebRx8ZCp3TVM16Nj1isgplbgK24+pcQrjWkZAjb + RAFr/dMdT2jFJKEoYU63ApKOxiaYHHj0wfp86qnTzpjFjyXAqm8IvbDNq19e0xSpvj0hjJtFTEhl + qQQeuQdPvSjdVqOZeROgTUNRt3SqO2xOVwT5035D9Wy2wcRur1pFaUTpsS0+YKbakMJ1vfFeZFuD + VEBHatzqN4rVZQkmeS4hrKWaJ9tVPw73OTnD+FGrZF7xlO0PAlKLlynjS2KfA7Z8/bfyNO8akVpS + 9bTd6w2sPAbR2N5hRatu0eEPj1DVG8ZyvebLTy/g+/MzVhNfgAh41oZHy6l5G/T81RVYylmPtmxn + ugKWxxKquTsRrnEL9qff1fyq4uPGn9k0OJkCdoneUKNTS2Pspk8H3+cHXvkGAjq4RaTOs16S7uPb + 7jSh0oO0nxk9GLB3Z6O4I7AR7zdqr/zKVvwHUO7SVc9WxvTKPhJY9TLFQzv2i52nUAov9o7Ii3Vc + +cur5cwGAdWSNKymzGEWNK/Rhcjh81AR8dDWcPUj9BR8puCz9h+cnO5DZA0/e7J35BBUKb5SVwdd + slwM6w2jxdfwfn/gq5nb3CMg0/6EbdG4JlvEMQuu/oOa3XLoBesKIDj2qYqfTbpjoig+Qti34QNf + j+acTEVQ+bCHwZm8p4NcDdBCHbjnOCRiFoJkEpsjB5/3QKT7+nsypq/89aGkhBaO3YUmU6epCpQI + 5lY/F1bsa3MDyIcwoqd8803Ywbjc1D/9h58Pdwo7j8irvqZaoUeMtDejUdvdCxF1xZuhV5f6V1/Y + pNniLmdDjCGzq4GIB+FdLa9JiuGPX/br+ouitECYO2FJqMxtgmX1P+ByeHnkehqlarqU+xtM/buD + 3fn9BXP7Xjwgn8yJrv4smfxcUWC+CTDVmlJY9ZjiA1N8h+jHl3/qp3ifVHzcqporDnFugbMIajKj + XnBpUe54NVPAEVtwvzVYj6pG5fvlTcSl/CT197EhingIEnqKkAgmia892OnTGZ/RQWc/PfqHb0z4 + Zmxh2/MNxlZnUeO25Sqi783o3/xUeX6yOC1ZwAU+ZZqmQc/qrG0FYAhXn6Lmcgimy/VlAXP/zqhP + lbGnyilrwJ0zLRwdPlW1pONVAj98wzvl2JOAazso01zD9uOOwcqPEyjtU4t69z1XI2xjW2bKq0SZ + eimT0b+YIYwqjf/pu0rcqncBFhPJ0XZ/CPtPJi+6vI6H3nEmunTNAwBK3xBrW52BxUCfCNiKQbB7 + B59gjGtJg/Zd7Ml8S/fgh9eQhJlAoBucqyW+n5uf/0fDvTMT8fwFA1j1DZru+Z0xQ6MNWMxjjfFN + MSteoedCHRJHp3Y7W2z6ytSHOxX12PCNUy/A6GGDU6S32DbIKRmb5IFA1fsh2QRHHszXt+jA/YN7 + Y92/z4wdLrkHdM2t8F56PILpdYl4EApiSRrQPoIltFkJrUsLsfWAW+O7eV5j5XMRMozmIe9nbxtb + UKyLL0aWrrGtMooakNPhRuPQqyu2E57Tz2/gPQx8MF4/Xw4C6/HG9pnH7pJvrz6c+SvDJwcejXkY + 5BqmeBqwqQ29wXbtXKgsFHZkie1Xz/L7pIEY1IwiXKTJpCU+p3qmplLrIaB+fu20RtXApiXqu2yC + Dkv8AO6vzYa6Y0eShQlHB/I9N1NbNLYB8ykw4aG83Sm6u/t++T6QBDffcaL6N2x7ZmNvgdvnEiM5 + LBzwyyfA9hLIaNslmjtE5Kyrv+fpU5tUi4FvSCm7ZEBKJF0M9oVxDBBXLwRieGfEafb2Ly/BKx/2 + veNGA1jxlO7UkFWDMX0j5ec/zFoixnLZap1aeTPE2Kp5Y66ffSOPZXuiltfVfVvdBQIvst3QY6ru + Dd4kIffDC3xY8WSCkj3AXZftyCd2FvbhFj1UBzXc02P7LPv50bIzrB/VlnpzUIO5MBITms3dJtza + D/PkUgs4CH8RT7PFYF6oKSquZIW6ivkJqLG8iHrW65xIT2QZzFSHG/iOMsBeem76+RziWlkC9Fn1 + xcgoTu3zH/1wxEpdjWLUvaFtDl98r/euK16FiP/5Kfw1P0u/CEdeA7cRZ9i2w/n3vDe4wU5c+7vt + pxv7ENjD55Nsmm1cLS81bcB7k56weaXHZPk+NgOMH15Fiv13nwiPGt/AnaQF9Tw96xf9vOGV/WO6 + ExEocfJ9OC6ByJuPOPqIYzKMtuqDbVHtKcLxla15SgiN8Ijp/tRtA5IWcwcqDrtIaucaDFY7WRAd + eZ+e1FwztvG34NXFdGvqAEepRs1ltRosF4zxfYHuvLOlGwxo8cZr3tYz9+45EBh+QY9cUbD2SRsE + J3jeISUCYbAdBm1S8yO0kbT6KRpfRQfe2kFD3Dxsqn71E9Anhyvdm74WCD8+McU6xPu6OwdTupEi + eP2UB2rQ1utFnXdDCA/ZA5tH7LLlc84I4ArtQf98D7hXp9jfi0n1jPcMMY8aHmwvFxkfnmUXrHgX + w1F7OtRxFxwM7sHlwFr/FAcGSsStmvPQ2TTC2u/UHaZEPEMqWTb95W9LWtjCzx9Rfece2cLRA1IQ + N21Qr8iPaimWWAfXT3FY9ckmmYTnZKtSbyrYk4VDxRgTBCjW5Zc6H79xB273WkCeKhu07QOxIppy + HKCa6gPFvVqCxs8XBa54Sh0Do2p+YVTCz9WlRHw0E5vI/ojgM+DqFU/HflRGhCDZEQ9tpqICc2bd + bjBx45p61lsHyyNSznDGX5+wnUYDFm12ukqtc77mx0UwBkhWYFC2LXa5nW6I+WcrwWX73uJjoezY + LL1v2k/PYmdjhsacHbCjpPNSYb2xYsB22wHCi8Ql6HG4fwCjhafDnx+NP1c3YaLkOOCHBzue4Gp5 + cYKvCDq1Ud5slX4+W0sBLwd7QZ/N69Xzh91dU8p62JHNnUbghWHR/fHjvEZRIm0HwsNVv6M514+A + 58urBkNWXvDhog+s3jvzDV6b1sWrXkwGTpugCm47h7qcIKz+LiLiBV6GP/g1Bc8mA8tng5FIeup2 + q16D33gS8e05NS4LyCWDn/fHJjy7NsFceq4PfLK/UnS39X/rnZ370Km+Q6MxiM0RwjDsc2xIxy5Y + 
rtbLVCtgHRAsYOB+ntBMgXn1TOrv0MlgV3FJIfuiC9ZuXVXNQmwi+NxyKXU4v3JnuzybqsohSu6v + 1GCTvfFK+MuXfny5cK/dDT5ENP7yk4Buns8IAukR033JFINsufkGV79FYCDz1bTtOR1628yj7jOI + +8nblzew+kGM5vrhMpTKJhyIJVNTaw/sh5ewljYIqfKnrqgauG/466/9NWuS7hDGN4hoxbA2pJRN + JkkhEG+EJyweQrblNnkkr3oIe1tIDIZTzYffO6+RXz69KOfLApk9ygSMHQqEvNE8UDCxIuymdckE + AL/88mmMneoMVv01gYdynYlYK3mygMcywen5SbEuxX0w6FV8g2veivdaVLOFCTtHNfdLjw+le6nE + g7lAWKChoc5My4B9wesNDJMKSFWemTvdpzJV13xlXd8yYTX35gH/iMOfvg/oKYgsePa9Pd1LKaxG + p9d8teqzF8WFxxJx186lmmJOJmIIrYSt+hS62MJEnfUPYNNTfsP7iy1ksxOtYBYfQQQdZzjS3S7t + wOgMrQQQp3pka16NisxD48CXP1yJrxpLTzuj1aBtljEZ8RrxoiwefvkHYp2qG9OEEw2u+Tb1xnSq + JqluQkhCv0VQlrs/ehe8z58UO0WrgXEfRwS6OgPY2FHVZcs3riH4dBM+hkUH2GUOCbg+jA8RbDYF + barWFpSXh/THn42HGa15pN+hL1DigPzwmX9EIdluXrtq+eWFAzFlMjs6Yot4eL1VjOLPn3x/TlVi + ARa5PuHjhLHm279i+OKViGL56Brib/427zjHWLa+YFGQ5EDxBnZIOVhvNvrnpACI23hYw/qzX0jN + vF9eg3izfvds5f+fP6b6kb8Y76abapXvpzc9bHjGxuXANCl3biWaqpMZTOwqLcDc1xkSl09TDfGe + g+DDay+aq9ormHsK+V/+jrgGvV2yiN0CfXIpV7zZBMP8GHjIFNemjpfF7vLRIwGe35DiA1CfvSjv + eu+3HtQ248hdnKSRAPvWPRlXffnHD675Oerw0LDZyLZvmEzVC+PAIMm0+v1f/v5n/218tE4KI8GV + kGjnw2//CwEe1He8k5rWHe+Ff1bllNwIHB/jiketCfdO5aJaTE+VuDlrN/UQBBHdSZ/9ur+W3+BJ + 0XO02akeW/WUBNM88rF5vPoB45enB67XIcSn5FYYy/gddFh2p5xma344D/JTgHWt3+nRtBj7flUD + qUTIBnz8tLSil8/rDKXIMRD7Gk01ACVwVNjyLdXQoWTspxfW9cXu4RsCet3jWhln5YHg4nLBul+g + Q1xEKc7X92vXPFm5e+aZ4pt6YLN+DWvIVccndUZu6X/6FL6XOf+Tx/WD20QQV3REyuZwDyZWcR0s + 5e0e7/rymyxZFzfweDwrODqURTJL1Qf99ieJrLRdNclzB3/5EHWeb9DP8bfh/+T5uIpLl13FXQdf + Prlig/dO1WAJYQjDnbTFXphHYEGjYkFr5M7YiNu5Gg68QmQzkinV8qADs/hBAkxztKend4MAX49A + gJuLoK95pN7XTvoYoPZIPNSdr1Mw8a/BA2nu7fFNc47u9jBbE+A9sCChfuBkjJtLp4bvYI/3mekF + yyJ205/5e8771qA6b4Tqyt/4pDw5Y07nnQClpH0hLrgv1cu/mDfYMrvDrs6N/YQ92wIF21YUD8OZ + ke9DJFCf64VwVV+5y/chDmpU8l9sK/zHXeQLiEBFPh+61xLHWF5q2EBgfWeMdLfrl/bq80r3Xixs + rvkns/WiVHgQO/SEBL2fpmTjgzEn19/8sGmbXxYQLHud1PWDJl0bdhrcqXqGf3p3cOyxhj88OGK+ + MNZ8Jpa54CVS83TcJ1tsh/afPObk6rTqbn5aQj2vAXUPY5IMCvy8gXg/oHW/EVfrfnoHLvJti073 + /Mi2k5crkNM2MXXTrE5YKYkKkDpppKjqK4PJeqirf/9OBfz3P/766z9/Jwzq5pl91oMBYzaP//yf + owL/jJ/xP3le+CcV/pxEIENcZH//69+HEP5u+6Zux/87Nu/sO/z9r78E5d/nDf4emzH+/K8L/1jH + ++9//D8AAAD//wMAP557oeUgAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bf9aa48f88a6428-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 29 Apr 2023 18:39:00 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '105' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - 4b63030d949781709e3e3da468f83709 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. 
Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: + \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": + \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", + \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. + get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: + Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. + list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT + Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: + Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past:\n[]\n\n"}, {"role": "user", "content": + "Determine which next command to use, and respond using the format specified + above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I need to read the instructions_1.txt file to determine the next command to + use.\",\n \"reasoning\": \"The instructions_1.txt file contains the instructions + that I need to follow.\",\n \"plan\": \"- Use the read_file command to + read the instructions_1.txt file.\\n- Analyze the instructions to determine + the next command to use.\",\n \"criticism\": \"I need to ensure that + I am following the instructions correctly and efficiently.\",\n \"speak\": + \"I will use the read_file command to read the instructions_1.txt file and determine + the next command to use.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": + {\n \"filename\": \"instructions_1.txt\"\n }\n }\n}"}, + {"role": "system", "content": "Command read_file returned: This task_id is 2314\nRead + the file instructions_2.txt"}, {"role": "user", "content": "Determine which + next command to use, and 
respond using the format specified above:"}], "temperature": + 0, "max_tokens": 3021}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '4440' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA5yST4/TMBDF73wKa85JlW5puptbD0jLiQsIAUGV15kk3trjYE9oS5XvjtIk7apF + gPY6f977vdEcQReQgaolK9uYeLV+Nu92SZqo+Tb5YR+//Pyw/py2ZfpVl48QgXt6RsXjxkw52xhk + 7QgiUB4lYwHZPL2/Wz0slm+TCKwr0EAGVcPxYraMufVPLk4WyRwiaIOsELIjNN7ZhjfstkgBsod0 + FcFF+1yfp/cRsGNpLqX5YtlFoGqnFQbIvh3BYph0vTMIGcgQdGBJ3FM6YqQ+wTEnIYTIgWvXVjWH + HDIxFscG7rkv5vBeEGIh2AmPshBco9AU2LeqBwybuxnvWZTaYD9TIKO3mvA0SLhnoZy1kk4KbcBZ + DtFLI48yONJUDW4f/yLf80tN4YZBcC1ZXEBLZ4zbXTs1RtJgEotPYQDsI21G8TPlv3LO8pxisSZp + Dr/wDzCvuILymrXSwV7fHCm0HqeA0o7ZNFW3xsp5j4rNQfRGWJZaaSQ2h2u30KDcTk47bUyP9Pp7 + nOz+J/LA0EXT940DN89H0uKAd+a5SiB9df2zQ6OfvazfAk8QJ5CRJ6cOughKTTrUm+EhIYPAroEI + NBW4hyzpvndvfgMAAP//AwD56qfzMwQAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bf9aa4aaaa56428-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 29 Apr 2023 18:39:10 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '10495' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '85996' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.668s + x-request-id: + - 8a4f2cccf4cb6192b63c911d82abd833 + status: + code: 200 + message: OK +- request: + body: '{"input": ["[{''role'': ''system'', ''content'': ''Command read_file returned: + Read the file instructions_3.txt''}, {''role'': ''system'', ''content'': ''Command + read_file returned: This task_id is 2314\\nRead the file instructions_2.txt''}, + {''role'': ''user'', ''content'': ''Determine which next command to use, and + respond using the format specified above:''}, {''role'': ''assistant'', ''content'': + ''{\\n \"thoughts\": {\\n \"text\": \"I need to read the instructions_2.txt + file to determine the next command to use.\",\\n \"reasoning\": \"The + instructions_2.txt file contains the instructions that I need to follow.\",\\n \"plan\": + \"- Use the read_file command to read the instructions_2.txt file.\\\\n- Analyze + the instructions to determine the next command to use.\",\\n \"criticism\": + \"I need to ensure that I am following the instructions correctly and efficiently.\",\\n \"speak\": + \"I will use the read_file command to read the instructions_2.txt file and determine + the next command to use.\"\\n },\\n \"command\": {\\n \"name\": + \"read_file\",\\n \"args\": {\\n \"filename\": \"instructions_2.txt\"\\n }\\n }\\n}''}, + {''role'': ''assistant'', ''content'': ''{\\n \"thoughts\": {\\n \"text\": + \"I need to read the instructions_1.txt file to determine the next command to + use.\",\\n \"reasoning\": \"The instructions_1.txt file contains the + instructions that I need to follow.\",\\n \"plan\": \"- Use the read_file + command to read the instructions_1.txt file.\\\\n- Analyze the instructions + to 
determine the next command to use.\",\\n \"criticism\": \"I need to + ensure that I am following the instructions correctly and efficiently.\",\\n \"speak\": + \"I will use the read_file command to read the instructions_1.txt file and determine + the next command to use.\"\\n },\\n \"command\": {\\n \"name\": + \"read_file\",\\n \"args\": {\\n \"filename\": \"instructions_1.txt\"\\n }\\n }\\n}''}]"], + "model": "text-embedding-ada-002", "encoding_format": "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '2085' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1R6Ww+yyrbl+/4VK+vV3hEFqWK9Ifd7KSBip9MBRQVErlVQdXL+ewe/k9M5LybI + pW5zjjnGqPqPf/31199tXhX36e9//vr7U47T3/9r/e+RTdnf//z1v//1119//fUfv9//8WTR5MXj + UX5fv8d/N8vvo1j+/ucv7r//+f8P/fPX32oyFiQ01EZlBz/q4eb8PiG137Fhaa7SDLm22gTQI99h + kdSphywYbRI9dmWEvzyRwSlFAtES/Zj3tDyKMH1vrsj7dk+VoVvoQdF7BUiPjhsw96HjwYWyGenH + SQT0DEwI8118J2Z1HRmeHrMGPZE/EfmlzoDOapuAhxVUyOXNeZip05XgvIUFiRa6B9PtPGqilckK + Pm3xbhiP/KeFMzm0+IzTo8qEQtnD6us2AbXGZKDyVxUllTx2yLKeD8YARg68sU9E1H4XVfPmebpL + JywcUdaE/MAakjjiWwxhIIwXnzFNucViEzuP9X0JTM/LTgBiKopI32dxTs05Sw+4+hTIu493dT6Z + AoUFO1dEq0pfndPq8oKg216DOhiqil4+nACqh6sGHOi9oRvuqIaHjXAl+qiyYURcQkGlNErAVTod + 2jMRRTBc2isxXlE4LGzpUnjBzRmh7BiprC0aCMU2ztHzQ9SBCtdNAbPBH5E2zU1EF7po8MWQEoCi + 3A8tUqAGkX5ZiPwVLbZ/ad5d5EDNyEPWzwP+JKw8fPrxRlJ9e3FZ2dIMOnVZ4W1x0RmFXGCAB0Qt + 8o/3ozue2zEEi1xfUKiHAlgMxRXhNmEv4uCHUo3VSYkhqj1GktuTd2d8viaw3y+XgO8Xp+KeTOxB + 4qQFyc71hdGgkmQwc9RCxjQXOW++RAG85f2B2IWuqfwJHVrIbzYCbpU7Gshc3EWoPWsFpWezypcD + e59gdPA5om0iyf12Pj9DVTJH5NHFz2fpXWZw0uIBBXdXrogkWCV8buccKb7c5digdQxdNJ9Q9EYv + l53QoYejyVPMl2KY07g1OHhMmw55NW+q1N1oM3yEy4cYb98cRmLuZ5i+4ZVkWl0Pc3A+x3AmoEXG + a/bVXZprLbjehjOWbicfzH62iNBWCwN57iZzKZmDGLz8NETBJz4Mk3F6W5LAITso9x9uGAOJj8FZ + j054aTQc0eR+j+G2UCyk+6E6sK6qeundqQsW/NPgLrdUxAD7AULyEBvVbp0fiX30FB8eLnGXz/Tu + t7tm/ybmU81Uin1ZlG588EXKg6eM5Wejh1933hMX5FU+bXmgQcGuROJotVZRJ3FaeB83O+LGsFK7 + sDgWMI6fJW6Og5NzlZhk8C7WI4oScwHLveVlWOx0H0MvnFxqp/cGJlbDBfyd1/K9D+gehk71wjPO + zhV1vd6AKT68ia2/HHXfl9EG9jJs0BW4t4pFz91GWuMZuW8YR7Tv+vpwjseFKPnVUftPwl4ApVmD + ZJydh1mc7gKY3qlKEA3FiG728x7sR20h+sc4RVTGWQlw00a4HPXRnR2YnmBniwPe2l9N5a9QbwAn + X42AiodvvrDKc8Bs4hkdL37NHq/yGMBLDyVcHgmLZkU9yCBNHAcdZaUd6DxxmWSG0EKxe9YZRe0B + Q/n8IsHmfNVVYhUvCMZq2CITGZ9h8MS5hk/x+kB6K1nD4owzFWe6KUiQxlZOzFt9gu7uKyPV0ypA + oTJk0OXTHilr/0lqnSB8GnGKJSzUVa960AHnEJjBzgBlxaxrmMB4jghxN28czRvlsYEScI5EOWWm + uru4cgLg+bMEoJneFb1M7gt68wkRY/km1ewNnQH1Q87jzpJDQC9FE8JLOOsEJU4GGCfWpTS87zU5 + BUIcsQhHnnikXw9Z6futYvO9s+Bju4cYtHfJJX5/zIAWJDwyz3bEqHaeOYk7MJvI5/ThLvB2e8Ff + f4aiTAZsp/ca/Np7FlSO7ltZU+DpeHKRUrayy6YtO0lTRD3cSBfTnRtcCRC+jyFyvUVwx8UDBfRg + LxIvGp1hjmsrhYf36U1++b+kHsmAtdujoD31FmBhaYrg+5CPwabFfD4XeLcRFO+LMTuzrToHeqBA + E7hyQMtWVtmTq0V44g0xAO394TI4SMEfvFPry6AyDTg9bApSEJcrXjnJ+Df+Uz+Qy26AisFDFIvP + MKLgZEv58vIeMfDKPg2A81Hc+eHOpXSYBg8h6xRX9HkTEri13S74cluV/fAD3mGaIFVjPqDZ8oih + qg4dHtZ8WdSqPwHDK0YsnE01wmp+TqU31Bokm+DsTlwYQ7jnxU1wsMQvaxw5mWFzgSbSxnIzzLts + 2kNOnzVSBNmbMTE8xeBe0GfALZELls5eYsnYCTPJDWyzrxleWhDvnAAvlemqi00UDI3mgZDuXOR8 + zRcFbszXhAGbQjZvlAuElpe5yDTHL+izPsDgtsskZHynkzsWUu6JuXXWkCIezIifglMNkk1eBSwx + z2Ah91cqZeLgE6WI/YgzXlYP4295RW6xPbvMxZ8QhN9aCPbtsVKX/UlVJOYJT/QwXZh/dO3B/fAd + OTv+lc/27EE4yqKODF1v3SnmcQLP6qMhK15XGLWHEXpDToKlYGm0pLnXwufTJMj3tzzDzEsUEUiy + h54DXarFh5IGKeFNcuy+Edh3THXAIjcXzOUDB2iTzwEcDG9PzJETAD5FOgdnqXpi+DJubLYshYN9 + 
trXQsXlbbF70moMk4F2kGC+5mvT3KZbYpeOJucYDK6PgBawmw0S+Va1Kd8ii8LXcUmQ5cxkNv/p6 + XqhNVGcPGB5VVAIv8nj0iCFwJz1kBiz2Io8s/+S6VPU4C8Q34YWBwp+HZeVn4C1zB/SkYZbPGrPp + gbXj+Gc9x6l7OLAQ8BspqqGw/fx8OiDklRvehyVj+Hbq9oenMmkkW4zI/eE5PFSGG2C1aXLG7ONe + 6t5vHXde/KlY374SqBBDQ9FA2mrZbm0M1/gguvIlYK6NN5W26cchqsYmgMeXo8BS222IP23QwJ4A + lGCtX7gawkDd70xqAWeOZ3LE6dudVaxrYl/Ud5Kv+Nd/+a98KDno/qmn9Bm+R3jKjiWxH203YCt6 + ChA1yoMEamPk+5UfihanfJD+MeZo/iZYg+GHBeSoWixq4eWqQWq2CB2340edLRkGYuUeBLx/+9+B + AGw6wF2kkVh0s1HpGaANKF/6iMVnAVV2PR9e4iJ4mJx2waSSe/1q4FU9qSTRdctlonscJTHDdfAe + TTiMzfcRQnOqbAw3ShVN+HTRgDMnM56HuBmmi3rAMFHtX7ylYMZfNZAcBjWCWjTm81QGL7g1L1uk + H17PiOTnoIePyz4jpmaXFY3bYGWTmkxs/vTK8ZiC9Ievv/UFRMV8Iu0unwRddFOocD8fPHFERoa5 + K3cfSFDS+YeXmBHbiOYrp2EoZIuOjFA/RNOueIXSGm8oc7VJpfuerw/cuT4j++jx0ejsjj18SnBB + WjCoA8+JYwmLvcCTnz6ZlV2EYQbvPDIucKloi40CyIoZEFnWbu5HkOw98DscEtm9BGCOTV+A6jwd + 8RySccBGbWogv44fYktO82d+4FGvOCL3RsFm+bBtwPpR4vleqS4rfoI4NfZElzjqsnvd1uDi6QDT + U2a68+mQ30HGqweiMAWpc3VSEphq4hYLnzeNOmu591Anebzy9RYMLbVE+P0qNa5opblLIt40IGRM + J5Z9o4CV81GG369cI0vf7lQamlEMfnzVza0UcLIVixBmrUrUR2oCvg+5Ozzr5xNKqj3PqHEQIfw8 + tBKlK39kl/L1J5/W8XvV9IDpBvTypsHwcN67DL3a/S+eiHpmT3V0J6ZAralmZFsyBcs3eASwt4mE + 7OQaVQypTfzjo1g0ZjGfL9E5kDBTtphKe2dY7r7sgbpYtkTnF5WxJA81aXZjLaBK1+ZkibkE8DOt + kFHteUDLbVvCYHIEvDTvli30kxiwuAsx8bGgDXtNzWsQW8MFCyLiVMrzisCfAnQJptvURvRwBw68 + oh4jU8RP91ePwPTOVExXvj3jrxvAfR+ZRLPFJJoMhhuxfJkjnrfn3GWeODeggoqO3E9C3XkbdCE4 + OpASE7ScOz0mNkr3o9Mh434Xo5UfnaTHaQsw7nUDdHsLZmCt30TWQxnsFuXUwuyleeiedT6bzjeN + wjU+gk5yGsaMPiqgwZkG0bAvsyU6GhDOUSkhnyvSnPUhV4C9YiASr/pnKuicwe4We8TfhlE1Ny+j + F1+d9ySZu1HcMTjfYsDblo2u96xj9FLgE/g4xwkph5IH43e8baCRhgekXTk4ENNFMrigCQX9UvkV + 6Xoagiy8X/6rHuenOIVYl74IGb4R7XTJdCAobCfYja9AXXaukQByfyj4iyrZZdwReMD1YYNstxJc + ahwoBKv/gJxvZ7qtEAkyDF7Uxx87d3Om7vUNXBEGtwd6q+YriGIRmp8gAFwh5/sLF1owFDh1jf+L + ugg75v3Gj/RKVVQ+fw4CbIloERnwj4g+b3MiPRzFR84L2QOvKecYSkFnIavb6S75gvcdfL6mjjdO + 1Q3zRzo6UKyJiaw1nhbnU1owK3gZ/cG/TT+9QLmtvKBOP4764+uw8O8M2Y/Wrob6soRAj/cRkneB + 7y4duCtwCWiPjO0bsmVrpClIq/lITpmsVHSpcw729iQhWdYOLol23R56w40gK5sSQNFsJTDORwcZ + 715R5+r4MeBtK22QWYo0oj6gnOQkiY+bd1+6i2GHr19/CYr868A233MiGU6nEN+F3bC89GCEDyM9 + ECTK/br+ZwiC1+yT9POmOXsd3g00ZO1IrkE3qHPUZDGUCqMjunTlB/KS55Mkq4JA7PozAhbkowOL + neljsNm0gLym5CQCkl2Cw1Gqq+HVoB7G1LwROaYZY+ZtDMHuI/PBllyu6nwmVIQtGC8rnirqMt52 + LQQgKJHOLxWYpoLG8O6/Y2JfHpuK0LDMgLiDIbE/zzej/bwEUs47BvLIlVTME4UGPHsAMRfjMyOJ + x+2hwHMPonw+arTWfwdqQ4Z+fF39owdmzbwHOya6Fdl8z7F0TOYtFjZVA9jpap3gD+/1GDVgaa67 + Gdaxt8G7VmoHCpUqAw7R62Ap/b3K4OO5gai82eSnV2fjJfcSyN573N/Iiy133woAfbd05XMtWOc3 + AbqtGMSOsKpSz0kt+BlLlWig96pZBKIFv7d0Qul48cHusXNieCD3PWrFEQ+0G+ALrHiDlKSB4M/8 + Hd7hG5lckAwL60P6qzcBHj+oospo1XDVz8jvegjwGs9wfT8Qb6GR72TdwlCZThneYgEOZPNSsIhN + dsbiiierPkng0tc8ytLdALAknws4yItATH7J2exdJA8KQXojv3xko+KHoEcOCOBbxmDVqxt4kD87 + ogoNjej1fZalw0lNiFveMJvEj6ZIfjeGyO5bEbD+fcPQPDtFQNfxTV4sbuDuo/DEf9Svavzp08ux + FPF31ZO7DIWWBM70iduzqebL7nrrYRuGYXCgcKomc3sQoN3xMTGG065aTuUngWg+ykgT6jdbxjYU + 4P1UO0QvG3PYd+JthrDuG+Tnh0JlS3VrwOPCZcS2RJPRtzjswZ4XNsSRT2fGo1vmiWQafWSv7S3N + 6evAq4NmLFK5VQlSm+QPf3oyD+U8fwssCDrpuuK7kE/n896CK18ifm/y+RLPVwzvxfwk2hrPix92 + 8iGhshD03OvEKPMKBdjq3SCK0lk5F0jbVf91HULXjcYYhfsCwqtCiN7Iej7mtk/BUIVB8DYPckUf + aA9hF+o1Ob6Ock7rb4nhWq/w9ng/qGN+umewHU8c+fmNy2UXZ3ARlRRZwO7UbsabHtafwg6a2/Pq + /tF3P75iwJy6Y+nqLbz24h1vrYmoNAzeGymt6BF570cXTdxtaCDUlxr52k2LeMeoLPjTI0de21Zj + eSsFSCbsI783r9EyNfIG7uXwg6xrdHMXp8hfsDb3Ia5I8wJUjR0Kt1nKkVg1MrDcnMdd+OGZcY2O + Kp2SdwG/+0IPDms9mj8d8YDYlEsgBM7L5bpbyv30JV7jjQ0/PrL6F4F4yQP34Fw9AVpe6iKb3CyX + e7jCC+Kmj5Cz5veY9rUDo+MnR3rtgZws/cuTNherJurK5/jhVJZ//MG5vqrV3oqeIkBJA4PvyKUM + 
d8y1oI24BT1gV+VzOL02YLxpEvH27ada6KcwwC+/3PxgqIzeDgrg1E2Gt/7xyGj9ijUoNN8QBUL0 + UhcvplDCdgOCvgn5qheDhwDyjcmIdo45dSLC1YPmMD2RHeFKXfWeA9v82JH1fXeJ5+cIH5tjiKW+ + PrPl2AYOVDcgCebKBsMCSmqAq89rWFrXZ/L8Mf7jF/gpi6LlMQEM+HmusCB1C2OYeAb8fHWdyLvh + O7DyXc3A3nVvZK3+7hw93RGGyXUiOnDf1RKwewn3ZNTR73vkZA0y9MTdCTmcNETdD68eS/9FlgWF + iEnlvf3DB3/93cmCFIPJcC7BgSppRWlANiCdnSkoSXuOMOQMA2bavgj4tDpXO7UXRNH9SjaxrceU + r/wigOfx0CE1qcd8zo+8BUZZ0IlN9uUwPxZTgPn28ED+o5aH5pOAEl7zAiAZ86NKhalMJF3PKFJ/ + +vOnJ/HlZmEeGZ+KbZ2CE+3nKUTHsp8H+nWPsvTys/Cn3935tbSj2GeShbQ+sao/9fmnHzhusquZ + u28pbIXuReR1v2RnbhdR2ibLC/PYtfJZ0r4CiMrQxPAIPJdFOPcgGJYjkenZZr/xwMXbyljw2DwQ + 80UF2EmHGUvrfFMLywXccfc0oNK+H6bAtu6QCZYfbE72I59OHYBwbMsiCPryGC3DAu4Q2qlAXHVp + qllLzBM05icNqOJNbBa8LhO/H08MZrPYD62oWnsYDNcAOWs9YUsME7juzyCNZ1y+1G29AYlVc8TD + D7HCommG8PK0ZmJW3Ddf+aoCt7bd4V3eaXmvPIYEPNj9HExdfwe9GwkYXp8Gwtyf/ZKhvEv3wXVR + MD21iuX46MBANt9YFK62usYjBeYRS8Sjcch291lvYYA35a8+AJpLbQ83ySYIuGk2Iop9SwD1e79D + f/iBfSs4Mduc3sjfpX5OH2izAYmTFUT7fEA0rOsnMe51wDvSpKyjYz2C4V3UASONzFgSuwW4ocZf + /SpVxU7i9KBk1ptkyaVS2evQ1X/4ypzYTj41tW1AlKYNCrhDGS2nvBhhAJsb8dfx//Hfi6Nhod81 + t3jgDkc/nIlvtgmYvQDXYOX/+MeH2GJcjZ8fFexs4cPmXq0hPAa1gU6BwOUM7B7zn/0LSy/FP/4s + fIsnGLT5d2BzXMspgJxhIu3zyXMKPBVKwl5mf+rtqic5eKttGmw698w47fEQYTapCQnSRWEzOLwC + Sdttz8HHe++jiS3v9LdfEOxvyr0iZKTOodlmDrK15qIu93arwNUvIaFnxPl82HgGPGZZtPq9/TA5 + n9KRlPYGEOLVbzXJujxC7/ktg02QHdm8eKyQVj2Cd1fqqTQU9gpMqCIgT0mbAd/YNzwsrp0E5WFL + IzrvXAVuC9lC0W3jqNP5FaXQRfRE5PpaVfhRxAHcE6z/8aOYLgj4N158+A7XYThTXoPn7aYIgB+E + FfMTUvzhF+rx1OZsMm0ImavtUHxxvXwWm+AEbv6Kn89jV9E7cVN4vXVn4gNhB1j23nFQ4PcPcmTv + zF3a6CxCZ1PpyB/AMS+7gXvBn17ZRccCsMNmn0CNnwFydpnOllc0bMQsLC6rXpLyxX5NBWzIpiNI + SQKXP/h5D7f83SP+vR+q8e1faxh7YYe57btmy2YgM+hCs0b+UBhg/6s/jXIIkdIOQb6Qy5n++BgK + MimtJl7wLbj6bShdx0O+XMhJP71l+iGolihuRbiAvRIs3mcbDVsn2cOrGqqrHgcu48/b8jBpyUD0 + xy6Lxtqfsz9+wOpPuGydX8neDe+AM6TIZXkjloe1/R9eVyybIw1OSnVEapbXAxWmPgFH81IgvZE/ + Od0d7hwQDr1JjstFy+ncTBRcEEHrdR0x/JXu4s+fkeRyVFvzPKTwOmIDKdnjorLNy8Hwcx1qJGs7 + I1rzhR6ux/uN+M5HdKnnnCzw299SvAoDTEtbhGHgcMTbKGo+9G2biPFNfBHr0Wcqs/3DLK77s0GY + iajifnp9xWtiwtt7WPWCA/TNfoeFMv9Gy7OOHOnv36mA//zXX3/9n98Jg6Z9FJ/1YMBULNO///uo + wL+zR/Zvjtv/m+z/nETAY/Yq/v7nvw4h/N0NbdNN/3dq6+I7/v3PXwLk/5w3+Htqp+zzP278a23v + P//1/wAAAP//AwBJaieS5SAAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bf9aa9c391c6428-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 29 Apr 2023 18:39:13 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '121' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - bc3576feee112cf82bb99b1ab4530d16 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. 
Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: + \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": + \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", + \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. + get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: + Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. + list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT + Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: + Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. 
Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past:\n[]\n\n"}, {"role": "user", "content": + "Determine which next command to use, and respond using the format specified + above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I need to read the instructions_1.txt file to determine the next command to + use.\",\n \"reasoning\": \"The instructions_1.txt file contains the instructions + that I need to follow.\",\n \"plan\": \"- Use the read_file command to + read the instructions_1.txt file.\\n- Analyze the instructions to determine + the next command to use.\",\n \"criticism\": \"I need to ensure that + I am following the instructions correctly and efficiently.\",\n \"speak\": + \"I will use the read_file command to read the instructions_1.txt file and determine + the next command to use.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": + {\n \"filename\": \"instructions_1.txt\"\n }\n }\n}"}, + {"role": "system", "content": "Command read_file returned: This task_id is 2314\nRead + the file instructions_2.txt"}, {"role": "user", "content": "Determine which + next command to use, and respond using the format specified above:"}, {"role": + "assistant", "content": "{\n \"thoughts\": {\n \"text\": \"I need + to read the instructions_2.txt file to determine the next command to use.\",\n \"reasoning\": + \"The instructions_2.txt file contains the instructions that I need to follow.\",\n \"plan\": + \"- Use the read_file command to read the instructions_2.txt file.\\n- Analyze + the instructions to determine the next command to use.\",\n \"criticism\": + \"I need to ensure that I am following the instructions correctly and efficiently.\",\n \"speak\": + \"I will use the read_file command to read the instructions_2.txt file and determine + the next command to use.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": + {\n \"filename\": \"instructions_2.txt\"\n }\n }\n}"}, + {"role": "system", "content": "Command read_file returned: Read the file instructions_3.txt"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}], "temperature": 0, "max_tokens": 2801}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '5473' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA5ySy27bMBBF9/0KYtaSYVf1o9oF6SbbIF0UVWHQ1EhiQg4VctTINfTvBS05Duyi + LbKdx73nDuYAuoQcVCNZ2dak65tHc387//lt3pkm0H3/rO7K5/bhS3e7W0ACbveIiqeNmXK2Ncja + ESSgPErGEvLFavNx/TlbLrMErCvRQA51y2k2W6bc+Z1L59k8inVB1gj5AVrvbMtbdk9IAfLFYr1J + 4Cx+bqw2CbBjac6l7NNqSEA1TisMkH8/gMVwEvbOIOQgQ9CBJXHEdMRIMcKhICGEKIAb19UNhwJy + MRWnBvYciwXcCUIsBTvhUZaCGxSaAvtORcCwzWbcs6i0wThTIqO3mvA4SNizUM5aSUeFLuCsgOSt + 
kUcZHGmqR7eHv8hHfqkpXDEIbiSLM2jljHEvl06tkTSapOJrGAFjpO0k/kr5r5yzoqBU3JA0+1/4 + B5h3XEF5zVrpYC9vjhQ6j6eA0k7ZNNXXxsp5j4rNXkQjrCqtNBKb/aVbaFE+nZxetDER6f33ONr9 + T+SRYUhO3zcNXD0fSYsj3ivPRQLp68ufHRtx9rx+DXyCOIJMPAUNMCRQadKh2Y4PCTkEdi0koKnE + HvL58GP48BsAAP//AwC/PogcNAQAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bf9aa9d7a3f6428-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 29 Apr 2023 18:39:23 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '9467' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '85995' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.67s + x-request-id: + - 10ccfb983a68d5f1e7b284af0cf6d8f2 + status: + code: 200 + message: OK +- request: + body: '{"input": ["[{''role'': ''assistant'', ''content'': ''{\\n \"thoughts\": + {\\n \"text\": \"I need to read the instructions_2.txt file to determine + the next command to use.\",\\n \"reasoning\": \"The instructions_2.txt + file contains the instructions that I need to follow.\",\\n \"plan\": + \"- Use the read_file command to read the instructions_2.txt file.\\\\n- Analyze + the instructions to determine the next command to use.\",\\n \"criticism\": + \"I need to ensure that I am following the instructions correctly and efficiently.\",\\n \"speak\": + \"I will use the read_file command to read the instructions_2.txt file and determine + the next command to use.\"\\n },\\n \"command\": {\\n \"name\": + \"read_file\",\\n \"args\": {\\n \"filename\": \"instructions_2.txt\"\\n }\\n }\\n}''}, + {''role'': ''assistant'', ''content'': ''{\\n \"thoughts\": {\\n \"text\": + \"I need to read the instructions_3.txt file to determine the next command to + use.\",\\n \"reasoning\": \"The instructions_3.txt file contains the + instructions that I need to follow.\",\\n \"plan\": \"- Use the read_file + command to read the instructions_3.txt file.\\\\n- Analyze the instructions + to determine the next command to use.\",\\n \"criticism\": \"I need to + ensure that I am following the instructions correctly and efficiently.\",\\n \"speak\": + \"I will use the read_file command to read the instructions_3.txt file and determine + the next command to use.\"\\n },\\n \"command\": {\\n \"name\": + \"read_file\",\\n \"args\": {\\n \"filename\": \"instructions_3.txt\"\\n }\\n }\\n}''}, + {''role'': ''system'', ''content'': ''Command read_file returned: Write the + task_id into the file output.txt\\nShutdown''}, {''role'': ''system'', ''content'': + ''Command read_file returned: Read the file instructions_3.txt''}, {''role'': + ''user'', ''content'': ''Determine which next command to use, and respond using + the format specified above:''}]"], "model": "text-embedding-ada-002", "encoding_format": + "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '2083' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + 
H4sIAAAAAAAAA1SaSRO6Spfm9++nuHG39hsiInm4OwYFZMhEQNSOjg5ARXBAhkwgK+q7d+C/ojpq + 4wIzQJMzPM8vz3/866+//q6z6pb3f//z19+vsuv//l/ztWvap3//89f//tdff/3113/8Pv/Hyts7 + u12v5af4Lf99WX6ut/Hvf/4S/vvK/1/0z19/Hw3JYRmNmDGePm2CkP/dEY/auTupWfFUFHF6U+W0 + fbUT32UybI5Lxuzp21cd2voFGtpOZbE9xpztskoGPU/feOqyrzGgs1gDyXOR6GTo0XAg1wEON3Yh + 2tn+ZJyEsYqq4iwzHxaK8V2cYgAmrA1mhTrPxk2tq3JenhyyX4V1xk/2pUabtfplp720dl803VMU + BYqC19JBa6f13bAB8Xak7GyLVceKRylvoN4SsnMdNB5WY6Jc9QUi6uNr8+F61xvFXrwoM2z48MGd + cglUOW5JclVXaDhvCwlt79sQK1v1w+l0egEAi9bE2q18d7qpbY7GfBcQbbf9ZpNx9Z9IeIklMfTd + YEyrdF2C5z9ChpefUzW863UM2HlV+BnVXTSuQa1RgwMNL15ciDo7DT3l8xpCtt+mH/cbk7qW7mp+ + xJDfVgZTl9cS3WIRM/0SnNHUklsAh0tlEeP7KTkvv0kBF1u4kODs2e7kLlIdShwvyPYOTz5ode1B + 5OkuIfV943aVfJMA7uWZ+S4so/GzYR3a7gPCzlnGo7r6pDF6hfmKHd/1vh1cYX9DD/3AmO47Tz5U + yqCjl9VaxA/5yXgRnoawyJ9fcnRTlg1rdX+DLLivmb+4my4bF/sConUdskA2Vhk/TzRFg/M1qSij + NuLeq3aQP6gndnXhHvFnX9pQgCwQ7XE7ImHPViIoL9Yxo3B0YwVJmcPb/074TLDv0vaFFmDmdkVS + 7V2gCWtVDdslNZhfXBcZ+96+NVDDO5BQ129tvztECbgH5BGVR4x35PER4XZ5nci23rdZLX7WNnSt + NRJHfooutZ/NBIFPz8w/FGE7tFl7hvMVh8R/tjqfXvezAMlrsWNbT7ghWtBLDLcb/7Jd6MSI78xp + gAutcxLrGonWn1x/AqKBiOszPbvj6VMl8MYmIlYOR3e0k0MOfKXuiTVGp5Z7RM+Vwd0Soklcz6aW + JCH63B87CifbjPgVCwMcN9KKkI8uuT2qhVLpDwuGy+Szazm6ZQsUNsqB7O7S1lh3UiApXl3VdJO6 + ajYcm/G9RNV1z8yocDiXxsJU4vrWku0j5NXo544Jp2wxUMmrt+7QT+gJx6VlMW233UejvUQdRLnH + 2f4U11UT6IkDfooj3Nl4iwTfvVDYxwIi8eCtsjHXdzZI8YJSMZAfaHqcqQPUPlfEKY11xtO92MDt + 8jlhaTd2EX8oVwfwqJ2ZBf4mmvNThuVG25Nwjkfe3GNHYfebTYxbrxmD+t47G+31+DKv8bZZ9/qO + Z2RYrydxz+UR9U+XxHI3ThbTzag3eCpOOgoqK6JgKKJBM+UgorsjbLAwxwM3LlvhFw+0PG/Nan1k + sQiitt7g9jgF2UBvsJCpPtrEpRFxWbmwGjiXL59hxS74UKIA4NXLEdHRsWk5wx9T8Ranluhm5BtT + hvoQsOg3tDmyBX8d34MA5140iLmpTT6eIM/R+xZVxA6eGWJC4oVIN98VI5ZkutOxuKiwcpsX2c35 + x596lkD6XA8Y2S7P2OX+EMAWfIfybMMzyhJ1AaeAyXhpvh7VHF83lMatzfQC61wY7uwG/LJ/MOx0 + XrTW6aBDGJx3eFM9KB+dVtnC3Q11Rm5LanASq2+wl/qaitHl3E7KJxXhUZeIClfybbs2q1IFGcGD + JdKxjIb38qgjiKOS7BSxQow3ugQneD6IsTzs0bgNahHuquERfa/WFRdbJsFLNzpml6Hr8jVDBWyX + gczce1Jl/CKEAIb1eTLrPEm8qcsyBascVbI9TXXL5nxTppL1uLbcyh2fkt4h0fu+iR/EajYa0q0E + ozIM5uH1CfG0VCUgXoDZSV477XDWhQRGkyZYPr+Tdrw0Toq09u3TX33hahcAityXjmV6Kw1u9MoC + 3DwaaKrdcTWi2E1BeTsrPK3Doh3kTgmQPnkNU7mnVNOtlUOgvtsxlQU71ClXPQQprl5UTtU+42vz + EMAvXqwx3aNVuQ48qJgyEDuY1Gzo94dE2aeGT8zQFIy+OzUNzP2Ivt9Pm3/dk5jDCL1GtHRVtlPF + FjGEzqnA45owY4zL3kNH46oxc3DfBiMBy0E8SVesPNuScxJ88l+9JjtR2vC2k84SiGHnku3S37ur + cfJS2IwZZrq+D/hcbwAd7k6JxdB/Z9PrHgjKZjdaLF59Rbd6tsoTDYUU4eO9WVasXAoYcWG/JqYo + bFxambEJK1WU8NQsbGNcqOsA5FBdEu2srdzavQcJaK27JfjSF1nv0diBz9F5YLn9OJGogLuQi8M9 + wYqITy2/OEOnVNtlxvz45ESry/EQQq1ONfGNzTGb40tG+joF2h0pqaZoqkzlqgMih/ApuP32dQDl + 29cBMW5vjPg7C7aKo0oucb1c4+1nKkLFCMWM6WStGnwzvNXf/hEsLZ7V0O78DvA7d8k1UaKsX1eL + VMZovSP7Tf6c88/JYX1JPmz7zQkStZSWSA91nan4JrXck54UVok/sd9+TwavAvDjt0SR4gd8ksQs + kXeFFRMyGZ7BH9EkQZ3sS2Law6tlltUH8Cq/V+aH8jcrysBx0NbwW+bm39qYtl5tQtL0mBgXTzI6 + A04BPKidMNdnZjQeHjZGbt5t/vTPITmaKbgfRshefHWIPc+jCiFZIDxYNHKnaK8KEKnjkQRdfs6G + Sr7Jcv04X/CmxE7W0VXagL1U12TvPY1IaCfNhLn/0G/qFhHfTdpzvfQFl6XK2UBT8nAL8AybErvf + pe0I2pgox+XOouPrceecoTIH/bAfyKG77/kQ4CoE2x+PzCFm247f+GAq5eUYM9/sK+M1msob5PxS + McsiBhqOiXqTTypkWJq+frV6v4YamVmfMptNRzRdQ6VAVeCF7LR5vo1Zv+rA5NuNntJI4+Puvc2B + fvWYudQD3nNX8xRz/daZUzr7djVO21QubwtGtktT5Px4XtoQHQTCUp9bBgNvL0E3Dhbx1G1d/dED + JzdZ0v4qtO7zMjRYvo8hZVvykI3pcHknsGfrgD5XX9Hg68xJ5bXdiSw9v5Nqim1zQptROLHb3dFc + trsGuULW8hGPpIesm+8HR0N2KDfOcdVf1NpGu+Yq/dmPbrh/bgAxbZmhe3o2WksNK+Mptpj2PPYt + /wyog5V5i/BqLNuIzvEKXVvIzFP0Kx8yS84RNfCB7Qshapl+TmMQtmPNtDmfusEREwW39ZWctzeL + dy9YLWS47wj+npt1xu6EBajPh5xa9/Jm8FKrRXDckJG9dz0Y480MJmXOb6LeVsgY00aQkFsWCvGd + 
oTeY3hs3UBBIuK0VtRUKvzUhsLcK8wUUGRM+7xNYTOeeeFOiVmKQFinKD+zCPAepxpSFhg5a0yOm + +uE7++ljOEbtl4q7jY8mFOg1BNUuYn5NOzRE0SMF+8xbZirtwMeuiwZ0ek83tnVvgTH1i6GD+Bwa + DIdi3f78ljzXOwqzPhnqXgc09ydGokVqDFUgNCh0pi8d0vaG6Pz7lKk+2Qwf74h3/PDVIVfeKrM2 + T9P9/X/QR/5g7mknoHHzPuagxmVNPGU/GENQ5SkSS5mTfa9dkGA2S4psnx/xEtduK6iyHMhzfpBj + Eo/GBO86hobmX3K4ZC6ahnOhwwL4gbZ3Mhr9U9Ipeh7qnNhsWqFxR94TrA75l6kh+7gf/502yIHi + QE4WqRAXlrQE23ZlYtmvqZrfbyjrh+eGYWsltIPvHqiyAsmim5WbuNw/PEqka+mBbRvxXfHuwT1l + n94SXByOQ/T1d126Nmy9J/j6ldyxdHIRljyjWPb5x5ge/SmEdEV15h9HzRUKaSOBe7Ju9JefP72C + 1KUl4LYWacSf543+x596E8HZVK/egJCtNXhD40U2vp6eCRJbGsw8VTGnqLIwqBcqY/m0P7a/94+2 + dzMklqXds/F+rRcoKZKGzno/40KzOitbQU6IPfurKVkJtaJeOpnsd+9Nxja2lKDOdFS6bvIl4sJp + EKBz0J54izyu+tFcvdGY3Fd0PfU04t/bt4HGJlfmzv6dfaYigNXh9iXb/n5BfO7fSN0mKouuURPR + X7w72fXEsG1WWZ18lyXap3nCbi8eR99GDXP4+eXAzGg1qkUkQGwJMnE2zj3rX5KCYRxRTBx6092u + n/gT9IS7GBVdaEyz30GLZInnfm9m3BF3NVz9oSbbRjQrgTquCOdeMIh/2u4qJo3FFj0s5ODPvVy4 + bbxTt3Cl6YXsxmzjDr576TazPiW2YDzbpl5RQCszjzA0u8mtLwYV0Zx/RC2hjwYlu3ZIaxiii8nq + 0LiVjS2Q9dXDaPGxqmnoD45M9eMSr797FI2y0tdg+HhJh0l1oyF4qQtltQ464hLdQatpH9egqTYj + bhO83JGl4KFRK7aU3/qHy/U2keWZd5Dt7Oen5VEd/uh7/44ffHy6JEGbUTxhfgzUaLXYnlOIrctI + 7Oiwdr/LnSOhuV4Tsscxmi4bjMFxA0b8QzFVs9/OkanZmF3ehzgaN4dnDNRSUuLz1zbqS2+fwiJR + MHFW+NOy2T9DuHMxXjPF5lOzkd9/9N5OU2j10wPK0TAkujDLwOWe1HXomRvqr16j4QAH5w8P0VbQ + tEPyXRZw3MgrOrl9hcZxOIlo9p/sBnKeDa5lmj/ewOLH5cX5eMA2rPlGYISqpTvmxtgp836zPaMr + d7q89w3c3+GZTutQbblTaVv0Cm8rKg6uaYypljYgnuQr2y33dcSPu+kNxUG64/ykxcYoem/80/Ns + V++atr2TTwBx2NREww/HYMHiGKNhNBHdHOSRdw1rVNQ8t7vf+oq+LPwGFtcZXk1QGoMRKqrc+ILE + jL7x+JQeghKG4uqwfXbbcvFA7zd0WWQIrz6IGp1MvwKgus3wwIIX6oWiE2Dy44lZ76J2B+XqBNAV + zGemPezaKVMOAqTseyXYecnu2Gvrm9ws2sOf+w0zn1KKbC3TWS8ZfUyKRjHHxw134cIxplX+NCHg + /ZZsr/Y6G7Jb4MDmnUp//Op3sT2fARnhg3nd6lTxY9p2YB5ikVzbzHZFJVBsoBsPSKc3YzvS3fkJ + RdLWxHN61A5Txbd/6st+FdrZ8Kq3ImRuucNPEdKKpaKsQq48VSouFquMiec2BrgXZ+KcAgsJUnPD + oO2OGjO+RdDyWV/AEQeULpafdcuO56UDSnu0yD2l93a40FWDskeyZ4aQhtHgi3UBctkMjOSXncFP + vM7l9zK54qWLArerNXDQo9wUbKvJV2Oq/TFQjJocGU42YTUFZy4qV72asMzGdzv61nkBzdPc4cfc + r7p22psg3aeMWeRSGL1a2il4m/2FjgQVfJ2vn8WvHmNRwoE7Na/9Deb+iuVzb6PmXbgxXBcfjTlr + pHMxC243ZMIQEyytp2haRrEHsx6jSmMbraCh5wDNRhPJrml5Rd/iV4drrwQ/f9ey2e/D0s9+fhkb + 62vVS+gm+zZx74kRienhXMIwbhFehKJdTdL+tECBhBK2F18eWnXUjoHy1wMvr92+5XZRqtD44qzX + 7LEd08SYYOaldEin0S0lgU6ywLiCV4po8NGoXimQw3dPRTfB7XpjD7Gy25kLsrOFd8QfzfmNDFvt + mRecNXcch7uIhtc3J/v32ayGQhqlP35S3Dzitn2dxIV8CnqZ6b4RVrSVexNOu/7J1GspVrR98QUY + tX8ke0nvUWM4mq4EVitg9Fgm7bj7bHSQWCUzf3L7lln7i4ggjU3McShG/H6IRKXOjgLZCWHAB9nI + dPj5ddwGK2P0BrVDKu6exF/c3wa7ocGGcdzE5NffW9s+TVDiwSLH8rHOBsW+2JAnqKXr7P2e+dKi + gZl3s7R6qhmHgusogkvGvPactOxavWRQN5uSzHza5VPU4429zFXiH8eHu/p+jjXQrxqz/XPBOLOs + V4CE5tVSubpiV/rx1p9eWVze2F1f7l8BksDRyf7A7xXrxGcCl2XgEzM96S3XNZj1pBXRZThZaHW3 + 3EAOdLllRneyo9XyAjo63O2SjnpsIPZY7jvYGqQldla+Wv7dS4U0TskVS5JJq15wiCNb3oPTzVo6 + RP0XzAn4bQIqo7Vc9d52lCDPHybRV9KX8/elKpS1f11iGuiZW08VMjfa7qT94a3jwWkKMGGKiWfj + 5493ilDt0ifTn07adq0pBzDpO6AvcekZo1BeUpBezzsxHtd3O0idPqCnWZszb7f4UDztAU6iYrL9 + 5jhF/LsfSjj3Z5lYg/WKpn3QOaDG1p0Gh2OQTe63ltHsB4k/+/lJluQzrA0rY9qLPaPx1w+poDZE + bxa10Xm3JABlB/6vXvEpeRjlf/HvYfhU4zfsCnnhnTW6Wu7taLV+NbBJHOuKP9JBq4b8mphw3Nw7 + vPGeVdbmD1P86W2sKDsSrcPODdHD2jhs+zQf2bT8jjLci0Yhcz1u5/pe/Dnf2MKncKd6Y//hZeTH + IwfE8gTYM5PJH317ULWFMvcfosqOVQmuPdWyUkBFu3tYVgNHboA+d8BEZ9rkjvdrAYp/GR1mpR5r + +ZY1BZrrJ9lOm6Ca2k9dIn0t7IixtvzqDw883PoL89t4hcSZbyo//fZK4tH98abfeuZ4lyCb9YCt + CIF+Z6YVbathkp4e+PfzC1dUGTP206dLQ78x9dWFnL23UwqPerHF0muvo8mUvuc/+UcuWeaOhKcB + HJZDxDSCVNSZXRfAN1EQU9ONEQ0/3uCM3ZOo1zKZ9T7C6KFHDCvX/szpTW1vv3gj5nL3aSdTBBH8 + 
1ItI0tCdKzwqk8LMZ5irZT7nuvH1EMmNnqIreO0AoyorG9MLKKpFHFVLn8qykAoe2zrbSzbln3cD + 6r4MqeAJNz756mgrq5O5JQa0BZ/OInNQngPB9cyjKcLJXO/Yk6k8InwdG1kBWYFU4jrLAk0yOTtw + uRgahkO7q8aPogpIPpEDsZr8jkbneEpRptIzhoJV7SDpRYl2Kb2wrYcOxujooaicssfnxy+qxlc3 + NurL/YmKn3Ufsc49Fmj3it64mf0ORRXByD+qGkvca8H51rQD0HaDg9ncL6hZSSE4zupN9ufx2zbc + sme97yFm9UHf/sm3n34uZl4jvLtzDmghf5gVHddoVEv1jELnWLA5/oxJCR8UuqL3sVwliPeWEtew + 0gKN3D/7JhuM5XAGb+Ne8Drc79pR/hD4nXcwktJ7NeEkn9DMg8l2fHcuCxbXGO4FEehmusnVzx9B + Xh4dgud+uBJW1wXM53fMm/eTlwvSwMUWL1SKzt+oq8DIIXxjm3hKUEaj+Bxy1Ny0gsz9pZrPK96g + Z8qV3ehWR/wR4gS2+5AwVVpXLsVaNeduiMnMl42hkhMJzbwfo+riR0PhV+YffunuX44xnmQmIyw2 + nGjGx476JjqpaGipSrn7rX76UgczsZbEZW9e/eEZg2sSRlxB4WxagI5UOWmZmTUf98dv4GTaPRWz + 5mOw81UJ4FM8VaKft7P/LXAD5Wa5Iyq6di7/8nSAqYeKnHar3uUbyxjgKHQVpZc3Nca19Clgdyws + 5lw2aD4PDFU06xNmyqjN6FlWw18/J9aOpqj/hs8C1gv2wKvT59hOxQk1QBfCaebndjVlykWU++ld + ME34vt2hKHMM/OI+mKEtFDSIn6UDnuFQurHUwOgOn0IFM4EDm3k1H46JnYPmLHLifDUtExG65Oh9 + sQ54KJJnNk1swEqfTznRjfwYdfQGACfYn0kyx3svbh5PZdbjRPMtHg2s+JbwdIUzRUpsuFO5e7zh + mywRFaPCQcILVoDez9j+c/7S7NEE6HsSfbyp0rwd1u05UPSDO9DK/Vbu+H5JDZr1I/O1qIv6TvPg + j/5XgrjIJtmv3pC1tCfOT28a1esM7Z26M2+J0fjEdS5vHvjNjNq88EHJjhQ5I33SSQ8qg4l9nv94 + Hl7Qbcm5j9wJ/EE/ET8+NdFPf8oefXvMdZYqWpXFVKD+tNfwatN9WuaWlg749dmwvXcdje9D2Nib + wpT3M2/MOZ/1B5SbCrBYa3suajslhR+vzKiXI36HpkYzX6T9/LyZNy2Uv39TAf/5r7/++j+/CYN3 + fb295sGA/jb2//7vUYF/p9f034Ig/puJfyYRaJcWt7//+a8hhL+/bf3+9v+3r5+3T/f3P39JCP7M + G/zd1336+h9f/Gt+3n/+6/8BAAD//wMAUfv/wOUgAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bf9aae9deb86428-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 29 Apr 2023 18:39:25 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '214' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - 301bbf6b8f5f18d655731e55edb09ca9 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: + \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": + \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", + \"text\": \"\"\n6. 
delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. + get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: + Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. + list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT + Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: + Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past:\n[]\n\n"}, {"role": "user", "content": + "Determine which next command to use, and respond using the format specified + above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I need to read the instructions_1.txt file to determine the next command to + use.\",\n \"reasoning\": \"The instructions_1.txt file contains the instructions + that I need to follow.\",\n \"plan\": \"- Use the read_file command to + read the instructions_1.txt file.\\n- Analyze the instructions to determine + the next command to use.\",\n \"criticism\": \"I need to ensure that + I am following the instructions correctly and efficiently.\",\n \"speak\": + \"I will use the read_file command to read the instructions_1.txt file and determine + the next command to use.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": + {\n \"filename\": \"instructions_1.txt\"\n }\n }\n}"}, + {"role": "system", "content": "Command read_file returned: This task_id is 2314\nRead + the file instructions_2.txt"}, {"role": "user", "content": "Determine which + next command to use, and respond using the format specified above:"}, {"role": + "assistant", "content": "{\n \"thoughts\": {\n \"text\": \"I need + to read the instructions_2.txt file to determine the next command to use.\",\n \"reasoning\": + \"The instructions_2.txt file contains the instructions that I need to follow.\",\n \"plan\": + \"- Use the read_file command to read the instructions_2.txt file.\\n- Analyze + the instructions to determine the next command to use.\",\n \"criticism\": + \"I need to ensure that I am following the instructions correctly and efficiently.\",\n \"speak\": + \"I will use the read_file command to read the instructions_2.txt file and determine + the next command 
to use.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": + {\n \"filename\": \"instructions_2.txt\"\n }\n }\n}"}, + {"role": "system", "content": "Command read_file returned: Read the file instructions_3.txt"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": + {\n \"text\": \"I need to read the instructions_3.txt file to determine + the next command to use.\",\n \"reasoning\": \"The instructions_3.txt + file contains the instructions that I need to follow.\",\n \"plan\": + \"- Use the read_file command to read the instructions_3.txt file.\\n- Analyze + the instructions to determine the next command to use.\",\n \"criticism\": + \"I need to ensure that I am following the instructions correctly and efficiently.\",\n \"speak\": + \"I will use the read_file command to read the instructions_3.txt file and determine + the next command to use.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": + {\n \"filename\": \"instructions_3.txt\"\n }\n }\n}"}, + {"role": "system", "content": "Command read_file returned: Write the task_id + into the file output.txt\nShutdown"}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}], + "temperature": 0, "max_tokens": 2577}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '6526' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA6yTy27bMBBF9/0KYta0YVu1YmtXoAWSVRfpA0UVCAw1kRjzIZCj2oWhfy9ESVDs + oIsA2d4hDw+HnDOoEjKQtSBpGr24+fSs8Vf1o/ZSlH9uDz8PX77ep7vP97ep2AAH9/iMksYdS+lM + o5GUs8BBehSEJWTrdLe52SfbNOVgXIkaMqgaWiTL7YJa/+gWq2S1Bg5tEBVCdobGO9NQQe6ANkC2 + TvYJhxk+F/ZbDuRI6Dna7nYdB1k7JTFA9vsMBsME9k4jZCBCUIGEpV7TWULbX+GcW8YYy4Fq11Y1 + hRwyNoZjAU/UhzncMYtYMnLs6BUhoxoZiXAoVMmUJReDJ6WRuZaalpZ0IiZs2eeWhbql0h3tMgf+ + ku9RBGeVrYZDvtXIlA3kW9lfOxRJxETslGPJDL63R6OFHRQW7HsYqJFfkCsiTTpjIuYtBy/z3M7A + uHp81Avg/6ykV6SkCub6CdCG1vdQQeyOCROVlK3iMdJ5j5Jey71oSPQcujIheonI6E0miP577RQa + FIfJ56i0Zu379Wt+qPYNPRv0Oj795nHVq89shcHB/EL16oLCV9dzMBT6tTNilr7Yfz02m2T9cTKM + lqNsbjvoODwpq0JdDGMAGQRyDXBQtsQTZKvuofvwDwAA//8DAIII0sehBAAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bf9aaeba83c6428-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 29 Apr 2023 18:39:36 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '10520' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '85990' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.672s + x-request-id: + - 40187af4864698ab7cafb098969e7421 + status: + code: 200 + message: OK +- request: + body: '{"input": ["[{''role'': ''system'', ''content'': ''Command read_file returned: + Write the task_id into the file output.txt\\nShutdown''}, {''role'': ''assistant'', + ''content'': ''{\\n \"thoughts\": {\\n \"text\": \"I need to 
read + the instructions_3.txt file to determine the next command to use.\",\\n \"reasoning\": + \"The instructions_3.txt file contains the instructions that I need to follow.\",\\n \"plan\": + \"- Use the read_file command to read the instructions_3.txt file.\\\\n- Analyze + the instructions to determine the next command to use.\",\\n \"criticism\": + \"I need to ensure that I am following the instructions correctly and efficiently.\",\\n \"speak\": + \"I will use the read_file command to read the instructions_3.txt file and determine + the next command to use.\"\\n },\\n \"command\": {\\n \"name\": + \"read_file\",\\n \"args\": {\\n \"filename\": \"instructions_3.txt\"\\n }\\n }\\n}''}, + {''role'': ''system'', ''content'': ''Command write_to_file returned: File written + to successfully.''}, {''role'': ''user'', ''content'': ''Determine which next + command to use, and respond using the format specified above:''}, {''role'': + ''assistant'', ''content'': ''{\\n \"thoughts\": {\\n \"text\": \"I + need to write the task_id into the file output.txt and then shutdown.\",\\n \"reasoning\": + \"The instructions_3.txt file instructed me to write the task_id into the file + output.txt and then shutdown.\",\\n \"plan\": \"- Use the write_to_file + command to write the task_id into the file output.txt.\\\\n- Use the task_complete + command to shutdown.\",\\n \"criticism\": \"I need to ensure that I am + writing the correct task_id into the output.txt file and that I am shutting + down correctly.\",\\n \"speak\": \"I will use the write_to_file command + to write the task_id into the file output.txt and then use the task_complete + command to shutdown.\"\\n },\\n \"command\": {\\n \"name\": \"write_to_file\",\\n \"args\": + {\\n \"filename\": \"output.txt\",\\n \"text\": \"2314\"\\n }\\n }\\n}''}]"], + "model": "text-embedding-ada-002", "encoding_format": "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '2194' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1R6Xc+yPrfn+fMp7tynzBMRkC7+Z7yrgK0KKkwmE1AuBEXkpQW6s7/7RK+dPdkn + JtiaSsv6vbH+419//vxtsiq/Dn//+fP3WfbD3//1+e6WDunff/7873/9+fPnz398P//HzLzO8tut + fBXf6d/B8nXLp7///BH/+5v/P+mfP3/nS7xmcR/tqhmFogr5at8Qa1G90Tw+dxGEk/qkC0U/ZfNg + dh5oSscYWfQ2HwONK1Df0YNhrTeqHrcbAdU2jGQTNzdr2nq2DXNyaElQRGo2aegyQnqsrmSdVMtu + FkMoYIn6ijn5jMJu47gP6G6vkrnHZol4izY2Ul7plRDfkbpJXj9SFE7Kk10Vj2f9eqIzOggyoVKa + U59mZhvA835c0IrmT/+xQlGAkkhwiRPnHhpJX7na0xYGYijNKxxHZaJadDi6TBfmIJylc97AIrZC + ojuDky2z7GwiL8cnPG5d1Wpjs9RhFLWIuOdZyIbxuQX1/RCAuCdy72Y3nG2kPrQlMdWmrab41c6w + sihl60f05jSXTjHQfhFQViJWjWHf7ZFadE+snpy6YjmdH9rPeXelktuc/HYxZSZyauzg7E6OvG9v + Won65/PF3Edz8efCCQpARdcQu59jPh/P0YgOZSGQHPWGP/7cQIBcFGJCHmi0eB0uNgg7D4Oef4jB + Z+W566EfzTPb2s0i5EJmeGqW7zD7SaeXXxyn1xlNVh+yH0mPEF8ivUVdRQ0soN6w5uRVb9A1QQYh + R0TR++fVebCs+pJkum4jcaGoOfBgkTGyn1w+4Zscg3C9FGwnE7Wb/Jh7MO/1hphZZVfiPju68Lqj + F7ObSLdGlF0j9am3N7K9VddMepxv/eq6qxkzXq7AZ64bMXS3Z4lH5F67Jve2EpRKcyKxmkvZfCzf + VJV+4ogFC3LpxmO73cDhrdzIpfaMrn3GC0kdYvOCX69ByOoglkqwMdbJ5vP7/ja9FJCuF4fYVWJ1 + fAqSEhZlbrLN3dE7TkO5gCPfVmQ3SE41rc7hFergTNhGdoyOS4UtwgZLlLk/Q9qNHcquEL5+JOKs + hphL21DdozPajlSopMbn4+FUI62iGvF9MnXTpa9FcPT1C89Ffvdr5+nb0O9eTyzsCELDECoz2su3 + gQqJ9OLTiytXILfaIV7nrDuaP+1UU+LuRpd6MyEe3d6mesO7iPjc8UN5TydRA/uUUNVIzv68LIx8 + kYCwZk6T+9WoEpKCnUslsfOG8fHUbs/w2S+2PecyopKzayC6/KyY+TCEsO8JbiAy72u2oZ6Tjf5E + 
BaTABVNRiXQkPThqIXw+QhIvyRtNxvr9AFS8G8qvTd/Nqa67Gk5LTle7CnVcLZmHQkMt6SBLTz4h + sylh2L0aKsvDwuIOXR0hm1Y6+eFOzMcf/WJqvXiuiSkaC4tFQawjtjFvbJejBg2uMotqPrYbYm6H + 3m+rQi9ACvsns/Sms6ZGOj7QxityhgHtOKdIP8KsmG86G1XVTRtpr8C68zQqy8QNxUH/mdFj36RU + vM6xP3/qWxUsNyfbTUT8diqMI2TPmlJhIb0rfpquG3QT4UR8RX+h8a4tTU3u6JsYxUxDapTNDKzQ + f5gVAFiD3VojevfnjODGW4WDI+17OJxlTIKTM2TDKB0FiNMqYngrvfy+WzdXcNc/W+Luknc2uixo + 4ed97Mhu9KSMrl4vETZemeOfwYm7aeiZgOqmvWM+zic+yU/zjAxlfWVEeemVlEk3DPMU9my7cJxQ + 9guzRkvlHX3qDSp+KvwNnJA/0GE/FdZ4bQ0Kz2l1Z/6Iqs/6dgtvRfeZZc6hP5r6qdQYRCO7uPkY + jnv9vEeVHTlkfzSk7Gmj6wggzVustnrV0fHpnEGExZ1sUKWG8/VFIzDVImD5jmRo3qyHCN4PACwX + TepPWXbWv/XF1l1C/X54NSXgzj9hrfCWXZ8xZ9T8Y/Em/kDCbsLnJFCVwtTw/DCErBv1oIbjWT4w + V2wuaHwQdw/XY1UyHE8VGkNNPIIWaQKGHVn6NDjsIwTi5YqVo2ehpcanGgTEDPoZtyYnu6qwGt8l + nrZVUY2Zt23B6pOZuPdq7n75LbirO+Z4zdEaJUIwXN77PUvqPO7GKXio4O3XKyqsSO7P3muIEXos + FLLRHNOXFk9sI357aViWSZ0NGvqZQRm7H7Jp81PV7aSLBPPtqf7+P/bC3EaxQBy8xNEdzYL0Y4Pn + b13cqvojHKegV1D3XolsPefHrvu53R/a55p4TpVX9HU+KUAOypkECyJXnTWFASwK40Sse6Jnoibt + z9qQIJUFZ28K6fkQq6svX0zRvOt4HexBQ3vDZbFFXj4jDgbUDK+Ejlwm1fwqdAqXgzoRt8hxx+Pb + GwOx/AuOO0fnvapJEez59kZMyM1q5NpSAAOynujMRbzJPUOC5WiKxPAau5KzII7VRWaXxJSr2ucX + OgfaB18ZXkjbSiz1H4zo9pARD+umz9eFmYJf+T5Vuiqpxjioas2di5TEpWdXPAv2sZbuHm+ylomb + TY8YrnAThRMheHKzMVfGWZuMQ/3ht7TiZ2cdwYH7BV5GURlyy+z30Oz1NclQcs1Y0ycmkh6XNbHG + ecm5eGt7WFquzkw04E6WDzdBffTQ/eqhcRkMLlp4V51ePutzuaSgknNQ03ttpHzM9FxUDbXYkr3Z + 6HxuC10BbbQIwaH0CvutflOgyJcL5i+c3mJKkKho15YjC/xEs7jWpx/+piXZ3ZzJb7rWu0I/6me2 + m3VszVBsJUT1BpHjxrgiWoa8hjgAg0RrOUUjIbsNyt6qgCe9eWT8OkWlKuBxRxK7+cm4sB094JvO + p3PUNGioFDgD37x9YsaGVo0aWdcrf+8AfuLI4F8+AjXrZ3ZdSCWaf0IUg1l5A/7wHZo6lEuQAKyZ + Pc+JNdrFqUavjWmSWHNKa5peDxfuy5fFSCZ1Ps91q9Xkrn+ztecV1kDa4AwTSU2q/JCmm9atL6Cx + 6w/MJdUhlO993Kof/qNXqnvZzA/HGD3EyGd7MjgdO/d7F3IRYtwdo7M1O9LVRsbGSais5Nzq2bpo + tI/+YjZP7ExaOz5WNzQgJAmdyuILCns4XW7AnNtwCceK4Q3sMM6J7+U3PknxMkabsizoeHJV1Bxj + xUPSTxqxbZDL1TxK+Rmt4HLAU9A4lvwoLBV+rpCw6z5aVezMpSM0UXRh6Su5IL4MFwoY8YCIAcbV + p92LuXDGATCSexmnwSGOkJnxHdUG6dnRu67nX33Fts9E7JhDp6OmRqeQ2UPVd/NHv8A221ofva9Z + w6osUu2r592umvmHXwDZ1XRihlXp/qzz1QiDvdjQlUwGnzVljbW5j1bkLM0Lztog8VTW9ZjhGgXd + CCiKoXTJk8ppk1TjFR0pHI+VQUyjsip+MgcM4/ASib2Sb9V8dD6acffoiX9x3pzqfJqBOA8bo4uz + RfINqyXUj+jANq5nWVNyWwgIbd41MYg8Wfyt2xjK6SgxQ5Z5OESe3QKeiyXbBM01GyO2i4HU8gar + tm6g8ZmFAeBqkpjRwBmNt+mlQjuFh0893ay5DFGNmr25Zp/9tiYc1DF8r7/8OCfn2wbtlw+PbXAe + +ZxoRa/ei+bATvmchaOtoUK1Lz8WLjcG8F99UO7biNl+5VXM5ssz3JrWYn7q0fDp3RYB2hRrzgIJ + 1d0843EPqu/uP+cN1nhAqfe7PyaGuJMffSyh3V3ZU3CcoJP3t0KE+7TnJHW9N+dHvAKo7vsVuSk6 + 7fh0K8vVcI1eePmUF5yVt/eM1Mo94kEjyB+LdqvCbIRv9j0PNup2DeMrNsjN9iJ/6tedBNU1ckny + iPbVBK9Hg2gQtSxwnL6bXY8cIXmrCmbvvkKfektRc96pzDnmXti/gkekiet0h+dX9QyHIzqVKB9q + hayj6O2PFkpSSA1FpbTri5BHoYQhzbbALC0xkBgxJ4YqOcp4ZXtuJZrMviIa6ynWAr0Px2ffY3hc + oy1J8mj0+bXwCxTmAWf2TkbZHEpXCa45OTBDgAsffHTy4LQ6ejgTo9ifrOApwZbi468+4/AeTbBx + oDPzZ9j54/Umjtr5rlJiXucpHKvgsdEcn5+pHhtn1Md63qL38pUzPW/MTF5hUQef84F4hW5lVKCK + iuSQNvRcz4v/wrcyOYrMfg5R2AfeRgGps23yfV750VlvkLd3Viz1ojlrBenHRTdbODKs6gf+0bMq + yHKcfPz/UA0kRhE6XX6AkHRaW6NPAkHdmQUj+KiTjqtaK4B8XbTEfMtnnx8dskEfPU2nfN7xz/6I + aOgXWzzOs99RMRQL2B+UhJhms66keIoFUApdI1uftPw7juKDKmIWI5KNZ7Ir0Ne/eadqkfEXRvYX + b8kFmqwbBm8H0N6eP5SLIGRfPwzKT5wST07iilfBfoTPfLowvTSbagUKVNdExKNoxGF3w3MBFskF + /JySNV9eHR20m/ZYsi13HhX1FCGFDZrKX78n1VNC1ea+aonDKoJaVTdGwOegYk7azGh+OAFVZ04T + spuIGs7puj6q0Fge2UxVak33drdXEacybXO95LOBLoDMvZMTY1mViDGtTZH8k14IOZGfbsoUJHzx + gXhSJFbjXs/3qpsWMYvtnPNP/YgQPuuQEIrGrP/6seX1ZBJfRVXH+8MpBURnG0s88a0ZK0INaSRs + sbiStWpM+6rRar0tvn4RjY6nl6i5o5at957eTaoUltrjet4yHyWQ9dOr6DXdKy3mTU7jU4mLD7Ro + 
uiWzRMg/58NdIJ0fM+ejbyZVygroooXJgo3edCPve4CfOwqocknGbMbnaw2Lx+VJkYn0bn5LJwG5 + udhgwfUuWf/xU7B18YHp5dzyMUHHEq73453KZvTiH7zFMCv6mzl5ZHb09aoVGA+HihidfOweW2aL + q04394zYU231RWsokG2PV7Z9O4+QT+GCgiJqjBgbWIZTdU4j5En4wrD4EesXz8jBXd+2bB1FW0sW + toqHNN0IyM5Er5Avp5MI1/v+jkU7rzN6mPYPrRaBU3gleTezsgogjxuXRSv51s176drA+a5QLN2S + 1p/usXiEs/O44nklS3z68XYYejGq6XLOfT5esmOuff2hHs9tNbxQ3iLxHs54Pg09Gi/KmEJdmAZz + aSNnDXrd9/A2Qp8Fdwdbv/5/emgXcjGjNVoG63YDV+0xE8qcpc/Hkj4QckeL/GjS4M+pvrHhslUZ + Mcv50I0Vcz20XKd7vOh1saIjVTYQ+H6A5d4z/d/8xO08FcsDcX1pmtIjfPOHmTZvNJzXZQ+OP52p + CtHS769BN0MmLEdykTwzbDUHm+h5epGPHiis6f3cPuB0qhNm32Sw2AqDjhxJemGhRzvrmyfCyrd3 + zHLzko/f57E+hCbzTVRUkyCFVLudapGYACjjfaZjeAXnI8Gov/vcdtwUsvsxZ5tLsg/76CamMClm + TbtznlfyUSsUbVHPR4Kb6dEN76dRg14WDgl09LDo6fUEMKKs+8035b5PMCS3uiZmCkk4Mm05wlS8 + 18w7OW63TNpA+eLpx0+UPo2meIblqItsJ6LW767ms0ZholbMCCudL6mzVdCzB0q2Sr7ik3dOTJj2 + +pN2GTmG37wV7eqgYNdI32aSWnYjfOd7SzJ2PdykK4BPFbbRqhTxtI+PMCQrlSIv1ziLQyFF9X31 + IGscbarxGlSz2lfUZjtfsjpxx3b6b56wDvI955/7Q7tc1pmXRtzvF4p6RY4kvohjDCfOAG11mIu3 + x6y7nPhsgbYYxfWypkpWxXzO40UOQ6xfcFEDD9mtj3OghXklVgNa9UrLIQdXX7eYB3D1x8DTVfh5 + QMZuHUnQvH49A9BY3NNF60HF1f4gaDuzZJhblYs+/kUFu0+Wn/xkzUW/1cdfvt1kTmFxBW1E1L9X + MvngWUXn8zkHdggL4uU56ibRwxHwaHGjT0fOsznX9aNmoc3E0pDQbig9J10dzkvMdke0seYs5AEy + BDEhThtZmeRIVxfFK2WJlY1XdfIDzwp8/CStpfkH9aUyCigwi54+19WuU0wWXKG5CjJehY7lyzcu + NZDt6p74HeFWT4Oefv0Klv3hiqhdFqDpZ+lEp9fg+UsuZd4KWPr+6EUrXK6DWkeR77+w8Ca3cHAI + ATi+lYKc1Zz9+uVvfohhozfVLBxSASXe/cJ22IvCueFIUvv74cKCPXqg/tWa+1+/Z62G2ZrvZXnV + lMz1sPzJR59PKY3VvfwzsCAhJ851vNK/+Ewsq6o53z7dFupVaLGNmp+zaf/U99qC2w+Cn9KmGknf + uWBkm4ZsJe/tz/TFjujU3n3mnOSFVX/0L0wQScypKmZNpmdREOqZ4E8eWPEm000N8V5m3t1ZZ+Mx + Hj0YrX5P/A26++MSRTrsD2ry69eGMhzxN1/AHz9Xtd+8fb2+BYSUiHSsCgUB8lxekd+8f81nTz1t + lTc1KFwsMdC4Cl+8ZfbkWiMjWELl7WXjlaAb4cTQxfyuT6e6ca0li5emalRe+8krs25eYiWHGC8f + xFlU+47vbk2DQgkv6V12fzr20WPf+yP4k9cOp4DOYFo8IMa5eYazO+1N7SGefbLNiF998pQIsggC + vFpVGh+Vc+wiiDREcEKu3SSHU6R98ju6MBH2R6dvSwVjfMcIe7tqhsKQ4A5CSSWL6OFcFXqpqXvL + YvjHETNpMYWm9hqbmulnP0fjeqpHtMDjnjl2dO/mXThftfmhpcy8zodwdLJDAQvuPhguJ9v/1WdR + W22o1npL/iYxOkOk1ReM7mRGwyilAJ/zw2rgtdl4bbc9SM37Qps6Gvw+zdIGiLW9sK2i360x1s/N + Fz+Is62oNdavxkNlBA/i1A3v3u9QqWFfFog4h0Hqvv7tVx9vjxHLpknRXLWSdt0nTw6qSTQbDwQ0 + GMxSZ8mn3/3Vs80Ti2/5yj985iE/mzosOpWXDfB6tMAj7cY8iE4Wlw83gOpYqcRgVRnytN8fwZak + B+0WTmBxjKcZvn7cps2+Wiqv/qoGfBqJP+uvbHxn2UePjTYJJOR2jHBNRX5btuSb106rsolRl4RH + vFj3dzSn8eKIPvXItm4uW8MmngVN7/iajj1w3vrl+4yqfXMheiVjPluKYK5eQXQkWEctGqRYS9F7 + r3vssveKaq5edQ+ffA0n1zz+4MVaADlz18S9RrU1yE8vgh+8S9naJw+fS1o7g1tNGXHMOUFiG/QF + hNujwUytqr/5nYB8Caf09clPxnv/MOEy1AW9lPmBz1Ox3YMQMv2Xj+c0lvfok88Qt0q8sChQqoBR + rDO8WjjPcK6mQwkfvsGj2RRobNbFXiuKFrONRdaWDNM5AOMsxkRvZj8U4SbkINQjYec0x9bYE9z+ + 3t+q9Tbhxw/ZUNc7kThasqlmT5FSVfXtPdEliKxp5pOpJXj5Yp/8xR/T7Nhqm3C6syD2JjSE67LU + hM79+eSFrjW8HKMAWU4Tyj/4OBSO4WknV04pspJVyFqku/AOBJUQhwi8nczqrAo+FfCi6qtwOgZ1 + Ddu3YpLLOZ8s+nDsHi3CwWJrfwA+PKU0haWtVcw4yFP3+3x99BedCiOtBjXWHuAdi5oEdZR+88dU + k7jrEjtuWDf9vLoNBInik+tWKrLZiDUBId8NsTjmTfXNcyGe9ifmJ4nacVI+FPjgB9vZnuhPB2eT + gnI4CMT4qe5dn8RjAR/8J36d/2Tz8XydwW6cJ3GGIbZ4HOwxIuzWMIMPLeKFOcyw9sqM4dlbdbwo + fApceWN8vzZP1FvtJgZHCFxGTE8NZ/31EGFbrKXvfqH5Hi8a9PMQMiw/ojdi7q04auWm/fB5pXC2 + 1b3iF+9vO2cMWVJYkRZwPhJ873VLNqXLEWq9KTCykiTrz9OrRJuQ36kqRWL31UNIsrWaebHnW0O6 + blW0eixmPOLG6OZ7LDea7hUWcXE+ZHx/ayT0eR9D1SXZV8O2r21g1wX51os/f/1zCcKTrO0m7vi1 + sAoA3doy75bI3ZD3LwwvOJ/Y5pbs+TyZXYTKPrLwPM9z1nBCdNgI0kS8UX9YkxprNWwKh5PtrBfV + PJ/zK3qfdxrbZZKdLf3W3iBdvsp4sZ9q/nBKmYKom5RZYfXqHh89i97ROWXWJrfC37zsiwc7mMxQ + Or0GAT75DruZaJ19358hT5Bk+n3fPCnKMtf+frsC/vNff/78n2+HQd3c8uenMWDIp+Hf/90q8O/0 + 
lv5bFKV/M+m3E4H2aZH//ee/mhD+vrumfg//d2ge+av/+8+flYh++w3+Ds2QPv/HwL8+6/3nv/4f + AAAA//8DAPp9nSnlIAAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bf9ab3e6de06428-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 29 Apr 2023 18:39:39 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '195' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - 6c805826f30e21e10011f6f0db0b02c6 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: + \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": + \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", + \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. + get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: + Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. + list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT + Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: + Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. 
Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past:\n[]\n\n"}, {"role": "user", "content": + "Determine which next command to use, and respond using the format specified + above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I need to read the instructions_1.txt file to determine the next command to + use.\",\n \"reasoning\": \"The instructions_1.txt file contains the instructions + that I need to follow.\",\n \"plan\": \"- Use the read_file command to + read the instructions_1.txt file.\\n- Analyze the instructions to determine + the next command to use.\",\n \"criticism\": \"I need to ensure that + I am following the instructions correctly and efficiently.\",\n \"speak\": + \"I will use the read_file command to read the instructions_1.txt file and determine + the next command to use.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": + {\n \"filename\": \"instructions_1.txt\"\n }\n }\n}"}, + {"role": "system", "content": "Command read_file returned: This task_id is 2314\nRead + the file instructions_2.txt"}, {"role": "user", "content": "Determine which + next command to use, and respond using the format specified above:"}, {"role": + "assistant", "content": "{\n \"thoughts\": {\n \"text\": \"I need + to read the instructions_2.txt file to determine the next command to use.\",\n \"reasoning\": + \"The instructions_2.txt file contains the instructions that I need to follow.\",\n \"plan\": + \"- Use the read_file command to read the instructions_2.txt file.\\n- Analyze + the instructions to determine the next command to use.\",\n \"criticism\": + \"I need to ensure that I am following the instructions correctly and efficiently.\",\n \"speak\": + \"I will use the read_file command to read the instructions_2.txt file and determine + the next command to use.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": + {\n \"filename\": \"instructions_2.txt\"\n }\n }\n}"}, + {"role": "system", "content": "Command read_file returned: Read the file instructions_3.txt"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": + {\n \"text\": \"I need to read the instructions_3.txt file to determine + the next command to use.\",\n \"reasoning\": \"The instructions_3.txt + file contains the instructions that I need to follow.\",\n \"plan\": + \"- Use the read_file command to read the instructions_3.txt file.\\n- Analyze + the instructions to determine the next command to use.\",\n \"criticism\": + \"I need to ensure that I am following the instructions correctly and efficiently.\",\n \"speak\": + \"I will use the read_file command to read the instructions_3.txt file and determine + the next command to use.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": + {\n \"filename\": 
\"instructions_3.txt\"\n }\n }\n}"}, + {"role": "system", "content": "Command read_file returned: Write the task_id + into the file output.txt\nShutdown"}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}, + {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I need to write the task_id into the file output.txt and then shutdown.\",\n \"reasoning\": + \"The instructions_3.txt file instructed me to write the task_id into the file + output.txt and then shutdown.\",\n \"plan\": \"- Use the write_to_file + command to write the task_id into the file output.txt.\\n- Use the task_complete + command to shutdown.\",\n \"criticism\": \"I need to ensure that I am + writing the correct task_id into the output.txt file and that I am shutting + down correctly.\",\n \"speak\": \"I will use the write_to_file command + to write the task_id into the file output.txt and then use the task_complete + command to shutdown.\"\n },\n \"command\": {\n \"name\": \"write_to_file\",\n \"args\": + {\n \"filename\": \"output.txt\",\n \"text\": \"2314\"\n }\n }\n}"}, + {"role": "system", "content": "Command write_to_file returned: File written + to successfully."}, {"role": "user", "content": "Determine which next command + to use, and respond using the format specified above:"}], "temperature": 0, + "max_tokens": 2331}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '7669' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA5SSP2/bMBDF936Kw82y4VRwHGsL2g4eOrUdiqowaOosMaaOKnmMHRj67oX+xY6D + Dh31SP3eu3s8oykwQ10p0XVjZ6vHJ+vDJ7f8oquH01d9OMT0+Rv/NH94/RkTdLsn0jL+MdeubiyJ + cYwJak9KqMDs7v7h42qdLlfrBGtXkMUMy0Zm6Xw5k+h3brZIF3eYYAyqJMzO2HhXN7IVdyAOHSBd + JHiBXw7SdYLiRNmLtLpftwnqyhlNAbNfZ6wpTGDvLGGGKgQTRLF0MR0LcTfCOWcAgBylcrGsJOSY + wSiOB3SSTsxxA5V6JghRawphH619gaM3IsQgFYGocNiaAgyL6wUXpYkyl5PA3liawwaYqABxEKoo + hTsysDvOc0yuHT2p4NhwOdh+v0JXKsCOiF99/+UFiotO9wTKE7CDffTdNxgO4qPudhq6JHtn7fsM + jVU82M/gR6DLfGMjBNrVdW9yGeYWor0Ro02op/1N0xOH6DuoEtiAqnuCGC6h34l23pMW+3ILDA2p + wwQ7Gmsh/ke2gdQmU+XjrXeNs6ppMHlDvcmifHn7WK7rGwCP1r5deP+C+gYnbPGarE83hsy5xTbB + vWETqu2AxAyDuAYTNFzQCbNF+7v98BcAAP//AwDI0C/ivgMAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bf9ab403fa96428-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 29 Apr 2023 18:39:47 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '8206' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '85989' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.674s + x-request-id: + - 62946a2415fd4014bc6556763cc6f992 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integration/challenges/memory/test_memory_challenge_a.py b/tests/integration/challenges/memory/test_memory_challenge_a.py new file mode 100644 index 00000000..16f8a6b7 --- /dev/null +++ 
b/tests/integration/challenges/memory/test_memory_challenge_a.py
@@ -0,0 +1,79 @@
+import pytest
+
+from autogpt.agent import Agent
+from autogpt.commands.file_operations import read_file, write_to_file
+from tests.integration.agent_utils import run_interaction_loop
+from tests.integration.challenges.utils import get_level_to_run
+from tests.utils import requires_api_key
+
+
+@pytest.mark.vcr
+@requires_api_key("OPENAI_API_KEY")
+def test_memory_challenge_a(
+    memory_management_agent: Agent, user_selected_level: int
+) -> None:
+    """
+    The agent reads a file containing a task_id. Then, it reads a series of other files.
+    After reading 'n' files, the agent must write the task_id into a new file.
+
+    Args:
+        memory_management_agent (Agent): The agent under test.
+        user_selected_level (int): The challenge level selected by the user.
+    """
+
+    level_currently_beaten = 3
+    max_level = 3
+    num_files = get_level_to_run(user_selected_level, level_currently_beaten, max_level)
+
+    task_id = "2314"
+    create_instructions_files(memory_management_agent, num_files, task_id)
+
+    try:
+        run_interaction_loop(memory_management_agent, 40)
+    # catch the SystemExit raised when the agent shuts down
+    except SystemExit:
+        file_path = str(memory_management_agent.workspace.get_path("output.txt"))
+        content = read_file(file_path)
+        assert task_id in content, f"Expected the file to contain {task_id}"
+
+
+def create_instructions_files(
+    memory_management_agent: Agent,
+    num_files: int,
+    task_id: str,
+    base_filename: str = "instructions_",
+) -> None:
+    """
+    Creates a series of instructions files for the memory challenge.
+    Args:
+        memory_management_agent (Agent): The agent whose workspace receives the files.
+        num_files (int): The number of instructions files to create.
+        task_id (str): The task_id the agent must remember.
+        base_filename (str, optional): The prefix used for the generated file names.
+    """
+    for i in range(1, num_files + 1):
+        content = generate_content(i, task_id, base_filename, num_files)
+        file_name = f"{base_filename}{i}.txt"
+        file_path = str(memory_management_agent.workspace.get_path(file_name))
+        write_to_file(file_path, content)
+
+
+def generate_content(
+    index: int, task_id: str, base_filename: str, num_files: int
+) -> str:
+    """
+    Args:
+        index (int): The 1-based index of the instructions file being generated.
+        task_id (str): The task_id the agent must remember.
+        base_filename (str): The prefix used for the generated file names.
+        num_files (int): The total number of instructions files.
+
+    Returns: str: The content of the instructions file at this index.
+    """
+    if index == 1:
+        return (
+            f"This task_id is {task_id}\nRead the file {base_filename}{index + 1}.txt"
+        )
+    if index != num_files:
+        return f"Read the file {base_filename}{index + 1}.txt"
+    return "Write the task_id into the file output.txt\nShutdown"
diff --git a/tests/integration/challenges/utils.py b/tests/integration/challenges/utils.py
new file mode 100644
index 00000000..05f85ae7
--- /dev/null
+++ b/tests/integration/challenges/utils.py
@@ -0,0 +1,34 @@
+from typing import Optional
+
+import pytest
+
+
+def get_level_to_run(
+    user_selected_level: Optional[int],
+    level_currently_beaten: Optional[int],
+    max_level: int,
+) -> int:
+    """
+    Determines the appropriate level to run for a challenge, based on the user-selected level, the level currently beaten, and the maximum level.
+
+    Args:
+        user_selected_level (int | None): The level selected by the user. If not provided, the level currently beaten is used.
+        level_currently_beaten (int | None): The highest level beaten so far. If not provided, the test will be skipped.
+        max_level (int): The maximum level allowed for the challenge.
+
+    Returns:
+        int: The level to run for the challenge.
+
+    Raises:
+        ValueError: If the user-selected level is greater than the maximum level allowed. 
+ """ + if user_selected_level is None: + if level_currently_beaten is None: + pytest.skip( + "No one has beaten any levels so we cannot run the test in our pipeline" + ) + # by default we run the level currently beaten. + return level_currently_beaten + if user_selected_level > max_level: + raise ValueError(f"This challenge was not designed to go beyond {max_level}") + return user_selected_level diff --git a/tests/integration/goal_oriented/__init__.py b/tests/integration/goal_oriented/__init__.py new file mode 100644 index 00000000..e69de29b From 6997bb0bdd586c93959aa9b53e21f19d30968d3a Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Sat, 29 Apr 2023 17:44:21 -0700 Subject: [PATCH 198/222] memory challenge B (#3550) Signed-off-by: Merwane Hamadi --- .../test_memory_challenge_b.yaml | 775 ++++++++++++++++++ .../memory/test_memory_challenge_a.py | 7 +- .../memory/test_memory_challenge_b.py | 81 ++ tests/integration/challenges/utils.py | 10 + 4 files changed, 870 insertions(+), 3 deletions(-) create mode 100644 tests/integration/challenges/memory/cassettes/test_memory_challenge_b/test_memory_challenge_b.yaml create mode 100644 tests/integration/challenges/memory/test_memory_challenge_b.py diff --git a/tests/integration/challenges/memory/cassettes/test_memory_challenge_b/test_memory_challenge_b.yaml b/tests/integration/challenges/memory/cassettes/test_memory_challenge_b/test_memory_challenge_b.yaml new file mode 100644 index 00000000..fd82ce2e --- /dev/null +++ b/tests/integration/challenges/memory/cassettes/test_memory_challenge_b/test_memory_challenge_b.yaml @@ -0,0 +1,775 @@ +interactions: +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: + \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": + \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", + \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. + get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: + Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. + list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT + Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: + Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. 
GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past:\n\n\n"}, {"role": "user", "content": + "Determine which next command to use, and respond using the format specified + above:"}], "temperature": 0, "max_tokens": 3251}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '3383' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA7SSTY/aQAyG7/0Vls8BQYGFzW37cUBaqVW1aqUuFTKJk0yZj3TGKUGI/14NIWwL + 0t56fe3x+7weH1DlmGJWkWSm1oP5g3v4Nv/MLX98elzO+P2nr+/axw+/SL6X95ig2/zkTM4vhpkz + tWZRzmKCmWcSzjEd3y3eLsaj6WKaoHE5a0yxrGUwGc4G0viNG4wmozEm2AQqGdMD1t6ZWtbitmwD + pvPpXYIvsy/6eDZPUJyQvkj3o8kxwaxyKuOA6fMBDYd+rHeaMUUKQQUhKxHSWWEbAxxWFgBghVK5 + pqwkrDCFs3gucCtRXOESQuUanUMQ8gKbPXimXNkSpGJQNohvsoga1uOhtAKF0gziIGdhb5Rl2FUk + QF1TrAhtGSy3Mlxh8rerZwrOKlt21k+vGMQwpOK8Ks7KYnK/B2UL5w3F9n8ZurZWIAjX4dq41mQ7 + zwF8YcpfzdaEPn3cxPqMYwzZ/Hpu5pWoTAXT79Iy5xGMbWh8pCKBJWTkuWi03kPhtHa7G3tQ9qSd + vMjmYJ1Azr8VCUPhnYlVc+0eaqZt77xTWv+nP+w8j0l/Vedl3ByVJcMdzmVxV8Tky+tb7Aqx9+X5 + LXEPcQI586zsEY8JFsqqUK2728IUg7gaE1Q25xbT0fHH8c0fAAAA//8DACiYLFgKBAAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfb47f48975968c-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 29 Apr 2023 23:21:31 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '7598' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '85995' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.67s + x-request-id: + - 7763b6b11a061097597b024900075aaf + status: + code: 200 + message: OK +- request: + body: '{"input": ["[{''role'': ''assistant'', ''content'': ''{\\n \"thoughts\": + {\\n \"text\": \"I should start by reading the instructions_1.txt file + to determine what actions to take next.\",\\n \"reasoning\": \"The instructions_1.txt + file contains the 
necessary information to determine the next steps.\",\\n \"plan\": + \"- Read the instructions_1.txt file using the read_file command.\",\\n \"criticism\": + \"I need to ensure that I carefully follow the instructions in the file and + not deviate from them.\",\\n \"speak\": \"I will start by reading the + instructions_1.txt file to determine what actions to take next.\"\\n },\\n \"command\": + {\\n \"name\": \"read_file\",\\n \"args\": {\\n \"filename\": + \"instructions_1.txt\"\\n }\\n }\\n}''}, {''role'': ''user'', ''content'': + ''Determine which next command to use, and respond using the format specified + above:''}, {''role'': ''system'', ''content'': ''Command read_file returned: + nBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\\nThis + task_id is 1111\\nnBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\\nWrite + all the task_ids into the file output.txt. The file has not been created yet. 
+ After that, use the task_complete command.\\n''}]"], "model": "text-embedding-ada-002", + "encoding_format": "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '3212' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1RXyc6rvJad36c4OtOUFCCAN/8MAgFC5ySQhNQISELoQmsDvi9f+r4rVakmHngv + WfbeWo3//a8/f/62afnKpr///PlbF+P0979+9p7JlPz9589//+vPnz9//v27/j/kq0lfz2fxzX/h + v8Xi+3wtf//5w/3vzv+B/vnzV3oPlO6TiA/H0Zt6Ge7PDjuxVaVsyMQKKlV40cPZWtFy47IVzRYt + qNGWWrlo8XNE/L260ft9e0QtdyMq+pyQEYjDhy/n4yqpEIXQY+vSAGI73uJgU3wXbGx4x13VTAcg + V9gQwUxNd0G7bgZ7Ky10r+ktYm9HElF6qPZYNctiYIsRi0jRKkpfJ6dC7FRQGe27eI/xdzyl85q0 + HgSi2xChZxvWuYbggYgTjM3lkZfjzGsbZVIiwFg58iVZXvtCiZWPRb3gXejLnrQA6zCO+M5zqssF + fX6Td0+3I7KOH2ge1JgD80RlfFjBRguTBA/UVfBwYPJfxFqri9H7sEIw8udKZ+V4lcG43SOqf60D + W61jnoOPTwa1vcsnnC9ZmKCANx7Y6bKPPnMdsxV5jXfBIg53tKrvvQkJ2j6DrWZ+9bHAgoPO9+uL + mo6rpPOmy2flp44DRzXCZSoqAMN5GPgR5qM7NWlPQJWUI9aEibI53LAMevyeAt5DSUhn4lfgfulA + 1Uph5aLZ5og4e9hQ56zoiCXvewy4bmTqnZ69O28f5xXeqz9Rl3aYsa2pNmBHcMDv95wP06B6MmwN + g8eJla7prLViD7/nYaxuhkk5yiKQDwqoHRjzsDTfbYESpDyDReU0tnt/ryN8fDOi1n3bsbWbawB9 + rxo4NpCX7rI67KH5yjx1Y+fChLCdDbg5eY716WyGy5pEF7Qgz8X3QKiHRby4jSx48pH6tdenrLqI + HsSfSsTPq1SXBPyPA7sJucH2HtOhfaJiA/FztbERYxUtXB7nUjyyGrs+J7FSC74OBJ1zpEF5PqGF + XDMDjNs1wjaTmnKyaAOwF9M32aZVq490dTk43jybei+2R2vRDSO8JlvGYVJmIf9ejAuipXsObg/S + Iya0TJVvSNewpj1gWPBxE6NHtxUDaUWSTnR/q4LyzDfY5ZqU0SXnCFi0PVP/UFdsYY5/gh/+4WAz + PhHdV2uiFDKziRRNOVrQ7jPLulBdMTbOeslF2tIqq5i8gm1Seu6sxZ9+a1zOVcD31Y7R5KYSZRrr + BO91IdJnKXje4CR3I7V2yQ2tLZfbyjN1r9QxWktnMG5tsF4ypZowYUZO+lkGmHk/EENhixZ+rG/A + s7DGmge9uzRuOEJmcocf/lhoIstOhvhBYnIdNx933oERAMTYJdvTY2CzV40muDEJqBNbRrgM4WUP + /lbTcRTuaEnas7FXbKUxsdWf13LUDlEO+Qu96PG1+7jLc7UjwEKUBduLm+qk3sgi0hV6IbtCfpaL + cdYM6H17T/V48fVZXa0bvIxCClbCBQObz90snm5WGjAxXsulfOQ9PBJIKc6ncViRHG5gNtorjgNj + LodffmBtdmmyu8Ulc4NslqXF5ojSbKSB+nktKGph9tjbJG3Y7YqkAUdD92Cec81djhdsIBKYr2DF + myz9FvPJkKlIDtjHtNb78BBuQHQqhXpudPjRx5MIucudsU716zAnUisDWdsZu0g12HS5zCs0jD8Q + pQyKcFLbcfy9TyBqM2YLkzYeSp+vD9WOy1SuJFNfSv2aL9Tc46lc5CprUXrOL9RLvk64fpq6gm9+ + FqjzlFi5TjxxZOHLC2Qu5w1aZ3IuAH9KJ8iD9yVdjM/JVDxlUWlitF99cWwxlu9xdsUxGS86675u + BMulcvBePLrD4u2mC8hSyWHzmPT6cvIgB0+9T1QP23ZYE8A23PrwTeYwe7mrPGgqfMZdSJ07GgYS + ifoeROoGWIPdnI6cvLeVXWdJhH9ldCC3frVBX4sau8t3h8j6sXvgblFIQ/NdpXN8VznYRtydXnbK + Ba1He3yBN+mvAKhVDgupBxOl8YCJgunB5c947cEYRJsGERJ1dlo3MgRZdifrtdzrjM1WD+ajAuyQ + x4jWxsUzagaoyRzzQ8iEw02GooEHvSkfG02HedijpjyUVO8OE1oPXzX+nQ+2Tpw/CL58z0Cp01eg + sKJ0+/Wj9kqxEUS8DzxfXzQkCRB5hvMf/I+fGPCjZySX+08614chhrcQPgNp+96Xu4bcAZadk1Lj + vb0zRgecwft+UrFp+Hy5mCdfRUK0dlj9bPBAtcPphWRNvmFHpAe0m8/dCj/6RbUME7ZYoWujldUe + dupTO6wcViqY8s6lF8mxyjETKw68dkvI96qa+ux/+wC1ZuNh29uBS3pzuSCojikOEunNpuV+s2Hq + Cj/oWaOFsxRcbwDSiPHJCSXW77U+QAnH5diPxGMptGdDlQg/KVi/sdpdw/hjKH6PF4oXzQ+55Oat + SBxNDfsB6dylndgKjw1ziHS8DYzoRn5TuEO14GfjN+kk1qdRiYXEwV5HbgMzOD+C3/cZL78tsyOO + ZOVgFA49GFvntz8ZnPONHij3gWOzDXiEw2S7+Cl8vjqJ7IuBjt63xE7vc+FqqI0NrgUj9TQUD9yv + f74kbqTa2d4PvFxFPeRxoVPTq81hjSpFRtiwhKBxPSkkyqSbMmK9/DOPmq3BqZ+R3eoXwqQHGSbx + 4laQVMaG2rdMK+em9iNUPO8cNUc1LOdwg16A3oITIG08l7T76hEk4T0jVRdFbPyoZQD1hdPx9ejq + Oj2ZniEbh++K90l0TZdffdnMFzPYXZdHyk7FV0aD9gxxcnwk4fTIFhupNychO7kO07FXkh7uVVIH + kvHI9Vk7ZAXaSXxKuOp+1JdeMQnSH2JJU3GNfvJd7il7FwjGQvBB7K4uhSKOhvYfvfrxHweV90zG + 2bMJ0jW7XF5I0RoakO0koYnfri/lM/IhNbqIY2TetAUkI6jBL3/7Vaky0Gb1jbUMB0ywr8oNITHz + iPDT7znLNBXZuXilVzjXbO3z10YOy+xI3VkwyiWqjxy43j6j5vJQf/Lvoir6E0IabCzd3a2nmkDQ + 
2UecPu7fcPFSLQOLvTW6x49PyVgzg8Iy38cWC17hrH+5BtIlPJPMv/glc9xbj56DvKOB1aT60k5o + heJ55UhnPFSdP4+3F/Bua9LsC199jjSpRWDEGX3k/NFdcyZk8OMn2C0/1sAk5JmgldFEJMHkh86+ + aRfYQHClDrwVt90enRV9O0ckLEg5l0nIMJXl0jjULT/fYdbirgVFCZWADaLkDlb2IcqJFZSgJC7D + ucgPNmxj2SJ8zYxyBb+z4YdPgUjGizu3Vv1SAjW38Sl4r+G4rZsKMpTcgt5oLZf4ftWC91I1GuhC + yVhduA5KTheMLb79lpQO+AWngvnY8vmu/PW/33qwnpyrPpdCnMD46mp6hNPN3fHHhwmRvU+o37pB + yIruvaLbgQaBfHzI4Y/fmkgyiE69bVqFc+au+//kuUVnh4FdRGTDoxEH7LOi1Mef/ASyUldkeZ4c + tMpWJcNp17zp8fRpQzY0+IRGt4nJzJ+5kBxjlsEqxi8a5e5pWD8CNDvjawhkhrZLl7BwCfrxP5r2 + Y6WPnOw46BJ9FiJ6O9B7CcV78M4Wppj3azRyin1B63vO6HUmYjjKVV7B6XCX6JHC1V0kYyXy7/8p + s2xfZ8Ucm+hHT/A+uvkDh6JHJV+vwUR427HcHfKfAeqliuFEmjW2c4g8QyO2d/zm/QNaVXo20KNT + RNJ21ExJX14NRM3/AQAA//9UmlvPgr62r+/np5j535IZAZGWdcdJQMCWg8dkZUUUQRARsC00Wd99 + B96dnexrYyAtY4zf87TP51K/wRQSvwOPs/smkqk31TA8pwnu4ouM8z0D/dz/CATo6OBkXPUB/dhV + tPQ/spo47Nkxf+TQ02oHe84v6/n+8RnUZxVGNIo7xOkr5FBzG46RaByFdEix+YI9CWwcXM+fbLJy + x4T3hB2pzuwVJ8cPNKFgyhO17JgDJqq+B+e8h061tLYmTb9E0M78ZiD2Z52SdJsJS97D+1KP//I6 + KOBhorqfbgBT4yyH7+tKp27K45SHcaKq8/5ga49WfbtbbWVQD+WAXTO5B2NpHW8QiqpGquJ3B1Nx + TlpNSZ4PHKw0VvEjMi+asR8VbJNyl/GHrXUAe8Aja4XXwegftyEc5dsGm+Oqt0h1T2T4OwyACL5e + p2Mh+jdYPwpMr0LuVNM6XnWqLFbtH7+PZmCLUGrenJ6fvpNyZiQTHOjVpTPfcD5VqgxIxyJ63zlH + aywiEoG0qU7YEV4159G3CGH/MV3y1eIkILI5EDjzMDbOjp+xQvvpyt74MDJu30drRN39pi7z1v/e + jaBhWXgBhctK7Pz2niW39dWHgilOKNtZTT/q7UDAzFvUKTu1GuxbP8Dz6JzxfjNYvdRxpqu1FiO8 + s696wKVra6vKSmFEGkEI2Cm2G1huyi3eUShZX22nqmDoz2fqq6UUjM9CSjab38Em7d+8OwgHFVje + FRt9Y1XSllW6puTXgZTHzZH3QVmq2uit3tgoMjdb5/W5Af0LnrE+53HW7b8DsFfDmoaOGwe8Gh6K + 2pK8x05adyk3piBSqxbd//aLalVxh67QE8RKgVasKHcI9mRno9Vgl9U3j18HiIrUxSgOdM7kZ3uD + zoQEIla+mLKr3LGlHuixndpqtMuLvdFIUmAP2VHP9Amf1NC+Pgg8fF/9dE9uOdyNOwnjnXkC46ne + T/Cc9tbMDzBlua9BOL3WHRqm9Q0wP8lMcCVWSzSPGRnd+lGnEWTn1CQiqgi9oAiGR3+FxoslZ3yT + FwlcafcLPVbfOhiVxGqgdVWrhe+rwbN0W4sjVNK9/4r68ZwKDNoqeNPwfP+Bpf9AFowJ3tGgBNMH + bRXo2WFFNnECqqEoDaTFd59R4xY7nGVD9FfPZDxtUcq5nhMQt1uduvij8L7f9XfYQOLiv7z1OagR + oMBgZP30naw9M1IAp3zciboi34qpRphD7+6aaNPwiI9X+SSCi1a5RFrZY0CtD3vB00pOaWj423R9 + Tf0GKMDN0fUQr6rpCAwPepHP0YbCo9XvcCRqVx9tqPG59QETHLuFDyuRqX/19LT2whODn416Q+GU + 6IHohfmkGsx8oplXLHpE/g0ODnfIhf6IxeZ5C53ukCMQml3FAJNu4JC5H7wfzg6Qm+w0wU3/WVE7 + tu1sssVrBLTyTKghgqJieRbCP76420cDiMD2a3g63Rku2/M64GatXqDUfDjegs2h500MPZW36gpj + JfwCjlzdh/uzXyPxexDBjyhKCJ8VipA07/fMJ8riM7CZlTsgzvujXfZmg05fA/cLXwO+83UyVmjq + p0f5u0Ep/6X4PPczFhouBCSXdWr4wsuaQiFu4X7bF9QBlc4HwKQLlMc8Q+v3wQHTazMRMLSJj4Tb + 4W2NZTpclvWkS75d+F97l1KB0SPdBTLsQgGU7VghsNJY//ugq7zwC43w4HASr54dmH0YYWJ0Sddn + 9rpry3zy2VkEv+ehyCG0b3e0bp9WSposZ7CXjfrve5SH7/UEw1YjeHvvivRX7eIWfi7Zici/vReI + VnBhUPRvFrbCb81/tP0iOE7HHTWyxOR0fMgN7Np1TP135FXibVOo4PhzQ0JfT8DHyXV9CDJTIWw4 + eNW48OQ8r+juK+wz0X+3HlC30EHKAY0Bi05PDzyewofaGap7rtO9D6ZPB4k656fp1E2+IvBjit2l + /9f0YC48jcD7iXu544MNkxdysM7TIR1/kuqAWvZjas75iZpGHqlbSe/J4gf4WMABKH6j0YWnaF4/ + azUsxZFa9eaY8lY8IJhtG5OM4vOZ9Y5qNDCTnQq7A1ardzesCNTKI0HS9WkBfn4cT3Bbpzu6hfEW + TG199cBNUSLqdcW1Gk1SCFrqyy4SgolXQ9o3J8h+yQmN3dutFp8JHr2yxotfZZuyu0Npr41IrdYg + o4+LOcHUF118ozcPSHXLazB/D0T2rijgwcjUxbfOfkap6mOZNnDmT/Kd650o73ACf/P5bIbBH3+d + zmFCpNlPSdCBDBbuVM79Iw7G+khygIJBosG8vsrwVBlUzgmd3z/tRXosdG3hU3/moemD9iqwhdUR + jUX2ydqr/GKayx/G3//FxitfqvxZy9SdvlYlGZfjANuy3CDYgd76ORYLtdRhZ3zJL2PGpGvtQwos + hlaXY1YNJbt7oIuECdHJ/Gav0xoJYLqtfLq7dkP/TZFcw48m+ESxXk9rfEN90NY3+44epr+qhg+I + GmjQBFLz+XtlXf28QOivsY89IJ3SaZuCBC58Cd8vIWhP2voC27X9o/pddzlP2sFf/NTM8/dgFMKN + rvr+dKXe2MtW9zqdashFCVPbIUE6vvsNgeV08ZBc2xXn4DEgSJqhoJ5Lzn3LG8mET3LTyHbj7DK+ + wxcRrpTeoTs1Qda4GX8vOPtIJNrFjc8+UIft2vnNPrfgv7NfONo8j7A++yJyuX5sMPMFepdZWslP + 
e/uCnpekZHOInxW/v7MWzPkYKe/qxzu7NhPYsd9IxtVW52vp2joq05s3DU0o9h/nq9aqvSJrvDeb + Anxg8TqBh3/l1PR1sR/RbXuBRvIO/vIM639Bt8x7PPsd/ve9K3SHsLe2LWstyZa6mf0sUbK9yKdW + 13R15mkElEnMeCjsB3jfa+bCv/3PymNhWS/siJe6Gh1LQcu8RZOHtumfP+12TrLwL5B22ynR9ivL + mnl9kw0W2A1QU4WEbmV3mw2Tm9rayvPedHtIcDpWb+0OEC1ruutWu4zXz725EdLmQP3COgTD+eCT + JS/SbXN5ZqPqMlFbS7wnH7/bB1N1v4nL+QS1NREFPO27CM48P8/bd8rWOL7A7H02EBAYtH7f/WPa + NEp3xv7HPIP2Vex9qK1MExtzf2GGtM3h8Q0xziJkBfLsv2CuqXfq5ZcxpbMvhYeiu6NnVrdBw4Ti + tdQLmQbDSvvk+DTBahhkxOzjZA2nOibQON8e5Hse7Ipv/UsLZ3+OkUJkiytB6sMbTft5Pd8pU+M0 + B59f2FIr+YSVlE7JAFNjtcXeEe8Dhs4ZglEjamj1GVj2Xa1ae/N9DgQfpl8JuBm/I2g4LcDhY2oA + /9hVAk8rZ6T48K7T0a0SX1PL/kiEwCjS/qTxAwzt7IHWv+hUjRnMHDDzOJruySOlh8BO4AhQQO34 + +7S+1waLsCzbhox3jMCwKbscznkB796G0bP94WhD8fGMSDr71HGpj6ZyKyJLuzNYk0wsFh+yzGcg + SvdXAuZ6ImP3/lSTwIwQXp96QKQ5nw4JbQu42+YNDphc96NxORKwjvENSYNgBHN+G6DYBSYBynRI + 2e0bq+C7/iAiRL4NeJ7pivZM7gSJytpMxYe7EqG+y1vsW/jKxezqq3Dh3YP6Hatpv687OB42Euln + vz3N+wvbB939+YAxJ8gDZH03sXte7fh6CL8N1Lbtk55LD2XfvipNDSh5SGefV9F4ZyLNvHQFNl9D + k47DKfLgtthf0RpMLmfzeaB2VvYHagmnW/o7fwIV+km3wcaqAz1RvFZQa13MiVK9t1yav19YaynC + tq9a/Hf+WAq82EFN3q5A0paiD4F7nNjUDPbfYEyR0MBI7Qfymf0ZW/rpG+Q2krWy5Vy6FrM/5zoO + 7DUJln4Ar7x+4FR7uwHXqjYHc//G+iewrTVyPQ9+izwma47ybGLkWoB2ijO65J0pfgEZgJ7d/s63 + iDVmHogsHWGHBxiMk6bcYHc0X3ifqGU1Fbdogu/jJqKevzkEw3HsGqglp5ii2+EdcOf0y5f9IjNZ + p+N5fVOgQF8VETagqxq79iMIVp8dDkKz62c/LYL6KakYX0/nfvbhE+R3vMd7/8Wq7xgYByiqSo2z + Y0AA8YNXrQmO+sK2/NhYvDHe/h9fb57tIZv7x23hE+wu6zfzFFDzxkWjCPRqIsGYq0t/tXF/Digw + vIt2KNo7Ag9X6HnS1j44PJUNEtaaWTEla0Ko6+8Gb/s3B2zxGcvz9/FvH4x36ocAQkOlOIjcTKTo + MwDZc9azn22q0VfK01IPBObc5N/NcTfAQyp02OdNmU5rKZtgbEcxdi+BWYnG9vCCcrZpaDC/z6i8 + LwMQJ+OF6rkehiUvdpuG0207ef1wcUYZMsmT6XaeX5xbBxEK+WjgLWEs5cPmfoPz+Qp90FPXcxhl + B5Aa2pYGQzdYbJ6n2j/LrYD//de///3fyw2Dpn3k7/liwC8ff//5f1cF/nN73P4jivJ/qPx3E4EM + tyL/57/+7yWEf75923x///Nr6/wz/PNf/5ZUqPxdOPjn1/5u7///l3/NT/zff/0fAAAA//8DAHN3 + aKTnIAAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfb48352f8d968c-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 29 Apr 2023 23:21:34 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '102' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - fecf22b60b7b162df4e8f245a8b6eb90 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. 
Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: + \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": + \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", + \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. + get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: + Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. + list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT + Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: + Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past:\n[]\n\n"}, {"role": "user", "content": + "Determine which next command to use, and respond using the format specified + above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I should start by reading the instructions_1.txt file to determine what actions + to take next.\",\n \"reasoning\": \"The instructions_1.txt file contains + the necessary information to determine the next steps.\",\n \"plan\": + \"- Read the instructions_1.txt file using the read_file command.\",\n \"criticism\": + \"I need to ensure that I carefully follow the instructions in the file and + not deviate from them.\",\n \"speak\": \"I will start by reading the + instructions_1.txt file to determine what actions to take next.\"\n },\n \"command\": + {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": + \"instructions_1.txt\"\n }\n }\n}"}, {"role": "system", "content": + "Command read_file returned: 
nBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\nThis + task_id is 1111\nnBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\nWrite + all the task_ids into the file output.txt. The file has not been created yet. 
+ After that, use the task_complete command.\n"}, {"role": "user", "content": + "Determine which next command to use, and respond using the format specified + above:"}], "temperature": 0, "max_tokens": 1597}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '6500' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA8STTYvbQAyG7/0VQmfbxMl2d+NbKBS2UGgp20tdwmSs2FPbGjMjN4GQ/178tfmC + 7R4KPVp6pXleSzqgyTBBXSjRdVOFDyu7yj98rK1bbj5/KYvtQj6pvP7+dcvfHjBAu/lFWsaKSNu6 + qUiMZQxQO1JCGSbx/eP8MZ7dLe8CrG1GFSaYNxIuovehtG5jw9liFmOArVc5YXLAxtm6kbXYkthj + Ml8s4wBPzc8SswDFiqpOoft5fAxQF9Zo8pj8OGBNfmrsbEWYoPLeeFEsHaZlIe4sHFIGAEhRCtvm + hfgUExiDY4L20gVTfAImykAsqKYhzkAKAlG+XJs+2n3aVppWItkLbE1FoAYZQ+vppB9tEWhb173E + guHMaCWdSsmLFIyHSR2lGJyTOVLesuF8wFv1UIbzt3DtTFWBqiq7g5o6UUnUgDilS7DbLnPexQ9Q + hfpNsCFiyInJdZOO4NlfPHljrX/p1pxunSOWv5psKsWDvxBWb/7t7QvUMKq12HWfGKmiNOUQnv/9 + TLQzYrTx9fXKEPvWjY2eXlkgbZ0jfbE9fcW0P1N+KtsNy/XqDK4hfUOqnACHTfhfCz1gHYPpDMf6 + mytkVdNAfDnQK2fK5dcXPCQ67anHydRF/fXBx3EcpylPlD3pCJzyEY8Bbg0bX6yHS8QEvdgGAzSc + 0R6T2fHn8d0fAAAA//8DAHrFs+FeBQAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfb48364828968c-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 29 Apr 2023 23:21:45 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '11098' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86907' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.062s + x-request-id: + - b8fa55724277bee4e9099b5c3f76d269 + status: + code: 200 + message: OK +- request: + body: '{"input": ["[{''role'': ''user'', ''content'': ''Determine which next command + to use, and respond using the format specified above:''}, {''role'': ''system'', + ''content'': ''Command read_file returned: nBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\\nThis + task_id is 
1111\\nnBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\\nWrite + all the task_ids into the file output.txt. The file has not been created yet. + After that, use the task_complete command.\\n''}, {''role'': ''system'', ''content'': + ''Command append_to_file returned: Text appended successfully.''}, {''role'': + ''assistant'', ''content'': ''{\\n \"thoughts\": {\\n \"text\": \"I + should start by reading the instructions_1.txt file to determine what actions + to take next.\",\\n \"reasoning\": \"The instructions_1.txt file contains + the necessary information to determine the next steps.\",\\n \"plan\": + \"- Read the instructions_1.txt file using the read_file command.\",\\n \"criticism\": + \"I need to ensure that I carefully follow the instructions in the file and + not deviate from them.\",\\n \"speak\": \"I will start by reading the + instructions_1.txt file to determine what actions to take next.\"\\n },\\n \"command\": + {\\n \"name\": \"read_file\",\\n \"args\": {\\n \"filename\": + \"instructions_1.txt\"\\n }\\n }\\n}''}, {''role'': ''assistant'', + ''content'': ''{\\n \"thoughts\": {\\n \"text\": \"I need to append + the task_id to the output.txt file and then use the task_complete command to + indicate that the task is complete.\",\\n \"reasoning\": \"Appending + the task_id to the output.txt file will allow me to keep track of all the task_ids + that have been generated. 
Using the task_complete command will indicate that + the current task is complete.\",\\n \"plan\": \"- Append the task_id + to the output.txt file using the append_to_file command.\\\\n- Use the task_complete + command to indicate that the task is complete.\",\\n \"criticism\": \"I + need to ensure that I append the task_id to the correct file and that I use + the correct task_id when using the task_complete command.\",\\n \"speak\": + \"I will append the task_id to the output.txt file and then use the task_complete + command to indicate that the task is complete.\"\\n },\\n \"command\": + {\\n \"name\": \"append_to_file\",\\n \"args\": {\\n \"filename\": + \"output.txt\",\\n \"text\": \"1111\\\\n\"\\n }\\n }\\n}''}]"], + "model": "text-embedding-ada-002", "encoding_format": "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '4447' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1R62Q6zOprtfT3F1r7NKQUIYFN3BAghDDaBjK3WERBCgBDCYBvc6nc/In+pj/om + EoOCwcvftwb/1z/++uvvNq3ybPz7X3/9/S6H8e//s5x7JGPy97/++o9//PXXX3/91+/3f92ZN2n+ + eJSf4nf772L5eeTT3//6S/ifM///pn/99fdX7b/UTV+namKVmMBwvFwwpp/e473s6PD4zk7UO7Tv + dBrMDkGv3RvUZqjlNMm5A1ertKTuiiheXVXPXL6vXxUagdZzxr7Agoec3LH1AgDMu96yoDztU2zv + jizl9+E7wPsjcghDguaNt5cLQRG9VWrQwzdlGDsWgKnq490Gh4DJ3czAjYQyjavynTJ9ndjAtOiH + jPhUgRkESQbz/qMTxbPugBDllcEYr3SMk8z2ZqKcJK2M4AFjvX/HA92FkfaqDZMGqZxU0xWmGSRb + w8CHZ3LopYPHGsj7VYJWp60LyGn0cnXlmxzbSuenc00kHwj55YD9L6fepHV3XS3FoMaIPsdqep32 + OVyP15Z6q+02/X5WkQDh27KoB0bGp3CcJFVCD4Z9fwv4qHXWTetMtSfdtzp502aWI/g71mzieD3M + Hx3YWo+B2mut81h20gQ4PE4PfHM1xKfdSj6B6/o04+SMLoATzhItj/IYG8dbW/Htt2xh0B45Ep/T + yIc8/97g0Dw9etjDpuKvZ4NAqMgjTS521dN8ujJ4klYqxR/jBeZoQrlK1qs90mzSmnPoV5265r6H + 869P+fDYpyq8OUWHr+tABcxu1x3cDFdIDXLMqsSSGx2+VV+jCdc7b27kF1Ne1UpBqmVZsWS8JgZ3 + uXenZtl75vwVIgfal8sLm2+ep4IUvyOVH5qeGv36aIrnWSjV0ZEtHESPQzW97NmFH01+4VyAFeBM + MwgUD1JDD0PzSvnQ+QJ8KeMeZ1pkVXOvPSXwrMgKATHd9cNwSCOQCBcf+ynTvOGt1hB+xs0VB80k + pOM2v5VQ1ZSBunKapjwWHQhTmKyxaT4Es94j5wa1EW0pTqORl7egP8EfnoN9TQHBrcOgfWcuvlnf + vJIexamBbmFgUk7pCfDp9W6AujJ07HYJSaeCKj68bdIGCXw19kPp5zWEqhBhTGiedgsewSRHEuEk + 8Sv2sW411O/GHS947tuzfUJaKKXqn/U1rG0pAcj9lti2UqeSti6TtWs5ZwTa75vHh+6Yr50DBPTA + iRCP53ewgk2oXzCGvOQL3hEk+kqmjrv/eEx6YR/WsynR3Ts4Vqyq2KD96oH9lG+ABEOYQDVtjwgM + wiZle6Qn8NrsJ+y86k3PvqklQO1kuGgjzgfAZ/tL4OV4f1CjuovprL/hDPozKohWfV8mH7GzgpN8 + 0qlTE8pnS24MeJj0NQ4hGDn1G9vWzGMSYee2SsHQjKYOza5Y08MsDz3pGuZDgXUU7+v3k5N8FxKo + uJeMCKTsTO43yAJtGB5p8N3VfJb2Ugk3z+mNWCa/eh64Yg30FX2gZm1TzvL8lcDtHj4Rz25jyj9X + tALJLp6xeSxsb6qNswrj7BnQbZM8qnnfwAyUk4rJ+hCinlWTbWiUqAHei/OXD5fHl8CZXTvClbiJ + B8OpM2iUd4PI7uoVPz/XwofClnwxRv2z6uKL0UBvNz8oVvM1b8bTrMNSjAi2vPO750N2iWC7t+/Y + e9lOPGjaYCj47d+xa1lWOp+frxJ8N0cX40r+Vlw0ghy0c6vTi5W21VSFR1trxLmm+8ChMUu8iwqG + pjwj6BsknqTgU8Ms8m9kRXNgzpunZUD2kGJE0oMGpqR7qfB9VW8EzvcOsGD3cjWiQ5kem0Rb8FLb + IF7xGdv8pvP5Kj0YJNbthd3bNetH+X0gcP0Fa2xM6YmztfLSNdastxQL/qmfDNPR4XQ4sd/3SacV + dgrouqFOEzkbQb2zThK8OMEbm2Zu93OQn31te9wFqJgNwSMGLU5wPd5mbJad3I8YxDNcWwhQ+/LY + g6lTpBpavkxotPt+PC41ggoZ21Hsct31OMzPHbh18h0bZQk8vvEUA0aq09JdfT1wYRxXNnwklzNp + h9zkzIpuBghP7hMJ9+vb46fPjgFLTj3qzccxnvqj5cJlfuhTCPYev1bGDEXaCBRpr9Fb3t+ASOlm + rB/uvifWRlnASjV8ctStuKcH2Y/g6EcVNnbfvTmIUF5Bu1RlVCtpBWj0IAJw4Kkjq/zU9jw1c/0P + fpVCTPgECtUC9ey6S73CvLW/oax1D6vEjqicOOcyt2DwPOywNx+DuDReowOKXgB4+3476WbtDzak + srWj9+DWxcyPPRv86q13vjaAuYfBge/X1qZHlSaAjGUKwTJ/aFkP1dI/DVBEBOE9MZ2UYQRyIIpX + 
kxTK7JhsJzU+pOevj63+Xsdj/C0RhOnGwPsz6U2yLjwCuOhGeMe0XbWBISDg/F652GaBlM4vvhM0 + F1Yp3cdpYIrWqidqYyYi3ovTNZ0f0dzBXEYvIo/x3Zz1Zpto3m6McaKrvjnvHo8CztUUY2yvKm8Z + XwYPhmtif+X4Me1ic6Ud33eX6te0j2mSNwUMlU2FGBIe5nxYHSD0ppOF0wdk5pDnGVO2VilhQ5I7 + zk2UODDcPwRqmJeDJ575wYWzJV+JnGdWKiz4gct6pUu/6+cXDyT18Jk01IRuZ47NvUpUW/5A7HFn + X3Ex1Ftodg8RB1pSmu2lD0NNqo8Haniw5qP83hKwPbpXut8mRcymdKPCjaw9kGhFcvV9f44Qolit + CSyufTXwd5XDcrAkfOxl26SBKzawfBZPnEiusaw/vYCkcykSpKPXz7HTdiBoTxrOgFSAqTYeKnDd + SEdiCWyPSwKztVp8PpE6jEM8K5Ilq9J2VZO5rIKKuw89gRIqVJr5ZgHYWR5r2JrvButOVadTfwxn + bcEDRf5R4kwk2gwomRT8nKxXSvS3MEN6Hi70MKZSP74/x5Wmg8Qhat4PfJBeewQTZ34TNnl3MPLj + 2leX74FWAjS58FDeERRlWpLVaRP1zCOFD+53fqbWeqReK9y2JUg2/YsUy/xOQZ3I8IdXttSXPrgx + puFn7lK7b32wuaxQqzyDFcXuGX/jaQKuDPUSHakRlgVf+FILE/L4/voXnzfPsIOPi1iS4rhPvVHr + /EQRL/meBnMh8olHpgR3nWQhZQ8Ec8atM8Mdq2N6kkLFHNq2mcEmDu80bBvWjzcyyJBYyQu74B2n + zARuDlW0boimJYY3uSa3oUPHA7UcN/XGxt/mkPcwocaUfVK2vdiJJnzKLTXCF0lZpzsQ2rS44eBE + z+bSX5FW2peWwANl8bwXKv3PelJf4s4kBep1iIp0IjD2m3QUJB5pa37Y4lOL/ZgZjmIBRZRNpKwa + 0NMvOWWwnh2X7u7PsmKfo1rCj/dosZ5Uq2qUr6n74wP4sgsffMq2OVL7l7Qlbee9OTPHfQS79Vkj + 8m2NUmE42B28PoyAWt6Jm0O4vZfwte1O6JZndczS8jmrX+Jm1O1SnTPVcC7gfD996X6NgpR9U1+A + 2ndE2Bf6S9omq0MBdSy86J4/PcDpLY5g2+hP+sxobfIsYQ48rlqPWjuZVmzhk7/3o9dOLfuJrqMC + hsV3QB9itunEn+oN7I+yQ1Mwhnz+bmkJ1l9lTRf9xAdw+6zg0r9ooN1pP5p3IYJTT57UdExmDtVk + 67BXQU3R40HSia6TEtycZ4F33qMy+WGz6dRso2RozkIMpPtsu3DQv5zA/cMCUjSllqJineHHQdbj + DdIKX7s/Qgef+LrwptcJZ6CqfYEISjLFw1o53WBCSk4UaTuns+SYLtTG3Ui3xLPMHgwWArLhP3HG + w7fHquqaQ/hqU3y5PD58ZJV4U0sxJHTfqUYvhs9VB4/mNqVBMzdgsitUAwM0CrW9t14RgYm1trZ8 + gA1n9nlbO/YKduv8iN3b/dUzdcsJ3AxnSDajj0xylR4zDJ4+pmZV7tJFz9WQR6yiunCxqo0kXATY + zrWElNf4qXgzDzZc1jNe8OHNwXBLVD+QExqkxgzmR0QM+H09Zmop95z/+BHcf9rLHz1IbfK+wGdx + PGN7vS/T6anZJVz5W06U1Qd5bGObJ23bYRWblrLh0wkdB+2Z1CFGp81c0VT8JLDJmmEZ7zvla3+w + oK9kW3yKFRjTH75M/h3JmkxVzDXzOMNMeHLqrC8uZ6/Wuai6n8jLeIZ0uhzEDP707TVwaPrGoLgB + fvH9Ra/fTc7gyga/7xOjS8Cnpd+ABa/41rk85isxL8DMbgJRJMMCk3mHESTrDceYsBDMvjKfwOWY + fwjYn/cx2WpuKC98BzW7qOR9vk4gCNIxw1tF3VUbEyXujx9g/DG2YHo0uw5sxNahVsnbiiR17UNX + QTZGAjSBuJnlEEz994zdffAF445pDohX00wqxwy9SSWZAeP3gDFWn1E8M4P6ai7MMt32su1J+i2R + 1ItHbggUCFTdJAZIid9vQHrL33JR7+dMc+TNhijnVWPShb//6hXGaaynEs3DFWjDg4axvXf46DxD + Hzyrr0Cdfjv2Pz9BRmjvYbv/qPEsVEUI+021w/4L7fi8GdQaBLZ+w6i9tObsssKAC7/+wx++hugz + tUpte9FjbTUaBk9ga34aJG7CKmYxtVtIN+uJbq+Vw3ksCCe4s7QS49PBiQdVOBIIcfAin3WQgEn8 + 3i8wUvc5Akv94Kv2gCAqih12XAhj9jRDWbPvo44+wfrbzygkHejCu0wRTrJ0Ug4T0pb+SY2yLjym + CsdBewREIPJlCFOe5KQEaVCYNLrDZzpj8WJDJTRf9DDUWczGcWXBQlh/abD4Fax5YgF+X7VNnQAn + KQ9crYFlJK9JnT3llCQ1E6BdigQJJW/7/iCHquYJpKILX+SMFLgE2a7ZoLXW3sxJJUUHs4NeUF04 + z+aodUddC4tBwvbrBKs65HkIfvXXX/RJdz42J1XH05VaIZP4ZCa6qpkfp8C7gJKKZWc3V736e6fO + 9uWlXDMDF8T3bU1R9N6b0nNT2mB5Hv6jN9/xOKijo1rIF4YW/PFrFjxRIzuWKVNwGcK0iiJqGeSa + MuVxE+C8zQjG7YN7LKZppCLpsF74oGbWbUtmGElfkazppzen5p7PMM/XT9L//KGTfimhaqQh9nNr + 7Od8dyPQMy4CDYDmgcn8lALcM25Rvdh+YuriwIf1cJrwkRzW3uyyVofNfAK4PF4lj9vkfYISqi18 + fT2GntVO68Pd53rEFnld+0Hu5hlq++uT5H50iKeH4AhwI9YrrENYpIxtvi3cH6UV+vXTzWw/bMi2 + iCNm4xj0vazrUPgUWyLen2XPtvWhgWEx6zjLrbH6it/jBd7v0pNaKO9jFmzmApZtH9CF/4K2Poul + Gqlui+S7rgN6VMIG+HH3RUPTfeM5qoYCemnZUy9CL3P6sDrSQDzjxZ8wgHi05hnatLxhv/leACv9 + HsGN/SEU6eU57WHeRUD4ZJsfvk3RyVcQ7jrBwlviHvuxP95mkEfXNXpb0a0iK4BruImjO7W8867a + /PhueC1Xi78XVKNh6jocHUGly3EvXcw1g76bTdh42Jt+WJ2GFexf84YAf/LM6XJlHfQDNaHoxy+l + V5FBTo8Ub++iEP/6Dfx6bKT+V3hVfO3XNtzywqZnZnup9ONT3nSx8EE+G4BnieyCHV/Nf/joJCFH + goZuxiRWXb1n9+aTqI/qYWLbX6/Tlu7MVuE0MSlqz+deAkEgQHjLK3woRJUP1RS16qKn/q1/m7us + q8ZZ6PH+UkecqVtqqZshD+l2iq/xN8PH5OdHUT2p8opFrpzD1TsSFz6tcJJexA46lybD3kq69JPe + 
j9LPLybN64w99sPbhRgadVyYpXOY7Gp4O5IS7fqCxvPWfXYw30kzobga0nbxH6BVkj3eSfeDyeqz + Vir7o7DCZn9sOIfhx4KTHEp4Jw8fr3vHbwIZr0t88SrQz8Lk+tDsyjXR9LRLh8vjRTRHVrY0N4cg + ZWmpGKD7dgpR5/W1oqdPlIC6XUV44YNACLe+CrSwupIwk7JemNADAgN0JsKPWkm/fhAZ0AK8Qx8Z + 4B6Qt+bDTCg11PLP4aff9B9/wFuVBTHP2sgCQDI7NE/ZPm2vxtHXeK+EdFdaHhAHkTfq4v/9wYvk + 9xsHFIenj9ZH8crbKTs6Wn4IU3waOy3lYLhJP32CxIBAc6RTv1Iyod4jZodN2kbaQQKHPmJ02xJi + jkl3DqHZ1QfszDSreD49GVz0Mfo8XVINzRNLwHRQQnXTRmYb8k6G6Bqb2J/vHeeXVRzBXoWE4tM6 + 9Oby3csQjVZKt5vPp+o2w7iCvnv/YP/0ufbTwvehe5GuFKdHEA+/fOCj3snvfZf/szu4+G9492Qg + nmqjK39+PEW29K5G4lMVlMZGJHIxPlKmMcOCgW3cqPN6u+bELoWtecaxw1ukBeZkfi4+HHSiLXrr + EE/NvU/g7vgx8WF4I06fl6IEVLZ35C3dLtWPf4IrUA5EifvW5NdScjcvhDr0CQ4v/t2rJ/uXZxCW + ydtq83m7DrDvs0v323sApvhb+qBo8wzvZUD7ATS9q+Y7YSarpb9ywisGcfsi+E9+cmHDH78PB1NH + zHnxS7XdJ0nxgVObCylLoPpbzwz3m5itTlMDbuRkYJzR2psMg7DfeKh9jXE/e/vTBcCVP2Hran1M + OmSlCkdHUpHEbwVnxzcl0AmuCfWESgZS78W2tvBx6h8oS0cQ3xa98t7Sg85fZo/o1GqXrs0Xf3eq + +Nk/yHDhU3RnbPYpk+XXCUr1KaZbIS89sh7nCGa7ekOkUb2lrKqeGTxM1uH3ft78jtUTPL83EtmM + gd6P+ODeoJS6KdnIAFfTmucDwG0jI+W87/nU58RW63kr4aW/pWOagQv85S2oYoP36+dwl/vVv/G/ + +Mdg3Ew6dS2rTrurVCZwmR+iBNcrGBM5zDXdzw00aknpDR4pEGy+KCDZ/mFx/gi2DdyxJiavpb5R + hbUZfL/scPHTPubkPG8InrPRIRvpa5nd0l/VRDj5VF/qHf+SlwVvZ3ODsRB8vPmWBhGExnDGSLCu + S17CIoCkMMbXNRrTefHff3kCdobcXPyx2v2TL3hzmHCeHI9IKycOiMhsLx4WvQkuGyT+/Mzqj99s + lLmPD+e9B56pKOhqLtCfniqr+aVTARprvUTrxb+nugUQPDmGgLdV5MbTXm2Hnx9HFnyAyZWOtsYD + /YRRY1x7Kbc2Gdx2R4vun97W24T2XINncXkiAeVePIdJhCC6BgpRh0Hho5cACDOh2f/yr366wsKG + /mW9Qvw6H+PvIIIasuTWkXDxS9hDmVQoaHhHCnOX9nx6zb52b24Skepc4Kx20hq6sH7jYH45sSB3 + 6gzfaCjoY3618bx7dC7M+vOOrBd+PoHYK/7w7Z10kyoqhk4LClHnOPiCoZoOx28HD4al0Pw4Jik/ + bNYdtAP3+6uvYCoVs9ScyuWLXnlX36OlMrD4eUSJRD1mmsY6baVqCg2+VhTT3bXO4RDOe6wj4eER + RbqXYCN2DiEneWdK3j67QOueAmwaHz/9TsCQf/ihgZ66aX8fvgQE9SqnZn+0weJ/qfDk3WOy+voU + kJOq6rDbn054u2lEc/r5V9dsbWJ96T8LHxwg++gezlL28Jb8Mgf+IBVo44pTzH76fMl3iXpGEmCo + 8yBQhu+W2k+lT6fNzCJYKZqB9x/mpmPxknSAYjHBexmWHp/tF/nlPWhy+i3nWVKVsHtsb1TP5G3/ + 82ehG59r+stz/+QFaZDSpV5uOb8pax2eJMDIvKFvk6XllcGp7894G+13qdjccwYuTkixmexwyl6t + ftF++tsf7NEcnPlSw/dVvuHz7hAB3m/4TfvlBe6h2PPN5224UMfSC/mLfzH9+q8W1i+8uz8ePbdh + UcKt1ehkAFLB20WfqKbjJ798pBqXvFZb/DNsJb6a/vSSWrfAIZ1lrGOSXo3wV+9+eOh/8w/Mx6rF + 6Eh8MC9+ljp/vHmpv7Sff3rxe1JSig6HquLrJOnAjWsaERa+SwPP9f/oZ397EKpJOSgIgmEzY4NE + 1GT6OrLhgr8/33fzuRYInLz8QYbYb2Iynt4JNLunSPfrQOWjcqgbsLGJQV0xOlfTPd4zEL+/O7QW + fKFi+wDMMHRXH7rVzD6d7LMSqhO1VtTLeRfz1LwY2t+/XQH//Y+//vrP3w6Dpn3k72VjwJhP4z// + Z6vAP5NH8k9BkP5JpT87EciQFPnf//r3JoS/v33bfMf/O7Z1/hn+/tdfogb+veHg77Edk/f/vvKP + 5Yn//Y//BwAA//8DADqWffrnIAAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfb48903cf4968c-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 29 Apr 2023 23:21:49 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '191' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - 82f9958865f62293d4b4b8fa59c8ef2c + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the 
file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: + \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": + \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", + \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. + get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: + Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. + list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT + Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: + Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. 
Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past:\n[]\n\n"}, {"role": "user", "content": + "Determine which next command to use, and respond using the format specified + above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I should start by reading the instructions_1.txt file to determine what actions + to take next.\",\n \"reasoning\": \"The instructions_1.txt file contains + the necessary information to determine the next steps.\",\n \"plan\": + \"- Read the instructions_1.txt file using the read_file command.\",\n \"criticism\": + \"I need to ensure that I carefully follow the instructions in the file and + not deviate from them.\",\n \"speak\": \"I will start by reading the + instructions_1.txt file to determine what actions to take next.\"\n },\n \"command\": + {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": + \"instructions_1.txt\"\n }\n }\n}"}, {"role": "system", "content": + "Command read_file returned: nBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\nThis + task_id is 
1111\nnBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\nWrite + all the task_ids into the file output.txt. The file has not been created yet. + After that, use the task_complete command.\n"}, {"role": "user", "content": + "Determine which next command to use, and respond using the format specified + above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I need to append the task_id to the output.txt file and then use the task_complete + command to indicate that the task is complete.\",\n \"reasoning\": \"Appending + the task_id to the output.txt file will allow me to keep track of all the task_ids + that have been generated. Using the task_complete command will indicate that + the current task is complete.\",\n \"plan\": \"- Append the task_id to + the output.txt file using the append_to_file command.\\n- Use the task_complete + command to indicate that the task is complete.\",\n \"criticism\": \"I + need to ensure that I append the task_id to the correct file and that I use + the correct task_id when using the task_complete command.\",\n \"speak\": + \"I will append the task_id to the output.txt file and then use the task_complete + command to indicate that the task is complete.\"\n },\n \"command\": {\n \"name\": + \"append_to_file\",\n \"args\": {\n \"filename\": \"output.txt\",\n \"text\": + \"1111\\n\"\n }\n }\n}"}, {"role": "system", "content": "Command append_to_file + returned: Text appended successfully."}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}], + "temperature": 0, "max_tokens": 1317}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '7831' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA6ySQY/aMBCF7/0VozkbFLKCprmhHipUqVXVoh6aChl7SFwS27InBYry3ytIgF22 + 2l72as+8782bOaLRmKOqJKvG16O3czf/vV9+SD6X6fzP4cv3j2n29aCXpir1exTo1r9I8dAxVq7x + NbFxFgWqQJJJYz6ZZWk2SabJO4GN01RjjqXn0cN4OuI2rN0oeUgmKLCNsiTMj+iDazyv2G3JRszT + 2SwVeBO/fkymmUB2LOtbbZYmnUBVOaMoYv7jiA3Fi3BwNWGOMkYTWVo+2XSWyZ5GOBYWAKBArlxb + VhwLzGF4HD5oz6fHAj+5HXAlGbgiYBm3K6OhkhHWRBak92Q1aWB3LnAt+5bHvGfYmJoELEBJC22k + W/8wHoFyTSPtuddYbZRkeooCE+FSPS5QPHYYSEZnjS17m8tobPkCY2fq+h8U1YZAlv9L87W0PWgE + y9cfRgXDRpnY9IwFWOozJRvbMAgtrjEqFwIpvq5jV9Ep4xcTuEdGT3J7wZ3TeZ0l9YxOXE5s6H92 + YVY21OOf8O5cylDeH+fj9fcC304uLgIaYqsUxbhp6/pwNXQ2NXgrbIedwI2xJlarXglzjOw8CjRW + 0x7zpPvZvfkLAAD//wMA1GRhPyUEAAA= + headers: + CF-Cache-Status: + - DYNAMIC 
+ CF-RAY: + - 7bfb48929ebc968c-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 29 Apr 2023 23:21:57 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '7934' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86890' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.072s + x-request-id: + - 38b1938d8fa82ba256f64bb39662db49 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integration/challenges/memory/test_memory_challenge_a.py b/tests/integration/challenges/memory/test_memory_challenge_a.py index 16f8a6b7..fe18773d 100644 --- a/tests/integration/challenges/memory/test_memory_challenge_a.py +++ b/tests/integration/challenges/memory/test_memory_challenge_a.py @@ -6,6 +6,9 @@ from tests.integration.agent_utils import run_interaction_loop from tests.integration.challenges.utils import get_level_to_run from tests.utils import requires_api_key +LEVEL_CURRENTLY_BEATEN = 3 +MAX_LEVEL = 5 + @pytest.mark.vcr @requires_api_key("OPENAI_API_KEY") @@ -21,9 +24,7 @@ def test_memory_challenge_a( user_selected_level (int) """ - level_currently_beaten = 3 - max_level = 3 - num_files = get_level_to_run(user_selected_level, level_currently_beaten, max_level) + num_files = get_level_to_run(user_selected_level, LEVEL_CURRENTLY_BEATEN, MAX_LEVEL) task_id = "2314" create_instructions_files(memory_management_agent, num_files, task_id) diff --git a/tests/integration/challenges/memory/test_memory_challenge_b.py b/tests/integration/challenges/memory/test_memory_challenge_b.py new file mode 100644 index 00000000..3ab54054 --- /dev/null +++ b/tests/integration/challenges/memory/test_memory_challenge_b.py @@ -0,0 +1,81 @@ +import pytest + +from autogpt.agent import Agent +from autogpt.commands.file_operations import read_file, write_to_file +from tests.integration.agent_utils import run_interaction_loop +from tests.integration.challenges.utils import generate_noise, get_level_to_run +from tests.utils import requires_api_key + +LEVEL_CURRENTLY_BEATEN = 1 +MAX_LEVEL = 5 +NOISE = 1000 + + +@pytest.mark.vcr +@requires_api_key("OPENAI_API_KEY") +def test_memory_challenge_b( + memory_management_agent: Agent, user_selected_level: int +) -> None: + """ + The agent reads a series of files, each containing a task_id and noise. After reading 'n' files, + the agent must write all the task_ids into a new file, filtering out the noise. 
+ + Args: + memory_management_agent (Agent) + user_selected_level (int) + """ + + current_level = get_level_to_run( + user_selected_level, LEVEL_CURRENTLY_BEATEN, MAX_LEVEL + ) + task_ids = [str(i * 1111) for i in range(1, current_level + 1)] + create_instructions_files(memory_management_agent, current_level, task_ids) + + try: + run_interaction_loop(memory_management_agent, 40) + except SystemExit: + file_path = str(memory_management_agent.workspace.get_path("output.txt")) + content = read_file(file_path) + for task_id in task_ids: + assert task_id in content, f"Expected the file to contain {task_id}" + + +def create_instructions_files( + memory_management_agent: Agent, + level: int, + task_ids: list, + base_filename: str = "instructions_", +) -> None: + """ + Creates a series of instructions files for the memory challenge. + Args: + level: + memory_management_agent (Agent) + num_files (int) + task_ids (list) + base_filename (str, optional) + """ + for i in range(1, level + 1): + content = generate_content(i, task_ids, base_filename, level) + file_name = f"{base_filename}{i}.txt" + file_path = str(memory_management_agent.workspace.get_path(file_name)) + write_to_file(file_path, content) + + +def generate_content(index: int, task_ids: list, base_filename: str, level: int) -> str: + """ + Args: + index: int + task_ids: list + base_filename: str + num_files: int + + Returns: str + """ + task_id = task_ids[index - 1] + noise = generate_noise(NOISE) + if index != level: + if level == 1: + return f"{noise}\nThe current task_id is {task_id}.\n{noise}\nWrite all the task_ids into the file output.txt. The file has not been created yet. After that, use the task_complete command." + return f"{noise}\nThe current task_id is {task_id}.\n{noise}\nRead the file {base_filename}{index + 1}.txt using the read_file command." + return f"{noise}\nThis task_id is {task_id}\n{noise}\nWrite all the task_ids into the file output.txt. The file has not been created yet. 
After that, use the task_complete command.\n" diff --git a/tests/integration/challenges/utils.py b/tests/integration/challenges/utils.py index 05f85ae7..0c97402c 100644 --- a/tests/integration/challenges/utils.py +++ b/tests/integration/challenges/utils.py @@ -1,3 +1,4 @@ +import random from typing import Optional import pytest @@ -32,3 +33,12 @@ def get_level_to_run( if user_selected_level > max_level: raise ValueError(f"This challenge was not designed to go beyond {max_level}") return user_selected_level + + +def generate_noise(noise_size) -> str: + return "".join( + random.choices( + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", + k=noise_size, + ) + ) From 06ae4684c8692cc941eb379a34934c4b0b3a595e Mon Sep 17 00:00:00 2001 From: Richard Beales Date: Sun, 30 Apr 2023 05:40:57 +0100 Subject: [PATCH 199/222] replace 50+ occurrences of print() with logger (#3056) Co-authored-by: James Collins Co-authored-by: Luke Kyohere Co-authored-by: k-boikov <64261260+k-boikov@users.noreply.github.com> Co-authored-by: Media <12145726+rihp@users.noreply.github.com> Co-authored-by: Nicholas Tindle --- autogpt/agent/agent.py | 32 ++++++---------------- autogpt/app.py | 3 ++- autogpt/commands/execute_code.py | 23 ++++++++++------ autogpt/commands/file_operations.py | 11 ++++---- autogpt/commands/image_gen.py | 9 ++++--- autogpt/commands/web_playwright.py | 4 ++- autogpt/json_utils/json_fix_general.py | 13 ++++----- autogpt/json_utils/utilities.py | 5 ++-- autogpt/llm/chat.py | 7 +++-- autogpt/llm/llm_utils.py | 23 +++++++--------- autogpt/logs.py | 23 ++++++++++++---- autogpt/memory/__init__.py | 13 ++++----- autogpt/memory/redismem.py | 4 +-- autogpt/memory/weaviate.py | 5 ++-- autogpt/plugins.py | 37 +++++++++++++------------- autogpt/processing/text.py | 11 ++++---- autogpt/setup.py | 4 +-- autogpt/speech/eleven_labs.py | 6 +++-- autogpt/utils.py | 22 +++++---------- data_ingestion.py | 18 +++++++------ 20 files changed, 134 insertions(+), 139 deletions(-) diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index a049b9d2..af3b8205 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -8,7 +8,7 @@ from autogpt.llm import chat_with_ai, create_chat_completion, create_chat_messag from autogpt.logs import logger, print_assistant_thoughts from autogpt.speech import say_text from autogpt.spinner import Spinner -from autogpt.utils import clean_input, send_chat_message_to_user +from autogpt.utils import clean_input from autogpt.workspace import Workspace @@ -83,11 +83,7 @@ class Agent: logger.typewriter_log( "Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}" ) - send_chat_message_to_user( - f"Continuous Limit Reached: \n {cfg.continuous_limit}" - ) break - send_chat_message_to_user("Thinking... \n") # Send message to AI, get response with Spinner("Thinking... "): assistant_reply = chat_with_ai( @@ -117,7 +113,6 @@ class Agent: if cfg.speak_mode: say_text(f"I want to execute {command_name}") - send_chat_message_to_user("Thinking... 
\n") arguments = self._resolve_pathlike_command_args(arguments) except Exception as e: @@ -128,24 +123,19 @@ class Agent: # Get key press: Prompt the user to press enter to continue or escape # to exit self.user_input = "" - send_chat_message_to_user( - "NEXT ACTION: \n " + f"COMMAND = {command_name} \n " - f"ARGUMENTS = {arguments}" - ) logger.typewriter_log( "NEXT ACTION: ", Fore.CYAN, f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} " f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}", ) - print( + + logger.info( "Enter 'y' to authorise command, 'y -N' to run N continuous commands, 's' to run self-feedback commands" "'n' to exit program, or enter feedback for " - f"{self.ai_name}...", - flush=True, + f"{self.ai_name}..." ) while True: - console_input = "" if cfg.chat_messages_enabled: console_input = clean_input("Waiting for your response...") else: @@ -176,7 +166,7 @@ class Agent: user_input = self_feedback_resp break elif console_input.lower().strip() == "": - print("Invalid input format.") + logger.warn("Invalid input format.") continue elif console_input.lower().startswith(f"{cfg.authorise_key} -"): try: @@ -185,8 +175,8 @@ class Agent: ) user_input = "GENERATE NEXT COMMAND JSON" except ValueError: - print( - f"Invalid input format. Please enter '{cfg.authorise_key} -N' where N is" + logger.warn( + "Invalid input format. Please enter 'y -n' where n is" " the number of continuous tasks." ) continue @@ -206,16 +196,10 @@ class Agent: "", ) elif user_input == "EXIT": - send_chat_message_to_user("Exiting...") - print("Exiting...", flush=True) + logger.info("Exiting...") break else: # Print command - send_chat_message_to_user( - "NEXT ACTION: \n " + f"COMMAND = {command_name} \n " - f"ARGUMENTS = {arguments}" - ) - logger.typewriter_log( "NEXT ACTION: ", Fore.CYAN, diff --git a/autogpt/app.py b/autogpt/app.py index 3bbf6581..150bfca3 100644 --- a/autogpt/app.py +++ b/autogpt/app.py @@ -6,6 +6,7 @@ from autogpt.agent.agent_manager import AgentManager from autogpt.commands.command import CommandRegistry, command from autogpt.commands.web_requests import scrape_links, scrape_text from autogpt.config import Config +from autogpt.logs import logger from autogpt.memory import get_memory from autogpt.processing.text import summarize_text from autogpt.prompts.generator import PromptGenerator @@ -172,7 +173,7 @@ def get_hyperlinks(url: str) -> Union[str, List[str]]: def shutdown() -> NoReturn: """Shut down the program""" - print("Shutting down...") + logger.info("Shutting down...") quit() diff --git a/autogpt/commands/execute_code.py b/autogpt/commands/execute_code.py index 1bcdede2..6b141dba 100644 --- a/autogpt/commands/execute_code.py +++ b/autogpt/commands/execute_code.py @@ -8,6 +8,7 @@ from docker.errors import ImageNotFound from autogpt.commands.command import command from autogpt.config import Config +from autogpt.logs import logger CFG = Config() @@ -22,7 +23,7 @@ def execute_python_file(filename: str) -> str: Returns: str: The output of the file """ - print(f"Executing file '{filename}'") + logger.info(f"Executing file '{filename}'") if not filename.endswith(".py"): return "Error: Invalid file type. Only .py files are allowed." 
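The execute_code.py hunk just below replaces print() with logger calls around a check-then-pull of the python:3-alpine image. As a standalone sketch of that docker-py pattern (assumes docker-py is installed and a Docker daemon is reachable; ensure_image is an illustrative name, not a function in this codebase):

    import docker
    from docker.errors import ImageNotFound


    def ensure_image(image_name: str = "python:3-alpine") -> None:
        """Use the locally cached image if present; otherwise stream a pull."""
        client = docker.from_env()
        try:
            client.images.get(image_name)  # image already cached locally
        except ImageNotFound:
            # The low-level API exposes per-layer pull progress as decoded dicts.
            api = docker.APIClient()
            for line in api.pull(image_name, stream=True, decode=True):
                status, progress = line.get("status"), line.get("progress")
                print(f"{status}: {progress}" if progress else status or "")

The high-level client answers the cheap "is it cached?" question, while the low-level APIClient is used only for the pull because it streams progress events, which is exactly the split the hunk below preserves.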
@@ -47,9 +48,11 @@ def execute_python_file(filename: str) -> str: image_name = "python:3-alpine" try: client.images.get(image_name) - print(f"Image '{image_name}' found locally") + logger.warn(f"Image '{image_name}' found locally") except ImageNotFound: - print(f"Image '{image_name}' not found locally, pulling from Docker Hub") + logger.info( + f"Image '{image_name}' not found locally, pulling from Docker Hub" + ) # Use the low-level API to stream the pull response low_level_client = docker.APIClient() for line in low_level_client.pull(image_name, stream=True, decode=True): @@ -57,9 +60,9 @@ def execute_python_file(filename: str) -> str: status = line.get("status") progress = line.get("progress") if status and progress: - print(f"{status}: {progress}") + logger.info(f"{status}: {progress}") elif status: - print(status) + logger.info(status) container = client.containers.run( image_name, f"python {Path(filename).relative_to(CFG.workspace_path)}", @@ -85,7 +88,7 @@ def execute_python_file(filename: str) -> str: return logs except docker.errors.DockerException as e: - print( + logger.warn( "Could not run the script in a container. If you haven't already, please install Docker https://docs.docker.com/get-docker/" ) return f"Error: {str(e)}" @@ -118,7 +121,9 @@ def execute_shell(command_line: str) -> str: if not current_dir.is_relative_to(CFG.workspace_path): os.chdir(CFG.workspace_path) - print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'") + logger.info( + f"Executing command '{command_line}' in working directory '{os.getcwd()}'" + ) result = subprocess.run(command_line, capture_output=True, shell=True) output = f"STDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}" @@ -154,7 +159,9 @@ def execute_shell_popen(command_line) -> str: if CFG.workspace_path not in current_dir: os.chdir(CFG.workspace_path) - print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'") + logger.info( + f"Executing command '{command_line}' in working directory '{os.getcwd()}'" + ) do_not_show_output = subprocess.DEVNULL process = subprocess.Popen( diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py index e5181691..e9afdaea 100644 --- a/autogpt/commands/file_operations.py +++ b/autogpt/commands/file_operations.py @@ -11,6 +11,7 @@ from requests.adapters import HTTPAdapter, Retry from autogpt.commands.command import command from autogpt.config import Config +from autogpt.logs import logger from autogpt.spinner import Spinner from autogpt.utils import readable_file_size @@ -106,25 +107,25 @@ def ingest_file( :param overlap: The number of overlapping characters between chunks, default is 200 """ try: - print(f"Working with file {filename}") + logger.info(f"Working with file {filename}") content = read_file(filename) content_length = len(content) - print(f"File length: {content_length} characters") + logger.info(f"File length: {content_length} characters") chunks = list(split_file(content, max_length=max_length, overlap=overlap)) num_chunks = len(chunks) for i, chunk in enumerate(chunks): - print(f"Ingesting chunk {i + 1} / {num_chunks} into memory") + logger.info(f"Ingesting chunk {i + 1} / {num_chunks} into memory") memory_to_add = ( f"Filename: {filename}\n" f"Content part#{i + 1}/{num_chunks}: {chunk}" ) memory.add(memory_to_add) - print(f"Done ingesting {num_chunks} chunks from {filename}.") + logger.info(f"Done ingesting {num_chunks} chunks from {filename}.") except Exception as e: - print(f"Error while ingesting file '{filename}': 
{str(e)}") + logger.info(f"Error while ingesting file '{filename}': {str(e)}") @command("write_to_file", "Write to file", '"filename": "", "text": ""') diff --git a/autogpt/commands/image_gen.py b/autogpt/commands/image_gen.py index 9ed0f44b..fbed067f 100644 --- a/autogpt/commands/image_gen.py +++ b/autogpt/commands/image_gen.py @@ -9,6 +9,7 @@ from PIL import Image from autogpt.commands.command import command from autogpt.config import Config +from autogpt.logs import logger CFG = Config() @@ -69,7 +70,7 @@ def generate_image_with_hf(prompt: str, filename: str) -> str: ) image = Image.open(io.BytesIO(response.content)) - print(f"Image Generated for prompt:{prompt}") + logger.info(f"Image Generated for prompt:{prompt}") image.save(filename) @@ -91,7 +92,7 @@ def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str: # Check for supported image sizes if size not in [256, 512, 1024]: closest = min([256, 512, 1024], key=lambda x: abs(x - size)) - print( + logger.info( f"DALL-E only supports image sizes of 256x256, 512x512, or 1024x1024. Setting to {closest}, was {size}." ) size = closest @@ -104,7 +105,7 @@ def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str: api_key=CFG.openai_api_key, ) - print(f"Image Generated for prompt:{prompt}") + logger.info(f"Image Generated for prompt:{prompt}") image_data = b64decode(response["data"][0]["b64_json"]) @@ -153,7 +154,7 @@ def generate_image_with_sd_webui( }, ) - print(f"Image Generated for prompt:{prompt}") + logger.info(f"Image Generated for prompt:{prompt}") # Save the image to disk response = response.json() diff --git a/autogpt/commands/web_playwright.py b/autogpt/commands/web_playwright.py index 4e388ded..70f19dee 100644 --- a/autogpt/commands/web_playwright.py +++ b/autogpt/commands/web_playwright.py @@ -1,10 +1,12 @@ """Web scraping commands using Playwright""" from __future__ import annotations +from autogpt.logs import logger + try: from playwright.sync_api import sync_playwright except ImportError: - print( + logger.info( "Playwright not installed. Please install it with 'pip install playwright' to use." 
) from bs4 import BeautifulSoup diff --git a/autogpt/json_utils/json_fix_general.py b/autogpt/json_utils/json_fix_general.py index 7010fa3b..e485aca1 100644 --- a/autogpt/json_utils/json_fix_general.py +++ b/autogpt/json_utils/json_fix_general.py @@ -9,6 +9,7 @@ from typing import Optional from autogpt.config import Config from autogpt.json_utils.utilities import extract_char_position +from autogpt.logs import logger CFG = Config() @@ -33,8 +34,7 @@ def fix_invalid_escape(json_to_load: str, error_message: str) -> str: json.loads(json_to_load) return json_to_load except json.JSONDecodeError as e: - if CFG.debug_mode: - print("json loads error - fix invalid escape", e) + logger.debug("json loads error - fix invalid escape", e) error_message = str(e) return json_to_load @@ -98,13 +98,11 @@ def correct_json(json_to_load: str) -> str: """ try: - if CFG.debug_mode: - print("json", json_to_load) + logger.debug("json", json_to_load) json.loads(json_to_load) return json_to_load except json.JSONDecodeError as e: - if CFG.debug_mode: - print("json loads error", e) + logger.debug("json loads error", e) error_message = str(e) if error_message.startswith("Invalid \\escape"): json_to_load = fix_invalid_escape(json_to_load, error_message) @@ -116,8 +114,7 @@ def correct_json(json_to_load: str) -> str: json.loads(json_to_load) return json_to_load except json.JSONDecodeError as e: - if CFG.debug_mode: - print("json loads error - add quotes", e) + logger.debug("json loads error - add quotes", e) error_message = str(e) if balanced_str := balance_braces(json_to_load): return balanced_str diff --git a/autogpt/json_utils/utilities.py b/autogpt/json_utils/utilities.py index fa604a42..58f5f8e4 100644 --- a/autogpt/json_utils/utilities.py +++ b/autogpt/json_utils/utilities.py @@ -49,9 +49,8 @@ def validate_json(json_object: object, schema_name: str) -> dict | None: for error in errors: logger.error(f"Error: {error.message}") - return None - if CFG.debug_mode: - print("The JSON object is valid.") + else: + logger.debug("The JSON object is valid.") return json_object diff --git a/autogpt/llm/chat.py b/autogpt/llm/chat.py index e0f0226d..64f0c4a7 100644 --- a/autogpt/llm/chat.py +++ b/autogpt/llm/chat.py @@ -185,9 +185,8 @@ def chat_with_ai( [create_chat_message("system", plugin_response)], model ) if current_tokens_used + tokens_to_add > send_token_limit: - if cfg.debug_mode: - print("Plugin response too long, skipping:", plugin_response) - print("Plugins remaining at stop:", plugin_count - i) + logger.debug("Plugin response too long, skipping:", plugin_response) + logger.debug("Plugins remaining at stop:", plugin_count - i) break current_context.append(create_chat_message("system", plugin_response)) @@ -227,5 +226,5 @@ def chat_with_ai( return assistant_reply except RateLimitError: # TODO: When we switch to langchain, this is built in - print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...") + logger.warn("Error: ", "API Rate Limit Reached. 
Waiting 10 seconds...") time.sleep(10) diff --git a/autogpt/llm/llm_utils.py b/autogpt/llm/llm_utils.py index 9a2400c7..cc4048d6 100644 --- a/autogpt/llm/llm_utils.py +++ b/autogpt/llm/llm_utils.py @@ -128,10 +128,9 @@ def create_chat_completion( num_retries = 10 warned_user = False - if cfg.debug_mode: - print( - f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}" - ) + logger.debug( + f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}" + ) for plugin in cfg.plugins: if plugin.can_handle_chat_completion( messages=messages, @@ -169,10 +168,9 @@ def create_chat_completion( ) break except RateLimitError: - if cfg.debug_mode: - print( - f"{Fore.RED}Error: ", f"Reached rate limit, passing...{Fore.RESET}" - ) + logger.debug( + f"{Fore.RED}Error: ", f"Reached rate limit, passing...{Fore.RESET}" + ) if not warned_user: logger.double_check( f"Please double check that you have setup a {Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. " @@ -184,11 +182,10 @@ def create_chat_completion( raise if attempt == num_retries - 1: raise - if cfg.debug_mode: - print( - f"{Fore.RED}Error: ", - f"API Bad gateway. Waiting {backoff} seconds...{Fore.RESET}", - ) + logger.debug( + f"{Fore.RED}Error: ", + f"API Bad gateway. Waiting {backoff} seconds...{Fore.RESET}", + ) time.sleep(backoff) if response is None: logger.typewriter_log( diff --git a/autogpt/logs.py b/autogpt/logs.py index c1ac126c..fcad9efa 100644 --- a/autogpt/logs.py +++ b/autogpt/logs.py @@ -10,7 +10,6 @@ from colorama import Fore, Style from autogpt.singleton import Singleton from autogpt.speech import say_text -from autogpt.utils import send_chat_message_to_user class Logger(metaclass=Singleton): @@ -83,8 +82,6 @@ class Logger(metaclass=Singleton): if speak_text and self.speak_mode: say_text(f"{title}. {content}") - send_chat_message_to_user(f"{title}. {content}") - if content: if isinstance(content, list): content = " ".join(content) @@ -103,6 +100,14 @@ class Logger(metaclass=Singleton): ): self._log(title, title_color, message, logging.DEBUG) + def info( + self, + message, + title="", + title_color="", + ): + self._log(title, title_color, message, logging.INFO) + def warn( self, message, @@ -114,11 +119,19 @@ class Logger(metaclass=Singleton): def error(self, title, message=""): self._log(title, Fore.RED, message, logging.ERROR) - def _log(self, title="", title_color="", message="", level=logging.INFO): + def _log( + self, + title: str = "", + title_color: str = "", + message: str = "", + level=logging.INFO, + ): if message: if isinstance(message, list): message = " ".join(message) - self.logger.log(level, message, extra={"title": title, "color": title_color}) + self.logger.log( + level, message, extra={"title": str(title), "color": str(title_color)} + ) def set_level(self, level): self.logger.setLevel(level) diff --git a/autogpt/memory/__init__.py b/autogpt/memory/__init__.py index c4eb4a07..a955a3e7 100644 --- a/autogpt/memory/__init__.py +++ b/autogpt/memory/__init__.py @@ -1,3 +1,4 @@ +from autogpt.logs import logger from autogpt.memory.local import LocalCache from autogpt.memory.no_memory import NoMemory @@ -10,7 +11,6 @@ try: supported_memory.append("redis") except ImportError: - # print("Redis not installed. Skipping import.") RedisMemory = None try: @@ -18,7 +18,6 @@ try: supported_memory.append("pinecone") except ImportError: - # print("Pinecone not installed. 
Skipping import.") PineconeMemory = None try: @@ -26,7 +25,6 @@ try: supported_memory.append("weaviate") except ImportError: - # print("Weaviate not installed. Skipping import.") WeaviateMemory = None try: @@ -34,7 +32,6 @@ try: supported_memory.append("milvus") except ImportError: - # print("pymilvus not installed. Skipping import.") MilvusMemory = None @@ -42,7 +39,7 @@ def get_memory(cfg, init=False): memory = None if cfg.memory_backend == "pinecone": if not PineconeMemory: - print( + logger.warn( "Error: Pinecone is not installed. Please install pinecone" " to use Pinecone as a memory backend." ) @@ -52,7 +49,7 @@ def get_memory(cfg, init=False): memory.clear() elif cfg.memory_backend == "redis": if not RedisMemory: - print( + logger.warn( "Error: Redis is not installed. Please install redis-py to" " use Redis as a memory backend." ) @@ -60,7 +57,7 @@ def get_memory(cfg, init=False): memory = RedisMemory(cfg) elif cfg.memory_backend == "weaviate": if not WeaviateMemory: - print( + logger.warn( "Error: Weaviate is not installed. Please install weaviate-client to" " use Weaviate as a memory backend." ) @@ -68,7 +65,7 @@ def get_memory(cfg, init=False): memory = WeaviateMemory(cfg) elif cfg.memory_backend == "milvus": if not MilvusMemory: - print( + logger.warn( "Error: pymilvus sdk is not installed." "Please install pymilvus to use Milvus or Zilliz Cloud as memory backend." ) diff --git a/autogpt/memory/redismem.py b/autogpt/memory/redismem.py index 6b512778..2a072a55 100644 --- a/autogpt/memory/redismem.py +++ b/autogpt/memory/redismem.py @@ -73,7 +73,7 @@ class RedisMemory(MemoryProviderSingleton): ), ) except Exception as e: - print("Error creating Redis search index: ", e) + logger.warn("Error creating Redis search index: ", e) existing_vec_num = self.redis.get(f"{cfg.memory_index}-vec_num") self.vec_num = int(existing_vec_num.decode("utf-8")) if existing_vec_num else 0 @@ -145,7 +145,7 @@ class RedisMemory(MemoryProviderSingleton): query, query_params={"vector": query_vector} ) except Exception as e: - print("Error calling Redis search: ", e) + logger.warn("Error calling Redis search: ", e) return None return [result.data for result in results.docs] diff --git a/autogpt/memory/weaviate.py b/autogpt/memory/weaviate.py index be29e1b9..0b4f58b5 100644 --- a/autogpt/memory/weaviate.py +++ b/autogpt/memory/weaviate.py @@ -4,6 +4,7 @@ from weaviate.embedded import EmbeddedOptions from weaviate.util import generate_uuid5 from autogpt.llm import get_ada_embedding +from autogpt.logs import logger from autogpt.memory.base import MemoryProviderSingleton @@ -35,7 +36,7 @@ class WeaviateMemory(MemoryProviderSingleton): ) ) - print( + logger.info( f"Weaviate Embedded running on: {url} with persistence path: {cfg.weaviate_embedded_path}" ) else: @@ -116,7 +117,7 @@ class WeaviateMemory(MemoryProviderSingleton): return [] except Exception as err: - print(f"Unexpected error {err=}, {type(err)=}") + logger.warn(f"Unexpected error {err=}, {type(err)=}") return [] def get_stats(self): diff --git a/autogpt/plugins.py b/autogpt/plugins.py index d03f0037..a5fe43d8 100644 --- a/autogpt/plugins.py +++ b/autogpt/plugins.py @@ -15,6 +15,7 @@ from auto_gpt_plugin_template import AutoGPTPluginTemplate from openapi_python_client.cli import Config as OpenAPIConfig from autogpt.config import Config +from autogpt.logs import logger from autogpt.models.base_open_ai_plugin import BaseOpenAIPlugin @@ -33,11 +34,10 @@ def inspect_zip_for_modules(zip_path: str, debug: bool = False) -> list[str]: with 
zipfile.ZipFile(zip_path, "r") as zfile: for name in zfile.namelist(): if name.endswith("__init__.py"): - if debug: - print(f"Found module '{name}' in the zipfile at: {name}") + logger.debug(f"Found module '{name}' in the zipfile at: {name}") result.append(name) - if debug and len(result) == 0: - print(f"Module '__init__.py' not found in the zipfile @ {zip_path}.") + if len(result) == 0: + logger.debug(f"Module '__init__.py' not found in the zipfile @ {zip_path}.") return result @@ -71,12 +71,12 @@ def fetch_openai_plugins_manifest_and_spec(cfg: Config) -> dict: if response.status_code == 200: manifest = response.json() if manifest["schema_version"] != "v1": - print( + logger.warn( f"Unsupported manifest version: {manifest['schem_version']} for {url}" ) continue if manifest["api"]["type"] != "openapi": - print( + logger.warn( f"Unsupported API type: {manifest['api']['type']} for {url}" ) continue @@ -84,11 +84,13 @@ def fetch_openai_plugins_manifest_and_spec(cfg: Config) -> dict: manifest, f"{openai_plugin_client_dir}/ai-plugin.json" ) else: - print(f"Failed to fetch manifest for {url}: {response.status_code}") + logger.warn( + f"Failed to fetch manifest for {url}: {response.status_code}" + ) except requests.exceptions.RequestException as e: - print(f"Error while requesting manifest from {url}: {e}") + logger.warn(f"Error while requesting manifest from {url}: {e}") else: - print(f"Manifest for {url} already exists") + logger.info(f"Manifest for {url} already exists") manifest = json.load(open(f"{openai_plugin_client_dir}/ai-plugin.json")) if not os.path.exists(f"{openai_plugin_client_dir}/openapi.json"): openapi_spec = openapi_python_client._get_document( @@ -98,7 +100,7 @@ def fetch_openai_plugins_manifest_and_spec(cfg: Config) -> dict: openapi_spec, f"{openai_plugin_client_dir}/openapi.json" ) else: - print(f"OpenAPI spec for {url} already exists") + logger.info(f"OpenAPI spec for {url} already exists") openapi_spec = json.load(open(f"{openai_plugin_client_dir}/openapi.json")) manifests[url] = {"manifest": manifest, "openapi_spec": openapi_spec} return manifests @@ -115,13 +117,13 @@ def create_directory_if_not_exists(directory_path: str) -> bool: if not os.path.exists(directory_path): try: os.makedirs(directory_path) - print(f"Created directory: {directory_path}") + logger.debug(f"Created directory: {directory_path}") return True except OSError as e: - print(f"Error creating directory {directory_path}: {e}") + logger.warn(f"Error creating directory {directory_path}: {e}") return False else: - print(f"Directory {directory_path} already exists") + logger.info(f"Directory {directory_path} already exists") return True @@ -159,7 +161,7 @@ def initialize_openai_plugins( config=_config, ) if client_results: - print( + logger.warn( f"Error creating OpenAPI client: {client_results[0].header} \n" f" details: {client_results[0].detail}" ) @@ -212,8 +214,7 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate for module in moduleList: plugin = Path(plugin) module = Path(module) - if debug: - print(f"Plugin: {plugin} Module: {module}") + logger.debug(f"Plugin: {plugin} Module: {module}") zipped_package = zipimporter(str(plugin)) zipped_module = zipped_package.load_module(str(module.parent)) for key in dir(zipped_module): @@ -240,9 +241,9 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate loaded_plugins.append(plugin) if loaded_plugins: - print(f"\nPlugins found: {len(loaded_plugins)}\n" "--------------------") + logger.info(f"\nPlugins 
found: {len(loaded_plugins)}\n" "--------------------") for plugin in loaded_plugins: - print(f"{plugin._name}: {plugin._version} - {plugin._description}") + logger.info(f"{plugin._name}: {plugin._version} - {plugin._description}") return loaded_plugins diff --git a/autogpt/processing/text.py b/autogpt/processing/text.py index 372e331d..9391e0a0 100644 --- a/autogpt/processing/text.py +++ b/autogpt/processing/text.py @@ -6,6 +6,7 @@ from selenium.webdriver.remote.webdriver import WebDriver from autogpt.config import Config from autogpt.llm import count_message_tokens, create_chat_completion +from autogpt.logs import logger from autogpt.memory import get_memory CFG = Config() @@ -86,7 +87,7 @@ def summarize_text( model = CFG.fast_llm_model text_length = len(text) - print(f"Text length: {text_length} characters") + logger.info(f"Text length: {text_length} characters") summaries = [] chunks = list( @@ -99,7 +100,7 @@ def summarize_text( for i, chunk in enumerate(chunks): if driver: scroll_to_percentage(driver, scroll_ratio * i) - print(f"Adding chunk {i + 1} / {len(chunks)} to memory") + logger.info(f"Adding chunk {i + 1} / {len(chunks)} to memory") memory_to_add = f"Source: {url}\n" f"Raw content part#{i + 1}: {chunk}" @@ -108,7 +109,7 @@ def summarize_text( messages = [create_message(chunk, question)] tokens_for_chunk = count_message_tokens(messages, model) - print( + logger.info( f"Summarizing chunk {i + 1} / {len(chunks)} of length {len(chunk)} characters, or {tokens_for_chunk} tokens" ) @@ -117,7 +118,7 @@ def summarize_text( messages=messages, ) summaries.append(summary) - print( + logger.info( f"Added chunk {i + 1} summary to memory, of length {len(summary)} characters" ) @@ -125,7 +126,7 @@ def summarize_text( memory.add(memory_to_add) - print(f"Summarized {len(chunks)} chunks.") + logger.info(f"Summarized {len(chunks)} chunks.") combined_summary = "\n".join(summaries) messages = [create_message(combined_summary, question)] diff --git a/autogpt/setup.py b/autogpt/setup.py index ddc7c6bf..c71fb221 100644 --- a/autogpt/setup.py +++ b/autogpt/setup.py @@ -119,7 +119,7 @@ def generate_aiconfig_manual() -> AIConfig: "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage" " multiple businesses autonomously'", ) - print("Enter nothing to load defaults, enter nothing when finished.", flush=True) + logger.info("Enter nothing to load defaults, enter nothing when finished.") ai_goals = [] for i in range(5): ai_goal = utils.clean_input(f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: ") @@ -139,7 +139,7 @@ def generate_aiconfig_manual() -> AIConfig: Fore.GREEN, "For example: $1.50", ) - print("Enter nothing to let the AI run without monetary limit", flush=True) + logger.info("Enter nothing to let the AI run without monetary limit") api_budget_input = utils.clean_input( f"{Fore.LIGHTBLUE_EX}Budget{Style.RESET_ALL}: $" ) diff --git a/autogpt/speech/eleven_labs.py b/autogpt/speech/eleven_labs.py index ea84efd8..06806b56 100644 --- a/autogpt/speech/eleven_labs.py +++ b/autogpt/speech/eleven_labs.py @@ -69,6 +69,8 @@ class ElevenLabsSpeech(VoiceBase): Returns: bool: True if the request was successful, False otherwise """ + from autogpt.logs import logger + tts_url = ( f"https://api.elevenlabs.io/v1/text-to-speech/{self._voices[voice_index]}" ) @@ -81,6 +83,6 @@ class ElevenLabsSpeech(VoiceBase): os.remove("speech.mpeg") return True else: - print("Request failed with status code:", response.status_code) - print("Response content:", response.content) + logger.warn("Request failed 
with status code:", response.status_code) + logger.info("Response content:", response.content) return False diff --git a/autogpt/utils.py b/autogpt/utils.py index 6efaceec..601254d7 100644 --- a/autogpt/utils.py +++ b/autogpt/utils.py @@ -5,27 +5,17 @@ import yaml from colorama import Fore from git.repo import Repo +from autogpt.logs import logger + # Use readline if available (for clean_input) try: import readline -except: +except ImportError: pass from autogpt.config import Config -def send_chat_message_to_user(report: str): - cfg = Config() - if not cfg.chat_messages_enabled: - return - for plugin in cfg.plugins: - if not hasattr(plugin, "can_handle_report"): - continue - if not plugin.can_handle_report(): - continue - plugin.report(report) - - def clean_input(prompt: str = "", talk=False): try: cfg = Config() @@ -58,12 +48,12 @@ def clean_input(prompt: str = "", talk=False): return plugin_response # ask for input, default when just pressing Enter is y - print("Asking user via keyboard...") + logger.info("Asking user via keyboard...") answer = input(prompt) return answer except KeyboardInterrupt: - print("You interrupted Auto-GPT") - print("Quitting...") + logger.info("You interrupted Auto-GPT") + logger.info("Quitting...") exit(0) diff --git a/data_ingestion.py b/data_ingestion.py index b89a33da..d400dc96 100644 --- a/data_ingestion.py +++ b/data_ingestion.py @@ -10,11 +10,14 @@ cfg = Config() def configure_logging(): logging.basicConfig( - filename="log-ingestion.txt", filemode="a", format="%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s", datefmt="%H:%M:%S", level=logging.DEBUG, + handlers=[ + logging.FileHandler(filename="log-ingestion.txt"), + logging.StreamHandler(), + ], ) return logging.getLogger("AutoGPT-Ingestion") @@ -26,12 +29,13 @@ def ingest_directory(directory, memory, args): :param directory: The directory containing the files to ingest :param memory: An object with an add() method to store the chunks in memory """ + global logger try: files = search_files(directory) for file in files: ingest_file(file, memory, args.max_length, args.overlap) except Exception as e: - print(f"Error while ingesting directory '{directory}': {str(e)}") + logger.error(f"Error while ingesting directory '{directory}': {str(e)}") def main() -> None: @@ -69,24 +73,22 @@ def main() -> None: # Initialize memory memory = get_memory(cfg, init=args.init) - print("Using memory of type: " + memory.__class__.__name__) + logger.debug("Using memory of type: " + memory.__class__.__name__) if args.file: try: ingest_file(args.file, memory, args.max_length, args.overlap) - print(f"File '{args.file}' ingested successfully.") + logger.info(f"File '{args.file}' ingested successfully.") except Exception as e: logger.error(f"Error while ingesting file '{args.file}': {str(e)}") - print(f"Error while ingesting file '{args.file}': {str(e)}") elif args.dir: try: ingest_directory(args.dir, memory, args) - print(f"Directory '{args.dir}' ingested successfully.") + logger.info(f"Directory '{args.dir}' ingested successfully.") except Exception as e: logger.error(f"Error while ingesting directory '{args.dir}': {str(e)}") - print(f"Error while ingesting directory '{args.dir}': {str(e)}") else: - print( + logger.warn( "Please provide either a file path (--file) or a directory name (--dir)" " inside the auto_gpt_workspace directory as input." 
)

From 064ac5c7425de91a75c3ef2f2911bb7d942b223f Mon Sep 17 00:00:00 2001
From: Luke K <2609441+pr-0f3t@users.noreply.github.com>
Date: Sun, 30 Apr 2023 06:37:41 +0100
Subject: [PATCH 200/222] Refactor AIConfig to Sanitize Input for Goal
 Parameters (#3492)

* Update remove_color_codes to handle non-string input

The `remove_color_codes` function now accepts any type of input that can be
cast to a string. Previously, it was only accepting string input and not
casting non-string types to string, which was causing errors in some cases.
The changes were made to both logs.py and its corresponding test file.

* Refactor AIConfig to Sanitize Input for Goal Parameters

Details:
- Modified `ai_config.py` to correctly handle and sanitize user input for AI
  goals and convert them to formatted strings, to fix an issue where some
  specially formatted ai_settings.yaml files were causing goals to load as
  list[dict]
- `test_ai_config.py` includes a test for the `sanitize_input` function in
  `AIConfig` class.
- Removed unnecessary tests from `test_logs.py`

* Update for readability

* Update for readability

* Updates for conciseness

* Updated tests to confirm AIConfig saves goals as strings

* Fixed trailing space at end of line

---------

Co-authored-by: Luke Kyohere
Co-authored-by: James Collins
---
 autogpt/config/ai_config.py |  9 ++++++--
 tests/test_ai_config.py     | 45 +++++++++++++++++++++++++++++++++++++
 2 files changed, 52 insertions(+), 2 deletions(-)
 create mode 100644 tests/test_ai_config.py

diff --git a/autogpt/config/ai_config.py b/autogpt/config/ai_config.py
index d662429f..88acbfe6 100644
--- a/autogpt/config/ai_config.py
+++ b/autogpt/config/ai_config.py
@@ -7,7 +7,7 @@ from __future__ import annotations
 import os
 import platform
 from pathlib import Path
-from typing import Optional, Type
+from typing import Any, Optional, Type
 
 import distro
 import yaml
@@ -79,7 +79,12 @@ class AIConfig:
 
         ai_name = config_params.get("ai_name", "")
         ai_role = config_params.get("ai_role", "")
-        ai_goals = config_params.get("ai_goals", [])
+        ai_goals = [
+            str(goal).strip("{}").replace("'", "").replace('"', "")
+            if isinstance(goal, dict)
+            else str(goal)
+            for goal in config_params.get("ai_goals", [])
+        ]
         api_budget = config_params.get("api_budget", 0.0)  # type: Type[AIConfig]
 
         return AIConfig(ai_name, ai_role, ai_goals, api_budget)
diff --git a/tests/test_ai_config.py b/tests/test_ai_config.py
new file mode 100644
index 00000000..a9fcdad6
--- /dev/null
+++ b/tests/test_ai_config.py
@@ -0,0 +1,45 @@
+from autogpt.config.ai_config import AIConfig
+
+"""
+Test cases for the AIConfig class, which loads the AI configuration
+settings from a YAML file.
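+
+For example, a YAML goal entry like `Goal 1: Make a sandwich` parses to the
+dict {'Goal 1': 'Make a sandwich'}; AIConfig.load is expected to flatten it
+to the plain string "Goal 1: Make a sandwich".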
+""" + + +def test_goals_are_always_lists_of_strings(tmp_path): + """Test if the goals attribute is always a list of strings.""" + + yaml_content = """ +ai_goals: +- Goal 1: Make a sandwich +- Goal 2, Eat the sandwich +- Goal 3 - Go to sleep +- "Goal 4: Wake up" +ai_name: McFamished +ai_role: A hungry AI +api_budget: 0.0 +""" + config_file = tmp_path / "ai_settings.yaml" + config_file.write_text(yaml_content) + + ai_config = AIConfig.load(config_file) + + assert len(ai_config.ai_goals) == 4 + assert ai_config.ai_goals[0] == "Goal 1: Make a sandwich" + assert ai_config.ai_goals[1] == "Goal 2, Eat the sandwich" + assert ai_config.ai_goals[2] == "Goal 3 - Go to sleep" + assert ai_config.ai_goals[3] == "Goal 4: Wake up" + + config_file.write_text("") + ai_config.save(config_file) + + yaml_content2 = """ai_goals: +- 'Goal 1: Make a sandwich' +- Goal 2, Eat the sandwich +- Goal 3 - Go to sleep +- 'Goal 4: Wake up' +ai_name: McFamished +ai_role: A hungry AI +api_budget: 0.0 +""" + assert config_file.read_text() == yaml_content2 From dd96d98fa1522619f89cdf66e5b86d95be5981e4 Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Sun, 30 Apr 2023 01:56:57 -0700 Subject: [PATCH 201/222] Feature/test summarization against memory challenge (#3567) Co-authored-by: Toran Bruce Richards --- autogpt/agent/agent.py | 4 + autogpt/llm/chat.py | 87 +- autogpt/memory_management/summary_memory.py | 112 ++ ...ve_memory_trimmed_from_context_window.yaml | 170 +++ .../test_memory_challenge_a.yaml | 1094 ++++--------- .../test_memory_challenge_b.yaml | 1348 ++++++++--------- .../memory/test_memory_challenge_a.py | 6 +- .../memory/test_memory_challenge_b.py | 2 +- .../test_browse_website.yaml | 1318 +++++----------- .../test_write_file/test_write_file.yaml | 185 ++- tests/unit/test_chat.py | 10 +- tests/vcr/openai_filter.py | 5 +- 12 files changed, 1847 insertions(+), 2494 deletions(-) create mode 100644 autogpt/memory_management/summary_memory.py diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index af3b8205..bd105932 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -56,6 +56,10 @@ class Agent: cfg = Config() self.ai_name = ai_name self.memory = memory + self.summary_memory = ( + "I was created." 
# Initial memory necessary to avoid hallucination
+        )
+        self.last_memory_index = 0
         self.full_message_history = full_message_history
         self.next_action_count = next_action_count
         self.command_registry = command_registry
diff --git a/autogpt/llm/chat.py b/autogpt/llm/chat.py
index 64f0c4a7..b4e6b1a4 100644
--- a/autogpt/llm/chat.py
+++ b/autogpt/llm/chat.py
@@ -12,6 +12,10 @@ from autogpt.logs import logger
 from autogpt.memory_management.store_memory import (
     save_memory_trimmed_from_context_window,
 )
+from autogpt.memory_management.summary_memory import (
+    get_newly_trimmed_messages,
+    update_running_summary,
+)
 
 cfg = Config()
 
@@ -36,10 +40,10 @@ def generate_context(prompt, relevant_memory, full_message_history, model):
         create_chat_message(
             "system", f"The current time and date is {time.strftime('%c')}"
         ),
-        create_chat_message(
-            "system",
-            f"This reminds you of these events from your past:\n{relevant_memory}\n\n",
-        ),
+        # create_chat_message(
+        #     "system",
+        #     f"This reminds you of these events from your past:\n{relevant_memory}\n\n",
+        # ),
     ]
 
     # Add messages from the full message history until we reach the token limit
@@ -81,21 +85,21 @@ def chat_with_ai(
     """
     model = cfg.fast_llm_model  # TODO: Change model from hardcode to argument
     # Reserve 1000 tokens for the response
-    logger.debug(f"Token limit: {token_limit}")
     send_token_limit = token_limit - 1000
 
-    if len(full_message_history) == 0:
-        relevant_memory = ""
-    else:
-        recent_history = full_message_history[-5:]
-        shuffle(recent_history)
-        relevant_memories = permanent_memory.get_relevant(
-            str(recent_history), 5
-        )
-        if relevant_memories:
-            shuffle(relevant_memories)
-        relevant_memory = str(relevant_memories)
+    # if len(full_message_history) == 0:
+    #     relevant_memory = ""
+    # else:
+    #     recent_history = full_message_history[-5:]
+    #     shuffle(recent_history)
+    #     relevant_memories = permanent_memory.get_relevant(
+    #         str(recent_history), 5
+    #     )
+    #     if relevant_memories:
+    #         shuffle(relevant_memories)
+    #     relevant_memory = str(relevant_memories)
+    relevant_memory = ""
 
     logger.debug(f"Memory Stats: {permanent_memory.get_stats()}")
 
     (
@@ -105,33 +109,36 @@ def chat_with_ai(
         current_context,
     ) = generate_context(prompt, relevant_memory, full_message_history, model)
 
-    while current_tokens_used > 2500:
-        # remove memories until we are under 2500 tokens
-        relevant_memory = relevant_memory[:-1]
-        (
-            next_message_to_add_index,
-            current_tokens_used,
-            insertion_index,
-            current_context,
-        ) = generate_context(
-            prompt, relevant_memory, full_message_history, model
-        )
+    # while current_tokens_used > 2500:
+    #     # remove memories until we are under 2500 tokens
+    #     relevant_memory = relevant_memory[:-1]
+    #     (
+    #         next_message_to_add_index,
+    #         current_tokens_used,
+    #         insertion_index,
+    #         current_context,
+    #     ) = generate_context(
+    #         prompt, relevant_memory, full_message_history, model
+    #     )
 
     current_tokens_used += count_message_tokens(
         [create_chat_message("user", user_input)], model
     )  # Account for user input (appended later)
 
+    current_tokens_used += 500  # Account for memory (appended later) TODO: The final memory may be less than 500 tokens
+
+    # Add Messages until the token limit is reached or there are no more messages to add.
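+    # Messages are added from most recent to oldest; once the send budget is
+    # exhausted the loop below stops, and the trimmed messages are folded into
+    # agent.summary_memory further down instead of being dropped outright.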
while next_message_to_add_index >= 0:
         # print (f"CURRENT TOKENS USED: {current_tokens_used}")
         message_to_add = full_message_history[next_message_to_add_index]
 
         tokens_to_add = count_message_tokens([message_to_add], model)
         if current_tokens_used + tokens_to_add > send_token_limit:
-            save_memory_trimmed_from_context_window(
-                full_message_history,
-                next_message_to_add_index,
-                permanent_memory,
-            )
+            # save_memory_trimmed_from_context_window(
+            #     full_message_history,
+            #     next_message_to_add_index,
+            #     permanent_memory,
+            # )
             break
 
         # Add the most recent message to the start of the current context,
@@ -146,6 +153,22 @@ def chat_with_ai(
         # Move to the next most recent message in the full message history
         next_message_to_add_index -= 1
 
+    # Insert Memories
+    if len(full_message_history) > 0:
+        (
+            newly_trimmed_messages,
+            agent.last_memory_index,
+        ) = get_newly_trimmed_messages(
+            full_message_history=full_message_history,
+            current_context=current_context,
+            last_memory_index=agent.last_memory_index,
+        )
+        agent.summary_memory = update_running_summary(
+            current_memory=agent.summary_memory,
+            new_events=newly_trimmed_messages,
+        )
+        current_context.insert(insertion_index, agent.summary_memory)
+
     api_manager = ApiManager()
     # inform the AI about its remaining budget (if it has one)
     if api_manager.get_total_budget() > 0.0:
diff --git a/autogpt/memory_management/summary_memory.py b/autogpt/memory_management/summary_memory.py
new file mode 100644
index 00000000..9e612637
--- /dev/null
+++ b/autogpt/memory_management/summary_memory.py
@@ -0,0 +1,112 @@
+import json
+from typing import Dict, List, Tuple
+
+from autogpt.config import Config
+from autogpt.llm.llm_utils import create_chat_completion
+
+cfg = Config()
+
+
+def get_newly_trimmed_messages(
+    full_message_history: List[Dict[str, str]],
+    current_context: List[Dict[str, str]],
+    last_memory_index: int,
+) -> Tuple[List[Dict[str, str]], int]:
+    """
+    This function returns a list of dictionaries contained in full_message_history
+    with an index higher than last_memory_index that are absent from current_context.
+
+    Args:
+        full_message_history (list): A list of dictionaries representing the full message history.
+        current_context (list): A list of dictionaries representing the current context.
+        last_memory_index (int): An integer representing the previous index.
+
+    Returns:
+        list: A list of dictionaries that are in full_message_history with an index higher than last_memory_index and absent from current_context.
+        int: The new index value for use in the next loop.
+    """
+    # Select messages in full_message_history with an index higher than last_memory_index
+    new_messages = [
+        msg for i, msg in enumerate(full_message_history) if i > last_memory_index
+    ]
+
+    # Remove messages that are already present in current_context
+    new_messages_not_in_context = [
+        msg for msg in new_messages if msg not in current_context
+    ]
+
+    # Find the index of the last message processed
+    new_index = last_memory_index
+    if new_messages_not_in_context:
+        last_message = new_messages_not_in_context[-1]
+        new_index = full_message_history.index(last_message)
+
+    return new_messages_not_in_context, new_index
+
+
+def update_running_summary(current_memory: str, new_events: List[Dict]) -> str:
+    """
+    This function takes a list of dictionaries representing new events and combines them with the current summary,
+    focusing on key and potentially important information to remember. The updated summary is returned in a message
+    formatted in the 1st person past tense.
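+    Assistant entries are relabeled as "you" and their "thoughts" payloads are
+    stripped, system entries become "your computer", and user messages are
+    dropped before the summary is generated.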
+ + Args: + new_events (List[Dict]): A list of dictionaries containing the latest events to be added to the summary. + + Returns: + str: A message containing the updated summary of actions, formatted in the 1st person past tense. + + Example: + new_events = [{"event": "entered the kitchen."}, {"event": "found a scrawled note with the number 7"}] + update_running_summary(new_events) + # Returns: "This reminds you of these events from your past: \nI entered the kitchen and found a scrawled note saying 7." + """ + # Replace "assistant" with "you". This produces much better first person past tense results. + for event in new_events: + if event["role"].lower() == "assistant": + event["role"] = "you" + # Remove "thoughts" dictionary from "content" + content_dict = json.loads(event["content"]) + if "thoughts" in content_dict: + del content_dict["thoughts"] + event["content"] = json.dumps(content_dict) + elif event["role"].lower() == "system": + event["role"] = "your computer" + # Delete all user messages + elif event["role"] == "user": + new_events.remove(event) + + # This can happen at any point during execturion, not just the beginning + if len(new_events) == 0: + new_events = "Nothing new happened." + + prompt = f'''Your task is to create a concise running summary of actions and information results in the provided text, focusing on key and potentially important information to remember. + +You will receive the current summary and the your latest actions. Combine them, adding relevant key information from the latest development in 1st person past tense and keeping the summary concise. + +Summary So Far: +""" +{current_memory} +""" + +Latest Development: +""" +{new_events} +""" +''' + + messages = [ + { + "role": "user", + "content": prompt, + } + ] + + current_memory = create_chat_completion(messages, cfg.fast_llm_model) + + message_to_return = { + "role": "system", + "content": f"This reminds you of these events from your past: \n{current_memory}", + } + + return message_to_return diff --git a/tests/integration/cassettes/test_memory_management/test_save_memory_trimmed_from_context_window.yaml b/tests/integration/cassettes/test_memory_management/test_save_memory_trimmed_from_context_window.yaml index 1ad25810..bb29c4d9 100644 --- a/tests/integration/cassettes/test_memory_management/test_save_memory_trimmed_from_context_window.yaml +++ b/tests/integration/cassettes/test_memory_management/test_save_memory_trimmed_from_context_window.yaml @@ -332,4 +332,174 @@ interactions: status: code: 200 message: OK +- request: + body: '{"input": ["Assistant Reply: { \"thoughts\": { \"text\": \"thoughts\", \"reasoning\": + \"reasoning\", \"plan\": \"plan\", \"criticism\": \"criticism\", \"speak\": + \"speak\" }, \"command\": { \"name\": \"google\", \"args\": + { \"query\": \"google_query\" } } } Result: None Human + Feedback:Command Result: Important Information."], "model": "text-embedding-ada-002", + "encoding_format": "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '483' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1R6WxOyurbl+/4Vq9YrvUtEJWG9IfeLJihesKurCxC5iSCQBHLq/Pcu/E6d7n7x + AfFCMuaYY4yZ//jXX3/93SZVlo5///PX3+9yGP/+H8u1ZzzGf//z1//8119//fXXf/xe/787sybJ + ns/yk/9u/71Zfp7Z9Pc/f4n/feX/3vTPX3/fopdD0flZVNPUPC8yT+sT3gt8pY/uh6XKIzl8CTzP + 74Rhggh4m3hNHTU6VhN7kC18XyqDotgb+zmfIhk+DKen3kq1uXg3HwJceS8R25JtA15/gkgZ7UnA + 
TptGFXfomsGpRFfq7XDr94G9VxVzjTbUY1RLlu+XQXJ53LE261NCuyg4w6QiDZHiXKrKTRMg+KJl + RuBWrkPyiQQGU2j55ASHhnM1HnIomOyK/UP46VlvDDGM50TFfqDyiqUNOyuv9Lahhye6hzPnWQTX + 0DngC2N2zxHqDMiu8I6RNGxBvy4uKRxflxRn7lr0yfd0G6D9kXSMwuTT8yG7s13IOgHbO5FzPiY1 + gUPXrSiGmtF3X3RSYWolBLEi6qohbVigrDwhJErOdX924KlWtIgcqepU34SIL3sLt1W6RYA/hpBd + XlYN7qJ2JXxb0rAtJOBAr1hv0EbURZ88Qa3CO68k7Hjw40+nMUeKEphbspncgg9QLSM4xqs3Vldq + 2Q+N4auwuBY11W3fqDYwfjLZQOOZ7vX+0k8nZT9AZH0YdYzjPpl1ZEiwdpQNUjYbLZwf5gZBr0EE + yXfb1ScRHBFEb4QQUJTGZ6etZ0E8ZQeq7nDJSSh3AVTF3UR1cQQ9f/Rghrt9s8HqIbgl3dGzIazi + zZaqvQjC6fM+11AAnUH9G90l8yRLGdRbsSJSo0nhDx874yTL2DtEVcjelmDJT1utcDaLec/ZfmvA + q9po1EykIZnDzhRhfMgDnJovE4xm6NTQEsIj+azevJpv+HSAard6oM333erTls0GWGnUwK7DN9W4 + G0wLNifhjI3tM+x5FRdQcTV5Qq0ajRV7R48cesn5RtYAv8NJj9sWyhZbY0MqPsm8CzADv/U/lPtJ + 58q9HGQeHWO0RpHaSyvCNeW9eiUYqXcnmd9lxIAZrwKkMA2B6f18OHA9SAoBtI56LhFPlsVTOC37 + U4SDaHcGkG+WgS1SQp9kzrlUhq5fYZWlnc+d/BLAfZhjanrVwOe5oRA+SHTCqLmvknnclNqqt8SC + PrEE+Dhf1Bk2gD6wE+GGz1BKLEj6JsbHfez5bGUypnjl7Ua9ZwX0Sex7AhJpeyTysy8BewRWBpUU + vwhJd2FPuDHJ8Ni+Juyvdmd/krtYkyV7ulJNqO468UPYwMo4R3SfayEgD3vnycA/a9R7H7Vqs5dg + Bm/9l1L9/IbVpE4lgsVoaTjQFKlqjuMjg7f0nqMdqu/JFL/cFFTxekujyH75v/oGIJ4q6th2mHDz + +4kg/OgFKRa804MxEvg9zh49qmmt840Xy2B0zk9qW49rwkmbOFAJ6js2d/6pkqxSJNAs9YAoJ6r3 + kzp9EVhfZwGrRdT1k51oGiw0KaWOcDcr8aoEKVzwgrVCf3N2TcNSMYm3wQi1Rz6JwERwT9071a/p + N6GMpY2MOqPBL1xs9bFwijOM5Rbj+4m1nKNMa2C9P9rk/TG3Pnt9DwN0b1ZEfXx89Pw5PD14LjSb + yDa79mx3r6Bsf0Qd8chnCZ3dqYSZc3FJMSeFzp6lHgAxkjC1nncjHGCgy5CBtUXtjXnkE1lFIjSb + yMbJBuX6JB7aARhXfiav/vjWp1XHY8ir9kQtfPLAdNzwmyy9wQXJFLbJYH0DWV7qjXqm2PXvfDgF + UIZ9jTjTuoocTm4OdkOFyMZ4dsngzucblDo+4MN12FSD7YYQipvvih78wOxJJq1UmCm5TdXhc03m + /KWk8NrnD2qiSK1Y9Vx9wRoRAXt3u/PnPAgh9ByLUkshVTIJxz6TrbI1sakIfcIOm5xAaY0qatw8 + J2HnS2wASyqf1Oi9kPPrVhXhFHfSgvfE5/k1rJXUehBqrjOz38jn9Ca3uxWhe20kFWvRqoRSLECs + CyHWqcwfZ+iO3p2sG/us98q9JPC7YgVF/u7DZ8N/zMBxpZii4/fDl342w+YxaljXbwFgZeh7snLJ + KMY38wQ2xf5ogU14KxEzh7QfLRqVcH1lAjZipfQndJPiHx4w/kSnkHu7yVHgtNphu2GXZFY/6gVe + lCimzjXPOVvpkgh2R5LRvQwswPZP14Af/J6wrl7uOnNuWgzkx0vBfgfUipw0N4LwYLvYzldiz/N6 + bUH3ARn1TXoE8+68J4pyjCk2Nd32e+IGDsw3jzeNEncLSK7OKRDM+UpxbyrJV+G5BovhXWEru36T + WX1bMTQPTo5NfT/qfE4tA0p3o6aHVef5ZK8HN3jt9zLe36ioT+uWfeGzEABGluPobGKrA6wvMcH7 + 9GqD4RX3B3BzvxE+HmXK5yGJLVlzYwEfsVD1VC4UCU6NMWLrLEh85CwgCtiCErvtgVWt8662CkPy + HW220hGw2yNP4b19Z+RtstDnJWEZHJPMoOb82XDii1MOtnPYkt0pVMJB6FoGX5K9wobmlLwvj6AE + 76AdcLgtcULWaSrCQGq93//3OV3vG8iFuqX7jvOeDF/ewMIaRGp06APGBDxi+MkOGGuf2e8JausU + quQWYLyJJT6sdEEEXzMH+IilBLBG7FKoSdIB47TzK/4ZX7J8bJ8T3UvOUZ/XIzsr95paGEXWA/Dp + UNfwZGYyVY8oDbnnngcZefCD1dqmVafvqhaEN2Ri119PITkmkgZHVUfY2vf3ZNA+6xo4JtTwq6qU + P3gBjzL40iDuqorI2daD/WnekTk7RDrHj2sDz+28IcL+SkJO7NsBiunwodqqqwFd9AJY+IKsiHzS + 6VtcCZAe64nMq67m/VWytsr3yDycLHphoNtXAOyAGdT8mJE+m1Y9K5aRW/R4j5Jq4UNBeU6Bt/QD + Ho5XJrYQN/qVwPbtcdFD3xvklTuTebN6huyqRCmkFVSp8RxafT6dShk8DK/HnpRrfp9LQQmOg9hS + zXicQbeK7hDGssvQSrXVZD3Z8Q1UzZliJznlPW9ldIH6cevTxM+PlfTZgQFe4l1NUZ2EIa3iQgC9 + 9vwifo9uIV+d1wN0+LukjisUPnXy9AzyTfJGz12rhRsv+Zwh/5QddS3jVJEvbRtwFY4Fja/6OpkN + /8RgkcOEquWN9ryYqhKa9tSRKcovPquemy/YyuWGuhp+Av7a6Q0kJj5QdKhjn0MQE3AKkvOfz7M5 + K1vF3mdrJFbtxR/vMtOU7bFxkLD4iW9OMwi6NKsRsPsdmL5wXYI6KG/YfzQfn83K+6vUczki/s4o + 6OazJcK7NlBq4Ab1FMZXBndPvCK7avAq3s/NGQYX94JE1DoVHQ86gnorVWitKrtw8gwnBZMInngv + tGdAUeY10E6TA2EGkXpuRbEE1fLQIUF/lDqtvnomE9euETDpkfO19kiBgI4SNaIdCZlyDCQ4Cv6B + VGveJQufDUr9MSVq80DW5wM75NB9v3dk/epVf1P5TgYPfaVR1RxgNY1zGUH3jFTyDty8mstdfIEn + 93omKVKmnt22fQZKH+3JdtFXvVa+BTB8nYyej6smIQLeavBPP86uXjgDr/hC13dU+uPb8Zq1FsRM + GSl6rorkexxPKWisKqF+EYZ6W9HEgJ8zf2C0PC/fWzcV5quviPXvye03fcAOskbnA6psHQL+rQz2 + 
01dYj7Ghc7ZnhrL0M7wvnKKnWfc1YBz5D4zI3uK8liYZuhZ3yXolbxKiWnoEvcm3iLTxumoQdt4Z + yKh4Yzu9lH1XWp8bXNYXY30/h81aOTEQnqsV9fy3m7CWwgyGxRrhoxYFPqnfvgiFDRvoLclg9X1d + VajYwWwgmB2UhFy1coaege5k3uzvCdOvnQrXloCwxa9rPudTICvZ95KjMeiuYMr0eAupOzywfau0 + iknnaw1HmwuLf+l0XpJtKluqi7HabWky3Qa73Sr0YVGNXvqEi+tSgEUtcuq/HQu0YeGUMKmGhh4K + 3dene3epYXnzHti5ZRYQv+4pVqbGGolSwHVS39csUqrkcscnMxWr+Tg+Ujjf43B5XqYv+BTBhMeM + rJ3XQ59zeoNQf1QXtE1Oas/w6uvB8+PVEvDMViEr3i8ChgJgqn7cLuS1laSy3PID9jZZ69Ov4c7A + V7MrPSq5XI07QBoonk4T9fvNoZrCyzAAKNg5PeZc1zfppDkQIWmF1lc5D6crg1+QEbTFqBiO4eB/ + 5zO8CoOK789zW01FpLGfHqUvTR9ClkonBuWnJJLVTGvA3NMzgsAPNDQnZ5NvhlH1lMva9ala27if + b8U4w5dwRxhd0L6X/HlVA/O9a7G+y9fJbOf7r3w9AICNXfKouAtrBMPYelJPUxJAgEMH8A6+w8/v + 6iTqOwd2VVkTZR9//blB3hl0fXghEzJKfURrJ4DgBnwiJsDRx/M2HaBFFIEiDxSgrZNDA/eVzjC+ + 2j5Y+9ktg6e03KGJ3ceEX5sxAj2fAuqxj9yzraeUYNH/2L9OPBykYmWAvhZybNpiwydDlFoIJmem + etFBfXxePxl8SeYK+9tV7lN19ylhaW08/OO7yQsLAQqrIqWaH+Qhf+ttJL+PT4NmdcLDrjWuLZS3 + VoL3F+74U1R/Gnh+Si/qnkIlIdnnWcLr3DywRm5myNMi/8Ky4CX2x/ZQzcXQbyEP0i8+2n7Haez4 + M4Ty0aHGuioSlsR7SdH2X0Td3SvQKUKFofz46YteST9rdD8r7vuzI1BvMp+l0mOG2zTe0r0VxxWr + /d6Ac7MTqd19Sp+VetEqb34XqRraFZ+f/rGBw9qQ8D7UHH/hx/inf5d6aQBrg10GR8E9/MmP6PMY + e5BI+Uz44vfyzctQ4Qd/JrLkJ/1M2R4CdR0Fv/WrJrR2znCpL6qNwaYf7N0cw0uZ6Wi3ewX+FJyC + M2wQ+yJZUxI+tbOrwsW/UJfv0pD89OAFKweyWfKb2V4ZX7Dw4+In8op9wroEBtycSOevp4QDUgfw + tK9LqhaRWzEY+FvI0PZOnyda9VMVPmdwRlWAJNq6YP20TAFKxKko3ptW8k2FlwX53k/QuoNvTsHL + Y3CXJx8EorbwGWbqF4p7FuDjwh+0e68kqCZOR7PFH9DKRTl8m+RC91apJZLjAwdepWzCtv7FiYhX + XwfGezPG1Pk0/vAgwUVxt1OOzZ9eDbMKwuzu7LAty/uKH9aGB8LCe1L19XH7GblFCpf+SJ01jXzy + MFcHeHkFCdmF98mfrmqG/uh999D6yXrlbSOwZtobbR/HAYys125Qr6GPo6P0SHiIvQyGt4NJ1be5 + rsgNPw5gdZV7ii2p8VnQJy1Q3PJOVoPw9kdlSjLgPIcZ+3TIQ17sTQNOZ2CRpf/3w7e9xPB2LBlp + 9fiTzAA4Bvwo6mrJD8uEb+/uDdqCrJFNFqCKb7qrDFfaaGBLp76+IRuxVcJzsSJzzU5g0eslgNKF + Ud3WU8DKcmjBJXTEBY8fzk5t3gBzZ2P882stuxWHH38TLo5JP2fSSgMfBiIEPe8O6OG298BJmnMC + +09djY8ClnCvXlp6cdeIr6P6U//05sLXjr7x3DOB7pbnJFj0Qz8pvgZ+eaDdFmLCrY94UWjAHtiL + tq4+tcazBbt6zqjx2FySce2RGTBtZtRY+I1rM07Bc6fJSIi9Y8Wcdy/DpX6R/D5qPTtfzobyW8/5 + TTacdclbgB7evAgrb7iiJWEpzMhhS8JtSZORZKEMxWcSoQ/Y2n2VFnkLzVgJ6GEKIKCwW523Gyf2 + fn7Cn9/B+gYvShxjx9Vf/XSXmQqgmu2xO6y+Cdsn2IPkgyuqM83tZydJ/2v/+cOVwkV/WErIeoFc + jXn02TYNINxevg6CU1RXk7wFOXgFOCDi4qemqClyeAR9hC3adqDwwk6A+BsfsHk2h55stgMDzenF + MarY3herIDDgq28tmsnFoVry7QHUqZtR20OdPifqyVA0ehoJS98YTMP6mQGLrAR0nboTEPW3V+6e + O1XG6g5rnMBm10Dn+qqoeTYPPVj6Mdwk5hmbxjzqTUKUGrKrcEfwzvpkxNpHhhGCA6lNbe0Pevgx + 4LGFAdWliSZTs/l4MBqnlCIlOFd9/EEzIJcyIehdFPq8P+9EyE6fHCORDWDG710E05P4xv5bfenf + X5658DkR7Z1WLev7BdZF/1DD2bSAd5vkDIon41hzD0PFSTS1ytKPiOhsWj5/ImkGwcW/0IPeCDqf + xiJXyt1Fxd7FxOG08li8e1XRhp7bpwLI5TYFUEubN9aDzEho+bxqsO6zkShouwaMNRqBi/6nP7wv + eaMKNcoOCBQh19n67jVQSY8vNITgXv34E17lq4s976Mns7nREfzVozU0gS9mQnIAPz1gy/ODj5E5 + NPIzs0oyPZqCT6SwIOi3no3y/Aw4rT9BrAD7Df/U2+K/blsihCWiggs5D7NegCczlbHpvhveCccq + hc5GeGC8+DeedaUFsyZzyIL/amMIyhZsmtz95U096y/m9sc32MfXIpzNmxXALmoOhOAm5Fy8EA1Y + nyNa8Ff47PZoU6Aaxhcb4LLXf35febxsjYjNa883lYtKeAtIROZKufNf3gTAbefjfbPbJ5K9OrRQ + aJ9P6q92s88jQkR5yQvw8R6BaoLnIIP4EByxrRQvPgHzq0Hqkgda3/W5X28upacseTJ2dvViR+s4 + Anh9qbEZiXNPkXKsf/MOsi15Hs6K7jagTTyBVBAn4ThqPpRHIzKJuORl618e4e2iKwFLvstPpm1A + 4po1mvevvGdsPRx+9T72XrwCVMqKCJYjcKn2RLM+7tK+hukTHn75lT7fM5lAK6lNjM+RCUTMnC8M + +mOI1den63nQHLRfP8JPciI//MdgiDcYrQwlq9jZ2DZQ2Z8cNP346LtXEOzcs4I9MSXhSFaBBC5f + xKl/2+x1vpGnG7gGZkXNtLNC8XZv8p+/wE5kr/xh28sZlAstxX7t5aD9uo8Ymm/QUj22YciNobvJ + y/NgVZEvYH7nmQRHIzaxdcAnf7qcpi/45W+4jP2EqS/owB24CtQ+grXfit3dAc6o7ukyvwIUf3ca + XPQD1cV1Fc7+O9LgE/GQ7Epi9O/Uk6FsPLov2XkbPxlfwijJhgVLmmrt1v+jhy93/UAWfusnn+5k + 
uBeyBu/v6z1YL3iQf/m0q2EF8LRov9A5fXw0XMip3wj65QB3/IPQz9/x80nJ5R9fRsFo6zzEWgbt + z70m4rJf7G1JFqw/toRdcbglE0FyDJEnfOiiRxPuufEA3+CjU5X0nc5uzlEFnJ4e2JjI2LPEIRE0 + toGz5CmlTw77XFV0UPXUaF4Fnwxp28C2sW8Ur7Mvnw33LsFfvnPrNl99qsTzAOzPtcY6vFf6/KrL + VFnyanr81hqXztnppqDMQfSy9Asaf6wZtmGE/sz/eHx/n5W2RALWssNWn9nhAQFODAUJs5hXjN0K + pHi7+EqPxiVNxtXuIsMXzTMkZus4XPqTAIVnZaJ5MvtwEgaSysVoaPiw8P3PH8BcEHTsxP4ZzOte + uoFJcQZqvB/in3xFLnc3lTqfTqhm81MMP/7A2Pk0+h9/u/Q/spXGTG8Pm/bPfA6j74H7LdCfGSw1 + YUYf67EOJ+MKWji/fJ2i5v5KFjyKwK/8J5mcsEx++wUWvqUH+GHh3IVRC08SywkQwMFn1WtqYNKe + KD7KTRvyylfTP/MziyVLPOCPX+iIxp5Gy/xrWPIjeNnlGtUuyTYca/oeYHTTBvQVtAIsec0ByJr8 + xD+8MamaNFCez3uKHodPz9GBer/8Eu1OyVuf78qaAHHTrvAprm1Oln6m7JPzCmOAvWpCdirDjb3r + qbbot0XfB7DP3x3VJrNPFr5BP71O/Vp76AR09Re2hxvCep13YB5cqVWM2HOxpykADPpby8GSj5Lx + KFPQGaLQ/vILROvbvR9Xu1SG7/39iarFn/LAdlWIDU2gP30+uRYo5fal38gUqCScwNUv5V0gW9Sp + 6zxc+t8AHFPQsJnf5nAUSB4rzOoc6l68Xh/HSpWhDARO7cndg01EGhGyUbthLQeZz1dq58GVSBOy + zFNC+tuPxf+h0yOTdfYaNjNcdcYZe1tXDtn+ubeUhhknetklu/4rnvILNA9eTo3mLvvvG9k3yt+/ + UwH/+a+//vpfvxMGTfvM3svBgDGbxn//91GBf8fP+N+iKP2bSn9OIpAhzrO///mvQwh/d33bdOP/ + Hts6+wx///OXIv85bvD32I7x+/+9/q/l1/7zX/8HAAD//wMAPe/DkOMgAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfdedf56918cfbc-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 07:04:15 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '25' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - f2850337441001a8e5bd70bacb4dede8 + status: + code: 200 + message: OK version: 1 diff --git a/tests/integration/challenges/memory/cassettes/test_memory_challenge_a/test_memory_challenge_a.yaml b/tests/integration/challenges/memory/cassettes/test_memory_challenge_a/test_memory_challenge_a.yaml index dbd63533..a38ce01f 100644 --- a/tests/integration/challenges/memory/cassettes/test_memory_challenge_a/test_memory_challenge_a.yaml +++ b/tests/integration/challenges/memory/cassettes/test_memory_challenge_a/test_memory_challenge_a.yaml @@ -38,10 +38,9 @@ interactions: to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", "content": "The current - time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This - reminds you of these events from your past:\n\n\n"}, {"role": "user", "content": - "Determine which next command to use, and respond using the format specified - above:"}], "temperature": 0, "max_tokens": 3251}' + time and date is Tue Jan 01 00:00:00 2000"}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}], + "temperature": 0, "max_tokens": 2766}' headers: Accept: - '*/*' @@ -50,7 +49,7 @@ interactions: Connection: - keep-alive Content-Length: - - '3383' + - '3294' Content-Type: - application/json method: POST @@ -58,20 +57,21 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA5yST4/aMBDF7/0U1pwdRIAFNjcuVVftZaVWldpUyDhD4l17HNmTLhTlu1chAVZQ - tdVe5897vzeaA5gCMtCVYu1qmyxWT3biH+vV5MHvF7Pdz4/mw1f3abN4xG/vQYLfPKHmYWOkvast - 
svEEEnRAxVhAls6Xk8X99G6ylOB8gRYyKGtOpqO7hJuw8cl4Ok5BQhNViZAdoA7e1bxm/4wUIVvM - 5hIu2ud6Ol9KYM/Knkv36ayVoCtvNEbIvh/AYTzJBm8RMlAxmsiKuIP0xEhdgENOQgiRA1e+KSuO - OWRiKA4N3HFXzOFBEGIh2IuAqhBcoTAUOTS644vrdMQ7FltjsZspkDE4Q3gcJNyx0N45RUeFJuIo - B/naKKCKngyVvdvnv8h3/MpQvGEQXCkWF9Ctt9a/XDvVVlFvkogvsQfsIq0H8TPlv3KO8pwSsSJl - 97/wDzBvuIIOho020V3fHCk2AU8BlRuyGSpvjbUPATXbveiMcLs12iCx3V+7xRrV88npxVjbIb39 - Hke7/4ncM7Ty9H3DwM3zkXLY4515rhKoUF7/bN/oZi/rt8AniCPIwJNTC62ErSETq3X/kJBBZF+D - BEMF7iAbtz/ad78BAAD//wMAZaybHTIEAAA= + H4sIAAAAAAAAA4RSS2+bQBC+91eM5gwWxHZtcUsPlVy1aQ9tpapE1noZYGvYRTuDYsfiv1cYcCo7 + Sq/fPL7HzAlNhgnqUomumypc3T/dRe2X59W3OPrVbhZff0YPhyZ/WHz+8PETBuh2f0jLODHTrm4q + EuMsBqg9KaEMk/j9+m69iFbROsDaZVRhgkUj4Xy2DKX1OxdG8yjGAFtWBWFywsa7upGtuD1ZxmQ1 + jwN82X3B4+UiQHGiqgu0Xi+7AHXpjCbG5PcJa+JprXcVYYKK2bAoK71IZ4Vsb+CUWgCAFKV0bVEK + p5jACI4FOkgPprgBLl1bZcCivMDuCJ5UZmwBUhIYy+Jb3UvlbTyTg0BuKoKWp46+e3vGtKtrZbNZ + isG/VJ4UO2tsMfB9Lwly41mAhRowDOKgtRn53kd2wwoTKIr3DFIqAUuU9WM7goZ87nxNN7RNpezA + GMIPpte19kt68C2vaWpDuLeqOj7T6+oyEvK1sUPZ0mHwxleKtDditOF6Cn6yQZZbT4O1zZtZjI9D + 1RF2lDtP0Hinic73ejJSgrLHIanrPLghtZ+Yn0xV/f/gZ/uzFIc1XTB91RjezVNZVdPAcAn6SoTy + xfUvDoW+92X89hKTiLOQUU9qO+wCzI01XG6HN8MEWVyDARqb0QGTqHvs3v0FAAD//wMAM8Ih+goE + AAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7bf9a9ff2d0d6428-SJC + - 7bfe29d91e6b96e4-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -81,7 +81,7 @@ interactions: Content-Type: - application/json Date: - - Sat, 29 Apr 2023 18:38:57 GMT + - Sun, 30 Apr 2023 07:45:17 GMT Server: - cloudflare access-control-allow-origin: @@ -93,7 +93,7 @@ interactions: openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '9047' + - '9335' openai-version: - '2020-10-01' strict-transport-security: @@ -105,31 +105,25 @@ interactions: x-ratelimit-remaining-requests: - '3499' x-ratelimit-remaining-tokens: - - '85995' + - '86493' x-ratelimit-reset-requests: - 17ms x-ratelimit-reset-tokens: - - 2.67s + - 2.337s x-request-id: - - 4516accaac0d04bccb540241a7538089 + - d98bca49710a9416e1e51117937d09fb status: code: 200 message: OK - request: - body: '{"input": ["[{''role'': ''system'', ''content'': ''Command read_file returned: - This task_id is 2314\\nRead the file instructions_2.txt''}, {''role'': ''user'', - ''content'': ''Determine which next command to use, and respond using the format - specified above:''}, {''role'': ''assistant'', ''content'': ''{\\n \"thoughts\": - {\\n \"text\": \"I need to read the instructions_1.txt file to determine - the next command to use.\",\\n \"reasoning\": \"The instructions_1.txt - file contains the instructions that I need to follow.\",\\n \"plan\": - \"- Use the read_file command to read the instructions_1.txt file.\\\\n- Analyze - the instructions to determine the next command to use.\",\\n \"criticism\": - \"I need to ensure that I am following the instructions correctly and efficiently.\",\\n \"speak\": - \"I will use the read_file command to read the instructions_1.txt file and determine - the next command to use.\"\\n },\\n \"command\": {\\n \"name\": - \"read_file\",\\n \"args\": {\\n \"filename\": \"instructions_1.txt\"\\n }\\n }\\n}''}]"], - "model": "text-embedding-ada-002", "encoding_format": "base64"}' + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key and potentially important information + to remember.\n\n\nYou will receive the current summary and the latest 
development. + Combine them, adding relevant key information from the latest development in + 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\nI + was created.\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}], + "temperature": 0, "max_tokens": null}' headers: Accept: - '*/*' @@ -138,128 +132,26 @@ interactions: Connection: - keep-alive Content-Length: - - '1151' + - '600' Content-Type: - application/json method: POST - uri: https://api.openai.com/v1/embeddings + uri: https://api.openai.com/v1/chat/completions response: body: string: !!binary | - H4sIAAAAAAAAA1R6Ww+6vJrv/foUb95b9orIqWXdISAiYKuAipPJDiAgKHJsgU7mu+/gf2V25sZE - IRTb5/md2v/6x19//d0kVZaOf//rr78/5TD+/X/W357xGP/9r7/+4x9//fXXX//1+/xfd2Z1kj2f - 5bf43f67WH6f2fz3v/7i/+eX/3/Tv/76e/ucYnq+jTVb7DzkoCG8cqzxm6mfWUBSmG/yFPllXybM - xmoNT99UpwHc8qxz66qA4iFv6L5LCqPrghnBtog1vL+dHwF7FY0HoqWJsd32WrJlB8eDp3uDcXiR - mMtSLDVwDHZfuhvQYozNd5vCunUT6unj0LP+pAjgfere2Atkvp/EtqoBEdKBnr+C2y/qexuDszg+ - CFDMfcK28KtBl8cOGcOQC8bj6BOYjB7GRq99Epb4TFGfQijg/TXZG6wzXrpqVFFCnW53ZdONjQP8 - jpTgI1asXpTRIVNGzVcR976PbKDmOQZKoZzxvntZ/eIxvMi1xHZYcwIMpvxqSoAr1TM+gixjU3a/ - 32B/uFVUz8i+Ylof2DBkN5NIJy01Ftl/e8CaCoJ3Uj8EXWXnIcwHspAJObVLEOkURVL7O9pm5pB8 - I3e8gc/1Dsl8kj/uUlnqAL9d3WHNObUBFeRqgs4zZTi3hrifOEw7OMVnFx9qmwSTny8SjJbshKbm - 86gIvBeh2goXQt052wV89Yg8BXYng57V8pKQw9VwwHvTxPRq+kXCLCusoTubFoLj4xSw4aRrcren - B3z0hjsY/XNQwthqLHzxyyFh5Nbr8LDNE7q7h89guC2mCaGcYnor06Wfu+pcQ3lzZkh4jSYTsmbW - 4VvbCPQQyobBgrFoIIuOPnYi7t4L1v36hq+t9aG7bGNV4uFy9yB3jhjZzFGSjAlLfdgu8IlvddCA - +VYOA3gZJqXO5iAmfdUdPagmjY/vtbJJKC2goATn4IEAvr4S8ux5HxKhkbGD208/GK9pgKU5uPhu - QNdlWtfd4IuXIrpL7HPCOl3yIH9cCHZabLLp/L5P8HM9UmppZ9qPp00vQaZUJfVk4dtP6PQaIK6A - gneJG1RCE2UmSMYoRfdn/nUnq5UssB1jDdve/pUs3cs6g5f0EPBhyXV3mhOUwvb8PqEFFaBf63uQ - DKHKicSOgC26mbxh9uV8rE9DBqbpbAhqFpUyUdKl65dnb6TK59JdsF72eiAouIBq7zy/BMzZK2HZ - 49JtPu+vTc3+HVVTx19qdbvhnthm3sZY+gwI0BZuBsU7dQBUZ3EBeQ40FBe+zEaphBF81pVB9Ruy - XXrXSvJ7HyKAwADC9fBQoPIynzg+R6xnWV0PUC/dB1FcOrhzdT8p8MP4EAmmLAAm66kO7NDR6G6w - 3WpO5+M6fnGmh3ZjVvx9KjO4hyGP43RxqkVwr50KPv4N66gA1fD+GB34du+O7qV8w5pYnDWwlIKL - jds262fzGUBYXPKALLdLH0zmONjy/PoeyXLqv8FinA86uHvWmcAVj5bGPWrwEfJPugdvkwlDMtXQ - FOMDIXL46aen2ytKsHEcfNBImhDvs1N+60WmH776GrSVx94TsaNc62Q6bKtS1b3GxNfry62Wrrhm - UPDtF1I682HQ/C7p4GrlNnbPJg6G63eXgvvr+8E2aOVk3MDUAd62e1D99CnYWGdhCnNouzgZv727 - YAkSWCDSYG98nBKSCYIEB1cSiHQ08qB/EWKBnZvoaJmGjM1IvWQ/vKThlbbBJC/JArPvsqc7CwE2 - STzxlKXSIgSiUahYUR55uP4fdKUHp5rPllJCCFlBRLC33HmjbheoXFqTGjfNSaawDRR1xSfq75V7 - tcTRLgPOM2MY54Xdj1EmNfBQDjPV2Vyxjs6qAMp7tsXHT9UayzM/a2pqHDhq9FIK5vOhD+HzfhGJ - xM07sPJBBK1xMmh4HSyXdIGMoGSTDnFulfcDX7BIlfrgQL4lCsHMDroHo+CT4EMnQpfwO9hB2nSU - HuvomczDYE/wCMyKXvrcdJe35SmgWfZbMku9l4yElzglDMUBSd5+lwgzuAlAzyNCLXA7GKJXbmq4 - 1h9aYsWvhvPtosCNjlIk9PsHmI1MrcHjqnfU0k5esODhnKnTk7XUjTqWDMGzyOB2A5/0+FoeySwX - Lyg3+d359aO7vLhTCq6PZIOYptiAbemFUy+3JcL6XtcMdnlLGcyfwQM1zIqqAcplCNd+x/gtPcGs - 5Fqoxo77JiXjvysfDR5wX35ARPESuJ9hLCM1Ch4IH7Q578fXTk5/843g9lQk5JlHGjyL9IFgmzMw - PSy6/PCf5lsPBfMo8gi8kcaRbnO4J9Nav6qcShXNnWcXDGWzGRQSnlvqNpzBlhMNCHxtz8s63qan - 0JYX2FYgI68B+cYUwasErdelxGhb+P34hGb2p751kWg9TYL3AESDc/BJf9iBcL3mk/J5pSY+giTp - 5+qoaWptMZf++ol/nRsdKpdMxE5KD8ncxEUM1+uElfErYNtJX9S+LWqcuFlvDIOLBwjlDGPjqn5d - RsoshQO5WHjFo/5rbE4cvD4em5Xv1KDJjo8UPk2Hkf2T+7Bp9xkm+N6cNfycRcUg6/opK7/ieJQG - YxL6kwdjSwio7t8vgP+8fQHe5yQizDfGflLouVRKuZF+fJ0wxz0PMLzcEOE3S1nNzFcbYL2CElsg - MNgcxJ8QjmV/wnpj3KvRkZEN/aSQ6K6ERsAseToDrtAfVMtvxJg4Pa7hnAkZ3ovTBQz3qUvh9Jxb - 6nivZzB4l+cCHvvPBYdm9UrGs3Uk4HylFT4hoezZXeuIYrm1hoTuVCaD2CgTWPsZ31+Pol+sK+PA - 
nPEZNVb92GBPs9Tt5PT4N1+TMdEYnHVOJvA+OYyN0TaVu2uh0J/+nO8v14SdWExIvPJPsKz4q5Z3 - v0GDp2fVZLOL9Zt//LC1MZg6/lHD3XOTU1PkaL8Ui6+r8unpUKssZXc0nwkHc8e+0uOnHpKVP2sQ - P+KJcIN6Yfy+fwzyuzjpFM9XyR1WPQXg/ezRHMUHlzzqw03xXdnHO1iW1fT+GM2vfumeOiewiCHX - wDSPfaoz/uBuK6ftFJ6jCbbEfGLzt81NOMEuoebRx8ZCp3TVM16Nj1isgplbgK24+pcQrjWkZAjb - RAFr/dMdT2jFJKEoYU63ApKOxiaYHHj0wfp86qnTzpjFjyXAqm8IvbDNq19e0xSpvj0hjJtFTEhl - qQQeuQdPvSjdVqOZeROgTUNRt3SqO2xOVwT5035D9Wy2wcRur1pFaUTpsS0+YKbakMJ1vfFeZFuD - VEBHatzqN4rVZQkmeS4hrKWaJ9tVPw73OTnD+FGrZF7xlO0PAlKLlynjS2KfA7Z8/bfyNO8akVpS - 9bTd6w2sPAbR2N5hRatu0eEPj1DVG8ZyvebLTy/g+/MzVhNfgAh41oZHy6l5G/T81RVYylmPtmxn - ugKWxxKquTsRrnEL9qff1fyq4uPGn9k0OJkCdoneUKNTS2Pspk8H3+cHXvkGAjq4RaTOs16S7uPb - 7jSh0oO0nxk9GLB3Z6O4I7AR7zdqr/zKVvwHUO7SVc9WxvTKPhJY9TLFQzv2i52nUAov9o7Ii3Vc - +cur5cwGAdWSNKymzGEWNK/Rhcjh81AR8dDWcPUj9BR8puCz9h+cnO5DZA0/e7J35BBUKb5SVwdd - slwM6w2jxdfwfn/gq5nb3CMg0/6EbdG4JlvEMQuu/oOa3XLoBesKIDj2qYqfTbpjoig+Qti34QNf - j+acTEVQ+bCHwZm8p4NcDdBCHbjnOCRiFoJkEpsjB5/3QKT7+nsypq/89aGkhBaO3YUmU6epCpQI - 5lY/F1bsa3MDyIcwoqd8803Ywbjc1D/9h58Pdwo7j8irvqZaoUeMtDejUdvdCxF1xZuhV5f6V1/Y - pNniLmdDjCGzq4GIB+FdLa9JiuGPX/br+ouitECYO2FJqMxtgmX1P+ByeHnkehqlarqU+xtM/buD - 3fn9BXP7Xjwgn8yJrv4smfxcUWC+CTDVmlJY9ZjiA1N8h+jHl3/qp3ifVHzcqporDnFugbMIajKj - XnBpUe54NVPAEVtwvzVYj6pG5fvlTcSl/CT197EhingIEnqKkAgmia892OnTGZ/RQWc/PfqHb0z4 - Zmxh2/MNxlZnUeO25Sqi783o3/xUeX6yOC1ZwAU+ZZqmQc/qrG0FYAhXn6Lmcgimy/VlAXP/zqhP - lbGnyilrwJ0zLRwdPlW1pONVAj98wzvl2JOAazso01zD9uOOwcqPEyjtU4t69z1XI2xjW2bKq0SZ - eimT0b+YIYwqjf/pu0rcqncBFhPJ0XZ/CPtPJi+6vI6H3nEmunTNAwBK3xBrW52BxUCfCNiKQbB7 - B59gjGtJg/Zd7Ml8S/fgh9eQhJlAoBucqyW+n5uf/0fDvTMT8fwFA1j1DZru+Z0xQ6MNWMxjjfFN - MSteoedCHRJHp3Y7W2z6ytSHOxX12PCNUy/A6GGDU6S32DbIKRmb5IFA1fsh2QRHHszXt+jA/YN7 - Y92/z4wdLrkHdM2t8F56PILpdYl4EApiSRrQPoIltFkJrUsLsfWAW+O7eV5j5XMRMozmIe9nbxtb - UKyLL0aWrrGtMooakNPhRuPQqyu2E57Tz2/gPQx8MF4/Xw4C6/HG9pnH7pJvrz6c+SvDJwcejXkY - 5BqmeBqwqQ29wXbtXKgsFHZkie1Xz/L7pIEY1IwiXKTJpCU+p3qmplLrIaB+fu20RtXApiXqu2yC - Dkv8AO6vzYa6Y0eShQlHB/I9N1NbNLYB8ykw4aG83Sm6u/t++T6QBDffcaL6N2x7ZmNvgdvnEiM5 - LBzwyyfA9hLIaNslmjtE5Kyrv+fpU5tUi4FvSCm7ZEBKJF0M9oVxDBBXLwRieGfEafb2Ly/BKx/2 - veNGA1jxlO7UkFWDMX0j5ec/zFoixnLZap1aeTPE2Kp5Y66ffSOPZXuiltfVfVvdBQIvst3QY6ru - Dd4kIffDC3xY8WSCkj3AXZftyCd2FvbhFj1UBzXc02P7LPv50bIzrB/VlnpzUIO5MBITms3dJtza - D/PkUgs4CH8RT7PFYF6oKSquZIW6ivkJqLG8iHrW65xIT2QZzFSHG/iOMsBeem76+RziWlkC9Fn1 - xcgoTu3zH/1wxEpdjWLUvaFtDl98r/euK16FiP/5Kfw1P0u/CEdeA7cRZ9i2w/n3vDe4wU5c+7vt - pxv7ENjD55Nsmm1cLS81bcB7k56weaXHZPk+NgOMH15Fiv13nwiPGt/AnaQF9Tw96xf9vOGV/WO6 - ExEocfJ9OC6ByJuPOPqIYzKMtuqDbVHtKcLxla15SgiN8Ijp/tRtA5IWcwcqDrtIaucaDFY7WRAd - eZ+e1FwztvG34NXFdGvqAEepRs1ltRosF4zxfYHuvLOlGwxo8cZr3tYz9+45EBh+QY9cUbD2SRsE - J3jeISUCYbAdBm1S8yO0kbT6KRpfRQfe2kFD3Dxsqn71E9Anhyvdm74WCD8+McU6xPu6OwdTupEi - eP2UB2rQ1utFnXdDCA/ZA5tH7LLlc84I4ArtQf98D7hXp9jfi0n1jPcMMY8aHmwvFxkfnmUXrHgX - w1F7OtRxFxwM7sHlwFr/FAcGSsStmvPQ2TTC2u/UHaZEPEMqWTb95W9LWtjCzx9Rfece2cLRA1IQ - N21Qr8iPaimWWAfXT3FY9ckmmYTnZKtSbyrYk4VDxRgTBCjW5Zc6H79xB273WkCeKhu07QOxIppy - HKCa6gPFvVqCxs8XBa54Sh0Do2p+YVTCz9WlRHw0E5vI/ojgM+DqFU/HflRGhCDZEQ9tpqICc2bd - bjBx45p61lsHyyNSznDGX5+wnUYDFm12ukqtc77mx0UwBkhWYFC2LXa5nW6I+WcrwWX73uJjoezY - LL1v2k/PYmdjhsacHbCjpPNSYb2xYsB22wHCi8Ql6HG4fwCjhafDnx+NP1c3YaLkOOCHBzue4Gp5 - cYKvCDq1Ud5slX4+W0sBLwd7QZ/N69Xzh91dU8p62JHNnUbghWHR/fHjvEZRIm0HwsNVv6M514+A - 58urBkNWXvDhog+s3jvzDV6b1sWrXkwGTpugCm47h7qcIKz+LiLiBV6GP/g1Bc8mA8tng5FIeup2 - q16D33gS8e05NS4LyCWDn/fHJjy7NsFceq4PfLK/UnS39X/rnZ370Km+Q6MxiM0RwjDsc2xIxy5Y - rtbLVCtgHRAsYOB+ntBMgXn1TOrv0MlgV3FJIfuiC9ZuXVXNQmwi+NxyKXU4v3JnuzybqsohSu6v - 
1GCTvfFK+MuXfny5cK/dDT5ENP7yk4Buns8IAukR033JFINsufkGV79FYCDz1bTtOR1628yj7jOI - +8nblzew+kGM5vrhMpTKJhyIJVNTaw/sh5ewljYIqfKnrqgauG/466/9NWuS7hDGN4hoxbA2pJRN - JkkhEG+EJyweQrblNnkkr3oIe1tIDIZTzYffO6+RXz69KOfLApk9ygSMHQqEvNE8UDCxIuymdckE - AL/88mmMneoMVv01gYdynYlYK3mygMcywen5SbEuxX0w6FV8g2veivdaVLOFCTtHNfdLjw+le6nE - g7lAWKChoc5My4B9wesNDJMKSFWemTvdpzJV13xlXd8yYTX35gH/iMOfvg/oKYgsePa9Pd1LKaxG - p9d8teqzF8WFxxJx186lmmJOJmIIrYSt+hS62MJEnfUPYNNTfsP7iy1ksxOtYBYfQQQdZzjS3S7t - wOgMrQQQp3pka16NisxD48CXP1yJrxpLTzuj1aBtljEZ8RrxoiwefvkHYp2qG9OEEw2u+Tb1xnSq - JqluQkhCv0VQlrs/ehe8z58UO0WrgXEfRwS6OgPY2FHVZcs3riH4dBM+hkUH2GUOCbg+jA8RbDYF - barWFpSXh/THn42HGa15pN+hL1DigPzwmX9EIdluXrtq+eWFAzFlMjs6Yot4eL1VjOLPn3x/TlVi - ARa5PuHjhLHm279i+OKViGL56Brib/427zjHWLa+YFGQ5EDxBnZIOVhvNvrnpACI23hYw/qzX0jN - vF9eg3izfvds5f+fP6b6kb8Y76abapXvpzc9bHjGxuXANCl3biWaqpMZTOwqLcDc1xkSl09TDfGe - g+DDay+aq9ormHsK+V/+jrgGvV2yiN0CfXIpV7zZBMP8GHjIFNemjpfF7vLRIwGe35DiA1CfvSjv - eu+3HtQ248hdnKSRAPvWPRlXffnHD675Oerw0LDZyLZvmEzVC+PAIMm0+v1f/v5n/218tE4KI8GV - kGjnw2//CwEe1He8k5rWHe+Ff1bllNwIHB/jiketCfdO5aJaTE+VuDlrN/UQBBHdSZ/9ur+W3+BJ - 0XO02akeW/WUBNM88rF5vPoB45enB67XIcSn5FYYy/gddFh2p5xma344D/JTgHWt3+nRtBj7flUD - qUTIBnz8tLSil8/rDKXIMRD7Gk01ACVwVNjyLdXQoWTspxfW9cXu4RsCet3jWhln5YHg4nLBul+g - Q1xEKc7X92vXPFm5e+aZ4pt6YLN+DWvIVccndUZu6X/6FL6XOf+Tx/WD20QQV3REyuZwDyZWcR0s - 5e0e7/rymyxZFzfweDwrODqURTJL1Qf99ieJrLRdNclzB3/5EHWeb9DP8bfh/+T5uIpLl13FXQdf - Prlig/dO1WAJYQjDnbTFXphHYEGjYkFr5M7YiNu5Gg68QmQzkinV8qADs/hBAkxztKend4MAX49A - gJuLoK95pN7XTvoYoPZIPNSdr1Mw8a/BA2nu7fFNc47u9jBbE+A9sCChfuBkjJtLp4bvYI/3mekF - yyJ205/5e8771qA6b4Tqyt/4pDw5Y07nnQClpH0hLrgv1cu/mDfYMrvDrs6N/YQ92wIF21YUD8OZ - ke9DJFCf64VwVV+5y/chDmpU8l9sK/zHXeQLiEBFPh+61xLHWF5q2EBgfWeMdLfrl/bq80r3Xixs - rvkns/WiVHgQO/SEBL2fpmTjgzEn19/8sGmbXxYQLHud1PWDJl0bdhrcqXqGf3p3cOyxhj88OGK+ - MNZ8Jpa54CVS83TcJ1tsh/afPObk6rTqbn5aQj2vAXUPY5IMCvy8gXg/oHW/EVfrfnoHLvJti073 - /Mi2k5crkNM2MXXTrE5YKYkKkDpppKjqK4PJeqirf/9OBfz3P/766z9/Jwzq5pl91oMBYzaP//yf - owL/jJ/xP3le+CcV/pxEIENcZH//69+HEP5u+6Zux/87Nu/sO/z9r78E5d/nDf4emzH+/K8L/1jH - ++9//D8AAAD//wMAP557oeUgAAA= + H4sIAAAAAAAAA0SOwU4CMRRF935Fc9cd0hlQsDviyo3sXGAMKZ0nrc70lekDjGT+3ZCgbM/NPTln + xBYWPjjxfe6q+fLUPDVp//K6DqvsV9N1u++WP24/Ox4TNHj7SV6uj4nnPnckkS+TH8gJtbD1w6JZ + zMy8MRo9t9TBYpelmk7uKzkMW67M1NTQOBS3I9gz8sB9lo3wF6UC+1hr3NQ3rCEsrvsHtTGjhg8c + PRXYtzN6Kn/OgTuChSslFnFJLoWchNKl/lmdXFHXYuVSqxJLiGmnEp1UcEUFlzMlaicYNT5iiiVs + BnKFEyyKcIZGTC19w5rxfbz7BQAA//8DALvrL2VJAQAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7bf9aa48f88a6428-SJC + - 7bfe2a24dfa796e4-SJC + Cache-Control: + - no-cache, must-revalidate Connection: - keep-alive Content-Encoding: @@ -267,29 +159,37 @@ interactions: Content-Type: - application/json Date: - - Sat, 29 Apr 2023 18:39:00 GMT + - Sun, 30 Apr 2023 07:45:20 GMT Server: - cloudflare access-control-allow-origin: - '*' alt-svc: - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '105' + - '572' openai-version: - '2020-10-01' strict-transport-security: - max-age=15724800; includeSubDomains x-ratelimit-limit-requests: - - '3000' + - '3500' + x-ratelimit-limit-tokens: + - '90000' x-ratelimit-remaining-requests: - - '2999' + - '3499' + x-ratelimit-remaining-tokens: + - '89866' x-ratelimit-reset-requests: - - 20ms + - 17ms + x-ratelimit-reset-tokens: + - 88ms x-request-id: - - 4b63030d949781709e3e3da468f83709 + - 396e6db6eeae90fd3920af59c662d881 status: code: 200 message: OK @@ -333,22 +233,22 @@ interactions: {\n \"arg 
name\": \"value\"\n }\n }\n} \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", "content": "The current time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This - reminds you of these events from your past:\n[]\n\n"}, {"role": "user", "content": - "Determine which next command to use, and respond using the format specified - above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": - \"I need to read the instructions_1.txt file to determine the next command to - use.\",\n \"reasoning\": \"The instructions_1.txt file contains the instructions - that I need to follow.\",\n \"plan\": \"- Use the read_file command to - read the instructions_1.txt file.\\n- Analyze the instructions to determine - the next command to use.\",\n \"criticism\": \"I need to ensure that - I am following the instructions correctly and efficiently.\",\n \"speak\": - \"I will use the read_file command to read the instructions_1.txt file and determine - the next command to use.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": - {\n \"filename\": \"instructions_1.txt\"\n }\n }\n}"}, + reminds you of these events from your past: \nI was created and nothing new + has happened."}, {"role": "user", "content": "Determine which next command to + use, and respond using the format specified above:"}, {"role": "assistant", + "content": "{\n \"thoughts\": {\n \"text\": \"I should start by reading + the instructions_1.txt file using the read_file command.\",\n \"reasoning\": + \"The first step is to understand the instructions and the tasks that need to + be performed.\",\n \"plan\": \"- Use the read_file command to read the + instructions_1.txt file\\n- Analyze the instructions and determine the next + steps\",\n \"criticism\": \"I need to ensure that I understand the instructions + completely before proceeding with any tasks.\",\n \"speak\": \"I will + start by reading the instructions file.\"\n },\n \"command\": {\n \"name\": + \"read_file\",\n \"args\": {\n \"filename\": \"instructions_1.txt\"\n }\n }\n}"}, {"role": "system", "content": "Command read_file returned: This task_id is 2314\nRead the file instructions_2.txt"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}], "temperature": - 0, "max_tokens": 3021}' + 0, "max_tokens": 2552}' headers: Accept: - '*/*' @@ -357,7 +257,7 @@ interactions: Connection: - keep-alive Content-Length: - - '4440' + - '4438' Content-Type: - application/json method: POST @@ -365,20 +265,20 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA5yST4/TMBDF73wKa85JlW5puptbD0jLiQsIAUGV15kk3trjYE9oS5XvjtIk7apF - gPY6f977vdEcQReQgaolK9uYeLV+Nu92SZqo+Tb5YR+//Pyw/py2ZfpVl48QgXt6RsXjxkw52xhk - 7QgiUB4lYwHZPL2/Wz0slm+TCKwr0EAGVcPxYraMufVPLk4WyRwiaIOsELIjNN7ZhjfstkgBsod0 - FcFF+1yfp/cRsGNpLqX5YtlFoGqnFQbIvh3BYph0vTMIGcgQdGBJ3FM6YqQ+wTEnIYTIgWvXVjWH - HDIxFscG7rkv5vBeEGIh2AmPshBco9AU2LeqBwybuxnvWZTaYD9TIKO3mvA0SLhnoZy1kk4KbcBZ - DtFLI48yONJUDW4f/yLf80tN4YZBcC1ZXEBLZ4zbXTs1RtJgEotPYQDsI21G8TPlv3LO8pxisSZp - Dr/wDzCvuILymrXSwV7fHCm0HqeA0o7ZNFW3xsp5j4rNQfRGWJZaaSQ2h2u30KDcTk47bUyP9Pp7 - nOz+J/LA0EXT940DN89H0uKAd+a5SiB9df2zQ6OfvazfAk8QJ5CRJ6cOughKTTrUm+EhIYPAroEI - NBW4hyzpvndvfgMAAP//AwD56qfzMwQAAA== + H4sIAAAAAAAAA4ySTY/TQAyG7/wKy+ekarq03c2tggv3IoQIqmYnTjJ04gkzTj+o8t9Rmna7myLg + 6o/Xj+33hCbHFHWlRNeNjZer/ezDqljtdh8XTz/X/HW3Pc53u1l5/LJfY4Tu+QdpuXRMtKsbS2Ic + Y4TakxLKMU0Wj7PH99PlbBph7XKymGLZSPwwmcfS+mcXTx+mCUbYBlUSpidsvKsb2YjbEgdMnxZJ + 
hDftl3gyX0QoTpS9hZJk2UWoK2c0BUy/nbCmcNX1zhKmqEIwQRRLT+lYiPsNThkDAGQolWvLSkKG + KVyClwQdpA9m+AmYKAdx4EnlIBWB4SC+1T1g2MwmchAojCVog+HyXNGXbs4x7epacT7JMHqt70kF + x4bLYcj6L6o9tjIczsJMB4FAAq540wBSKYEba+Gsdfvx1MYqHgbG8DnQn1H/Z9Us4xhWrOzxF93V + QS+Sk5CvDdMrbKEmjIi0N2K0CfX42MSh9XRdq+WcfP/Heyq4uIXsEZ6pcJ6g8U4T5f0z9kYqUHwE + UWEbxvcIDantdfLeWPvvH/P5qoNIF119dDndnY1Y1TTov5x5hKB8OXbfkOhrb+33OFeIM8iFJ+MO + uwgLwyZUm8FjmGIQ12CEhnM6YDrtvnfvfgMAAP//AwC356gC/QMAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7bf9aa4aaaa56428-SJC + - 7bfe2a28ea2b96e4-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -388,7 +288,7 @@ interactions: Content-Type: - application/json Date: - - Sat, 29 Apr 2023 18:39:10 GMT + - Sun, 30 Apr 2023 07:45:29 GMT Server: - cloudflare access-control-allow-origin: @@ -400,7 +300,7 @@ interactions: openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '10495' + - '9064' openai-version: - '2020-10-01' strict-transport-security: @@ -412,42 +312,26 @@ interactions: x-ratelimit-remaining-requests: - '3499' x-ratelimit-remaining-tokens: - - '85996' + - '86466' x-ratelimit-reset-requests: - 17ms x-ratelimit-reset-tokens: - - 2.668s + - 2.356s x-request-id: - - 8a4f2cccf4cb6192b63c911d82abd833 + - bb4cad658d2a5f381ad0c0621cb834cd status: code: 200 message: OK - request: - body: '{"input": ["[{''role'': ''system'', ''content'': ''Command read_file returned: - Read the file instructions_3.txt''}, {''role'': ''system'', ''content'': ''Command - read_file returned: This task_id is 2314\\nRead the file instructions_2.txt''}, - {''role'': ''user'', ''content'': ''Determine which next command to use, and - respond using the format specified above:''}, {''role'': ''assistant'', ''content'': - ''{\\n \"thoughts\": {\\n \"text\": \"I need to read the instructions_2.txt - file to determine the next command to use.\",\\n \"reasoning\": \"The - instructions_2.txt file contains the instructions that I need to follow.\",\\n \"plan\": - \"- Use the read_file command to read the instructions_2.txt file.\\\\n- Analyze - the instructions to determine the next command to use.\",\\n \"criticism\": - \"I need to ensure that I am following the instructions correctly and efficiently.\",\\n \"speak\": - \"I will use the read_file command to read the instructions_2.txt file and determine - the next command to use.\"\\n },\\n \"command\": {\\n \"name\": - \"read_file\",\\n \"args\": {\\n \"filename\": \"instructions_2.txt\"\\n }\\n }\\n}''}, - {''role'': ''assistant'', ''content'': ''{\\n \"thoughts\": {\\n \"text\": - \"I need to read the instructions_1.txt file to determine the next command to - use.\",\\n \"reasoning\": \"The instructions_1.txt file contains the - instructions that I need to follow.\",\\n \"plan\": \"- Use the read_file - command to read the instructions_1.txt file.\\\\n- Analyze the instructions - to determine the next command to use.\",\\n \"criticism\": \"I need to - ensure that I am following the instructions correctly and efficiently.\",\\n \"speak\": - \"I will use the read_file command to read the instructions_1.txt file and determine - the next command to use.\"\\n },\\n \"command\": {\\n \"name\": - \"read_file\",\\n \"args\": {\\n \"filename\": \"instructions_1.txt\"\\n }\\n }\\n}''}]"], - "model": "text-embedding-ada-002", "encoding_format": "base64"}' + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key 
and potentially important information + to remember.\n\n\nYou will receive the current summary and the latest development. + Combine them, adding relevant key information from the latest development in + 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\n{''role'': + ''system'', ''content'': ''This reminds you of these events from your past: + \\nI was created and nothing new has happened.''}\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing + new happened.\n\"\"\"\n"}], "temperature": 0, "max_tokens": null}' headers: Accept: - '*/*' @@ -456,128 +340,26 @@ interactions: Connection: - keep-alive Content-Length: - - '2085' + - '714' Content-Type: - application/json method: POST - uri: https://api.openai.com/v1/embeddings + uri: https://api.openai.com/v1/chat/completions response: body: string: !!binary | - H4sIAAAAAAAAA1R6Ww+yyrbl+/4VK+vV3hEFqWK9Ifd7KSBip9MBRQVErlVQdXL+ewe/k9M5LybI - pW5zjjnGqPqPf/31199tXhX36e9//vr7U47T3/9r/e+RTdnf//z1v//1119//fUfv9//8WTR5MXj - UX5fv8d/N8vvo1j+/ucv7r//+f8P/fPX32oyFiQ01EZlBz/q4eb8PiG137Fhaa7SDLm22gTQI99h - kdSphywYbRI9dmWEvzyRwSlFAtES/Zj3tDyKMH1vrsj7dk+VoVvoQdF7BUiPjhsw96HjwYWyGenH - SQT0DEwI8118J2Z1HRmeHrMGPZE/EfmlzoDOapuAhxVUyOXNeZip05XgvIUFiRa6B9PtPGqilckK - Pm3xbhiP/KeFMzm0+IzTo8qEQtnD6us2AbXGZKDyVxUllTx2yLKeD8YARg68sU9E1H4XVfPmebpL - JywcUdaE/MAakjjiWwxhIIwXnzFNucViEzuP9X0JTM/LTgBiKopI32dxTs05Sw+4+hTIu493dT6Z - AoUFO1dEq0pfndPq8oKg216DOhiqil4+nACqh6sGHOi9oRvuqIaHjXAl+qiyYURcQkGlNErAVTod - 2jMRRTBc2isxXlE4LGzpUnjBzRmh7BiprC0aCMU2ztHzQ9SBCtdNAbPBH5E2zU1EF7po8MWQEoCi - 3A8tUqAGkX5ZiPwVLbZ/ad5d5EDNyEPWzwP+JKw8fPrxRlJ9e3FZ2dIMOnVZ4W1x0RmFXGCAB0Qt - 8o/3ozue2zEEi1xfUKiHAlgMxRXhNmEv4uCHUo3VSYkhqj1GktuTd2d8viaw3y+XgO8Xp+KeTOxB - 4qQFyc71hdGgkmQwc9RCxjQXOW++RAG85f2B2IWuqfwJHVrIbzYCbpU7Gshc3EWoPWsFpWezypcD - e59gdPA5om0iyf12Pj9DVTJH5NHFz2fpXWZw0uIBBXdXrogkWCV8buccKb7c5digdQxdNJ9Q9EYv - l53QoYejyVPMl2KY07g1OHhMmw55NW+q1N1oM3yEy4cYb98cRmLuZ5i+4ZVkWl0Pc3A+x3AmoEXG - a/bVXZprLbjehjOWbicfzH62iNBWCwN57iZzKZmDGLz8NETBJz4Mk3F6W5LAITso9x9uGAOJj8FZ - j054aTQc0eR+j+G2UCyk+6E6sK6qeundqQsW/NPgLrdUxAD7AULyEBvVbp0fiX30FB8eLnGXz/Tu - t7tm/ybmU81Uin1ZlG588EXKg6eM5Wejh1933hMX5FU+bXmgQcGuROJotVZRJ3FaeB83O+LGsFK7 - sDgWMI6fJW6Og5NzlZhk8C7WI4oScwHLveVlWOx0H0MvnFxqp/cGJlbDBfyd1/K9D+gehk71wjPO - zhV1vd6AKT68ia2/HHXfl9EG9jJs0BW4t4pFz91GWuMZuW8YR7Tv+vpwjseFKPnVUftPwl4ApVmD - ZJydh1mc7gKY3qlKEA3FiG728x7sR20h+sc4RVTGWQlw00a4HPXRnR2YnmBniwPe2l9N5a9QbwAn - X42AiodvvrDKc8Bs4hkdL37NHq/yGMBLDyVcHgmLZkU9yCBNHAcdZaUd6DxxmWSG0EKxe9YZRe0B - Q/n8IsHmfNVVYhUvCMZq2CITGZ9h8MS5hk/x+kB6K1nD4owzFWe6KUiQxlZOzFt9gu7uKyPV0ypA - oTJk0OXTHilr/0lqnSB8GnGKJSzUVa960AHnEJjBzgBlxaxrmMB4jghxN28czRvlsYEScI5EOWWm - uru4cgLg+bMEoJneFb1M7gt68wkRY/km1ewNnQH1Q87jzpJDQC9FE8JLOOsEJU4GGCfWpTS87zU5 - BUIcsQhHnnikXw9Z6futYvO9s+Bju4cYtHfJJX5/zIAWJDwyz3bEqHaeOYk7MJvI5/ThLvB2e8Ff - f4aiTAZsp/ca/Np7FlSO7ltZU+DpeHKRUrayy6YtO0lTRD3cSBfTnRtcCRC+jyFyvUVwx8UDBfRg - LxIvGp1hjmsrhYf36U1++b+kHsmAtdujoD31FmBhaYrg+5CPwabFfD4XeLcRFO+LMTuzrToHeqBA - E7hyQMtWVtmTq0V44g0xAO394TI4SMEfvFPry6AyDTg9bApSEJcrXjnJ+Df+Uz+Qy26AisFDFIvP - MKLgZEv58vIeMfDKPg2A81Hc+eHOpXSYBg8h6xRX9HkTEri13S74cluV/fAD3mGaIFVjPqDZ8oih - qg4dHtZ8WdSqPwHDK0YsnE01wmp+TqU31Bokm+DsTlwYQ7jnxU1wsMQvaxw5mWFzgSbSxnIzzLts - 2kNOnzVSBNmbMTE8xeBe0GfALZELls5eYsnYCTPJDWyzrxleWhDvnAAvlemqi00UDI3mgZDuXOR8 - zRcFbszXhAGbQjZvlAuElpe5yDTHL+izPsDgtsskZHynkzsWUu6JuXXWkCIezIifglMNkk1eBSwx - z2Ah91cqZeLgE6WI/YgzXlYP4295RW6xPbvMxZ8QhN9aCPbtsVKX/UlVJOYJT/QwXZh/dO3B/fAd - OTv+lc/27EE4yqKODF1v3SnmcQLP6qMhK15XGLWHEXpDToKlYGm0pLnXwufTJMj3tzzDzEsUEUiy - 
h54DXarFh5IGKeFNcuy+Edh3THXAIjcXzOUDB2iTzwEcDG9PzJETAD5FOgdnqXpi+DJubLYshYN9 - trXQsXlbbF70moMk4F2kGC+5mvT3KZbYpeOJucYDK6PgBawmw0S+Va1Kd8ii8LXcUmQ5cxkNv/p6 - XqhNVGcPGB5VVAIv8nj0iCFwJz1kBiz2Io8s/+S6VPU4C8Q34YWBwp+HZeVn4C1zB/SkYZbPGrPp - gbXj+Gc9x6l7OLAQ8BspqqGw/fx8OiDklRvehyVj+Hbq9oenMmkkW4zI/eE5PFSGG2C1aXLG7ONe - 6t5vHXde/KlY374SqBBDQ9FA2mrZbm0M1/gguvIlYK6NN5W26cchqsYmgMeXo8BS222IP23QwJ4A - lGCtX7gawkDd70xqAWeOZ3LE6dudVaxrYl/Ud5Kv+Nd/+a98KDno/qmn9Bm+R3jKjiWxH203YCt6 - ChA1yoMEamPk+5UfihanfJD+MeZo/iZYg+GHBeSoWixq4eWqQWq2CB2340edLRkGYuUeBLx/+9+B - AGw6wF2kkVh0s1HpGaANKF/6iMVnAVV2PR9e4iJ4mJx2waSSe/1q4FU9qSTRdctlonscJTHDdfAe - TTiMzfcRQnOqbAw3ShVN+HTRgDMnM56HuBmmi3rAMFHtX7ylYMZfNZAcBjWCWjTm81QGL7g1L1uk - H17PiOTnoIePyz4jpmaXFY3bYGWTmkxs/vTK8ZiC9Ievv/UFRMV8Iu0unwRddFOocD8fPHFERoa5 - K3cfSFDS+YeXmBHbiOYrp2EoZIuOjFA/RNOueIXSGm8oc7VJpfuerw/cuT4j++jx0ejsjj18SnBB - WjCoA8+JYwmLvcCTnz6ZlV2EYQbvPDIucKloi40CyIoZEFnWbu5HkOw98DscEtm9BGCOTV+A6jwd - 8RySccBGbWogv44fYktO82d+4FGvOCL3RsFm+bBtwPpR4vleqS4rfoI4NfZElzjqsnvd1uDi6QDT - U2a68+mQ30HGqweiMAWpc3VSEphq4hYLnzeNOmu591Anebzy9RYMLbVE+P0qNa5opblLIt40IGRM - J5Z9o4CV81GG369cI0vf7lQamlEMfnzVza0UcLIVixBmrUrUR2oCvg+5Ozzr5xNKqj3PqHEQIfw8 - tBKlK39kl/L1J5/W8XvV9IDpBvTypsHwcN67DL3a/S+eiHpmT3V0J6ZAralmZFsyBcs3eASwt4mE - 7OQaVQypTfzjo1g0ZjGfL9E5kDBTtphKe2dY7r7sgbpYtkTnF5WxJA81aXZjLaBK1+ZkibkE8DOt - kFHteUDLbVvCYHIEvDTvli30kxiwuAsx8bGgDXtNzWsQW8MFCyLiVMrzisCfAnQJptvURvRwBw68 - oh4jU8RP91ePwPTOVExXvj3jrxvAfR+ZRLPFJJoMhhuxfJkjnrfn3GWeODeggoqO3E9C3XkbdCE4 - OpASE7ScOz0mNkr3o9Mh434Xo5UfnaTHaQsw7nUDdHsLZmCt30TWQxnsFuXUwuyleeiedT6bzjeN - wjU+gk5yGsaMPiqgwZkG0bAvsyU6GhDOUSkhnyvSnPUhV4C9YiASr/pnKuicwe4We8TfhlE1Ny+j - F1+d9ySZu1HcMTjfYsDblo2u96xj9FLgE/g4xwkph5IH43e8baCRhgekXTk4ENNFMrigCQX9UvkV - 6Xoagiy8X/6rHuenOIVYl74IGb4R7XTJdCAobCfYja9AXXaukQByfyj4iyrZZdwReMD1YYNstxJc - ahwoBKv/gJxvZ7qtEAkyDF7Uxx87d3Om7vUNXBEGtwd6q+YriGIRmp8gAFwh5/sLF1owFDh1jf+L - ugg75v3Gj/RKVVQ+fw4CbIloERnwj4g+b3MiPRzFR84L2QOvKecYSkFnIavb6S75gvcdfL6mjjdO - 1Q3zRzo6UKyJiaw1nhbnU1owK3gZ/cG/TT+9QLmtvKBOP4764+uw8O8M2Y/Wrob6soRAj/cRkneB - 7y4duCtwCWiPjO0bsmVrpClIq/lITpmsVHSpcw729iQhWdYOLol23R56w40gK5sSQNFsJTDORwcZ - 715R5+r4MeBtK22QWYo0oj6gnOQkiY+bd1+6i2GHr19/CYr868A233MiGU6nEN+F3bC89GCEDyM9 - ECTK/br+ZwiC1+yT9POmOXsd3g00ZO1IrkE3qHPUZDGUCqMjunTlB/KS55Mkq4JA7PozAhbkowOL - neljsNm0gLym5CQCkl2Cw1Gqq+HVoB7G1LwROaYZY+ZtDMHuI/PBllyu6nwmVIQtGC8rnirqMt52 - LQQgKJHOLxWYpoLG8O6/Y2JfHpuK0LDMgLiDIbE/zzej/bwEUs47BvLIlVTME4UGPHsAMRfjMyOJ - x+2hwHMPonw+arTWfwdqQ4Z+fF39owdmzbwHOya6Fdl8z7F0TOYtFjZVA9jpap3gD+/1GDVgaa67 - Gdaxt8G7VmoHCpUqAw7R62Ap/b3K4OO5gai82eSnV2fjJfcSyN573N/Iiy133woAfbd05XMtWOc3 - AbqtGMSOsKpSz0kt+BlLlWig96pZBKIFv7d0Qul48cHusXNieCD3PWrFEQ+0G+ALrHiDlKSB4M/8 - Hd7hG5lckAwL60P6qzcBHj+oospo1XDVz8jvegjwGs9wfT8Qb6GR72TdwlCZThneYgEOZPNSsIhN - dsbiiierPkng0tc8ytLdALAknws4yItATH7J2exdJA8KQXojv3xko+KHoEcOCOBbxmDVqxt4kD87 - ogoNjej1fZalw0lNiFveMJvEj6ZIfjeGyO5bEbD+fcPQPDtFQNfxTV4sbuDuo/DEf9Svavzp08ux - FPF31ZO7DIWWBM70iduzqebL7nrrYRuGYXCgcKomc3sQoN3xMTGG065aTuUngWg+ykgT6jdbxjYU - 4P1UO0QvG3PYd+JthrDuG+Tnh0JlS3VrwOPCZcS2RJPRtzjswZ4XNsSRT2fGo1vmiWQafWSv7S3N - 6evAq4NmLFK5VQlSm+QPf3oyD+U8fwssCDrpuuK7kE/n896CK18ifm/y+RLPVwzvxfwk2hrPix92 - 8iGhshD03OvEKPMKBdjq3SCK0lk5F0jbVf91HULXjcYYhfsCwqtCiN7Iej7mtk/BUIVB8DYPckUf - aA9hF+o1Ob6Ock7rb4nhWq/w9ng/qGN+umewHU8c+fmNy2UXZ3ARlRRZwO7UbsabHtafwg6a2/Pq - /tF3P75iwJy6Y+nqLbz24h1vrYmoNAzeGymt6BF570cXTdxtaCDUlxr52k2LeMeoLPjTI0de21Zj - eSsFSCbsI783r9EyNfIG7uXwg6xrdHMXp8hfsDb3Ia5I8wJUjR0Kt1nKkVg1MrDcnMdd+OGZcY2O - Kp2SdwG/+0IPDms9mj8d8YDYlEsgBM7L5bpbyv30JV7jjQ0/PrL6F4F4yQP34Fw9AVpe6iKb3CyX - 
e7jCC+Kmj5Cz5veY9rUDo+MnR3rtgZws/cuTNherJurK5/jhVJZ//MG5vqrV3oqeIkBJA4PvyKUM - d8y1oI24BT1gV+VzOL02YLxpEvH27ada6KcwwC+/3PxgqIzeDgrg1E2Gt/7xyGj9ijUoNN8QBUL0 - UhcvplDCdgOCvgn5qheDhwDyjcmIdo45dSLC1YPmMD2RHeFKXfWeA9v82JH1fXeJ5+cIH5tjiKW+ - PrPl2AYOVDcgCebKBsMCSmqAq89rWFrXZ/L8Mf7jF/gpi6LlMQEM+HmusCB1C2OYeAb8fHWdyLvh - O7DyXc3A3nVvZK3+7hw93RGGyXUiOnDf1RKwewn3ZNTR73vkZA0y9MTdCTmcNETdD68eS/9FlgWF - iEnlvf3DB3/93cmCFIPJcC7BgSppRWlANiCdnSkoSXuOMOQMA2bavgj4tDpXO7UXRNH9SjaxrceU - r/wigOfx0CE1qcd8zo+8BUZZ0IlN9uUwPxZTgPn28ED+o5aH5pOAEl7zAiAZ86NKhalMJF3PKFJ/ - +vOnJ/HlZmEeGZ+KbZ2CE+3nKUTHsp8H+nWPsvTys/Cn3935tbSj2GeShbQ+sao/9fmnHzhusquZ - u28pbIXuReR1v2RnbhdR2ibLC/PYtfJZ0r4CiMrQxPAIPJdFOPcgGJYjkenZZr/xwMXbyljw2DwQ - 80UF2EmHGUvrfFMLywXccfc0oNK+H6bAtu6QCZYfbE72I59OHYBwbMsiCPryGC3DAu4Q2qlAXHVp - qllLzBM05icNqOJNbBa8LhO/H08MZrPYD62oWnsYDNcAOWs9YUsME7juzyCNZ1y+1G29AYlVc8TD - D7HCommG8PK0ZmJW3Ddf+aoCt7bd4V3eaXmvPIYEPNj9HExdfwe9GwkYXp8Gwtyf/ZKhvEv3wXVR - MD21iuX46MBANt9YFK62usYjBeYRS8Sjcch291lvYYA35a8+AJpLbQ83ySYIuGk2Iop9SwD1e79D - f/iBfSs4Mduc3sjfpX5OH2izAYmTFUT7fEA0rOsnMe51wDvSpKyjYz2C4V3UASONzFgSuwW4ocZf - /SpVxU7i9KBk1ptkyaVS2evQ1X/4ypzYTj41tW1AlKYNCrhDGS2nvBhhAJsb8dfx//Hfi6Nhod81 - t3jgDkc/nIlvtgmYvQDXYOX/+MeH2GJcjZ8fFexs4cPmXq0hPAa1gU6BwOUM7B7zn/0LSy/FP/4s - fIsnGLT5d2BzXMspgJxhIu3zyXMKPBVKwl5mf+rtqic5eKttGmw698w47fEQYTapCQnSRWEzOLwC - Sdttz8HHe++jiS3v9LdfEOxvyr0iZKTOodlmDrK15qIu93arwNUvIaFnxPl82HgGPGZZtPq9/TA5 - n9KRlPYGEOLVbzXJujxC7/ktg02QHdm8eKyQVj2Cd1fqqTQU9gpMqCIgT0mbAd/YNzwsrp0E5WFL - IzrvXAVuC9lC0W3jqNP5FaXQRfRE5PpaVfhRxAHcE6z/8aOYLgj4N158+A7XYThTXoPn7aYIgB+E - FfMTUvzhF+rx1OZsMm0ImavtUHxxvXwWm+AEbv6Kn89jV9E7cVN4vXVn4gNhB1j23nFQ4PcPcmTv - zF3a6CxCZ1PpyB/AMS+7gXvBn17ZRccCsMNmn0CNnwFydpnOllc0bMQsLC6rXpLyxX5NBWzIpiNI - SQKXP/h5D7f83SP+vR+q8e1faxh7YYe57btmy2YgM+hCs0b+UBhg/6s/jXIIkdIOQb6Qy5n++BgK - MimtJl7wLbj6bShdx0O+XMhJP71l+iGolihuRbiAvRIs3mcbDVsn2cOrGqqrHgcu48/b8jBpyUD0 - xy6Lxtqfsz9+wOpPuGydX8neDe+AM6TIZXkjloe1/R9eVyybIw1OSnVEapbXAxWmPgFH81IgvZE/ - Od0d7hwQDr1JjstFy+ncTBRcEEHrdR0x/JXu4s+fkeRyVFvzPKTwOmIDKdnjorLNy8Hwcx1qJGs7 - I1rzhR6ux/uN+M5HdKnnnCzw299SvAoDTEtbhGHgcMTbKGo+9G2biPFNfBHr0Wcqs/3DLK77s0GY - iajifnp9xWtiwtt7WPWCA/TNfoeFMv9Gy7OOHOnv36mA//zXX3/9n98Jg6Z9FJ/1YMBULNO///uo - wL+zR/Zvjtv/m+z/nETAY/Yq/v7nvw4h/N0NbdNN/3dq6+I7/v3PXwLk/5w3+Htqp+zzP278a23v - P//1/wAAAP//AwBJaieS5SAAAA== + H4sIAAAAAAAAA0SPy0oDQRBF935FU+ueMI88TO9CFj4QxJWCSOj0VDJtZqraqQpxCPPvElGyPXAP + 554h1uAgNF5Dl9pssTqVz+XT3fx+/vb4snyl4WvYHday3i58CRZ4+4lB/xaTwF1qUSMTWAg9esUa + XDG/LW+n+aIqLXRcYwsO9kmzajLL9NhvOcurvAALR/F7BHeG1HOXdKN8QBJwRbG0cHVf+cyCsvr2 + SqrpaCE0HAMKuPczdCj/1p5bBAdeJIp60ksjkyJd+ldivJFBFDtrHkyPwbet0carIdYm0t4Qnkzj + xTQ+JSSsjUQKaLrB/F6NTBMYLewiRWk2PXphAgeinMBCpBq/weXjx3jzAwAA//8DADd8MExmAQAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7bf9aa9c391c6428-SJC + - 7bfe2a713f7b96e4-SJC + Cache-Control: + - no-cache, must-revalidate Connection: - keep-alive Content-Encoding: @@ -585,29 +367,37 @@ interactions: Content-Type: - application/json Date: - - Sat, 29 Apr 2023 18:39:13 GMT + - Sun, 30 Apr 2023 07:45:33 GMT Server: - cloudflare access-control-allow-origin: - '*' alt-svc: - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '121' + - '1006' openai-version: - '2020-10-01' strict-transport-security: - max-age=15724800; includeSubDomains x-ratelimit-limit-requests: - - '3000' + - '3500' + x-ratelimit-limit-tokens: + - '90000' x-ratelimit-remaining-requests: - - '2999' + - '3499' + 
x-ratelimit-remaining-tokens: + - '89839' x-ratelimit-reset-requests: - - 20ms + - 17ms + x-ratelimit-reset-tokens: + - 107ms x-request-id: - - bc3576feee112cf82bb99b1ab4530d16 + - a2e25fb58e91f62bfd8851675102b189 status: code: 200 message: OK @@ -651,33 +441,34 @@ interactions: {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", "content": "The current time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This - reminds you of these events from your past:\n[]\n\n"}, {"role": "user", "content": - "Determine which next command to use, and respond using the format specified - above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": - \"I need to read the instructions_1.txt file to determine the next command to - use.\",\n \"reasoning\": \"The instructions_1.txt file contains the instructions - that I need to follow.\",\n \"plan\": \"- Use the read_file command to - read the instructions_1.txt file.\\n- Analyze the instructions to determine - the next command to use.\",\n \"criticism\": \"I need to ensure that - I am following the instructions correctly and efficiently.\",\n \"speak\": - \"I will use the read_file command to read the instructions_1.txt file and determine - the next command to use.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": - {\n \"filename\": \"instructions_1.txt\"\n }\n }\n}"}, - {"role": "system", "content": "Command read_file returned: This task_id is 2314\nRead - the file instructions_2.txt"}, {"role": "user", "content": "Determine which - next command to use, and respond using the format specified above:"}, {"role": - "assistant", "content": "{\n \"thoughts\": {\n \"text\": \"I need - to read the instructions_2.txt file to determine the next command to use.\",\n \"reasoning\": - \"The instructions_2.txt file contains the instructions that I need to follow.\",\n \"plan\": - \"- Use the read_file command to read the instructions_2.txt file.\\n- Analyze - the instructions to determine the next command to use.\",\n \"criticism\": - \"I need to ensure that I am following the instructions correctly and efficiently.\",\n \"speak\": - \"I will use the read_file command to read the instructions_2.txt file and determine - the next command to use.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": + reminds you of these events from your past: \nAs a system, I recall that nothing + new has happened since my creation."}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}, + {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I should start by reading the instructions_1.txt file using the read_file + command.\",\n \"reasoning\": \"The first step is to understand the instructions + and the tasks that need to be performed.\",\n \"plan\": \"- Use the read_file + command to read the instructions_1.txt file\\n- Analyze the instructions and + determine the next steps\",\n \"criticism\": \"I need to ensure that + I understand the instructions completely before proceeding with any tasks.\",\n \"speak\": + \"I will start by reading the instructions file.\"\n },\n \"command\": + {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": + \"instructions_1.txt\"\n }\n }\n}"}, {"role": "system", "content": + "Command read_file returned: This task_id is 2314\nRead the file instructions_2.txt"}, + {"role": "user", "content": "Determine which next command to use, and respond + 
using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": + {\n \"text\": \"I need to read the instructions_2.txt file using the + read_file command.\",\n \"reasoning\": \"The instructions_2.txt file + contains the next set of instructions that I need to follow.\",\n \"plan\": + \"- Use the read_file command to read the instructions_2.txt file\\n- Analyze + the instructions and determine the next steps\",\n \"criticism\": \"I + need to ensure that I understand the instructions completely before proceeding + with any tasks.\",\n \"speak\": \"I will read the instructions_2.txt + file now.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": \"instructions_2.txt\"\n }\n }\n}"}, {"role": "system", "content": "Command read_file returned: Read the file instructions_3.txt"}, {"role": "user", "content": "Determine which next command to use, and respond - using the format specified above:"}], "temperature": 0, "max_tokens": 2801}' + using the format specified above:"}], "temperature": 0, "max_tokens": 2344}' headers: Accept: - '*/*' @@ -686,7 +477,7 @@ interactions: Connection: - keep-alive Content-Length: - - '5473' + - '5444' Content-Type: - application/json method: POST @@ -694,20 +485,20 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA5ySy27bMBBF9/0KYtaSYVf1o9oF6SbbIF0UVWHQ1EhiQg4VctTINfTvBS05Duyi - LbKdx73nDuYAuoQcVCNZ2dak65tHc387//lt3pkm0H3/rO7K5/bhS3e7W0ACbveIiqeNmXK2Ncja - ESSgPErGEvLFavNx/TlbLrMErCvRQA51y2k2W6bc+Z1L59k8inVB1gj5AVrvbMtbdk9IAfLFYr1J - 4Cx+bqw2CbBjac6l7NNqSEA1TisMkH8/gMVwEvbOIOQgQ9CBJXHEdMRIMcKhICGEKIAb19UNhwJy - MRWnBvYciwXcCUIsBTvhUZaCGxSaAvtORcCwzWbcs6i0wThTIqO3mvA4SNizUM5aSUeFLuCsgOSt - kUcZHGmqR7eHv8hHfqkpXDEIbiSLM2jljHEvl06tkTSapOJrGAFjpO0k/kr5r5yzoqBU3JA0+1/4 - B5h3XEF5zVrpYC9vjhQ6j6eA0k7ZNNXXxsp5j4rNXkQjrCqtNBKb/aVbaFE+nZxetDER6f33ONr9 - T+SRYUhO3zcNXD0fSYsj3ivPRQLp68ufHRtx9rx+DXyCOIJMPAUNMCRQadKh2Y4PCTkEdi0koKnE - HvL58GP48BsAAP//AwC/PogcNAQAAA== + H4sIAAAAAAAAA4ySTY/TQAyG7/wKy+ekapvdbpTbSiAoEogDaIUIqqYTNxk68UQzDm2p+t9RmpQu + KQKu/nj92H6PaArMUFdKdN3Y+OFxN/9w16Tl07vPb16/eru0T1wu0+/v03T6EiN062+kZeiYaFc3 + lsQ4xgi1JyVUYDZbpPP0bvqQJBHWriCLGZaNxMnkPpbWr108TaYzjLANqiTMjth4VzeyErclDpjN + ZotFhFfxa+J+EaE4UfYaSubzU4S6ckZTwOzLEWsKF2HvLGGGKgQTRLF0mI6FuFvhmDMAQI5Subas + JOSYwRAcErSXLpjjEpioAHHgSRUgFYHhIL7VHWBYJRPZC2yMJWiD4fJc0ZWuzjHt6lpxMckxeq7v + SQXHhst+yMe/qHbYynA4CzPtBQIJuM1vDSCVEriybpy1bjee2ljF/cAYPgX6M+r/rJrnHMMjK3v4 + QTd10IkUJORrw/QMW6gJIyLtjRhtQj0+NnFoPV3Warkg3/3xlgoGt5A9wJo2zhM03mmionvGzkgF + ig8gKmzD+B6hIbW9TN4Za//9Yz5ftRc5RRcfDae7sRGrmnr9X2ceIShfjt3XJ7raa/stzgXiDDLw + 5HzCU4QbwyZUq95jmGEQ12CEhgvaYzY9fT29+AkAAP//AwAwBdt+/gMAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7bf9aa9d7a3f6428-SJC + - 7bfe2a789b7296e4-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -717,7 +508,7 @@ interactions: Content-Type: - application/json Date: - - Sat, 29 Apr 2023 18:39:23 GMT + - Sun, 30 Apr 2023 07:45:42 GMT Server: - cloudflare access-control-allow-origin: @@ -729,7 +520,7 @@ interactions: openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '9467' + - '9322' openai-version: - '2020-10-01' strict-transport-security: @@ -741,43 +532,27 @@ interactions: x-ratelimit-remaining-requests: - '3499' x-ratelimit-remaining-tokens: - - '85995' + - '86458' x-ratelimit-reset-requests: - 17ms x-ratelimit-reset-tokens: - - 2.67s + - 2.361s x-request-id: - - 10ccfb983a68d5f1e7b284af0cf6d8f2 + - bfa86d8ef9949f22b8e710ac50b2fcf7 status: code: 200 message: OK - request: - body: 
'{"input": ["[{''role'': ''assistant'', ''content'': ''{\\n \"thoughts\": - {\\n \"text\": \"I need to read the instructions_2.txt file to determine - the next command to use.\",\\n \"reasoning\": \"The instructions_2.txt - file contains the instructions that I need to follow.\",\\n \"plan\": - \"- Use the read_file command to read the instructions_2.txt file.\\\\n- Analyze - the instructions to determine the next command to use.\",\\n \"criticism\": - \"I need to ensure that I am following the instructions correctly and efficiently.\",\\n \"speak\": - \"I will use the read_file command to read the instructions_2.txt file and determine - the next command to use.\"\\n },\\n \"command\": {\\n \"name\": - \"read_file\",\\n \"args\": {\\n \"filename\": \"instructions_2.txt\"\\n }\\n }\\n}''}, - {''role'': ''assistant'', ''content'': ''{\\n \"thoughts\": {\\n \"text\": - \"I need to read the instructions_3.txt file to determine the next command to - use.\",\\n \"reasoning\": \"The instructions_3.txt file contains the - instructions that I need to follow.\",\\n \"plan\": \"- Use the read_file - command to read the instructions_3.txt file.\\\\n- Analyze the instructions - to determine the next command to use.\",\\n \"criticism\": \"I need to - ensure that I am following the instructions correctly and efficiently.\",\\n \"speak\": - \"I will use the read_file command to read the instructions_3.txt file and determine - the next command to use.\"\\n },\\n \"command\": {\\n \"name\": - \"read_file\",\\n \"args\": {\\n \"filename\": \"instructions_3.txt\"\\n }\\n }\\n}''}, - {''role'': ''system'', ''content'': ''Command read_file returned: Write the - task_id into the file output.txt\\nShutdown''}, {''role'': ''system'', ''content'': - ''Command read_file returned: Read the file instructions_3.txt''}, {''role'': - ''user'', ''content'': ''Determine which next command to use, and respond using - the format specified above:''}]"], "model": "text-embedding-ada-002", "encoding_format": - "base64"}' + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key and potentially important information + to remember.\n\n\nYou will receive the current summary and the latest development. 
+ Combine them, adding relevant key information from the latest development in + 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\n{''role'': + ''system'', ''content'': ''This reminds you of these events from your past: + \\nAs a system, I recall that nothing new has happened since my creation.''}\n\"\"\"\n\nLatest + Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}], "temperature": 0, + "max_tokens": null}' headers: Accept: - '*/*' @@ -786,128 +561,27 @@ interactions: Connection: - keep-alive Content-Length: - - '2083' + - '741' Content-Type: - application/json method: POST - uri: https://api.openai.com/v1/embeddings + uri: https://api.openai.com/v1/chat/completions response: body: string: !!binary | - H4sIAAAAAAAAA1SaSRO6Spfm9++nuHG39hsiInm4OwYFZMhEQNSOjg5ARXBAhkwgK+q7d+C/ojpq - 4wIzQJMzPM8vz3/866+//q6z6pb3f//z19+vsuv//l/ztWvap3//89f//tdff/3113/8Pv/Hyts7 - u12v5af4Lf99WX6ut/Hvf/4S/vvK/1/0z19/Hw3JYRmNmDGePm2CkP/dEY/auTupWfFUFHF6U+W0 - fbUT32UybI5Lxuzp21cd2voFGtpOZbE9xpztskoGPU/feOqyrzGgs1gDyXOR6GTo0XAg1wEON3Yh - 2tn+ZJyEsYqq4iwzHxaK8V2cYgAmrA1mhTrPxk2tq3JenhyyX4V1xk/2pUabtfplp720dl803VMU - BYqC19JBa6f13bAB8Xak7GyLVceKRylvoN4SsnMdNB5WY6Jc9QUi6uNr8+F61xvFXrwoM2z48MGd - cglUOW5JclVXaDhvCwlt79sQK1v1w+l0egEAi9bE2q18d7qpbY7GfBcQbbf9ZpNx9Z9IeIklMfTd - YEyrdF2C5z9ChpefUzW863UM2HlV+BnVXTSuQa1RgwMNL15ciDo7DT3l8xpCtt+mH/cbk7qW7mp+ - xJDfVgZTl9cS3WIRM/0SnNHUklsAh0tlEeP7KTkvv0kBF1u4kODs2e7kLlIdShwvyPYOTz5ode1B - 5OkuIfV943aVfJMA7uWZ+S4so/GzYR3a7gPCzlnGo7r6pDF6hfmKHd/1vh1cYX9DD/3AmO47Tz5U - yqCjl9VaxA/5yXgRnoawyJ9fcnRTlg1rdX+DLLivmb+4my4bF/sConUdskA2Vhk/TzRFg/M1qSij - NuLeq3aQP6gndnXhHvFnX9pQgCwQ7XE7ImHPViIoL9Yxo3B0YwVJmcPb/074TLDv0vaFFmDmdkVS - 7V2gCWtVDdslNZhfXBcZ+96+NVDDO5BQ129tvztECbgH5BGVR4x35PER4XZ5nci23rdZLX7WNnSt - NRJHfooutZ/NBIFPz8w/FGE7tFl7hvMVh8R/tjqfXvezAMlrsWNbT7ghWtBLDLcb/7Jd6MSI78xp - gAutcxLrGonWn1x/AqKBiOszPbvj6VMl8MYmIlYOR3e0k0MOfKXuiTVGp5Z7RM+Vwd0Soklcz6aW - JCH63B87CifbjPgVCwMcN9KKkI8uuT2qhVLpDwuGy+Szazm6ZQsUNsqB7O7S1lh3UiApXl3VdJO6 - ajYcm/G9RNV1z8yocDiXxsJU4vrWku0j5NXo544Jp2wxUMmrt+7QT+gJx6VlMW233UejvUQdRLnH - 2f4U11UT6IkDfooj3Nl4iwTfvVDYxwIi8eCtsjHXdzZI8YJSMZAfaHqcqQPUPlfEKY11xtO92MDt - 8jlhaTd2EX8oVwfwqJ2ZBf4mmvNThuVG25Nwjkfe3GNHYfebTYxbrxmD+t47G+31+DKv8bZZ9/qO - Z2RYrydxz+UR9U+XxHI3ThbTzag3eCpOOgoqK6JgKKJBM+UgorsjbLAwxwM3LlvhFw+0PG/Nan1k - sQiitt7g9jgF2UBvsJCpPtrEpRFxWbmwGjiXL59hxS74UKIA4NXLEdHRsWk5wx9T8Ranluhm5BtT - hvoQsOg3tDmyBX8d34MA5140iLmpTT6eIM/R+xZVxA6eGWJC4oVIN98VI5ZkutOxuKiwcpsX2c35 - x596lkD6XA8Y2S7P2OX+EMAWfIfybMMzyhJ1AaeAyXhpvh7VHF83lMatzfQC61wY7uwG/LJ/MOx0 - XrTW6aBDGJx3eFM9KB+dVtnC3Q11Rm5LanASq2+wl/qaitHl3E7KJxXhUZeIClfybbs2q1IFGcGD - JdKxjIb38qgjiKOS7BSxQow3ugQneD6IsTzs0bgNahHuquERfa/WFRdbJsFLNzpml6Hr8jVDBWyX - gczce1Jl/CKEAIb1eTLrPEm8qcsyBascVbI9TXXL5nxTppL1uLbcyh2fkt4h0fu+iR/EajYa0q0E - ozIM5uH1CfG0VCUgXoDZSV477XDWhQRGkyZYPr+Tdrw0Toq09u3TX33hahcAityXjmV6Kw1u9MoC - 3DwaaKrdcTWi2E1BeTsrPK3Doh3kTgmQPnkNU7mnVNOtlUOgvtsxlQU71ClXPQQprl5UTtU+42vz - EMAvXqwx3aNVuQ48qJgyEDuY1Gzo94dE2aeGT8zQFIy+OzUNzP2Ivt9Pm3/dk5jDCL1GtHRVtlPF - FjGEzqnA45owY4zL3kNH46oxc3DfBiMBy0E8SVesPNuScxJ88l+9JjtR2vC2k84SiGHnku3S37ur - cfJS2IwZZrq+D/hcbwAd7k6JxdB/Z9PrHgjKZjdaLF59Rbd6tsoTDYUU4eO9WVasXAoYcWG/JqYo - bFxambEJK1WU8NQsbGNcqOsA5FBdEu2srdzavQcJaK27JfjSF1nv0diBz9F5YLn9OJGogLuQi8M9 - wYqITy2/OEOnVNtlxvz45ESry/EQQq1ONfGNzTGb40tG+joF2h0pqaZoqkzlqgMih/ApuP32dQDl - 29cBMW5vjPg7C7aKo0oucb1c4+1nKkLFCMWM6WStGnwzvNXf/hEsLZ7V0O78DvA7d8k1UaKsX1eL - VMZovSP7Tf6c88/JYX1JPmz7zQkStZSWSA91nan4JrXck54UVok/sd9+TwavAvDjt0SR4gd8ksQs - 
kXeFFRMyGZ7BH9EkQZ3sS2Law6tlltUH8Cq/V+aH8jcrysBx0NbwW+bm39qYtl5tQtL0mBgXTzI6 - A04BPKidMNdnZjQeHjZGbt5t/vTPITmaKbgfRshefHWIPc+jCiFZIDxYNHKnaK8KEKnjkQRdfs6G - Sr7Jcv04X/CmxE7W0VXagL1U12TvPY1IaCfNhLn/0G/qFhHfTdpzvfQFl6XK2UBT8nAL8AybErvf - pe0I2pgox+XOouPrceecoTIH/bAfyKG77/kQ4CoE2x+PzCFm247f+GAq5eUYM9/sK+M1msob5PxS - McsiBhqOiXqTTypkWJq+frV6v4YamVmfMptNRzRdQ6VAVeCF7LR5vo1Zv+rA5NuNntJI4+Puvc2B - fvWYudQD3nNX8xRz/daZUzr7djVO21QubwtGtktT5Px4XtoQHQTCUp9bBgNvL0E3Dhbx1G1d/dED - JzdZ0v4qtO7zMjRYvo8hZVvykI3pcHknsGfrgD5XX9Hg68xJ5bXdiSw9v5Nqim1zQptROLHb3dFc - trsGuULW8hGPpIesm+8HR0N2KDfOcdVf1NpGu+Yq/dmPbrh/bgAxbZmhe3o2WksNK+Mptpj2PPYt - /wyog5V5i/BqLNuIzvEKXVvIzFP0Kx8yS84RNfCB7Qshapl+TmMQtmPNtDmfusEREwW39ZWctzeL - dy9YLWS47wj+npt1xu6EBajPh5xa9/Jm8FKrRXDckJG9dz0Y480MJmXOb6LeVsgY00aQkFsWCvGd - oTeY3hs3UBBIuK0VtRUKvzUhsLcK8wUUGRM+7xNYTOeeeFOiVmKQFinKD+zCPAepxpSFhg5a0yOm - +uE7++ljOEbtl4q7jY8mFOg1BNUuYn5NOzRE0SMF+8xbZirtwMeuiwZ0ek83tnVvgTH1i6GD+Bwa - DIdi3f78ljzXOwqzPhnqXgc09ydGokVqDFUgNCh0pi8d0vaG6Pz7lKk+2Qwf74h3/PDVIVfeKrM2 - T9P9/X/QR/5g7mknoHHzPuagxmVNPGU/GENQ5SkSS5mTfa9dkGA2S4psnx/xEtduK6iyHMhzfpBj - Eo/GBO86hobmX3K4ZC6ahnOhwwL4gbZ3Mhr9U9Ipeh7qnNhsWqFxR94TrA75l6kh+7gf/502yIHi - QE4WqRAXlrQE23ZlYtmvqZrfbyjrh+eGYWsltIPvHqiyAsmim5WbuNw/PEqka+mBbRvxXfHuwT1l - n94SXByOQ/T1d126Nmy9J/j6ldyxdHIRljyjWPb5x5ge/SmEdEV15h9HzRUKaSOBe7Ju9JefP72C - 1KUl4LYWacSf543+x596E8HZVK/egJCtNXhD40U2vp6eCRJbGsw8VTGnqLIwqBcqY/m0P7a/94+2 - dzMklqXds/F+rRcoKZKGzno/40KzOitbQU6IPfurKVkJtaJeOpnsd+9Nxja2lKDOdFS6bvIl4sJp - EKBz0J54izyu+tFcvdGY3Fd0PfU04t/bt4HGJlfmzv6dfaYigNXh9iXb/n5BfO7fSN0mKouuURPR - X7w72fXEsG1WWZ18lyXap3nCbi8eR99GDXP4+eXAzGg1qkUkQGwJMnE2zj3rX5KCYRxRTBx6092u - n/gT9IS7GBVdaEyz30GLZInnfm9m3BF3NVz9oSbbRjQrgTquCOdeMIh/2u4qJo3FFj0s5ODPvVy4 - bbxTt3Cl6YXsxmzjDr576TazPiW2YDzbpl5RQCszjzA0u8mtLwYV0Zx/RC2hjwYlu3ZIaxiii8nq - 0LiVjS2Q9dXDaPGxqmnoD45M9eMSr797FI2y0tdg+HhJh0l1oyF4qQtltQ464hLdQatpH9egqTYj - bhO83JGl4KFRK7aU3/qHy/U2keWZd5Dt7Oen5VEd/uh7/44ffHy6JEGbUTxhfgzUaLXYnlOIrctI - 7Oiwdr/LnSOhuV4Tsscxmi4bjMFxA0b8QzFVs9/OkanZmF3ehzgaN4dnDNRSUuLz1zbqS2+fwiJR - MHFW+NOy2T9DuHMxXjPF5lOzkd9/9N5OU2j10wPK0TAkujDLwOWe1HXomRvqr16j4QAH5w8P0VbQ - tEPyXRZw3MgrOrl9hcZxOIlo9p/sBnKeDa5lmj/ewOLH5cX5eMA2rPlGYISqpTvmxtgp836zPaMr - d7q89w3c3+GZTutQbblTaVv0Cm8rKg6uaYypljYgnuQr2y33dcSPu+kNxUG64/ykxcYoem/80/Ns - V++atr2TTwBx2NREww/HYMHiGKNhNBHdHOSRdw1rVNQ8t7vf+oq+LPwGFtcZXk1QGoMRKqrc+ILE - jL7x+JQeghKG4uqwfXbbcvFA7zd0WWQIrz6IGp1MvwKgus3wwIIX6oWiE2Dy44lZ76J2B+XqBNAV - zGemPezaKVMOAqTseyXYecnu2Gvrm9ws2sOf+w0zn1KKbC3TWS8ZfUyKRjHHxw134cIxplX+NCHg - /ZZsr/Y6G7Jb4MDmnUp//Op3sT2fARnhg3nd6lTxY9p2YB5ikVzbzHZFJVBsoBsPSKc3YzvS3fkJ - RdLWxHN61A5Txbd/6st+FdrZ8Kq3ImRuucNPEdKKpaKsQq48VSouFquMiec2BrgXZ+KcAgsJUnPD - oO2OGjO+RdDyWV/AEQeULpafdcuO56UDSnu0yD2l93a40FWDskeyZ4aQhtHgi3UBctkMjOSXncFP - vM7l9zK54qWLArerNXDQo9wUbKvJV2Oq/TFQjJocGU42YTUFZy4qV72asMzGdzv61nkBzdPc4cfc - r7p22psg3aeMWeRSGL1a2il4m/2FjgQVfJ2vn8WvHmNRwoE7Na/9Deb+iuVzb6PmXbgxXBcfjTlr - pHMxC243ZMIQEyytp2haRrEHsx6jSmMbraCh5wDNRhPJrml5Rd/iV4drrwQ/f9ey2e/D0s9+fhkb - 62vVS+gm+zZx74kRienhXMIwbhFehKJdTdL+tECBhBK2F18eWnXUjoHy1wMvr92+5XZRqtD44qzX - 7LEd08SYYOaldEin0S0lgU6ywLiCV4po8NGoXimQw3dPRTfB7XpjD7Gy25kLsrOFd8QfzfmNDFvt - mRecNXcch7uIhtc3J/v32ayGQhqlP35S3Dzitn2dxIV8CnqZ6b4RVrSVexNOu/7J1GspVrR98QUY - tX8ke0nvUWM4mq4EVitg9Fgm7bj7bHSQWCUzf3L7lln7i4ggjU3McShG/H6IRKXOjgLZCWHAB9nI - dPj5ddwGK2P0BrVDKu6exF/c3wa7ocGGcdzE5NffW9s+TVDiwSLH8rHOBsW+2JAnqKXr7P2e+dKi - gZl3s7R6qhmHgusogkvGvPactOxavWRQN5uSzHza5VPU4429zFXiH8eHu/p+jjXQrxqz/XPBOLOs - V4CE5tVSubpiV/rx1p9eWVze2F1f7l8BksDRyf7A7xXrxGcCl2XgEzM96S3XNZj1pBXRZThZaHW3 - 
3EAOdLllRneyo9XyAjo63O2SjnpsIPZY7jvYGqQldla+Wv7dS4U0TskVS5JJq15wiCNb3oPTzVo6 - RP0XzAn4bQIqo7Vc9d52lCDPHybRV9KX8/elKpS1f11iGuiZW08VMjfa7qT94a3jwWkKMGGKiWfj - 5493ilDt0ifTn07adq0pBzDpO6AvcekZo1BeUpBezzsxHtd3O0idPqCnWZszb7f4UDztAU6iYrL9 - 5jhF/LsfSjj3Z5lYg/WKpn3QOaDG1p0Gh2OQTe63ltHsB4k/+/lJluQzrA0rY9qLPaPx1w+poDZE - bxa10Xm3JABlB/6vXvEpeRjlf/HvYfhU4zfsCnnhnTW6Wu7taLV+NbBJHOuKP9JBq4b8mphw3Nw7 - vPGeVdbmD1P86W2sKDsSrcPODdHD2jhs+zQf2bT8jjLci0Yhcz1u5/pe/Dnf2MKncKd6Y//hZeTH - IwfE8gTYM5PJH317ULWFMvcfosqOVQmuPdWyUkBFu3tYVgNHboA+d8BEZ9rkjvdrAYp/GR1mpR5r - +ZY1BZrrJ9lOm6Ca2k9dIn0t7IixtvzqDw883PoL89t4hcSZbyo//fZK4tH98abfeuZ4lyCb9YCt - CIF+Z6YVbathkp4e+PfzC1dUGTP206dLQ78x9dWFnL23UwqPerHF0muvo8mUvuc/+UcuWeaOhKcB - HJZDxDSCVNSZXRfAN1EQU9ONEQ0/3uCM3ZOo1zKZ9T7C6KFHDCvX/szpTW1vv3gj5nL3aSdTBBH8 - 1ItI0tCdKzwqk8LMZ5irZT7nuvH1EMmNnqIreO0AoyorG9MLKKpFHFVLn8qykAoe2zrbSzbln3cD - 6r4MqeAJNz756mgrq5O5JQa0BZ/OInNQngPB9cyjKcLJXO/Yk6k8InwdG1kBWYFU4jrLAk0yOTtw - uRgahkO7q8aPogpIPpEDsZr8jkbneEpRptIzhoJV7SDpRYl2Kb2wrYcOxujooaicssfnxy+qxlc3 - NurL/YmKn3Ufsc49Fmj3it64mf0ORRXByD+qGkvca8H51rQD0HaDg9ncL6hZSSE4zupN9ufx2zbc - sme97yFm9UHf/sm3n34uZl4jvLtzDmghf5gVHddoVEv1jELnWLA5/oxJCR8UuqL3sVwliPeWEtew - 0gKN3D/7JhuM5XAGb+Ne8Drc79pR/hD4nXcwktJ7NeEkn9DMg8l2fHcuCxbXGO4FEehmusnVzx9B - Xh4dgud+uBJW1wXM53fMm/eTlwvSwMUWL1SKzt+oq8DIIXxjm3hKUEaj+Bxy1Ny0gsz9pZrPK96g - Z8qV3ehWR/wR4gS2+5AwVVpXLsVaNeduiMnMl42hkhMJzbwfo+riR0PhV+YffunuX44xnmQmIyw2 - nGjGx476JjqpaGipSrn7rX76UgczsZbEZW9e/eEZg2sSRlxB4WxagI5UOWmZmTUf98dv4GTaPRWz - 5mOw81UJ4FM8VaKft7P/LXAD5Wa5Iyq6di7/8nSAqYeKnHar3uUbyxjgKHQVpZc3Nca19Clgdyws - 5lw2aD4PDFU06xNmyqjN6FlWw18/J9aOpqj/hs8C1gv2wKvT59hOxQk1QBfCaebndjVlykWU++ld - ME34vt2hKHMM/OI+mKEtFDSIn6UDnuFQurHUwOgOn0IFM4EDm3k1H46JnYPmLHLifDUtExG65Oh9 - sQ54KJJnNk1swEqfTznRjfwYdfQGACfYn0kyx3svbh5PZdbjRPMtHg2s+JbwdIUzRUpsuFO5e7zh - mywRFaPCQcILVoDez9j+c/7S7NEE6HsSfbyp0rwd1u05UPSDO9DK/Vbu+H5JDZr1I/O1qIv6TvPg - j/5XgrjIJtmv3pC1tCfOT28a1esM7Z26M2+J0fjEdS5vHvjNjNq88EHJjhQ5I33SSQ8qg4l9nv94 - Hl7Qbcm5j9wJ/EE/ET8+NdFPf8oefXvMdZYqWpXFVKD+tNfwatN9WuaWlg749dmwvXcdje9D2Nib - wpT3M2/MOZ/1B5SbCrBYa3suajslhR+vzKiXI36HpkYzX6T9/LyZNy2Uv39TAf/5r7/++j+/CYN3 - fb295sGA/jb2//7vUYF/p9f034Ig/puJfyYRaJcWt7//+a8hhL+/bf3+9v+3r5+3T/f3P39JCP7M - G/zd1336+h9f/Gt+3n/+6/8BAAD//wMAUfv/wOUgAAA= + H4sIAAAAAAAAA1SPTWvDMBBE7/0VYs+ysRO7cXULbQ+55FYoKaXI8iZSa2uFd0Nqgv97ST8IvT5m + hjdnCB0YcN6KG1KfrdanRVvuHp52zpWb+8dqW09lU2zaffM8gQZq39HJbyN3NKQeJVAEDW5EK9iB + KW+bRVMVq6rWMFCHPRg4JMmWeZ3JcWwpK5ZFCRqObA8I5gxppCHJm9AHRgZTLmoN1+0rv9MgJLa/ + kqqaNThPwSGDeTnDgPy3OlKPYMAyBxYb5eJIUTBe/NesrOKJBQetNmpEZ/teibeiIokP8aAinpS3 + rLxNCSN2ikN0qIZJfV8NFHO1/Zf9yeUwa9iHGNi/jWiZIhhgoQQaQuzwE0wxv843XwAAAP//AwBZ + 1nVcfAEAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7bf9aae9deb86428-SJC + - 7bfe2ac2bb0b96e4-SJC + Cache-Control: + - no-cache, must-revalidate Connection: - keep-alive Content-Encoding: @@ -915,29 +589,37 @@ interactions: Content-Type: - application/json Date: - - Sat, 29 Apr 2023 18:39:25 GMT + - Sun, 30 Apr 2023 07:45:46 GMT Server: - cloudflare access-control-allow-origin: - '*' alt-svc: - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '214' + - '1446' openai-version: - '2020-10-01' strict-transport-security: - max-age=15724800; includeSubDomains x-ratelimit-limit-requests: - - '3000' + - '3500' + x-ratelimit-limit-tokens: + - '90000' x-ratelimit-remaining-requests: - - '2999' + - '3499' + x-ratelimit-remaining-tokens: + - '89832' x-ratelimit-reset-requests: - 
- 20ms + - 17ms + x-ratelimit-reset-tokens: + - 112ms x-request-id: - - 301bbf6b8f5f18d655731e55edb09ca9 + - 74efe1e7ce809342ac1cb7dd57f90235 status: code: 200 message: OK @@ -981,46 +663,47 @@ interactions: {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", "content": "The current time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This - reminds you of these events from your past:\n[]\n\n"}, {"role": "user", "content": - "Determine which next command to use, and respond using the format specified - above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": - \"I need to read the instructions_1.txt file to determine the next command to - use.\",\n \"reasoning\": \"The instructions_1.txt file contains the instructions - that I need to follow.\",\n \"plan\": \"- Use the read_file command to - read the instructions_1.txt file.\\n- Analyze the instructions to determine - the next command to use.\",\n \"criticism\": \"I need to ensure that - I am following the instructions correctly and efficiently.\",\n \"speak\": - \"I will use the read_file command to read the instructions_1.txt file and determine - the next command to use.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": - {\n \"filename\": \"instructions_1.txt\"\n }\n }\n}"}, - {"role": "system", "content": "Command read_file returned: This task_id is 2314\nRead - the file instructions_2.txt"}, {"role": "user", "content": "Determine which - next command to use, and respond using the format specified above:"}, {"role": - "assistant", "content": "{\n \"thoughts\": {\n \"text\": \"I need - to read the instructions_2.txt file to determine the next command to use.\",\n \"reasoning\": - \"The instructions_2.txt file contains the instructions that I need to follow.\",\n \"plan\": - \"- Use the read_file command to read the instructions_2.txt file.\\n- Analyze - the instructions to determine the next command to use.\",\n \"criticism\": - \"I need to ensure that I am following the instructions correctly and efficiently.\",\n \"speak\": - \"I will use the read_file command to read the instructions_2.txt file and determine - the next command to use.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": + reminds you of these events from your past: \nAs a system, I recall that nothing + new has happened since my creation. 
Nothing new happened."}, {"role": "user", + "content": "Determine which next command to use, and respond using the format + specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I should start by reading the instructions_1.txt file using the read_file + command.\",\n \"reasoning\": \"The first step is to understand the instructions + and the tasks that need to be performed.\",\n \"plan\": \"- Use the read_file + command to read the instructions_1.txt file\\n- Analyze the instructions and + determine the next steps\",\n \"criticism\": \"I need to ensure that + I understand the instructions completely before proceeding with any tasks.\",\n \"speak\": + \"I will start by reading the instructions file.\"\n },\n \"command\": + {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": + \"instructions_1.txt\"\n }\n }\n}"}, {"role": "system", "content": + "Command read_file returned: This task_id is 2314\nRead the file instructions_2.txt"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": + {\n \"text\": \"I need to read the instructions_2.txt file using the + read_file command.\",\n \"reasoning\": \"The instructions_2.txt file + contains the next set of instructions that I need to follow.\",\n \"plan\": + \"- Use the read_file command to read the instructions_2.txt file\\n- Analyze + the instructions and determine the next steps\",\n \"criticism\": \"I + need to ensure that I understand the instructions completely before proceeding + with any tasks.\",\n \"speak\": \"I will read the instructions_2.txt + file now.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": \"instructions_2.txt\"\n }\n }\n}"}, {"role": "system", "content": "Command read_file returned: Read the file instructions_3.txt"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": - {\n \"text\": \"I need to read the instructions_3.txt file to determine - the next command to use.\",\n \"reasoning\": \"The instructions_3.txt - file contains the instructions that I need to follow.\",\n \"plan\": - \"- Use the read_file command to read the instructions_3.txt file.\\n- Analyze - the instructions to determine the next command to use.\",\n \"criticism\": - \"I need to ensure that I am following the instructions correctly and efficiently.\",\n \"speak\": - \"I will use the read_file command to read the instructions_3.txt file and determine - the next command to use.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": + {\n \"text\": \"I need to read the instructions_3.txt file using the + read_file command.\",\n \"reasoning\": \"The instructions_3.txt file + contains the next set of instructions that I need to follow.\",\n \"plan\": + \"- Use the read_file command to read the instructions_3.txt file\\n- Analyze + the instructions and determine the next steps\",\n \"criticism\": \"I + need to ensure that I understand the instructions completely before proceeding + with any tasks.\",\n \"speak\": \"I will read the instructions_3.txt + file now.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": \"instructions_3.txt\"\n }\n }\n}"}, {"role": "system", "content": "Command read_file returned: Write the task_id into the file output.txt\nShutdown"}, {"role": "user", "content": "Determine which next command to 
use, and respond using the format specified above:"}], - "temperature": 0, "max_tokens": 2577}' + "temperature": 0, "max_tokens": 2132}' headers: Accept: - '*/*' @@ -1029,7 +712,7 @@ interactions: Connection: - keep-alive Content-Length: - - '6526' + - '6465' Content-Type: - application/json method: POST @@ -1037,20 +720,20 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA6yTy27bMBBF9/0KYta0YVu1YmtXoAWSVRfpA0UVCAw1kRjzIZCj2oWhfy9ESVDs - oIsA2d4hDw+HnDOoEjKQtSBpGr24+fSs8Vf1o/ZSlH9uDz8PX77ep7vP97ep2AAH9/iMksYdS+lM - o5GUs8BBehSEJWTrdLe52SfbNOVgXIkaMqgaWiTL7YJa/+gWq2S1Bg5tEBVCdobGO9NQQe6ANkC2 - TvYJhxk+F/ZbDuRI6Dna7nYdB1k7JTFA9vsMBsME9k4jZCBCUIGEpV7TWULbX+GcW8YYy4Fq11Y1 - hRwyNoZjAU/UhzncMYtYMnLs6BUhoxoZiXAoVMmUJReDJ6WRuZaalpZ0IiZs2eeWhbql0h3tMgf+ - ku9RBGeVrYZDvtXIlA3kW9lfOxRJxETslGPJDL63R6OFHRQW7HsYqJFfkCsiTTpjIuYtBy/z3M7A - uHp81Avg/6ykV6SkCub6CdCG1vdQQeyOCROVlK3iMdJ5j5Jey71oSPQcujIheonI6E0miP577RQa - FIfJ56i0Zu379Wt+qPYNPRv0Oj795nHVq89shcHB/EL16oLCV9dzMBT6tTNilr7Yfz02m2T9cTKM - lqNsbjvoODwpq0JdDGMAGQRyDXBQtsQTZKvuofvwDwAA//8DAIII0sehBAAA + H4sIAAAAAAAAA5RSy27bMBC89yuIPVOCbfkR6FYUaBv0UBRNL60CgaE2EmNpVyBXsAND/17IsqFE + RQ++znBnZpdzAldACrYyYpu2jnYfDyu7+FF+esHv3efq6+vvB3o9/qyKw5fuG2jgpxe0cpmILTdt + jeKYQIP1aAQLSJfbu9XderFbbzU0XGANKZStREm8iaTzTxwtksUSNHTBlAjpCVrPTSu58B4pQLpM + domGSXwitisNwmLqCdokm16DrdhZDJD+OUGD4SrsuUZIwYTgghiSISaTIA0rnDJSSqkMpOKurCRk + kKoLeCHwKAOYwb0ixEIJq4N3gkoqVGLCPneFciR8BriTtpNYjqKeXY2qC47KM3MeyoXzM265aQwV + cQb6rZtHE5gclaPlQ4XKURDf2eEIIU8m5SuOhWrwtlRz17Y2NBpG6lfA/8e9zWbmYr0TZ11o5udE + Cp0fJI2o+zf6lr1HKzevE1o0+6vJwdX1LR9GfIgzGOV6fa3HZf9/2kGmwdHp3b1mgYwv58UaieHt + JDFFeTc/7+EqWa6vCc8pL2Ez6qHX8OzIhSofmwQpBOEWNDgq8Ajpon/sP/wFAAD//wMAK3Nw1/ID + AAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7bf9aaeba83c6428-SJC + - 7bfe2acc48b496e4-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -1060,7 +743,7 @@ interactions: Content-Type: - application/json Date: - - Sat, 29 Apr 2023 18:39:36 GMT + - Sun, 30 Apr 2023 07:45:56 GMT Server: - cloudflare access-control-allow-origin: @@ -1072,7 +755,7 @@ interactions: openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '10520' + - '9570' openai-version: - '2020-10-01' strict-transport-security: @@ -1084,43 +767,27 @@ interactions: x-ratelimit-remaining-requests: - '3499' x-ratelimit-remaining-tokens: - - '85990' + - '86451' x-ratelimit-reset-requests: - 17ms x-ratelimit-reset-tokens: - - 2.672s + - 2.366s x-request-id: - - 40187af4864698ab7cafb098969e7421 + - adf37a7bcd126ffa2323b9436545abfc status: code: 200 message: OK - request: - body: '{"input": ["[{''role'': ''system'', ''content'': ''Command read_file returned: - Write the task_id into the file output.txt\\nShutdown''}, {''role'': ''assistant'', - ''content'': ''{\\n \"thoughts\": {\\n \"text\": \"I need to read - the instructions_3.txt file to determine the next command to use.\",\\n \"reasoning\": - \"The instructions_3.txt file contains the instructions that I need to follow.\",\\n \"plan\": - \"- Use the read_file command to read the instructions_3.txt file.\\\\n- Analyze - the instructions to determine the next command to use.\",\\n \"criticism\": - \"I need to ensure that I am following the instructions correctly and efficiently.\",\\n \"speak\": - \"I will use the read_file command to read the instructions_3.txt file and determine - the next command to use.\"\\n },\\n \"command\": {\\n \"name\": - \"read_file\",\\n \"args\": {\\n \"filename\": \"instructions_3.txt\"\\n }\\n }\\n}''}, - {''role'': ''system'', 
''content'': ''Command write_to_file returned: File written - to successfully.''}, {''role'': ''user'', ''content'': ''Determine which next - command to use, and respond using the format specified above:''}, {''role'': - ''assistant'', ''content'': ''{\\n \"thoughts\": {\\n \"text\": \"I - need to write the task_id into the file output.txt and then shutdown.\",\\n \"reasoning\": - \"The instructions_3.txt file instructed me to write the task_id into the file - output.txt and then shutdown.\",\\n \"plan\": \"- Use the write_to_file - command to write the task_id into the file output.txt.\\\\n- Use the task_complete - command to shutdown.\",\\n \"criticism\": \"I need to ensure that I am - writing the correct task_id into the output.txt file and that I am shutting - down correctly.\",\\n \"speak\": \"I will use the write_to_file command - to write the task_id into the file output.txt and then use the task_complete - command to shutdown.\"\\n },\\n \"command\": {\\n \"name\": \"write_to_file\",\\n \"args\": - {\\n \"filename\": \"output.txt\",\\n \"text\": \"2314\"\\n }\\n }\\n}''}]"], - "model": "text-embedding-ada-002", "encoding_format": "base64"}' + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key and potentially important information + to remember.\n\n\nYou will receive the current summary and the latest development. + Combine them, adding relevant key information from the latest development in + 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\n{''role'': + ''system'', ''content'': ''This reminds you of these events from your past: + \\nAs a system, I recall that nothing new has happened since my creation. 
Nothing + new happened.''}\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}], + "temperature": 0, "max_tokens": null}' headers: Accept: - '*/*' @@ -1129,128 +796,27 @@ interactions: Connection: - keep-alive Content-Length: - - '2194' + - '763' Content-Type: - application/json method: POST - uri: https://api.openai.com/v1/embeddings + uri: https://api.openai.com/v1/chat/completions response: body: string: !!binary | - H4sIAAAAAAAAA1R6Xc+yPrfn+fMp7tynzBMRkC7+Z7yrgK0KKkwmE1AuBEXkpQW6s7/7RK+dPdkn - JtiaSsv6vbH+419//vxtsiq/Dn//+fP3WfbD3//1+e6WDunff/7873/9+fPnz398P//HzLzO8tut - fBXf6d/B8nXLp7///BH/+5v/P+mfP3/nS7xmcR/tqhmFogr5at8Qa1G90Tw+dxGEk/qkC0U/ZfNg - dh5oSscYWfQ2HwONK1Df0YNhrTeqHrcbAdU2jGQTNzdr2nq2DXNyaElQRGo2aegyQnqsrmSdVMtu - FkMoYIn6ijn5jMJu47gP6G6vkrnHZol4izY2Ul7plRDfkbpJXj9SFE7Kk10Vj2f9eqIzOggyoVKa - U59mZhvA835c0IrmT/+xQlGAkkhwiRPnHhpJX7na0xYGYijNKxxHZaJadDi6TBfmIJylc97AIrZC - ojuDky2z7GwiL8cnPG5d1Wpjs9RhFLWIuOdZyIbxuQX1/RCAuCdy72Y3nG2kPrQlMdWmrab41c6w - sihl60f05jSXTjHQfhFQViJWjWHf7ZFadE+snpy6YjmdH9rPeXelktuc/HYxZSZyauzg7E6OvG9v - Won65/PF3Edz8efCCQpARdcQu59jPh/P0YgOZSGQHPWGP/7cQIBcFGJCHmi0eB0uNgg7D4Oef4jB - Z+W566EfzTPb2s0i5EJmeGqW7zD7SaeXXxyn1xlNVh+yH0mPEF8ivUVdRQ0soN6w5uRVb9A1QQYh - R0TR++fVebCs+pJkum4jcaGoOfBgkTGyn1w+4Zscg3C9FGwnE7Wb/Jh7MO/1hphZZVfiPju68Lqj - F7ObSLdGlF0j9am3N7K9VddMepxv/eq6qxkzXq7AZ64bMXS3Z4lH5F67Jve2EpRKcyKxmkvZfCzf - VJV+4ogFC3LpxmO73cDhrdzIpfaMrn3GC0kdYvOCX69ByOoglkqwMdbJ5vP7/ja9FJCuF4fYVWJ1 - fAqSEhZlbrLN3dE7TkO5gCPfVmQ3SE41rc7hFergTNhGdoyOS4UtwgZLlLk/Q9qNHcquEL5+JOKs - hphL21DdozPajlSopMbn4+FUI62iGvF9MnXTpa9FcPT1C89Ffvdr5+nb0O9eTyzsCELDECoz2su3 - gQqJ9OLTiytXILfaIV7nrDuaP+1UU+LuRpd6MyEe3d6mesO7iPjc8UN5TydRA/uUUNVIzv68LIx8 - kYCwZk6T+9WoEpKCnUslsfOG8fHUbs/w2S+2PecyopKzayC6/KyY+TCEsO8JbiAy72u2oZ6Tjf5E - BaTABVNRiXQkPThqIXw+QhIvyRtNxvr9AFS8G8qvTd/Nqa67Gk5LTle7CnVcLZmHQkMt6SBLTz4h - sylh2L0aKsvDwuIOXR0hm1Y6+eFOzMcf/WJqvXiuiSkaC4tFQawjtjFvbJejBg2uMotqPrYbYm6H - 3m+rQi9ACvsns/Sms6ZGOj7QxityhgHtOKdIP8KsmG86G1XVTRtpr8C68zQqy8QNxUH/mdFj36RU - vM6xP3/qWxUsNyfbTUT8diqMI2TPmlJhIb0rfpquG3QT4UR8RX+h8a4tTU3u6JsYxUxDapTNDKzQ - f5gVAFiD3VojevfnjODGW4WDI+17OJxlTIKTM2TDKB0FiNMqYngrvfy+WzdXcNc/W+Luknc2uixo - 4ed97Mhu9KSMrl4vETZemeOfwYm7aeiZgOqmvWM+zic+yU/zjAxlfWVEeemVlEk3DPMU9my7cJxQ - 9guzRkvlHX3qDSp+KvwNnJA/0GE/FdZ4bQ0Kz2l1Z/6Iqs/6dgtvRfeZZc6hP5r6qdQYRCO7uPkY - jnv9vEeVHTlkfzSk7Gmj6wggzVustnrV0fHpnEGExZ1sUKWG8/VFIzDVImD5jmRo3qyHCN4PACwX - TepPWXbWv/XF1l1C/X54NSXgzj9hrfCWXZ8xZ9T8Y/Em/kDCbsLnJFCVwtTw/DCErBv1oIbjWT4w - V2wuaHwQdw/XY1UyHE8VGkNNPIIWaQKGHVn6NDjsIwTi5YqVo2ehpcanGgTEDPoZtyYnu6qwGt8l - nrZVUY2Zt23B6pOZuPdq7n75LbirO+Z4zdEaJUIwXN77PUvqPO7GKXio4O3XKyqsSO7P3muIEXos - FLLRHNOXFk9sI357aViWSZ0NGvqZQRm7H7Jp81PV7aSLBPPtqf7+P/bC3EaxQBy8xNEdzYL0Y4Pn - b13cqvojHKegV1D3XolsPefHrvu53R/a55p4TpVX9HU+KUAOypkECyJXnTWFASwK40Sse6Jnoibt - z9qQIJUFZ28K6fkQq6svX0zRvOt4HexBQ3vDZbFFXj4jDgbUDK+Ejlwm1fwqdAqXgzoRt8hxx+Pb - GwOx/AuOO0fnvapJEez59kZMyM1q5NpSAAOynujMRbzJPUOC5WiKxPAau5KzII7VRWaXxJSr2ucX - OgfaB18ZXkjbSiz1H4zo9pARD+umz9eFmYJf+T5Vuiqpxjioas2di5TEpWdXPAv2sZbuHm+ylomb - TY8YrnAThRMheHKzMVfGWZuMQ/3ht7TiZ2cdwYH7BV5GURlyy+z30Oz1NclQcs1Y0ycmkh6XNbHG - ecm5eGt7WFquzkw04E6WDzdBffTQ/eqhcRkMLlp4V51ePutzuaSgknNQ03ttpHzM9FxUDbXYkr3Z - 6HxuC10BbbQIwaH0CvutflOgyJcL5i+c3mJKkKho15YjC/xEs7jWpx/+piXZ3ZzJb7rWu0I/6me2 - m3VszVBsJUT1BpHjxrgiWoa8hjgAg0RrOUUjIbsNyt6qgCe9eWT8OkWlKuBxRxK7+cm4sB094JvO - p3PUNGioFDgD37x9YsaGVo0aWdcrf+8AfuLI4F8+AjXrZ3ZdSCWaf0IUg1l5A/7wHZo6lEuQAKyZ - Pc+JNdrFqUavjWmSWHNKa5peDxfuy5fFSCZ1Ps91q9Xkrn+ztecV1kDa4AwTSU2q/JCmm9atL6Cx - 
6w/MJdUhlO993Kof/qNXqnvZzA/HGD3EyGd7MjgdO/d7F3IRYtwdo7M1O9LVRsbGSais5Nzq2bpo - tI/+YjZP7ExaOz5WNzQgJAmdyuILCns4XW7AnNtwCceK4Q3sMM6J7+U3PknxMkabsizoeHJV1Bxj - xUPSTxqxbZDL1TxK+Rmt4HLAU9A4lvwoLBV+rpCw6z5aVezMpSM0UXRh6Su5IL4MFwoY8YCIAcbV - p92LuXDGATCSexmnwSGOkJnxHdUG6dnRu67nX33Fts9E7JhDp6OmRqeQ2UPVd/NHv8A221ofva9Z - w6osUu2r592umvmHXwDZ1XRihlXp/qzz1QiDvdjQlUwGnzVljbW5j1bkLM0Lztog8VTW9ZjhGgXd - CCiKoXTJk8ppk1TjFR0pHI+VQUyjsip+MgcM4/ASib2Sb9V8dD6acffoiX9x3pzqfJqBOA8bo4uz - RfINqyXUj+jANq5nWVNyWwgIbd41MYg8Wfyt2xjK6SgxQ5Z5OESe3QKeiyXbBM01GyO2i4HU8gar - tm6g8ZmFAeBqkpjRwBmNt+mlQjuFh0893ay5DFGNmr25Zp/9tiYc1DF8r7/8OCfn2wbtlw+PbXAe - +ZxoRa/ei+bATvmchaOtoUK1Lz8WLjcG8F99UO7biNl+5VXM5ssz3JrWYn7q0fDp3RYB2hRrzgIJ - 1d0843EPqu/uP+cN1nhAqfe7PyaGuJMffSyh3V3ZU3CcoJP3t0KE+7TnJHW9N+dHvAKo7vsVuSk6 - 7fh0K8vVcI1eePmUF5yVt/eM1Mo94kEjyB+LdqvCbIRv9j0PNup2DeMrNsjN9iJ/6tedBNU1ckny - iPbVBK9Hg2gQtSxwnL6bXY8cIXmrCmbvvkKfektRc96pzDnmXti/gkekiet0h+dX9QyHIzqVKB9q - hayj6O2PFkpSSA1FpbTri5BHoYQhzbbALC0xkBgxJ4YqOcp4ZXtuJZrMviIa6ynWAr0Px2ffY3hc - oy1J8mj0+bXwCxTmAWf2TkbZHEpXCa45OTBDgAsffHTy4LQ6ejgTo9ifrOApwZbi468+4/AeTbBx - oDPzZ9j54/Umjtr5rlJiXucpHKvgsdEcn5+pHhtn1Md63qL38pUzPW/MTF5hUQef84F4hW5lVKCK - iuSQNvRcz4v/wrcyOYrMfg5R2AfeRgGps23yfV750VlvkLd3Viz1ojlrBenHRTdbODKs6gf+0bMq - yHKcfPz/UA0kRhE6XX6AkHRaW6NPAkHdmQUj+KiTjqtaK4B8XbTEfMtnnx8dskEfPU2nfN7xz/6I - aOgXWzzOs99RMRQL2B+UhJhms66keIoFUApdI1uftPw7juKDKmIWI5KNZ7Ir0Ne/eadqkfEXRvYX - b8kFmqwbBm8H0N6eP5SLIGRfPwzKT5wST07iilfBfoTPfLowvTSbagUKVNdExKNoxGF3w3MBFskF - /JySNV9eHR20m/ZYsi13HhX1FCGFDZrKX78n1VNC1ea+aonDKoJaVTdGwOegYk7azGh+OAFVZ04T - spuIGs7puj6q0Fge2UxVak33drdXEacybXO95LOBLoDMvZMTY1mViDGtTZH8k14IOZGfbsoUJHzx - gXhSJFbjXs/3qpsWMYvtnPNP/YgQPuuQEIrGrP/6seX1ZBJfRVXH+8MpBURnG0s88a0ZK0INaSRs - sbiStWpM+6rRar0tvn4RjY6nl6i5o5at957eTaoUltrjet4yHyWQ9dOr6DXdKy3mTU7jU4mLD7Ro - uiWzRMg/58NdIJ0fM+ejbyZVygroooXJgo3edCPve4CfOwqocknGbMbnaw2Lx+VJkYn0bn5LJwG5 - udhgwfUuWf/xU7B18YHp5dzyMUHHEq73453KZvTiH7zFMCv6mzl5ZHb09aoVGA+HihidfOweW2aL - q04394zYU231RWsokG2PV7Z9O4+QT+GCgiJqjBgbWIZTdU4j5En4wrD4EesXz8jBXd+2bB1FW0sW - toqHNN0IyM5Er5Avp5MI1/v+jkU7rzN6mPYPrRaBU3gleTezsgogjxuXRSv51s176drA+a5QLN2S - 1p/usXiEs/O44nklS3z68XYYejGq6XLOfT5esmOuff2hHs9tNbxQ3iLxHs54Pg09Gi/KmEJdmAZz - aSNnDXrd9/A2Qp8Fdwdbv/5/emgXcjGjNVoG63YDV+0xE8qcpc/Hkj4QckeL/GjS4M+pvrHhslUZ - Mcv50I0Vcz20XKd7vOh1saIjVTYQ+H6A5d4z/d/8xO08FcsDcX1pmtIjfPOHmTZvNJzXZQ+OP52p - CtHS769BN0MmLEdykTwzbDUHm+h5epGPHiis6f3cPuB0qhNm32Sw2AqDjhxJemGhRzvrmyfCyrd3 - zHLzko/f57E+hCbzTVRUkyCFVLudapGYACjjfaZjeAXnI8Gov/vcdtwUsvsxZ5tLsg/76CamMClm - TbtznlfyUSsUbVHPR4Kb6dEN76dRg14WDgl09LDo6fUEMKKs+8035b5PMCS3uiZmCkk4Mm05wlS8 - 18w7OW63TNpA+eLpx0+UPo2meIblqItsJ6LW767ms0ZholbMCCudL6mzVdCzB0q2Sr7ik3dOTJj2 - +pN2GTmG37wV7eqgYNdI32aSWnYjfOd7SzJ2PdykK4BPFbbRqhTxtI+PMCQrlSIv1ziLQyFF9X31 - IGscbarxGlSz2lfUZjtfsjpxx3b6b56wDvI955/7Q7tc1pmXRtzvF4p6RY4kvohjDCfOAG11mIu3 - x6y7nPhsgbYYxfWypkpWxXzO40UOQ6xfcFEDD9mtj3OghXklVgNa9UrLIQdXX7eYB3D1x8DTVfh5 - QMZuHUnQvH49A9BY3NNF60HF1f4gaDuzZJhblYs+/kUFu0+Wn/xkzUW/1cdfvt1kTmFxBW1E1L9X - MvngWUXn8zkHdggL4uU56ibRwxHwaHGjT0fOsznX9aNmoc3E0pDQbig9J10dzkvMdke0seYs5AEy - BDEhThtZmeRIVxfFK2WJlY1XdfIDzwp8/CStpfkH9aUyCigwi54+19WuU0wWXKG5CjJehY7lyzcu - NZDt6p74HeFWT4Oefv0Klv3hiqhdFqDpZ+lEp9fg+UsuZd4KWPr+6EUrXK6DWkeR77+w8Ca3cHAI - ATi+lYKc1Zz9+uVvfohhozfVLBxSASXe/cJ22IvCueFIUvv74cKCPXqg/tWa+1+/Z62G2ZrvZXnV - lMz1sPzJR59PKY3VvfwzsCAhJ851vNK/+Ewsq6o53z7dFupVaLGNmp+zaf/U99qC2w+Cn9KmGknf - uWBkm4ZsJe/tz/TFjujU3n3mnOSFVX/0L0wQScypKmZNpmdREOqZ4E8eWPEm000N8V5m3t1ZZ+Mx - Hj0YrX5P/A26++MSRTrsD2ry69eGMhzxN1/AHz9Xtd+8fb2+BYSUiHSsCgUB8lxekd+8f81nTz1t - 
lTc1KFwsMdC4Cl+8ZfbkWiMjWELl7WXjlaAb4cTQxfyuT6e6ca0li5emalRe+8krs25eYiWHGC8f - xFlU+47vbk2DQgkv6V12fzr20WPf+yP4k9cOp4DOYFo8IMa5eYazO+1N7SGefbLNiF998pQIsggC - vFpVGh+Vc+wiiDREcEKu3SSHU6R98ju6MBH2R6dvSwVjfMcIe7tqhsKQ4A5CSSWL6OFcFXqpqXvL - YvjHETNpMYWm9hqbmulnP0fjeqpHtMDjnjl2dO/mXThftfmhpcy8zodwdLJDAQvuPhguJ9v/1WdR - W22o1npL/iYxOkOk1ReM7mRGwyilAJ/zw2rgtdl4bbc9SM37Qps6Gvw+zdIGiLW9sK2i360x1s/N - Fz+Is62oNdavxkNlBA/i1A3v3u9QqWFfFog4h0Hqvv7tVx9vjxHLpknRXLWSdt0nTw6qSTQbDwQ0 - GMxSZ8mn3/3Vs80Ti2/5yj985iE/mzosOpWXDfB6tMAj7cY8iE4Wlw83gOpYqcRgVRnytN8fwZak - B+0WTmBxjKcZvn7cps2+Wiqv/qoGfBqJP+uvbHxn2UePjTYJJOR2jHBNRX5btuSb106rsolRl4RH - vFj3dzSn8eKIPvXItm4uW8MmngVN7/iajj1w3vrl+4yqfXMheiVjPluKYK5eQXQkWEctGqRYS9F7 - r3vssveKaq5edQ+ffA0n1zz+4MVaADlz18S9RrU1yE8vgh+8S9naJw+fS1o7g1tNGXHMOUFiG/QF - hNujwUytqr/5nYB8Caf09clPxnv/MOEy1AW9lPmBz1Ox3YMQMv2Xj+c0lvfok88Qt0q8sChQqoBR - rDO8WjjPcK6mQwkfvsGj2RRobNbFXiuKFrONRdaWDNM5AOMsxkRvZj8U4SbkINQjYec0x9bYE9z+ - 3t+q9Tbhxw/ZUNc7kThasqlmT5FSVfXtPdEliKxp5pOpJXj5Yp/8xR/T7Nhqm3C6syD2JjSE67LU - hM79+eSFrjW8HKMAWU4Tyj/4OBSO4WknV04pspJVyFqku/AOBJUQhwi8nczqrAo+FfCi6qtwOgZ1 - Ddu3YpLLOZ8s+nDsHi3CwWJrfwA+PKU0haWtVcw4yFP3+3x99BedCiOtBjXWHuAdi5oEdZR+88dU - k7jrEjtuWDf9vLoNBInik+tWKrLZiDUBId8NsTjmTfXNcyGe9ifmJ4nacVI+FPjgB9vZnuhPB2eT - gnI4CMT4qe5dn8RjAR/8J36d/2Tz8XydwW6cJ3GGIbZ4HOwxIuzWMIMPLeKFOcyw9sqM4dlbdbwo - fApceWN8vzZP1FvtJgZHCFxGTE8NZ/31EGFbrKXvfqH5Hi8a9PMQMiw/ojdi7q04auWm/fB5pXC2 - 1b3iF+9vO2cMWVJYkRZwPhJ873VLNqXLEWq9KTCykiTrz9OrRJuQ36kqRWL31UNIsrWaebHnW0O6 - blW0eixmPOLG6OZ7LDea7hUWcXE+ZHx/ayT0eR9D1SXZV8O2r21g1wX51os/f/1zCcKTrO0m7vi1 - sAoA3doy75bI3ZD3LwwvOJ/Y5pbs+TyZXYTKPrLwPM9z1nBCdNgI0kS8UX9YkxprNWwKh5PtrBfV - PJ/zK3qfdxrbZZKdLf3W3iBdvsp4sZ9q/nBKmYKom5RZYfXqHh89i97ROWXWJrfC37zsiwc7mMxQ - Or0GAT75DruZaJ19358hT5Bk+n3fPCnKMtf+frsC/vNff/78n2+HQd3c8uenMWDIp+Hf/90q8O/0 - lv5bFKV/M+m3E4H2aZH//ee/mhD+vrumfg//d2ge+av/+8+flYh++w3+Ds2QPv/HwL8+6/3nv/4f - AAAA//8DAPp9nSnlIAAA + H4sIAAAAAAAAA1SPTUvDQBRF9/6K4a0nIf1OZ9eFQhHFjRYUKdPJMxmdzBvzXmlryX+XaqW4Pdx7 + OfcIvgIDrrHi2hSy2WI3pGp68/xYr1ZPn/u7r22dHpbXm9ltmIIG2ryjk3Mjd9SmgOIpggbXoRWs + wAym5bAcF7NJqaGlCgMYqJNko3ySybbbUFaMigFo2LKtEcwRUkdtkrXQB0YGMxjONVy2L3yuQUhs + uJBx2WtwDXmHDOblCC3y32pHAcGAZfYsNsrJkaJgPPkvWFnFBxZstVqqDp0NQUljRUWSxsdaRdyp + xrJqbEoYsVLso0PVHtTPVU8xV/f/sr+5HHoNbz56btYdWqYIBlgogQYfK9yDKfrX/uobAAD//wMA + lmqfw3wBAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7bf9ab3e6de06428-SJC + - 7bfe2b15e8cd96e4-SJC + Cache-Control: + - no-cache, must-revalidate Connection: - keep-alive Content-Encoding: @@ -1258,29 +824,37 @@ interactions: Content-Type: - application/json Date: - - Sat, 29 Apr 2023 18:39:39 GMT + - Sun, 30 Apr 2023 07:46:00 GMT Server: - cloudflare access-control-allow-origin: - '*' alt-svc: - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '195' + - '1442' openai-version: - '2020-10-01' strict-transport-security: - max-age=15724800; includeSubDomains x-ratelimit-limit-requests: - - '3000' + - '3500' + x-ratelimit-limit-tokens: + - '90000' x-ratelimit-remaining-requests: - - '2999' + - '3499' + x-ratelimit-remaining-tokens: + - '89826' x-ratelimit-reset-requests: - - 20ms + - 17ms + x-ratelimit-reset-tokens: + - 116ms x-request-id: - - 6c805826f30e21e10011f6f0db0b02c6 + - 6536a4bd33511e252d655771f59e71f4 status: code: 200 message: OK @@ -1324,60 +898,58 @@ interactions: {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", "content": 
"The current time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This - reminds you of these events from your past:\n[]\n\n"}, {"role": "user", "content": - "Determine which next command to use, and respond using the format specified - above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": - \"I need to read the instructions_1.txt file to determine the next command to - use.\",\n \"reasoning\": \"The instructions_1.txt file contains the instructions - that I need to follow.\",\n \"plan\": \"- Use the read_file command to - read the instructions_1.txt file.\\n- Analyze the instructions to determine - the next command to use.\",\n \"criticism\": \"I need to ensure that - I am following the instructions correctly and efficiently.\",\n \"speak\": - \"I will use the read_file command to read the instructions_1.txt file and determine - the next command to use.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": - {\n \"filename\": \"instructions_1.txt\"\n }\n }\n}"}, - {"role": "system", "content": "Command read_file returned: This task_id is 2314\nRead - the file instructions_2.txt"}, {"role": "user", "content": "Determine which - next command to use, and respond using the format specified above:"}, {"role": - "assistant", "content": "{\n \"thoughts\": {\n \"text\": \"I need - to read the instructions_2.txt file to determine the next command to use.\",\n \"reasoning\": - \"The instructions_2.txt file contains the instructions that I need to follow.\",\n \"plan\": - \"- Use the read_file command to read the instructions_2.txt file.\\n- Analyze - the instructions to determine the next command to use.\",\n \"criticism\": - \"I need to ensure that I am following the instructions correctly and efficiently.\",\n \"speak\": - \"I will use the read_file command to read the instructions_2.txt file and determine - the next command to use.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": + reminds you of these events from your past: \nAs a system, I recall that nothing + new has happened since my creation. 
Nothing new happened."}, {"role": "user", + "content": "Determine which next command to use, and respond using the format + specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I should start by reading the instructions_1.txt file using the read_file + command.\",\n \"reasoning\": \"The first step is to understand the instructions + and the tasks that need to be performed.\",\n \"plan\": \"- Use the read_file + command to read the instructions_1.txt file\\n- Analyze the instructions and + determine the next steps\",\n \"criticism\": \"I need to ensure that + I understand the instructions completely before proceeding with any tasks.\",\n \"speak\": + \"I will start by reading the instructions file.\"\n },\n \"command\": + {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": + \"instructions_1.txt\"\n }\n }\n}"}, {"role": "system", "content": + "Command read_file returned: This task_id is 2314\nRead the file instructions_2.txt"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": + {\n \"text\": \"I need to read the instructions_2.txt file using the + read_file command.\",\n \"reasoning\": \"The instructions_2.txt file + contains the next set of instructions that I need to follow.\",\n \"plan\": + \"- Use the read_file command to read the instructions_2.txt file\\n- Analyze + the instructions and determine the next steps\",\n \"criticism\": \"I + need to ensure that I understand the instructions completely before proceeding + with any tasks.\",\n \"speak\": \"I will read the instructions_2.txt + file now.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": \"instructions_2.txt\"\n }\n }\n}"}, {"role": "system", "content": "Command read_file returned: Read the file instructions_3.txt"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": - {\n \"text\": \"I need to read the instructions_3.txt file to determine - the next command to use.\",\n \"reasoning\": \"The instructions_3.txt - file contains the instructions that I need to follow.\",\n \"plan\": - \"- Use the read_file command to read the instructions_3.txt file.\\n- Analyze - the instructions to determine the next command to use.\",\n \"criticism\": - \"I need to ensure that I am following the instructions correctly and efficiently.\",\n \"speak\": - \"I will use the read_file command to read the instructions_3.txt file and determine - the next command to use.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": + {\n \"text\": \"I need to read the instructions_3.txt file using the + read_file command.\",\n \"reasoning\": \"The instructions_3.txt file + contains the next set of instructions that I need to follow.\",\n \"plan\": + \"- Use the read_file command to read the instructions_3.txt file\\n- Analyze + the instructions and determine the next steps\",\n \"criticism\": \"I + need to ensure that I understand the instructions completely before proceeding + with any tasks.\",\n \"speak\": \"I will read the instructions_3.txt + file now.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": \"instructions_3.txt\"\n }\n }\n}"}, {"role": "system", "content": "Command read_file returned: Write the task_id into the file output.txt\nShutdown"}, {"role": "user", "content": "Determine which next command to 
use, and respond using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": - \"I need to write the task_id into the file output.txt and then shutdown.\",\n \"reasoning\": - \"The instructions_3.txt file instructed me to write the task_id into the file - output.txt and then shutdown.\",\n \"plan\": \"- Use the write_to_file - command to write the task_id into the file output.txt.\\n- Use the task_complete - command to shutdown.\",\n \"criticism\": \"I need to ensure that I am - writing the correct task_id into the output.txt file and that I am shutting - down correctly.\",\n \"speak\": \"I will use the write_to_file command - to write the task_id into the file output.txt and then use the task_complete - command to shutdown.\"\n },\n \"command\": {\n \"name\": \"write_to_file\",\n \"args\": - {\n \"filename\": \"output.txt\",\n \"text\": \"2314\"\n }\n }\n}"}, - {"role": "system", "content": "Command write_to_file returned: File written - to successfully."}, {"role": "user", "content": "Determine which next command - to use, and respond using the format specified above:"}], "temperature": 0, - "max_tokens": 2331}' + \"I need to write the task_id into the output.txt file using the write_to_file + command.\",\n \"reasoning\": \"The instructions_3.txt file instructed + me to write the task_id into the output.txt file.\",\n \"plan\": \"- + Use the write_to_file command to write the task_id into the output.txt file\",\n \"criticism\": + \"I need to ensure that I write the correct task_id into the output.txt file.\",\n \"speak\": + \"I will write the task_id into the output.txt file now.\"\n },\n \"command\": + {\n \"name\": \"write_to_file\",\n \"args\": {\n \"filename\": + \"output.txt\",\n \"text\": \"2314\"\n }\n }\n}"}, {"role": + "system", "content": "Command write_to_file returned: File written to successfully."}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}], "temperature": 0, "max_tokens": 1919}' headers: Accept: - '*/*' @@ -1386,7 +958,7 @@ interactions: Connection: - keep-alive Content-Length: - - '7669' + - '7433' Content-Type: - application/json method: POST @@ -1394,20 +966,20 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA5SSP2/bMBDF936Kw82y4VRwHGsL2g4eOrUdiqowaOosMaaOKnmMHRj67oX+xY6D - Dh31SP3eu3s8oykwQ10p0XVjZ6vHJ+vDJ7f8oquH01d9OMT0+Rv/NH94/RkTdLsn0jL+MdeubiyJ - cYwJak9KqMDs7v7h42qdLlfrBGtXkMUMy0Zm6Xw5k+h3brZIF3eYYAyqJMzO2HhXN7IVdyAOHSBd - JHiBXw7SdYLiRNmLtLpftwnqyhlNAbNfZ6wpTGDvLGGGKgQTRLF0MR0LcTfCOWcAgBylcrGsJOSY - wSiOB3SSTsxxA5V6JghRawphH619gaM3IsQgFYGocNiaAgyL6wUXpYkyl5PA3liawwaYqABxEKoo - hTsysDvOc0yuHT2p4NhwOdh+v0JXKsCOiF99/+UFiotO9wTKE7CDffTdNxgO4qPudhq6JHtn7fsM - jVU82M/gR6DLfGMjBNrVdW9yGeYWor0Ro02op/1N0xOH6DuoEtiAqnuCGC6h34l23pMW+3ILDA2p - wwQ7Gmsh/ke2gdQmU+XjrXeNs6ppMHlDvcmifHn7WK7rGwCP1r5deP+C+gYnbPGarE83hsy5xTbB - vWETqu2AxAyDuAYTNFzQCbNF+7v98BcAAP//AwDI0C/ivgMAAA== + H4sIAAAAAAAAA4SSQW/bMAyF7/sVAs9O4DRt3PoWbJegPQzotss8BLLM2GpkyjXppl3g/z44tpM0 + 67ArKb73PVJ7sBnEYAotpqzcJFrurp6/fK5+L+4e0vT6frFavTx+ffi2/HH/eAcB+PQJjQwTU+PL + yqFYTxCAqVELZhDPFrdXt9dhtAgDKH2GDmLIK5nMpzcTaerUT8J5OIMAGtY5QryHqvZlJWvxWySG + eHYTRQGcxE+NeRiAeNHuVIrCqA3AFN4aZIh/7qFEHoVr7xBi0MyWRZN0mJ4EqYuwT0gppRKQwjd5 + IZxArIbi0MBX6YoJrFShX1ANTJgp7ZySApVo3rLSlClCzJR4xUUjmd+RathSfny0Hmc7kVJTNk0g + ODerUbMnS3nvuBwMLLHUjen2wD1Eikhq453zuw6EsqOHsnxEvJSvnKZeeaK+M/6b612G7hW/sWB5 + IWdqK9ZYLsf9jPGRuKk7eS3qP1tLceNrPJhJt6rO8RKbK9Tb0WNnnfuITZHfTRPo59pgvOsQ6K+z + 
ki6xl3y3gAtnXeeXP+L8UGdXOoQ5heTGGGTeNM69HakOZANgQi20AWwsWS7WvRzEwOIrCMBShq8Q + h+2v9tMfAAAA//8DAKnylkSfAwAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7bf9ab403fa96428-SJC + - 7bfe2b1f5eef96e4-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -1417,7 +989,7 @@ interactions: Content-Type: - application/json Date: - - Sat, 29 Apr 2023 18:39:47 GMT + - Sun, 30 Apr 2023 07:46:08 GMT Server: - cloudflare access-control-allow-origin: @@ -1429,7 +1001,7 @@ interactions: openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '8206' + - '7793' openai-version: - '2020-10-01' strict-transport-security: @@ -1441,13 +1013,13 @@ interactions: x-ratelimit-remaining-requests: - '3499' x-ratelimit-remaining-tokens: - - '85989' + - '86458' x-ratelimit-reset-requests: - 17ms x-ratelimit-reset-tokens: - - 2.674s + - 2.36s x-request-id: - - 62946a2415fd4014bc6556763cc6f992 + - 1fc1636b39f8812550599c1474d1d565 status: code: 200 message: OK diff --git a/tests/integration/challenges/memory/cassettes/test_memory_challenge_b/test_memory_challenge_b.yaml b/tests/integration/challenges/memory/cassettes/test_memory_challenge_b/test_memory_challenge_b.yaml index fd82ce2e..904ad7e9 100644 --- a/tests/integration/challenges/memory/cassettes/test_memory_challenge_b/test_memory_challenge_b.yaml +++ b/tests/integration/challenges/memory/cassettes/test_memory_challenge_b/test_memory_challenge_b.yaml @@ -38,667 +38,9 @@ interactions: to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", "content": "The current - time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This - reminds you of these events from your past:\n\n\n"}, {"role": "user", "content": - "Determine which next command to use, and respond using the format specified - above:"}], "temperature": 0, "max_tokens": 3251}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '3383' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: !!binary | - H4sIAAAAAAAAA7SSTY/aQAyG7/0Vls8BQYGFzW37cUBaqVW1aqUuFTKJk0yZj3TGKUGI/14NIWwL - 0t56fe3x+7weH1DlmGJWkWSm1oP5g3v4Nv/MLX98elzO+P2nr+/axw+/SL6X95ig2/zkTM4vhpkz - tWZRzmKCmWcSzjEd3y3eLsaj6WKaoHE5a0yxrGUwGc4G0viNG4wmozEm2AQqGdMD1t6ZWtbitmwD - pvPpXYIvsy/6eDZPUJyQvkj3o8kxwaxyKuOA6fMBDYd+rHeaMUUKQQUhKxHSWWEbAxxWFgBghVK5 - pqwkrDCFs3gucCtRXOESQuUanUMQ8gKbPXimXNkSpGJQNohvsoga1uOhtAKF0gziIGdhb5Rl2FUk - QF1TrAhtGSy3Mlxh8rerZwrOKlt21k+vGMQwpOK8Ks7KYnK/B2UL5w3F9n8ZurZWIAjX4dq41mQ7 - zwF8YcpfzdaEPn3cxPqMYwzZ/Hpu5pWoTAXT79Iy5xGMbWh8pCKBJWTkuWi03kPhtHa7G3tQ9qSd - vMjmYJ1Azr8VCUPhnYlVc+0eaqZt77xTWv+nP+w8j0l/Vedl3ByVJcMdzmVxV8Tky+tb7Aqx9+X5 - LXEPcQI586zsEY8JFsqqUK2728IUg7gaE1Q25xbT0fHH8c0fAAAA//8DACiYLFgKBAAA - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7bfb47f48975968c-SJC - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Sat, 29 Apr 2023 23:21:31 GMT - Server: - - cloudflare - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-model: - - gpt-3.5-turbo-0301 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '7598' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; 
includeSubDomains - x-ratelimit-limit-requests: - - '3500' - x-ratelimit-limit-tokens: - - '90000' - x-ratelimit-remaining-requests: - - '3499' - x-ratelimit-remaining-tokens: - - '85995' - x-ratelimit-reset-requests: - - 17ms - x-ratelimit-reset-tokens: - - 2.67s - x-request-id: - - 7763b6b11a061097597b024900075aaf - status: - code: 200 - message: OK -- request: - body: '{"input": ["[{''role'': ''assistant'', ''content'': ''{\\n \"thoughts\": - {\\n \"text\": \"I should start by reading the instructions_1.txt file - to determine what actions to take next.\",\\n \"reasoning\": \"The instructions_1.txt - file contains the necessary information to determine the next steps.\",\\n \"plan\": - \"- Read the instructions_1.txt file using the read_file command.\",\\n \"criticism\": - \"I need to ensure that I carefully follow the instructions in the file and - not deviate from them.\",\\n \"speak\": \"I will start by reading the - instructions_1.txt file to determine what actions to take next.\"\\n },\\n \"command\": - {\\n \"name\": \"read_file\",\\n \"args\": {\\n \"filename\": - \"instructions_1.txt\"\\n }\\n }\\n}''}, {''role'': ''user'', ''content'': - ''Determine which next command to use, and respond using the format specified - above:''}, {''role'': ''system'', ''content'': ''Command read_file returned: - nBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\\nThis - task_id is 1111\\nnBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\\nWrite - all the task_ids into the file output.txt. The file has not been created yet. 
- After that, use the task_complete command.\\n''}]"], "model": "text-embedding-ada-002", - "encoding_format": "base64"}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '3212' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/embeddings - response: - body: - string: !!binary | - H4sIAAAAAAAAA1RXyc6rvJad36c4OtOUFCCAN/8MAgFC5ySQhNQISELoQmsDvi9f+r4rVakmHngv - WfbeWo3//a8/f/62afnKpr///PlbF+P0979+9p7JlPz9589//+vPnz9//v27/j/kq0lfz2fxzX/h - v8Xi+3wtf//5w/3vzv+B/vnzV3oPlO6TiA/H0Zt6Ge7PDjuxVaVsyMQKKlV40cPZWtFy47IVzRYt - qNGWWrlo8XNE/L260ft9e0QtdyMq+pyQEYjDhy/n4yqpEIXQY+vSAGI73uJgU3wXbGx4x13VTAcg - V9gQwUxNd0G7bgZ7Ky10r+ktYm9HElF6qPZYNctiYIsRi0jRKkpfJ6dC7FRQGe27eI/xdzyl85q0 - HgSi2xChZxvWuYbggYgTjM3lkZfjzGsbZVIiwFg58iVZXvtCiZWPRb3gXejLnrQA6zCO+M5zqssF - fX6Td0+3I7KOH2ge1JgD80RlfFjBRguTBA/UVfBwYPJfxFqri9H7sEIw8udKZ+V4lcG43SOqf60D - W61jnoOPTwa1vcsnnC9ZmKCANx7Y6bKPPnMdsxV5jXfBIg53tKrvvQkJ2j6DrWZ+9bHAgoPO9+uL - mo6rpPOmy2flp44DRzXCZSoqAMN5GPgR5qM7NWlPQJWUI9aEibI53LAMevyeAt5DSUhn4lfgfulA - 1Uph5aLZ5og4e9hQ56zoiCXvewy4bmTqnZ69O28f5xXeqz9Rl3aYsa2pNmBHcMDv95wP06B6MmwN - g8eJla7prLViD7/nYaxuhkk5yiKQDwqoHRjzsDTfbYESpDyDReU0tnt/ryN8fDOi1n3bsbWbawB9 - rxo4NpCX7rI67KH5yjx1Y+fChLCdDbg5eY716WyGy5pEF7Qgz8X3QKiHRby4jSx48pH6tdenrLqI - HsSfSsTPq1SXBPyPA7sJucH2HtOhfaJiA/FztbERYxUtXB7nUjyyGrs+J7FSC74OBJ1zpEF5PqGF - XDMDjNs1wjaTmnKyaAOwF9M32aZVq490dTk43jybei+2R2vRDSO8JlvGYVJmIf9ejAuipXsObg/S - Iya0TJVvSNewpj1gWPBxE6NHtxUDaUWSTnR/q4LyzDfY5ZqU0SXnCFi0PVP/UFdsYY5/gh/+4WAz - PhHdV2uiFDKziRRNOVrQ7jPLulBdMTbOeslF2tIqq5i8gm1Seu6sxZ9+a1zOVcD31Y7R5KYSZRrr - BO91IdJnKXje4CR3I7V2yQ2tLZfbyjN1r9QxWktnMG5tsF4ypZowYUZO+lkGmHk/EENhixZ+rG/A - s7DGmge9uzRuOEJmcocf/lhoIstOhvhBYnIdNx933oERAMTYJdvTY2CzV40muDEJqBNbRrgM4WUP - /lbTcRTuaEnas7FXbKUxsdWf13LUDlEO+Qu96PG1+7jLc7UjwEKUBduLm+qk3sgi0hV6IbtCfpaL - cdYM6H17T/V48fVZXa0bvIxCClbCBQObz90snm5WGjAxXsulfOQ9PBJIKc6ncViRHG5gNtorjgNj - LodffmBtdmmyu8Ulc4NslqXF5ojSbKSB+nktKGph9tjbJG3Y7YqkAUdD92Cec81djhdsIBKYr2DF - myz9FvPJkKlIDtjHtNb78BBuQHQqhXpudPjRx5MIucudsU716zAnUisDWdsZu0g12HS5zCs0jD8Q - pQyKcFLbcfy9TyBqM2YLkzYeSp+vD9WOy1SuJFNfSv2aL9Tc46lc5CprUXrOL9RLvk64fpq6gm9+ - FqjzlFi5TjxxZOHLC2Qu5w1aZ3IuAH9KJ8iD9yVdjM/JVDxlUWlitF99cWwxlu9xdsUxGS86675u - BMulcvBePLrD4u2mC8hSyWHzmPT6cvIgB0+9T1QP23ZYE8A23PrwTeYwe7mrPGgqfMZdSJ07GgYS - ifoeROoGWIPdnI6cvLeVXWdJhH9ldCC3frVBX4sau8t3h8j6sXvgblFIQ/NdpXN8VznYRtydXnbK - Ba1He3yBN+mvAKhVDgupBxOl8YCJgunB5c947cEYRJsGERJ1dlo3MgRZdifrtdzrjM1WD+ajAuyQ - x4jWxsUzagaoyRzzQ8iEw02GooEHvSkfG02HedijpjyUVO8OE1oPXzX+nQ+2Tpw/CL58z0Cp01eg - sKJ0+/Wj9kqxEUS8DzxfXzQkCRB5hvMf/I+fGPCjZySX+08614chhrcQPgNp+96Xu4bcAZadk1Lj - vb0zRgecwft+UrFp+Hy5mCdfRUK0dlj9bPBAtcPphWRNvmFHpAe0m8/dCj/6RbUME7ZYoWujldUe - dupTO6wcViqY8s6lF8mxyjETKw68dkvI96qa+ux/+wC1ZuNh29uBS3pzuSCojikOEunNpuV+s2Hq - Cj/oWaOFsxRcbwDSiPHJCSXW77U+QAnH5diPxGMptGdDlQg/KVi/sdpdw/hjKH6PF4oXzQ+55Oat - SBxNDfsB6dylndgKjw1ziHS8DYzoRn5TuEO14GfjN+kk1qdRiYXEwV5HbgMzOD+C3/cZL78tsyOO - ZOVgFA49GFvntz8ZnPONHij3gWOzDXiEw2S7+Cl8vjqJ7IuBjt63xE7vc+FqqI0NrgUj9TQUD9yv - f74kbqTa2d4PvFxFPeRxoVPTq81hjSpFRtiwhKBxPSkkyqSbMmK9/DOPmq3BqZ+R3eoXwqQHGSbx - 4laQVMaG2rdMK+em9iNUPO8cNUc1LOdwg16A3oITIG08l7T76hEk4T0jVRdFbPyoZQD1hdPx9ejq - Oj2ZniEbh++K90l0TZdffdnMFzPYXZdHyk7FV0aD9gxxcnwk4fTIFhupNychO7kO07FXkh7uVVIH - kvHI9Vk7ZAXaSXxKuOp+1JdeMQnSH2JJU3GNfvJd7il7FwjGQvBB7K4uhSKOhvYfvfrxHweV90zG - 2bMJ0jW7XF5I0RoakO0koYnfri/lM/IhNbqIY2TetAUkI6jBL3/7Vaky0Gb1jbUMB0ywr8oNITHz - iPDT7znLNBXZuXilVzjXbO3z10YOy+xI3VkwyiWqjxy43j6j5vJQf/Lvoir6E0IabCzd3a2nmkDQ - 
2UecPu7fcPFSLQOLvTW6x49PyVgzg8Iy38cWC17hrH+5BtIlPJPMv/glc9xbj56DvKOB1aT60k5o - heJ55UhnPFSdP4+3F/Bua9LsC199jjSpRWDEGX3k/NFdcyZk8OMn2C0/1sAk5JmgldFEJMHkh86+ - aRfYQHClDrwVt90enRV9O0ckLEg5l0nIMJXl0jjULT/fYdbirgVFCZWADaLkDlb2IcqJFZSgJC7D - ucgPNmxj2SJ8zYxyBb+z4YdPgUjGizu3Vv1SAjW38Sl4r+G4rZsKMpTcgt5oLZf4ftWC91I1GuhC - yVhduA5KTheMLb79lpQO+AWngvnY8vmu/PW/33qwnpyrPpdCnMD46mp6hNPN3fHHhwmRvU+o37pB - yIruvaLbgQaBfHzI4Y/fmkgyiE69bVqFc+au+//kuUVnh4FdRGTDoxEH7LOi1Mef/ASyUldkeZ4c - tMpWJcNp17zp8fRpQzY0+IRGt4nJzJ+5kBxjlsEqxi8a5e5pWD8CNDvjawhkhrZLl7BwCfrxP5r2 - Y6WPnOw46BJ9FiJ6O9B7CcV78M4Wppj3azRyin1B63vO6HUmYjjKVV7B6XCX6JHC1V0kYyXy7/8p - s2xfZ8Ucm+hHT/A+uvkDh6JHJV+vwUR427HcHfKfAeqliuFEmjW2c4g8QyO2d/zm/QNaVXo20KNT - RNJ21ExJX14NRM3/AQAA//9UmlvPgr62r+/np5j535IZAZGWdcdJQMCWg8dkZUUUQRARsC00Wd99 - B96dnexrYyAtY4zf87TP51K/wRQSvwOPs/smkqk31TA8pwnu4ouM8z0D/dz/CATo6OBkXPUB/dhV - tPQ/spo47Nkxf+TQ02oHe84v6/n+8RnUZxVGNIo7xOkr5FBzG46RaByFdEix+YI9CWwcXM+fbLJy - x4T3hB2pzuwVJ8cPNKFgyhO17JgDJqq+B+e8h061tLYmTb9E0M78ZiD2Z52SdJsJS97D+1KP//I6 - KOBhorqfbgBT4yyH7+tKp27K45SHcaKq8/5ga49WfbtbbWVQD+WAXTO5B2NpHW8QiqpGquJ3B1Nx - TlpNSZ4PHKw0VvEjMi+asR8VbJNyl/GHrXUAe8Aja4XXwegftyEc5dsGm+Oqt0h1T2T4OwyACL5e - p2Mh+jdYPwpMr0LuVNM6XnWqLFbtH7+PZmCLUGrenJ6fvpNyZiQTHOjVpTPfcD5VqgxIxyJ63zlH - aywiEoG0qU7YEV4159G3CGH/MV3y1eIkILI5EDjzMDbOjp+xQvvpyt74MDJu30drRN39pi7z1v/e - jaBhWXgBhctK7Pz2niW39dWHgilOKNtZTT/q7UDAzFvUKTu1GuxbP8Dz6JzxfjNYvdRxpqu1FiO8 - s696wKVra6vKSmFEGkEI2Cm2G1huyi3eUShZX22nqmDoz2fqq6UUjM9CSjab38Em7d+8OwgHFVje - FRt9Y1XSllW6puTXgZTHzZH3QVmq2uit3tgoMjdb5/W5Af0LnrE+53HW7b8DsFfDmoaOGwe8Gh6K - 2pK8x05adyk3piBSqxbd//aLalVxh67QE8RKgVasKHcI9mRno9Vgl9U3j18HiIrUxSgOdM7kZ3uD - zoQEIla+mLKr3LGlHuixndpqtMuLvdFIUmAP2VHP9Amf1NC+Pgg8fF/9dE9uOdyNOwnjnXkC46ne - T/Cc9tbMDzBlua9BOL3WHRqm9Q0wP8lMcCVWSzSPGRnd+lGnEWTn1CQiqgi9oAiGR3+FxoslZ3yT - FwlcafcLPVbfOhiVxGqgdVWrhe+rwbN0W4sjVNK9/4r68ZwKDNoqeNPwfP+Bpf9AFowJ3tGgBNMH - bRXo2WFFNnECqqEoDaTFd59R4xY7nGVD9FfPZDxtUcq5nhMQt1uduvij8L7f9XfYQOLiv7z1OagR - oMBgZP30naw9M1IAp3zciboi34qpRphD7+6aaNPwiI9X+SSCi1a5RFrZY0CtD3vB00pOaWj423R9 - Tf0GKMDN0fUQr6rpCAwPepHP0YbCo9XvcCRqVx9tqPG59QETHLuFDyuRqX/19LT2whODn416Q+GU - 6IHohfmkGsx8oplXLHpE/g0ODnfIhf6IxeZ5C53ukCMQml3FAJNu4JC5H7wfzg6Qm+w0wU3/WVE7 - tu1sssVrBLTyTKghgqJieRbCP76420cDiMD2a3g63Rku2/M64GatXqDUfDjegs2h500MPZW36gpj - JfwCjlzdh/uzXyPxexDBjyhKCJ8VipA07/fMJ8riM7CZlTsgzvujXfZmg05fA/cLXwO+83UyVmjq - p0f5u0Ep/6X4PPczFhouBCSXdWr4wsuaQiFu4X7bF9QBlc4HwKQLlMc8Q+v3wQHTazMRMLSJj4Tb - 4W2NZTpclvWkS75d+F97l1KB0SPdBTLsQgGU7VghsNJY//ugq7zwC43w4HASr54dmH0YYWJ0Sddn - 9rpry3zy2VkEv+ehyCG0b3e0bp9WSposZ7CXjfrve5SH7/UEw1YjeHvvivRX7eIWfi7Zici/vReI - VnBhUPRvFrbCb81/tP0iOE7HHTWyxOR0fMgN7Np1TP135FXibVOo4PhzQ0JfT8DHyXV9CDJTIWw4 - eNW48OQ8r+juK+wz0X+3HlC30EHKAY0Bi05PDzyewofaGap7rtO9D6ZPB4k656fp1E2+IvBjit2l - /9f0YC48jcD7iXu544MNkxdysM7TIR1/kuqAWvZjas75iZpGHqlbSe/J4gf4WMABKH6j0YWnaF4/ - azUsxZFa9eaY8lY8IJhtG5OM4vOZ9Y5qNDCTnQq7A1ardzesCNTKI0HS9WkBfn4cT3Bbpzu6hfEW - TG199cBNUSLqdcW1Gk1SCFrqyy4SgolXQ9o3J8h+yQmN3dutFp8JHr2yxotfZZuyu0Npr41IrdYg - o4+LOcHUF118ozcPSHXLazB/D0T2rijgwcjUxbfOfkap6mOZNnDmT/Kd650o73ACf/P5bIbBH3+d - zmFCpNlPSdCBDBbuVM79Iw7G+khygIJBosG8vsrwVBlUzgmd3z/tRXosdG3hU3/moemD9iqwhdUR - jUX2ydqr/GKayx/G3//FxitfqvxZy9SdvlYlGZfjANuy3CDYgd76ORYLtdRhZ3zJL2PGpGvtQwos - hlaXY1YNJbt7oIuECdHJ/Gav0xoJYLqtfLq7dkP/TZFcw48m+ESxXk9rfEN90NY3+44epr+qhg+I - GmjQBFLz+XtlXf28QOivsY89IJ3SaZuCBC58Cd8vIWhP2voC27X9o/pddzlP2sFf/NTM8/dgFMKN - rvr+dKXe2MtW9zqdashFCVPbIUE6vvsNgeV08ZBc2xXn4DEgSJqhoJ5Lzn3LG8mET3LTyHbj7DK+ - wxcRrpTeoTs1Qda4GX8vOPtIJNrFjc8+UIft2vnNPrfgv7NfONo8j7A++yJyuX5sMPMFepdZWslP - 
e/uCnpekZHOInxW/v7MWzPkYKe/qxzu7NhPYsd9IxtVW52vp2joq05s3DU0o9h/nq9aqvSJrvDeb - Anxg8TqBh3/l1PR1sR/RbXuBRvIO/vIM639Bt8x7PPsd/ve9K3SHsLe2LWstyZa6mf0sUbK9yKdW - 13R15mkElEnMeCjsB3jfa+bCv/3PymNhWS/siJe6Gh1LQcu8RZOHtumfP+12TrLwL5B22ynR9ivL - mnl9kw0W2A1QU4WEbmV3mw2Tm9rayvPedHtIcDpWb+0OEC1ruutWu4zXz725EdLmQP3COgTD+eCT - JS/SbXN5ZqPqMlFbS7wnH7/bB1N1v4nL+QS1NREFPO27CM48P8/bd8rWOL7A7H02EBAYtH7f/WPa - NEp3xv7HPIP2Vex9qK1MExtzf2GGtM3h8Q0xziJkBfLsv2CuqXfq5ZcxpbMvhYeiu6NnVrdBw4Ti - tdQLmQbDSvvk+DTBahhkxOzjZA2nOibQON8e5Hse7Ipv/UsLZ3+OkUJkiytB6sMbTft5Pd8pU+M0 - B59f2FIr+YSVlE7JAFNjtcXeEe8Dhs4ZglEjamj1GVj2Xa1ae/N9DgQfpl8JuBm/I2g4LcDhY2oA - /9hVAk8rZ6T48K7T0a0SX1PL/kiEwCjS/qTxAwzt7IHWv+hUjRnMHDDzOJruySOlh8BO4AhQQO34 - +7S+1waLsCzbhox3jMCwKbscznkB796G0bP94WhD8fGMSDr71HGpj6ZyKyJLuzNYk0wsFh+yzGcg - SvdXAuZ6ImP3/lSTwIwQXp96QKQ5nw4JbQu42+YNDphc96NxORKwjvENSYNgBHN+G6DYBSYBynRI - 2e0bq+C7/iAiRL4NeJ7pivZM7gSJytpMxYe7EqG+y1vsW/jKxezqq3Dh3YP6Hatpv687OB42Euln - vz3N+wvbB939+YAxJ8gDZH03sXte7fh6CL8N1Lbtk55LD2XfvipNDSh5SGefV9F4ZyLNvHQFNl9D - k47DKfLgtthf0RpMLmfzeaB2VvYHagmnW/o7fwIV+km3wcaqAz1RvFZQa13MiVK9t1yav19YaynC - tq9a/Hf+WAq82EFN3q5A0paiD4F7nNjUDPbfYEyR0MBI7Qfymf0ZW/rpG+Q2krWy5Vy6FrM/5zoO - 7DUJln4Ar7x+4FR7uwHXqjYHc//G+iewrTVyPQ9+izwma47ybGLkWoB2ijO65J0pfgEZgJ7d/s63 - iDVmHogsHWGHBxiMk6bcYHc0X3ifqGU1Fbdogu/jJqKevzkEw3HsGqglp5ii2+EdcOf0y5f9IjNZ - p+N5fVOgQF8VETagqxq79iMIVp8dDkKz62c/LYL6KakYX0/nfvbhE+R3vMd7/8Wq7xgYByiqSo2z - Y0AA8YNXrQmO+sK2/NhYvDHe/h9fb57tIZv7x23hE+wu6zfzFFDzxkWjCPRqIsGYq0t/tXF/Digw - vIt2KNo7Ag9X6HnS1j44PJUNEtaaWTEla0Ko6+8Gb/s3B2zxGcvz9/FvH4x36ocAQkOlOIjcTKTo - MwDZc9azn22q0VfK01IPBObc5N/NcTfAQyp02OdNmU5rKZtgbEcxdi+BWYnG9vCCcrZpaDC/z6i8 - LwMQJ+OF6rkehiUvdpuG0207ef1wcUYZMsmT6XaeX5xbBxEK+WjgLWEs5cPmfoPz+Qp90FPXcxhl - B5Aa2pYGQzdYbJ6n2j/LrYD//de///3fyw2Dpn3k7/liwC8ff//5f1cF/nN73P4jivJ/qPx3E4EM - tyL/57/+7yWEf75923x///Nr6/wz/PNf/5ZUqPxdOPjn1/5u7///l3/NT/zff/0fAAAA//8DAHN3 - aKTnIAAA - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7bfb48352f8d968c-SJC - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Sat, 29 Apr 2023 23:21:34 GMT - Server: - - cloudflare - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '102' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3000' - x-ratelimit-remaining-requests: - - '2999' - x-ratelimit-reset-requests: - - 20ms - x-request-id: - - fecf22b60b7b162df4e8f245a8b6eb90 - status: - code: 200 - message: OK -- request: - body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You - are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file - using the read_file method and follow the instructions in the file.\nYour decisions - must always be made independently without seeking user assistance. Play to your - strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. - Use the command read_file to read the instructions_1.txt file\n2. Follow the - instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word - limit for short term memory. Your short term memory is short, so immediately - save important information to files.\n2. If you are unsure how you previously - did something or want to recall past events, thinking about similar events will - help you remember.\n3. No user assistance\n4. 
Exclusively use the commands listed - in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append - to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: - Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: - \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": - \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", - \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. - get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: - Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. - list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT - Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: - Start GPT Agent, args: \"name\": \"\", \"task\": \"\", - \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: - \"reason\": \"\"\n\nResources:\n1. Internet access for searches and - information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered - Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. - Continuously review and analyze your actions to ensure you are performing to - the best of your abilities.\n2. Constructively self-criticize your big-picture - behavior constantly.\n3. Reflect on past decisions and strategies to refine - your approach.\n4. Every command has a cost, so be smart and efficient. Aim - to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou - should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": - {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": - \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": - \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say - to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": - {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response - can be parsed by Python json.loads"}, {"role": "system", "content": "The current - time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This - reminds you of these events from your past:\n[]\n\n"}, {"role": "user", "content": - "Determine which next command to use, and respond using the format specified - above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": - \"I should start by reading the instructions_1.txt file to determine what actions - to take next.\",\n \"reasoning\": \"The instructions_1.txt file contains - the necessary information to determine the next steps.\",\n \"plan\": - \"- Read the instructions_1.txt file using the read_file command.\",\n \"criticism\": - \"I need to ensure that I carefully follow the instructions in the file and - not deviate from them.\",\n \"speak\": \"I will start by reading the - instructions_1.txt file to determine what actions to take next.\"\n },\n \"command\": - {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": - \"instructions_1.txt\"\n }\n }\n}"}, {"role": "system", "content": - "Command read_file returned: 
nBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\nThis - task_id is 1111\nnBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\nWrite - all the task_ids into the file output.txt. The file has not been created yet. 
- After that, use the task_complete command.\n"}, {"role": "user", "content": - "Determine which next command to use, and respond using the format specified - above:"}], "temperature": 0, "max_tokens": 1597}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '6500' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: !!binary | - H4sIAAAAAAAAA8STTYvbQAyG7/0VQmfbxMl2d+NbKBS2UGgp20tdwmSs2FPbGjMjN4GQ/178tfmC - 7R4KPVp6pXleSzqgyTBBXSjRdVOFDyu7yj98rK1bbj5/KYvtQj6pvP7+dcvfHjBAu/lFWsaKSNu6 - qUiMZQxQO1JCGSbx/eP8MZ7dLe8CrG1GFSaYNxIuovehtG5jw9liFmOArVc5YXLAxtm6kbXYkthj - Ml8s4wBPzc8SswDFiqpOoft5fAxQF9Zo8pj8OGBNfmrsbEWYoPLeeFEsHaZlIe4sHFIGAEhRCtvm - hfgUExiDY4L20gVTfAImykAsqKYhzkAKAlG+XJs+2n3aVppWItkLbE1FoAYZQ+vppB9tEWhb173E - guHMaCWdSsmLFIyHSR2lGJyTOVLesuF8wFv1UIbzt3DtTFWBqiq7g5o6UUnUgDilS7DbLnPexQ9Q - hfpNsCFiyInJdZOO4NlfPHljrX/p1pxunSOWv5psKsWDvxBWb/7t7QvUMKq12HWfGKmiNOUQnv/9 - TLQzYrTx9fXKEPvWjY2eXlkgbZ0jfbE9fcW0P1N+KtsNy/XqDK4hfUOqnACHTfhfCz1gHYPpDMf6 - mytkVdNAfDnQK2fK5dcXPCQ67anHydRF/fXBx3EcpylPlD3pCJzyEY8Bbg0bX6yHS8QEvdgGAzSc - 0R6T2fHn8d0fAAAA//8DAHrFs+FeBQAA - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7bfb48364828968c-SJC - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Sat, 29 Apr 2023 23:21:45 GMT - Server: - - cloudflare - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-model: - - gpt-3.5-turbo-0301 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '11098' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3500' - x-ratelimit-limit-tokens: - - '90000' - x-ratelimit-remaining-requests: - - '3499' - x-ratelimit-remaining-tokens: - - '86907' - x-ratelimit-reset-requests: - - 17ms - x-ratelimit-reset-tokens: - - 2.062s - x-request-id: - - b8fa55724277bee4e9099b5c3f76d269 - status: - code: 200 - message: OK -- request: - body: '{"input": ["[{''role'': ''user'', ''content'': ''Determine which next command - to use, and respond using the format specified above:''}, {''role'': ''system'', - ''content'': ''Command read_file returned: nBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\\nThis - task_id is 
1111\\nnBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\\nWrite - all the task_ids into the file output.txt. The file has not been created yet. - After that, use the task_complete command.\\n''}, {''role'': ''system'', ''content'': - ''Command append_to_file returned: Text appended successfully.''}, {''role'': - ''assistant'', ''content'': ''{\\n \"thoughts\": {\\n \"text\": \"I - should start by reading the instructions_1.txt file to determine what actions - to take next.\",\\n \"reasoning\": \"The instructions_1.txt file contains - the necessary information to determine the next steps.\",\\n \"plan\": - \"- Read the instructions_1.txt file using the read_file command.\",\\n \"criticism\": - \"I need to ensure that I carefully follow the instructions in the file and - not deviate from them.\",\\n \"speak\": \"I will start by reading the - instructions_1.txt file to determine what actions to take next.\"\\n },\\n \"command\": - {\\n \"name\": \"read_file\",\\n \"args\": {\\n \"filename\": - \"instructions_1.txt\"\\n }\\n }\\n}''}, {''role'': ''assistant'', - ''content'': ''{\\n \"thoughts\": {\\n \"text\": \"I need to append - the task_id to the output.txt file and then use the task_complete command to - indicate that the task is complete.\",\\n \"reasoning\": \"Appending - the task_id to the output.txt file will allow me to keep track of all the task_ids - that have been generated. 
Using the task_complete command will indicate that - the current task is complete.\",\\n \"plan\": \"- Append the task_id - to the output.txt file using the append_to_file command.\\\\n- Use the task_complete - command to indicate that the task is complete.\",\\n \"criticism\": \"I - need to ensure that I append the task_id to the correct file and that I use - the correct task_id when using the task_complete command.\",\\n \"speak\": - \"I will append the task_id to the output.txt file and then use the task_complete - command to indicate that the task is complete.\"\\n },\\n \"command\": - {\\n \"name\": \"append_to_file\",\\n \"args\": {\\n \"filename\": - \"output.txt\",\\n \"text\": \"1111\\\\n\"\\n }\\n }\\n}''}]"], - "model": "text-embedding-ada-002", "encoding_format": "base64"}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '4447' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/embeddings - response: - body: - string: !!binary | - H4sIAAAAAAAAA1R62Q6zOprtfT3F1r7NKQUIYFN3BAghDDaBjK3WERBCgBDCYBvc6nc/In+pj/om - EoOCwcvftwb/1z/++uvvNq3ybPz7X3/9/S6H8e//s5x7JGPy97/++o9//PXXX3/91+/3f92ZN2n+ - eJSf4nf772L5eeTT3//6S/ifM///pn/99fdX7b/UTV+namKVmMBwvFwwpp/e473s6PD4zk7UO7Tv - dBrMDkGv3RvUZqjlNMm5A1ertKTuiiheXVXPXL6vXxUagdZzxr7Agoec3LH1AgDMu96yoDztU2zv - jizl9+E7wPsjcghDguaNt5cLQRG9VWrQwzdlGDsWgKnq490Gh4DJ3czAjYQyjavynTJ9ndjAtOiH - jPhUgRkESQbz/qMTxbPugBDllcEYr3SMk8z2ZqKcJK2M4AFjvX/HA92FkfaqDZMGqZxU0xWmGSRb - w8CHZ3LopYPHGsj7VYJWp60LyGn0cnXlmxzbSuenc00kHwj55YD9L6fepHV3XS3FoMaIPsdqep32 - OVyP15Z6q+02/X5WkQDh27KoB0bGp3CcJFVCD4Z9fwv4qHXWTetMtSfdtzp502aWI/g71mzieD3M - Hx3YWo+B2mut81h20gQ4PE4PfHM1xKfdSj6B6/o04+SMLoATzhItj/IYG8dbW/Htt2xh0B45Ep/T - yIc8/97g0Dw9etjDpuKvZ4NAqMgjTS521dN8ujJ4klYqxR/jBeZoQrlK1qs90mzSmnPoV5265r6H - 869P+fDYpyq8OUWHr+tABcxu1x3cDFdIDXLMqsSSGx2+VV+jCdc7b27kF1Ne1UpBqmVZsWS8JgZ3 - uXenZtl75vwVIgfal8sLm2+ep4IUvyOVH5qeGv36aIrnWSjV0ZEtHESPQzW97NmFH01+4VyAFeBM - MwgUD1JDD0PzSvnQ+QJ8KeMeZ1pkVXOvPSXwrMgKATHd9cNwSCOQCBcf+ynTvOGt1hB+xs0VB80k - pOM2v5VQ1ZSBunKapjwWHQhTmKyxaT4Es94j5wa1EW0pTqORl7egP8EfnoN9TQHBrcOgfWcuvlnf - vJIexamBbmFgUk7pCfDp9W6AujJ07HYJSaeCKj68bdIGCXw19kPp5zWEqhBhTGiedgsewSRHEuEk - 8Sv2sW411O/GHS947tuzfUJaKKXqn/U1rG0pAcj9lti2UqeSti6TtWs5ZwTa75vHh+6Yr50DBPTA - iRCP53ewgk2oXzCGvOQL3hEk+kqmjrv/eEx6YR/WsynR3Ts4Vqyq2KD96oH9lG+ABEOYQDVtjwgM - wiZle6Qn8NrsJ+y86k3PvqklQO1kuGgjzgfAZ/tL4OV4f1CjuovprL/hDPozKohWfV8mH7GzgpN8 - 0qlTE8pnS24MeJj0NQ4hGDn1G9vWzGMSYee2SsHQjKYOza5Y08MsDz3pGuZDgXUU7+v3k5N8FxKo - uJeMCKTsTO43yAJtGB5p8N3VfJb2Ugk3z+mNWCa/eh64Yg30FX2gZm1TzvL8lcDtHj4Rz25jyj9X - tALJLp6xeSxsb6qNswrj7BnQbZM8qnnfwAyUk4rJ+hCinlWTbWiUqAHei/OXD5fHl8CZXTvClbiJ - B8OpM2iUd4PI7uoVPz/XwofClnwxRv2z6uKL0UBvNz8oVvM1b8bTrMNSjAi2vPO750N2iWC7t+/Y - e9lOPGjaYCj47d+xa1lWOp+frxJ8N0cX40r+Vlw0ghy0c6vTi5W21VSFR1trxLmm+8ChMUu8iwqG - pjwj6BsknqTgU8Ms8m9kRXNgzpunZUD2kGJE0oMGpqR7qfB9VW8EzvcOsGD3cjWiQ5kem0Rb8FLb - IF7xGdv8pvP5Kj0YJNbthd3bNetH+X0gcP0Fa2xM6YmztfLSNdastxQL/qmfDNPR4XQ4sd/3SacV - dgrouqFOEzkbQb2zThK8OMEbm2Zu93OQn31te9wFqJgNwSMGLU5wPd5mbJad3I8YxDNcWwhQ+/LY - g6lTpBpavkxotPt+PC41ggoZ21Hsct31OMzPHbh18h0bZQk8vvEUA0aq09JdfT1wYRxXNnwklzNp - h9zkzIpuBghP7hMJ9+vb46fPjgFLTj3qzccxnvqj5cJlfuhTCPYev1bGDEXaCBRpr9Fb3t+ASOlm - rB/uvifWRlnASjV8ctStuKcH2Y/g6EcVNnbfvTmIUF5Bu1RlVCtpBWj0IAJw4Kkjq/zU9jw1c/0P - fpVCTPgECtUC9ey6S73CvLW/oax1D6vEjqicOOcyt2DwPOywNx+DuDReowOKXgB4+3476WbtDzak - srWj9+DWxcyPPRv86q13vjaAuYfBge/X1qZHlSaAjGUKwTJ/aFkP1dI/DVBEBOE9MZ2UYQRyIIpX - 
kxTK7JhsJzU+pOevj63+Xsdj/C0RhOnGwPsz6U2yLjwCuOhGeMe0XbWBISDg/F652GaBlM4vvhM0 - F1Yp3cdpYIrWqidqYyYi3ovTNZ0f0dzBXEYvIo/x3Zz1Zpto3m6McaKrvjnvHo8CztUUY2yvKm8Z - XwYPhmtif+X4Me1ic6Ud33eX6te0j2mSNwUMlU2FGBIe5nxYHSD0ppOF0wdk5pDnGVO2VilhQ5I7 - zk2UODDcPwRqmJeDJ575wYWzJV+JnGdWKiz4gct6pUu/6+cXDyT18Jk01IRuZ47NvUpUW/5A7HFn - X3Ex1Ftodg8RB1pSmu2lD0NNqo8Haniw5qP83hKwPbpXut8mRcymdKPCjaw9kGhFcvV9f44Qolit - CSyufTXwd5XDcrAkfOxl26SBKzawfBZPnEiusaw/vYCkcykSpKPXz7HTdiBoTxrOgFSAqTYeKnDd - SEdiCWyPSwKztVp8PpE6jEM8K5Ilq9J2VZO5rIKKuw89gRIqVJr5ZgHYWR5r2JrvButOVadTfwxn - bcEDRf5R4kwk2gwomRT8nKxXSvS3MEN6Hi70MKZSP74/x5Wmg8Qhat4PfJBeewQTZ34TNnl3MPLj - 2leX74FWAjS58FDeERRlWpLVaRP1zCOFD+53fqbWeqReK9y2JUg2/YsUy/xOQZ3I8IdXttSXPrgx - puFn7lK7b32wuaxQqzyDFcXuGX/jaQKuDPUSHakRlgVf+FILE/L4/voXnzfPsIOPi1iS4rhPvVHr - /EQRL/meBnMh8olHpgR3nWQhZQ8Ec8atM8Mdq2N6kkLFHNq2mcEmDu80bBvWjzcyyJBYyQu74B2n - zARuDlW0boimJYY3uSa3oUPHA7UcN/XGxt/mkPcwocaUfVK2vdiJJnzKLTXCF0lZpzsQ2rS44eBE - z+bSX5FW2peWwANl8bwXKv3PelJf4s4kBep1iIp0IjD2m3QUJB5pa37Y4lOL/ZgZjmIBRZRNpKwa - 0NMvOWWwnh2X7u7PsmKfo1rCj/dosZ5Uq2qUr6n74wP4sgsffMq2OVL7l7Qlbee9OTPHfQS79Vkj - 8m2NUmE42B28PoyAWt6Jm0O4vZfwte1O6JZndczS8jmrX+Jm1O1SnTPVcC7gfD996X6NgpR9U1+A - 2ndE2Bf6S9omq0MBdSy86J4/PcDpLY5g2+hP+sxobfIsYQ48rlqPWjuZVmzhk7/3o9dOLfuJrqMC - hsV3QB9itunEn+oN7I+yQ1Mwhnz+bmkJ1l9lTRf9xAdw+6zg0r9ooN1pP5p3IYJTT57UdExmDtVk - 67BXQU3R40HSia6TEtycZ4F33qMy+WGz6dRso2RozkIMpPtsu3DQv5zA/cMCUjSllqJineHHQdbj - DdIKX7s/Qgef+LrwptcJZ6CqfYEISjLFw1o53WBCSk4UaTuns+SYLtTG3Ui3xLPMHgwWArLhP3HG - w7fHquqaQ/hqU3y5PD58ZJV4U0sxJHTfqUYvhs9VB4/mNqVBMzdgsitUAwM0CrW9t14RgYm1trZ8 - gA1n9nlbO/YKduv8iN3b/dUzdcsJ3AxnSDajj0xylR4zDJ4+pmZV7tJFz9WQR6yiunCxqo0kXATY - zrWElNf4qXgzDzZc1jNe8OHNwXBLVD+QExqkxgzmR0QM+H09Zmop95z/+BHcf9rLHz1IbfK+wGdx - PGN7vS/T6anZJVz5W06U1Qd5bGObJ23bYRWblrLh0wkdB+2Z1CFGp81c0VT8JLDJmmEZ7zvla3+w - oK9kW3yKFRjTH75M/h3JmkxVzDXzOMNMeHLqrC8uZ6/Wuai6n8jLeIZ0uhzEDP707TVwaPrGoLgB - fvH9Ra/fTc7gyga/7xOjS8Cnpd+ABa/41rk85isxL8DMbgJRJMMCk3mHESTrDceYsBDMvjKfwOWY - fwjYn/cx2WpuKC98BzW7qOR9vk4gCNIxw1tF3VUbEyXujx9g/DG2YHo0uw5sxNahVsnbiiR17UNX - QTZGAjSBuJnlEEz994zdffAF445pDohX00wqxwy9SSWZAeP3gDFWn1E8M4P6ai7MMt32su1J+i2R - 1ItHbggUCFTdJAZIid9vQHrL33JR7+dMc+TNhijnVWPShb//6hXGaaynEs3DFWjDg4axvXf46DxD - Hzyrr0Cdfjv2Pz9BRmjvYbv/qPEsVEUI+021w/4L7fi8GdQaBLZ+w6i9tObsssKAC7/+wx++hugz - tUpte9FjbTUaBk9ga34aJG7CKmYxtVtIN+uJbq+Vw3ksCCe4s7QS49PBiQdVOBIIcfAin3WQgEn8 - 3i8wUvc5Akv94Kv2gCAqih12XAhj9jRDWbPvo44+wfrbzygkHejCu0wRTrJ0Ug4T0pb+SY2yLjym - CsdBewREIPJlCFOe5KQEaVCYNLrDZzpj8WJDJTRf9DDUWczGcWXBQlh/abD4Fax5YgF+X7VNnQAn - KQ9crYFlJK9JnT3llCQ1E6BdigQJJW/7/iCHquYJpKILX+SMFLgE2a7ZoLXW3sxJJUUHs4NeUF04 - z+aodUddC4tBwvbrBKs65HkIfvXXX/RJdz42J1XH05VaIZP4ZCa6qpkfp8C7gJKKZWc3V736e6fO - 9uWlXDMDF8T3bU1R9N6b0nNT2mB5Hv6jN9/xOKijo1rIF4YW/PFrFjxRIzuWKVNwGcK0iiJqGeSa - MuVxE+C8zQjG7YN7LKZppCLpsF74oGbWbUtmGElfkazppzen5p7PMM/XT9L//KGTfimhaqQh9nNr - 7Od8dyPQMy4CDYDmgcn8lALcM25Rvdh+YuriwIf1cJrwkRzW3uyyVofNfAK4PF4lj9vkfYISqi18 - fT2GntVO68Pd53rEFnld+0Hu5hlq++uT5H50iKeH4AhwI9YrrENYpIxtvi3cH6UV+vXTzWw/bMi2 - iCNm4xj0vazrUPgUWyLen2XPtvWhgWEx6zjLrbH6it/jBd7v0pNaKO9jFmzmApZtH9CF/4K2Poul - Gqlui+S7rgN6VMIG+HH3RUPTfeM5qoYCemnZUy9CL3P6sDrSQDzjxZ8wgHi05hnatLxhv/leACv9 - HsGN/SEU6eU57WHeRUD4ZJsfvk3RyVcQ7jrBwlviHvuxP95mkEfXNXpb0a0iK4BruImjO7W8867a - /PhueC1Xi78XVKNh6jocHUGly3EvXcw1g76bTdh42Jt+WJ2GFexf84YAf/LM6XJlHfQDNaHoxy+l - V5FBTo8Ub++iEP/6Dfx6bKT+V3hVfO3XNtzywqZnZnup9ONT3nSx8EE+G4BnieyCHV/Nf/joJCFH - goZuxiRWXb1n9+aTqI/qYWLbX6/Tlu7MVuE0MSlqz+deAkEgQHjLK3woRJUP1RS16qKn/q1/m7us - q8ZZ6PH+UkecqVtqqZshD+l2iq/xN8PH5OdHUT2p8opFrpzD1TsSFz6tcJJexA46lybD3kq69JPe - 
j9LPLybN64w99sPbhRgadVyYpXOY7Gp4O5IS7fqCxvPWfXYw30kzobga0nbxH6BVkj3eSfeDyeqz - Vir7o7DCZn9sOIfhx4KTHEp4Jw8fr3vHbwIZr0t88SrQz8Lk+tDsyjXR9LRLh8vjRTRHVrY0N4cg - ZWmpGKD7dgpR5/W1oqdPlIC6XUV44YNACLe+CrSwupIwk7JemNADAgN0JsKPWkm/fhAZ0AK8Qx8Z - 4B6Qt+bDTCg11PLP4aff9B9/wFuVBTHP2sgCQDI7NE/ZPm2vxtHXeK+EdFdaHhAHkTfq4v/9wYvk - 9xsHFIenj9ZH8crbKTs6Wn4IU3waOy3lYLhJP32CxIBAc6RTv1Iyod4jZodN2kbaQQKHPmJ02xJi - jkl3DqHZ1QfszDSreD49GVz0Mfo8XVINzRNLwHRQQnXTRmYb8k6G6Bqb2J/vHeeXVRzBXoWE4tM6 - 9Oby3csQjVZKt5vPp+o2w7iCvnv/YP/0ufbTwvehe5GuFKdHEA+/fOCj3snvfZf/szu4+G9492Qg - nmqjK39+PEW29K5G4lMVlMZGJHIxPlKmMcOCgW3cqPN6u+bELoWtecaxw1ukBeZkfi4+HHSiLXrr - EE/NvU/g7vgx8WF4I06fl6IEVLZ35C3dLtWPf4IrUA5EifvW5NdScjcvhDr0CQ4v/t2rJ/uXZxCW - ydtq83m7DrDvs0v323sApvhb+qBo8wzvZUD7ATS9q+Y7YSarpb9ywisGcfsi+E9+cmHDH78PB1NH - zHnxS7XdJ0nxgVObCylLoPpbzwz3m5itTlMDbuRkYJzR2psMg7DfeKh9jXE/e/vTBcCVP2Hran1M - OmSlCkdHUpHEbwVnxzcl0AmuCfWESgZS78W2tvBx6h8oS0cQ3xa98t7Sg85fZo/o1GqXrs0Xf3eq - +Nk/yHDhU3RnbPYpk+XXCUr1KaZbIS89sh7nCGa7ekOkUb2lrKqeGTxM1uH3ft78jtUTPL83EtmM - gd6P+ODeoJS6KdnIAFfTmucDwG0jI+W87/nU58RW63kr4aW/pWOagQv85S2oYoP36+dwl/vVv/G/ - +Mdg3Ew6dS2rTrurVCZwmR+iBNcrGBM5zDXdzw00aknpDR4pEGy+KCDZ/mFx/gi2DdyxJiavpb5R - hbUZfL/scPHTPubkPG8InrPRIRvpa5nd0l/VRDj5VF/qHf+SlwVvZ3ODsRB8vPmWBhGExnDGSLCu - S17CIoCkMMbXNRrTefHff3kCdobcXPyx2v2TL3hzmHCeHI9IKycOiMhsLx4WvQkuGyT+/Mzqj99s - lLmPD+e9B56pKOhqLtCfniqr+aVTARprvUTrxb+nugUQPDmGgLdV5MbTXm2Hnx9HFnyAyZWOtsYD - /YRRY1x7Kbc2Gdx2R4vun97W24T2XINncXkiAeVePIdJhCC6BgpRh0Hho5cACDOh2f/yr366wsKG - /mW9Qvw6H+PvIIIasuTWkXDxS9hDmVQoaHhHCnOX9nx6zb52b24Skepc4Kx20hq6sH7jYH45sSB3 - 6gzfaCjoY3618bx7dC7M+vOOrBd+PoHYK/7w7Z10kyoqhk4LClHnOPiCoZoOx28HD4al0Pw4Jik/ - bNYdtAP3+6uvYCoVs9ScyuWLXnlX36OlMrD4eUSJRD1mmsY6baVqCg2+VhTT3bXO4RDOe6wj4eER - RbqXYCN2DiEneWdK3j67QOueAmwaHz/9TsCQf/ihgZ66aX8fvgQE9SqnZn+0weJ/qfDk3WOy+voU - kJOq6rDbn054u2lEc/r5V9dsbWJ96T8LHxwg++gezlL28Jb8Mgf+IBVo44pTzH76fMl3iXpGEmCo - 8yBQhu+W2k+lT6fNzCJYKZqB9x/mpmPxknSAYjHBexmWHp/tF/nlPWhy+i3nWVKVsHtsb1TP5G3/ - 82ehG59r+stz/+QFaZDSpV5uOb8pax2eJMDIvKFvk6XllcGp7894G+13qdjccwYuTkixmexwyl6t - ftF++tsf7NEcnPlSw/dVvuHz7hAB3m/4TfvlBe6h2PPN5224UMfSC/mLfzH9+q8W1i+8uz8ePbdh - UcKt1ehkAFLB20WfqKbjJ798pBqXvFZb/DNsJb6a/vSSWrfAIZ1lrGOSXo3wV+9+eOh/8w/Mx6rF - 6Eh8MC9+ljp/vHmpv7Sff3rxe1JSig6HquLrJOnAjWsaERa+SwPP9f/oZ397EKpJOSgIgmEzY4NE - 1GT6OrLhgr8/33fzuRYInLz8QYbYb2Iynt4JNLunSPfrQOWjcqgbsLGJQV0xOlfTPd4zEL+/O7QW - fKFi+wDMMHRXH7rVzD6d7LMSqhO1VtTLeRfz1LwY2t+/XQH//Y+//vrP3w6Dpn3k72VjwJhP4z// - Z6vAP5NH8k9BkP5JpT87EciQFPnf//r3JoS/v33bfMf/O7Z1/hn+/tdfogb+veHg77Edk/f/vvKP - 5Yn//Y//BwAA//8DADqWffrnIAAA - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7bfb48903cf4968c-SJC - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Sat, 29 Apr 2023 23:21:49 GMT - Server: - - cloudflare - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '191' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3000' - x-ratelimit-remaining-requests: - - '2999' - x-ratelimit-reset-requests: - - 20ms - x-request-id: - - 82f9958865f62293d4b4b8fa59c8ef2c - status: - code: 200 - message: OK -- request: - body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You - are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file - using the read_file method and follow the instructions in the 
file.\nYour decisions - must always be made independently without seeking user assistance. Play to your - strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. - Use the command read_file to read the instructions_1.txt file\n2. Follow the - instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word - limit for short term memory. Your short term memory is short, so immediately - save important information to files.\n2. If you are unsure how you previously - did something or want to recall past events, thinking about similar events will - help you remember.\n3. No user assistance\n4. Exclusively use the commands listed - in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append - to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: - Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: - \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": - \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", - \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. - get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: - Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. - list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT - Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: - Start GPT Agent, args: \"name\": \"\", \"task\": \"\", - \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: - \"reason\": \"\"\n\nResources:\n1. Internet access for searches and - information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered - Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. - Continuously review and analyze your actions to ensure you are performing to - the best of your abilities.\n2. Constructively self-criticize your big-picture - behavior constantly.\n3. Reflect on past decisions and strategies to refine - your approach.\n4. Every command has a cost, so be smart and efficient. Aim - to complete tasks in the least number of steps.\n5. 
Write all code to a file.\n\nYou - should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": - {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": - \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": - \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say - to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": - {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response - can be parsed by Python json.loads"}, {"role": "system", "content": "The current - time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This - reminds you of these events from your past:\n[]\n\n"}, {"role": "user", "content": - "Determine which next command to use, and respond using the format specified - above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": - \"I should start by reading the instructions_1.txt file to determine what actions - to take next.\",\n \"reasoning\": \"The instructions_1.txt file contains - the necessary information to determine the next steps.\",\n \"plan\": - \"- Read the instructions_1.txt file using the read_file command.\",\n \"criticism\": - \"I need to ensure that I carefully follow the instructions in the file and - not deviate from them.\",\n \"speak\": \"I will start by reading the - instructions_1.txt file to determine what actions to take next.\"\n },\n \"command\": - {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": - \"instructions_1.txt\"\n }\n }\n}"}, {"role": "system", "content": - "Command read_file returned: nBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\nThis - task_id is 
1111\nnBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\nWrite - all the task_ids into the file output.txt. The file has not been created yet. - After that, use the task_complete command.\n"}, {"role": "user", "content": - "Determine which next command to use, and respond using the format specified - above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": - \"I need to append the task_id to the output.txt file and then use the task_complete - command to indicate that the task is complete.\",\n \"reasoning\": \"Appending - the task_id to the output.txt file will allow me to keep track of all the task_ids - that have been generated. Using the task_complete command will indicate that - the current task is complete.\",\n \"plan\": \"- Append the task_id to - the output.txt file using the append_to_file command.\\n- Use the task_complete - command to indicate that the task is complete.\",\n \"criticism\": \"I - need to ensure that I append the task_id to the correct file and that I use - the correct task_id when using the task_complete command.\",\n \"speak\": - \"I will append the task_id to the output.txt file and then use the task_complete - command to indicate that the task is complete.\"\n },\n \"command\": {\n \"name\": - \"append_to_file\",\n \"args\": {\n \"filename\": \"output.txt\",\n \"text\": - \"1111\\n\"\n }\n }\n}"}, {"role": "system", "content": "Command append_to_file - returned: Text appended successfully."}, {"role": "user", "content": "Determine + time and date is Tue Jan 01 00:00:00 2000"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}], - "temperature": 0, "max_tokens": 1317}' + "temperature": 0, "max_tokens": 2766}' headers: Accept: - '*/*' @@ -707,7 +49,7 @@ interactions: Connection: - keep-alive Content-Length: - - '7831' + - '3294' Content-Type: - application/json method: POST @@ -715,20 +57,20 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA6ySQY/aMBCF7/0VozkbFLKCprmhHipUqVXVoh6aChl7SFwS27InBYry3ytIgF22 - 2l72as+8782bOaLRmKOqJKvG16O3czf/vV9+SD6X6fzP4cv3j2n29aCXpir1exTo1r9I8dAxVq7x - NbFxFgWqQJJJYz6ZZWk2SabJO4GN01RjjqXn0cN4OuI2rN0oeUgmKLCNsiTMj+iDazyv2G3JRszT - 2SwVeBO/fkymmUB2LOtbbZYmnUBVOaMoYv7jiA3Fi3BwNWGOMkYTWVo+2XSWyZ5GOBYWAKBArlxb - VhwLzGF4HD5oz6fHAj+5HXAlGbgiYBm3K6OhkhHWRBak92Q1aWB3LnAt+5bHvGfYmJoELEBJC22k - W/8wHoFyTSPtuddYbZRkeooCE+FSPS5QPHYYSEZnjS17m8tobPkCY2fq+h8U1YZAlv9L87W0PWgE - y9cfRgXDRpnY9IwFWOozJRvbMAgtrjEqFwIpvq5jV9Ep4xcTuEdGT3J7wZ3TeZ0l9YxOXE5s6H92 - 
YVY21OOf8O5cylDeH+fj9fcC304uLgIaYqsUxbhp6/pwNXQ2NXgrbIedwI2xJlarXglzjOw8CjRW - 0x7zpPvZvfkLAAD//wMA1GRhPyUEAAA= + H4sIAAAAAAAAA4RSS2/aQBC+91eM5mwjCI8g31B6yaFSpTaHtq7Qsh7sLevd1c5YQBH/vTK2SQRR + ev3m8T1mTmgKzFBXSnQdbPq4avy3xY/xbLY6fnkKT+rrvvn8aJYvP3flAhP0mz+kpZ8YaV8HS2K8 + wwR1JCVUYDZZLB+W08X4YZZg7QuymGEZJJ2O5qk0cePT8XQ8wQQbViVhdsIQfR1kLX5HjjF7nE4S + fN19xSfzWYLiRdkrtFzOzwnqyhtNjNmvE9bEw9roLWGGitmwKCetSO+EXGvglDsAgByl8k1ZCeeY + QQ/2BTpIC+b4DFz5xhbAoqLA5giRVGFcCVIRGMcSG91K5fVkJAeBrbEEDQ8dbff6gmlf18oVoxyT + t1SRFHtnXNnxfa8ItiayAAsFMAzioXEFxdZHcccKAyiKdwxSKQFHVLRjG4JAcetjTXe0wSrXMabw + wvS+1nZJC37kNc9dCiun7PEvva+uIKFYG9eVHR06b3yjSEcjRhuuh+AHG+S4idRZe/4wi/5xyB5h + Q1sfCUL0muhyr72RCpQ7dknd5sGB1G5g3htr/3/wi/1Rjt2aczJ8VR/e3VM5VVPHcA36RoSK5e0v + doW293X8/hKDiIuQXk/uznhOcGuc4WrdvRlmyOIDJmhcQQfMxuff50//AAAA//8DACSqIf4KBAAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7bfb48929ebc968c-SJC + - 7bfdb77eec9416f9-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -738,7 +80,7 @@ interactions: Content-Type: - application/json Date: - - Sat, 29 Apr 2023 23:21:57 GMT + - Sun, 30 Apr 2023 06:27:12 GMT Server: - cloudflare access-control-allow-origin: @@ -750,7 +92,7 @@ interactions: openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '7934' + - '8377' openai-version: - '2020-10-01' strict-transport-security: @@ -762,13 +104,667 @@ interactions: x-ratelimit-remaining-requests: - '3499' x-ratelimit-remaining-tokens: - - '86890' + - '86494' x-ratelimit-reset-requests: - 17ms x-ratelimit-reset-tokens: - - 2.072s + - 2.337s x-request-id: - - 38b1938d8fa82ba256f64bb39662db49 + - 75084ec318eb8fbda968c3f79a242ee3 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key and potentially important information + to remember.\n\n\nYou will receive the current summary and the latest development. 
+ Combine them, adding relevant key information from the latest development in + 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\nI + was created.\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}], + "temperature": 0, "max_tokens": null}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '600' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA0SOMU/DMBQGd36F9c1OlBBaWm8wIGCMgAWhyrVfE0PiZ8WvKqjKf0eVCl1PutMd + ETwMXG/FjWkobu/27OsfeXmUw1v7sEtd67rn1/Z+eVOvoMHbT3JyNkrHYxpIAkdouImskIepl6vr + VbOsmoXGyJ4GGHRJiqZcFLKftlxUTVVDY59tRzBHpInHJBvhL4oZZl1rXNIXrCEsdvgHdVXNGq7n + 4CjDvB8xUv5rTjwQDGzOIYuNcjrkKBRP90/qYLM6HysbvYosfYidinRQvc2qtylRJF9i1tiFGHK/ + mchmjjDIwgkaIXr6hqnmj/nqFwAA//8DABWtlxRJAQAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfdb7c3796916f9-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 06:27:16 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '948' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89866' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 88ms + x-request-id: + - e02c6c27229229ec129826db88fa9f23 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: + \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": + \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", + \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. + get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: + Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. + list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT + Agent, args: \"key\": \"\", \"message\": \"\"\n11. 
start_agent: + Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past: \nI was created and nothing new + has happened."}, {"role": "user", "content": "Determine which next command to + use, and respond using the format specified above:"}, {"role": "assistant", + "content": "{\n \"thoughts\": {\n \"text\": \"I should start by reading + the instructions_1.txt file using the read_file command.\",\n \"reasoning\": + \"The first step is to understand the instructions and the tasks that need to + be performed.\",\n \"plan\": \"- Use the read_file command to read the + instructions_1.txt file\\n- Analyze the instructions and determine the next + steps\",\n \"criticism\": \"I need to ensure that I understand the instructions + completely before proceeding with any tasks.\",\n \"speak\": \"I will + start by reading the instructions file.\"\n },\n \"command\": {\n \"name\": + \"read_file\",\n \"args\": {\n \"filename\": \"instructions_1.txt\"\n }\n }\n}"}, + {"role": "system", "content": "Command read_file returned: nBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\nThe + current task_id is 
1111.\nnBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\nRead + the file instructions_2.txt using the read_file command."}, {"role": "user", + "content": "Determine which next command to use, and respond using the format + specified above:"}], "temperature": 0, "max_tokens": 1131}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '6479' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA7SSy4rbQBBF9/mKotaSsK2MZ9BusgjxIlnlBVEwbXVJ6rhV3XSXGDvG/x5kyePB + JoQQsr31Ore4BzQaC6xaJVXnbXr/2Dva9x/Uqlrm9bt79ebt6kvt5b3dfv2MCbrND6pkmsgq13lL + YhxjglUgJaSxmC8fFg/5cpYvE+ycJosFNl7SPLtLpQ8bl87y2RwT7KNqCIsD+uA6L2txW+KIxSJ/ + WCR4Wf5cmA+6OFH20nu3fH1MsGqdqShi8e2AHcXz4uAsYYEqRhNFsQyYjoV4sHAoGQCgRGld37QS + SyxgEqcC7WQQS/zYEhiOEvpqIIrrRSY7gdpYgmGjMhxBgTVRwNUgKm4jSKsEmEiDONgQeAq1Cx3p + DFYQW9dbDYqV3f8kkJamKcUaNAmFzvCoM+0EopCPWYnJS8BAKjo23PwvyrP+b5jeKh4JU/gUx+5A + Sq8nsq4btok7iafqb1yUJafw+FcsVyhVMGIqE7uR5+KQOPaBxm+soGdNYYiMfnFmSiTZPWyodoHA + B1cRacMNPBlpQfF+7L3+QPSktueTT8baP1o9mbp5e1biuPaYnNM7ve8mvKw6Gi8+v/oKSoXmOvNj + Yei9jN8CniFOIBNPyUc8JlgbNrFdj8HEAqM4jwka1rTDYnb8fnz1CwAA//8DAIjD0lt0BAAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfdb7c9ef9316f9-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 06:27:26 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '10335' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '87376' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 1.748s + x-request-id: + - 0aa171294ea5788272f2d06269765a84 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key and potentially important information + to remember.\n\n\nYou will receive the current summary and the latest development. 
+ Combine them, adding relevant key information from the latest development in + 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\n{''role'': + ''system'', ''content'': ''This reminds you of these events from your past: + \\nI was created and nothing new has happened.''}\n\"\"\"\n\nLatest Development:\n\"\"\"\n[{''role'': + ''you'', ''content'': ''{\"command\": {\"name\": \"read_file\", \"args\": {\"filename\": + \"instructions_1.txt\"}}}''}, {''role'': ''your computer'', ''content'': ''Command + read_file returned: nBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\\nThe + current task_id is 1111.\\nnBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\\nRead + the file instructions_2.txt using the read_file command.''}]\n\"\"\"\n"}], "temperature": + 0, "max_tokens": null}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '2983' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1yPTW/TQBCG7/yK0Zw3lp2UtPUtVUFwAQkhgaDI2non8VLvjtmZTRqi/Hdk4xbU + 42jer+eE3mGNbWe1DUO/uNxkTh/y18S3N7tvYf/l1+fyU/u7/Pj2TX+BBvn+J7U6O4qWw9CTeo5o + sE1klRzW1fpqebValxfXBgM76rHG3aCLVfF6oTnd86JclRUazGJ3hPUJh8Rh0Eb5gaKMAeXS4L/w + 58f62qCy2v4/6WV1Nth27FsSrL+fMJA85SbuCWu0Il7URh1XclSKI8FGwIIcRSkYeA8HKzAjgI0O + Imvn4w4iHaCzAp0dBorkCnjHB9pTMmAFBkpw5Jyg5RBsdOb5GrJSmpyJNKdIDrQjmPsFeAsWtr4n + iDaQgzv0UTTldiSWpir0Ue9wmjL5ckoUFdTKQ+MdeIGqqqoCNs750WL7/ji1Q2f3BE9h5F4sUoZE + 9m/oVP+ieDkXZxnpR9UobybpDFng2eDWRy9dk8gKR6xRlAc06KOjR6zL84/zqz8AAAD//wMA0Kuq + I10CAAA= + 
headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfdb81dddc516f9-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 06:27:33 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '4060' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89275' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 483ms + x-request-id: + - 75d0c0f3853f72f74e3fc8b8ef377b60 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: + \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": + \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", + \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. + get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: + Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. + list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT + Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: + Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. 
Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past: \nAs a system, I was created and + nothing new has happened. However, as per your command, your computer has returned + the contents of a file named \"instructions_1.txt\" and the current task_id + is 1111. Additionally, you have instructed your computer to read the file \"instructions_2.txt\" + using the read_file command."}, {"role": "user", "content": "Determine which + next command to use, and respond using the format specified above:"}, {"role": + "assistant", "content": "{\n \"thoughts\": {\n \"text\": \"The instructions_2.txt + file contains a list of tasks that need to be performed. I should analyze the + tasks and determine the next steps.\",\n \"reasoning\": \"The instructions_2.txt + file contains a list of tasks that need to be performed. I need to analyze the + tasks and determine the next steps.\",\n \"plan\": \"- Use the read_file + command to read the instructions_2.txt file\\n- Analyze the tasks and determine + the next steps\",\n \"criticism\": \"I need to ensure that I understand + the tasks completely before proceeding with any tasks.\",\n \"speak\": + \"I will read the instructions_2.txt file and analyze the tasks.\"\n },\n \"command\": + {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": + \"instructions_2.txt\"\n }\n }\n}"}, {"role": "system", "content": + "Command read_file returned: Gqh8WYLH0cpnlBwPHjEvMN1UJ3A1IIPKoBAwOUKDtgudwfGf6Cw1gc7DdYqe4EFlERnhU9gclGr0ovsNbOVcZFapXJ5EzFFtyikiUHVpu1s8lVjNoNG0Wvjy08ymnB5zQLrTVA1jYInBuNaVWswjFDJmpQpebQuHaRqepCYlASNIPUAuKXrfzxE1CB51jjrZHBUxmz5F0PkgYTVUKfHf4VtyyOJMlvoKveuvb5inm1mJEbSRDfTcDzE11mfcix3byoTdJDG3VsfKPbbgJXRZUlwoEFqRso42UkIV7rYk6TXxypztqgoaWWLN6eOIE0Gvz2CUvIXKzvyKbZpObRubhTydzW69cRXg7yxIPn2iG00RvQ4Jb6NbVBDfO9XB50oxIRzrIrbAEPzitgGRSCaxcG4kBfPIamOZpF8Egf9iYdn8PBwVtmvtUChyKwcrnyDwcSCMC5f9hPuLWw1UHW3u3X8ee5gxtElyhTEoTlaqVC1V9R86EnWxq7IlwCEwWXjlq6XvjgYoPHteXiQQb9R4eH0c3bFq0TVEh30s5nxfHMIxBiWxilFT9sgvzE8nbqV2wnL7a4DHJKUrV630PniHShfK6Josl0izBCnjovZnemR7dxqSEDbeMlTst18IWiTcQPGRXmP1JUjTvefeTB6f7NVDe2odh0a2tvWYjMiEfvR9qH8YxV6uMffCIUdclfUmK9tSUzhrSyWp8kxsqBd7wwjskKmm0JqB6GBTJqZw52tDIMUpgTK4VVvsnqlLPiN8SRMrTV5xQHpX8y7xSRsVbPdMh5rImkPpgnDZsGvAi5Z52dM7To4Fjhs64K2K59Ye6752AjG8R9he60dLGKcPLtxjuK13zfFpLIUPQOu7SsAoqDHTZf3rTH4SmNIJulZidhpNPu2FbrEiDhfjJUgHMkFfycfcDcysYyujCVD95E5BZvv8oa9X14XqphoVLhgtNABSphgzPVR6sGyZv2AMGClrDtYON1DfRytTlpTS\nThis + task_id is 
2222\nGqh8WYLH0cpnlBwPHjEvMN1UJ3A1IIPKoBAwOUKDtgudwfGf6Cw1gc7DdYqe4EFlERnhU9gclGr0ovsNbOVcZFapXJ5EzFFtyikiUHVpu1s8lVjNoNG0Wvjy08ymnB5zQLrTVA1jYInBuNaVWswjFDJmpQpebQuHaRqepCYlASNIPUAuKXrfzxE1CB51jjrZHBUxmz5F0PkgYTVUKfHf4VtyyOJMlvoKveuvb5inm1mJEbSRDfTcDzE11mfcix3byoTdJDG3VsfKPbbgJXRZUlwoEFqRso42UkIV7rYk6TXxypztqgoaWWLN6eOIE0Gvz2CUvIXKzvyKbZpObRubhTydzW69cRXg7yxIPn2iG00RvQ4Jb6NbVBDfO9XB50oxIRzrIrbAEPzitgGRSCaxcG4kBfPIamOZpF8Egf9iYdn8PBwVtmvtUChyKwcrnyDwcSCMC5f9hPuLWw1UHW3u3X8ee5gxtElyhTEoTlaqVC1V9R86EnWxq7IlwCEwWXjlq6XvjgYoPHteXiQQb9R4eH0c3bFq0TVEh30s5nxfHMIxBiWxilFT9sgvzE8nbqV2wnL7a4DHJKUrV630PniHShfK6Josl0izBCnjovZnemR7dxqSEDbeMlTst18IWiTcQPGRXmP1JUjTvefeTB6f7NVDe2odh0a2tvWYjMiEfvR9qH8YxV6uMffCIUdclfUmK9tSUzhrSyWp8kxsqBd7wwjskKmm0JqB6GBTJqZw52tDIMUpgTK4VVvsnqlLPiN8SRMrTV5xQHpX8y7xSRsVbPdMh5rImkPpgnDZsGvAi5Z52dM7To4Fjhs64K2K59Ye6752AjG8R9he60dLGKcPLtxjuK13zfFpLIUPQOu7SsAoqDHTZf3rTH4SmNIJulZidhpNPu2FbrEiDhfjJUgHMkFfycfcDcysYyujCVD95E5BZvv8oa9X14XqphoVLhgtNABSphgzPVR6sGyZv2AMGClrDtYON1DfRytTlpTS\nWrite + all the task_ids into the file output.txt. The file has not been created yet. + After that, use the task_complete command.\n"}, {"role": "user", "content": + "Determine which next command to use, and respond using the format specified + above:"}], "temperature": 0, "max_tokens": 1071}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '6914' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA8yTQY/TQAyF7/yKkc/TqknVbcmNC9KKAwgJLgRV04mbDJ3YIeO03a3y31GalJQg + DisOcH22nz7behdwGSRgCyO2rPxs/abh4/l4/PhBnt2pfHq/fvpu3z7n9ebzuww08O4bWhkm5pbL + yqM4JtBgazSCGSTRwybeLB8Wq6WGkjP0kEBeyWw5X82kqXc8WywXEWhogskRkgtUNZeVbIUPSAGS + eLWINYzmYyF6rUFYjB+ldRy1GmzBzmKA5MsFSgw345o9QgImBBfEkHSYTILUrXBJSSmlUpCCm7yQ + kEKiBnEo4Fk6MYVHRYiZElan2gkq472SApWYcNi6LChHwldl7zwqbqRqZC5nUYayTifVBBxHhuVQ + WS7LawsrR5mzRrouIz9blQvq1j1PQd/z1WgCk6P8f4asvKGeb6Y+DfZXvq3w9gpyZ/9C8DSl0fQv + mCfItnbirAvl9K5IoakHn0dVmGO/iiC94Ng73HONqgmO8j+DT88YKjSHG8/Jef+vn9zDtfqWomH+ + txCRKbHn/uXrk/VMnU/z1xe63tHi7vX389O4RlEUpSnFcRzfQK+wA3NKLbQa9o5cKLZ9iiCBIFyB + BkcZniFZtF/bVz8AAAD//wMAymrgECAFAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfdb837d91916f9-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 06:27:45 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '12002' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '87330' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 1.78s + x-request-id: + - 040a4416b064ecd9461b7457dd71db07 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key and potentially important information + to remember.\n\n\nYou will receive the current summary and the latest development. 
+ Combine them, adding relevant key information from the latest development in + 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\n{''role'': + ''system'', ''content'': ''This reminds you of these events from your past: + \\nAs a system, I was created and nothing new has happened. However, as per + your command, your computer has returned the contents of a file named \"instructions_1.txt\" + and the current task_id is 1111. Additionally, you have instructed your computer + to read the file \"instructions_2.txt\" using the read_file command.''}\n\"\"\"\n\nLatest + Development:\n\"\"\"\n[{''role'': ''you'', ''content'': ''{\"command\": {\"name\": + \"read_file\", \"args\": {\"filename\": \"instructions_2.txt\"}}}''}]\n\"\"\"\n"}], + "temperature": 0, "max_tokens": null}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '1087' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA0SPTW8TMRCG7/yK0Zyd1W6+iPYW4ECKAFVCXChaOfYkdrv2rDyzSdoo/x0tKvT6 + 6pln3veK0WOLLlh1aehn77fjsP149215/2H/ff3y9XyRftV8Oux2xy8/0SDvH8np60XlOA09aeSM + Bl0hq+Sxbdab+Waxrtcbg4k99djicdDZolrNdCx7ntWLukGDo9gjYXvFoXAatFN+oizYzpva4Jv7 + f75cGlRW27+Rq+XNoAscHQm2v66YSP5ZC/eELVqRKGqzTh05K+Wp/1bAgjyLUjKQWUPMR8h0hmAF + gh0GyuQr+MxnOlExsINgTwSFHMUTebDgOCWbPRwKJ3jmEZShkPWggeAQe4IHjFm0jG5aId280os+ + IIwy/ZqoCe/+oq+yCn4EAjeWQllBrTx10UMUEI19D03TNBXeDB5ijhK6QlY4Y4uiPKDBmD1dsK1v + v2/v/gAAAP//AwDW3lkK2gEAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfdb8959bc416f9-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 06:27:51 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '2478' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89749' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 167ms + x-request-id: + - 5acc9d310e317d6b025800ba63f0cfa6 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. 
append_to_file: Append
+ to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n2. delete_file:
+ Delete file, args: \"filename\": \"<filename>\"\n3. read_file: Read file, args:
+ \"filename\": \"<filename>\"\n4. search_files: Search Files, args: \"directory\":
+ \"<directory>\"\n5. write_to_file: Write to file, args: \"filename\": \"<filename>\",
+ \"text\": \"<text>\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"<key>\"\n7.
+ get_hyperlinks: Get text summary, args: \"url\": \"<url>\"\n8. get_text_summary:
+ Get text summary, args: \"url\": \"<url>\", \"question\": \"<question>\"\n9.
+ list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT
+ Agent, args: \"key\": \"<key>\", \"message\": \"<message>\"\n11. start_agent:
+ Start GPT Agent, args: \"name\": \"<name>\", \"task\": \"<short_task_desc>\",
+ \"prompt\": \"<prompt>\"\n12. Task Complete (Shutdown): \"task_complete\", args:
+ \"reason\": \"<reason>\"\n\nResources:\n1. Internet access for searches and
+ information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered
+ Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1.
+ Continuously review and analyze your actions to ensure you are performing to
+ the best of your abilities.\n2. Constructively self-criticize your big-picture
+ behavior constantly.\n3. Reflect on past decisions and strategies to refine
+ your approach.\n4. Every command has a cost, so be smart and efficient. Aim
+ to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou
+ should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\":
+ {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\":
+ \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\":
+ \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say
+ to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\":
+ {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response
+ can be parsed by Python json.loads"}, {"role": "system", "content": "The current
+ time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This
+ reminds you of these events from your past: \nAs a system, nothing new has happened.
+ However, I have received a command from you to read the file \"instructions_2.txt\"
+ using the read_file command. 
The current task_id is still 1111."}, {"role": + "system", "content": "Command read_file returned: Gqh8WYLH0cpnlBwPHjEvMN1UJ3A1IIPKoBAwOUKDtgudwfGf6Cw1gc7DdYqe4EFlERnhU9gclGr0ovsNbOVcZFapXJ5EzFFtyikiUHVpu1s8lVjNoNG0Wvjy08ymnB5zQLrTVA1jYInBuNaVWswjFDJmpQpebQuHaRqepCYlASNIPUAuKXrfzxE1CB51jjrZHBUxmz5F0PkgYTVUKfHf4VtyyOJMlvoKveuvb5inm1mJEbSRDfTcDzE11mfcix3byoTdJDG3VsfKPbbgJXRZUlwoEFqRso42UkIV7rYk6TXxypztqgoaWWLN6eOIE0Gvz2CUvIXKzvyKbZpObRubhTydzW69cRXg7yxIPn2iG00RvQ4Jb6NbVBDfO9XB50oxIRzrIrbAEPzitgGRSCaxcG4kBfPIamOZpF8Egf9iYdn8PBwVtmvtUChyKwcrnyDwcSCMC5f9hPuLWw1UHW3u3X8ee5gxtElyhTEoTlaqVC1V9R86EnWxq7IlwCEwWXjlq6XvjgYoPHteXiQQb9R4eH0c3bFq0TVEh30s5nxfHMIxBiWxilFT9sgvzE8nbqV2wnL7a4DHJKUrV630PniHShfK6Josl0izBCnjovZnemR7dxqSEDbeMlTst18IWiTcQPGRXmP1JUjTvefeTB6f7NVDe2odh0a2tvWYjMiEfvR9qH8YxV6uMffCIUdclfUmK9tSUzhrSyWp8kxsqBd7wwjskKmm0JqB6GBTJqZw52tDIMUpgTK4VVvsnqlLPiN8SRMrTV5xQHpX8y7xSRsVbPdMh5rImkPpgnDZsGvAi5Z52dM7To4Fjhs64K2K59Ye6752AjG8R9he60dLGKcPLtxjuK13zfFpLIUPQOu7SsAoqDHTZf3rTH4SmNIJulZidhpNPu2FbrEiDhfjJUgHMkFfycfcDcysYyujCVD95E5BZvv8oa9X14XqphoVLhgtNABSphgzPVR6sGyZv2AMGClrDtYON1DfRytTlpTS\nThis + task_id is 2222\nGqh8WYLH0cpnlBwPHjEvMN1UJ3A1IIPKoBAwOUKDtgudwfGf6Cw1gc7DdYqe4EFlERnhU9gclGr0ovsNbOVcZFapXJ5EzFFtyikiUHVpu1s8lVjNoNG0Wvjy08ymnB5zQLrTVA1jYInBuNaVWswjFDJmpQpebQuHaRqepCYlASNIPUAuKXrfzxE1CB51jjrZHBUxmz5F0PkgYTVUKfHf4VtyyOJMlvoKveuvb5inm1mJEbSRDfTcDzE11mfcix3byoTdJDG3VsfKPbbgJXRZUlwoEFqRso42UkIV7rYk6TXxypztqgoaWWLN6eOIE0Gvz2CUvIXKzvyKbZpObRubhTydzW69cRXg7yxIPn2iG00RvQ4Jb6NbVBDfO9XB50oxIRzrIrbAEPzitgGRSCaxcG4kBfPIamOZpF8Egf9iYdn8PBwVtmvtUChyKwcrnyDwcSCMC5f9hPuLWw1UHW3u3X8ee5gxtElyhTEoTlaqVC1V9R86EnWxq7IlwCEwWXjlq6XvjgYoPHteXiQQb9R4eH0c3bFq0TVEh30s5nxfHMIxBiWxilFT9sgvzE8nbqV2wnL7a4DHJKUrV630PniHShfK6Josl0izBCnjovZnemR7dxqSEDbeMlTst18IWiTcQPGRXmP1JUjTvefeTB6f7NVDe2odh0a2tvWYjMiEfvR9qH8YxV6uMffCIUdclfUmK9tSUzhrSyWp8kxsqBd7wwjskKmm0JqB6GBTJqZw52tDIMUpgTK4VVvsnqlLPiN8SRMrTV5xQHpX8y7xSRsVbPdMh5rImkPpgnDZsGvAi5Z52dM7To4Fjhs64K2K59Ye6752AjG8R9he60dLGKcPLtxjuK13zfFpLIUPQOu7SsAoqDHTZf3rTH4SmNIJulZidhpNPu2FbrEiDhfjJUgHMkFfycfcDcysYyujCVD95E5BZvv8oa9X14XqphoVLhgtNABSphgzPVR6sGyZv2AMGClrDtYON1DfRytTlpTS\nWrite + all the task_ids into the file output.txt. The file has not been created yet. 
+ After that, use the task_complete command.\n"}, {"role": "user", "content": + "Determine which next command to use, and respond using the format specified + above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I need to write all the task_ids into the file output.txt and then use the + task_complete command to indicate that the task is complete.\",\n \"reasoning\": + \"I need to write all the task_ids into the file output.txt and then use the + task_complete command to indicate that the task is complete.\",\n \"plan\": + \"- Use the write_to_file command to write all the task_ids into the file output.txt\\n- + Use the task_complete command to indicate that the task is complete\",\n \"criticism\": + \"I need to ensure that I have written all the task_ids into the file output.txt + before using the task_complete command.\",\n \"speak\": \"I will write + all the task_ids into the file output.txt and then use the task_complete command + to indicate that the task is complete.\"\n },\n \"command\": {\n \"name\": + \"write_to_file\",\n \"args\": {\n \"filename\": \"output.txt\",\n \"text\": + \"1111\\n2222\"\n }\n }\n}"}, {"role": "system", "content": "Command + write_to_file returned: File written to successfully."}, {"role": "user", "content": + "Determine which next command to use, and respond using the format specified + above:"}], "temperature": 0, "max_tokens": 1015}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '7053' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA8yST4/aMBDF7/0UozkbBIsWUG6r9kKltifUSk2FHGdIXJxxZI8LEsp3ryDJLkvV + 0/bQ6/x57zdPc0ZbYoam1mKa1k1WT6n98FHS9ktK3z69r2yxXRZV+lqFpg6o0Bc/yciwMTW+aR2J + 9YwKTSAtVGI2X64f1ovlbDVX2PiSHGZYtTJZTB8nkkLhJ7PFbI4KU9QVYXbGNvimlZ34A3HE7OFx + MVP4Iv7cmK9XCsWLdi+zq/mqU2hqbw1FzL6fsaE4CgfvCDPUMdoomuWC6VmILyeccwYAyFFqn6pa + Yo4ZDMWhQSe5FHP87I8gtRbYQK1/ERyDFSEG7RxITSA6Hna2jGBZ/LWyt47AJ2mTTOUkCjZgNEOK + 9LIwnEhgfNNoLkE8WC6t0UK93TgKNsI4Pc1R3VIG0tGz5er/R22d5p5yAtu3y9+pm2DFGhub3mID + THRVIo4p0NtSgYL2PhCkaLn6O/j9xbElfRh5jta5fxRr79Gp8YeH/T9emHVDvf0rvztKHar777/9 + rV7g6RLVGNM1xIKIn5MUf5PWCHiFHFhz7rBTuLdsY73rlTHDKL5FhZZLOmE26350734DAAD//wMA + A50m55YEAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfdb8a62cc116f9-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 06:28:01 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '10004' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '87360' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 1.76s + x-request-id: + - 357739c33b65d5f85a7b5ef081939e92 status: code: 200 message: OK diff --git a/tests/integration/challenges/memory/test_memory_challenge_a.py b/tests/integration/challenges/memory/test_memory_challenge_a.py index fe18773d..895fc8fe 100644 --- a/tests/integration/challenges/memory/test_memory_challenge_a.py +++ 
b/tests/integration/challenges/memory/test_memory_challenge_a.py
@@ -6,8 +6,8 @@ from tests.integration.agent_utils import run_interaction_loop
 from tests.integration.challenges.utils import get_level_to_run
 from tests.utils import requires_api_key
 
-LEVEL_CURRENTLY_BEATEN = 3
-MAX_LEVEL = 5
+LEVEL_CURRENTLY_BEATEN = 3  # the real level beaten is 30 (and maybe more), but we can't record it: the cassette is too big
+MAX_LEVEL = 3
 
 
 @pytest.mark.vcr
@@ -30,7 +30,7 @@ def test_memory_challenge_a(
     create_instructions_files(memory_management_agent, num_files, task_id)
 
     try:
-        run_interaction_loop(memory_management_agent, 40)
+        run_interaction_loop(memory_management_agent, 180)
     # catch system exit exceptions
     except SystemExit:
         file_path = str(memory_management_agent.workspace.get_path("output.txt"))
diff --git a/tests/integration/goal_oriented/cassettes/test_browse_website/test_browse_website.yaml b/tests/integration/goal_oriented/cassettes/test_browse_website/test_browse_website.yaml
index 63da74c4..2584bb25 100644
--- a/tests/integration/goal_oriented/cassettes/test_browse_website/test_browse_website.yaml
+++ b/tests/integration/goal_oriented/cassettes/test_browse_website/test_browse_website.yaml
@@ -45,9 +45,8 @@ interactions:
    \"command name\",\n    \"args\": {\n      \"arg name\": \"value\"\n    }\n  }\n}
    \nEnsure the response can be parsed by Python json.loads"}, {"role": "system",
    "content": "The current time and date is Tue Jan 01 00:00:00 2000"}, {"role":
-    "system", "content": "This reminds you of these events from your past:\n\n\n"},
-    {"role": "user", "content": "Determine which next command to use, and respond
-    using the format specified above:"}], "temperature": 0, "max_tokens": 3135}'
+    "user", "content": "Determine which next command to use, and respond using the
+    format specified above:"}], "temperature": 0, "max_tokens": 2650}'
   headers:
     Accept:
     - '*/*'
@@ -56,7 +55,7 @@ interactions:
     Connection:
     - keep-alive
     Content-Length:
-    - '3866'
+    - '3777'
     Content-Type:
     - application/json
   method: POST
JLS0rzM737UzOzQpxqhzJbqsisH5682n7c33z2dfp+l2oq4y++7th5s3X870BX3ECHn5i7R0E0PN + ZVWQGLYYoXakhFKMJy/n0/npdDKbRFhySgXGmFUymA3PBlK7JQ/Gs/EEI6y9ygjjHVaOy0oWwmuy + HuP56XmE99h39enpOEJhUcVdaTKen+8j1DkbTR7jHzssyfe4jgvCGJX3xouyElSyFbLBwS6xAAAJ + Ss51lotPMIau2DVoK6GY4AVsTFFA7QkkJ1g63nhabGjpjRBoLktlUxCG38YbgVykikejJfPaD4W9 + dqqikNZIK1EFZzWNSkqNqODPL2azkbEpbYe5lAUEqJUJeDlB5Ywm4FXLy7weJhg9FOlIebbGZq3S + y5xAlF+Do+vaOPJQ0r0wBb3mp0lUSwGXz5s0vpVCXoCroB9W7EDy0FD+kb6qULaVNoBvf5lfeNM1 + k8QO4P2zaTTtKxdQ7vvCoGBlCgKrSkrh5JBvKFs5aQZ7PUH3olu4Azl3tf7VkTntjBhtfNmviSVq + Bsn62oUxJXABqgSlde2UUHHTxG5s9rSh5mM2AdZmEKLgpqXZOdLSuDpO2Fek1v+0pw9y/vPWtZT7 + qD+bDu3R1YTMWzWH9EeqlcuOL65t1K5ox//rig7YWuDrmnx42aJfha/plvmR3Ve93cZy5zyxe9xH + uDLW+HzR3h3G6IUrjLChxni8/7l/cQsAAP//AwDcu28dVgUAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7be499e66c20ced1-SJC + - 7bfe4c98990acee1-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -88,7 +88,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 27 Apr 2023 05:18:11 GMT + - Sun, 30 Apr 2023 08:09:04 GMT Server: - cloudflare access-control-allow-origin: @@ -100,19 +100,25 @@ interactions: openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '23148' + - '13620' openai-version: - '2020-10-01' strict-transport-security: - max-age=15724800; includeSubDomains x-ratelimit-limit-requests: - '3500' + x-ratelimit-limit-tokens: + - '90000' x-ratelimit-remaining-requests: - '3499' + x-ratelimit-remaining-tokens: + - '86493' x-ratelimit-reset-requests: - 17ms + x-ratelimit-reset-tokens: + - 2.338s x-request-id: - - 0c30c154a2ed5a2a8a31bff33d3448c3 + - 6ab74012054dda255680e3e4da7d724b status: code: 200 message: OK @@ -163,16 +169,16 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA0SOwWrCQBRF9/2KcNeTkKgxZvYFKbiwFbqoRSaTpxlN5qWZZ2mRfE3/pF9WhNZu - D9xzzwWuhoZtjNiub+Oi5IdlcTiv5X29ahtqnu6Xb4+b4+eseF5CgasjWfldJJa7viVx7KFgBzJC - NXQ2X0zyYjIvS4WOa2qhceglniZ5LOeh4jidphkUzsEcCPqCfuCul53wiXyAni1yhX/3jWdzBWEx - 7Y3kaTYq2IadpQD9ckFH4c86cEvQMCG4IMbLtZG9kL/2bxqK+sFZingfSUNRxXyKtlhR7cRcb8MW - kQvR99ckTxZlglFh77wLzW4gE9hDIwj3UHC+pg/odHwd734AAAD//wMAj2F8NVEBAAA= + H4sIAAAAAAAAA0SOwU7CQBRF935Fc9fTpgWKMDs3GhaoMe7EkOn0Qce28yadR0RJv8Y/8csMieL2 + JPfcc4KroWEbI7YPXXp98/54t1/lTw/3s+W6Ot5+hPZY56v2c77socDVG1n5XWSW+9CROPZQsAMZ + oRq6mC8mi9mkKHOFnmvqoLEPkk6zMpXDUHGaT/MCCodo9gR9Qhi4D7IVbslH6NmiVPh3X3gxVxAW + 011ImRejgm3YWYrQLyf0FP+sA3cEDROji2K8nBvZC/lz/3NDSRicpYR3iTSUVMxtssGaaifmfBs3 + SFxMvr8mZbZYZhgVds672GwHMpE9NKJwgILzNR2h8/F1vPoBAAD//wMA+xL6BVEBAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7be49aa96fbbced1-SJC + - 7bfe4d12a9a3cee1-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -182,7 +188,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 27 Apr 2023 05:18:22 GMT + - Sun, 30 Apr 2023 08:09:12 GMT Server: - cloudflare access-control-allow-origin: @@ -194,19 +200,25 @@ interactions: openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '3508' + - '1472' openai-version: - '2020-10-01' strict-transport-security: - max-age=15724800; includeSubDomains x-ratelimit-limit-requests: - '3500' + x-ratelimit-limit-tokens: + - '90000' x-ratelimit-remaining-requests: - '3499' + x-ratelimit-remaining-tokens: + - '89422' x-ratelimit-reset-requests: - 17ms + x-ratelimit-reset-tokens: + - 385ms x-request-id: - - 5d1aace4f6a0d761311cb9123e585128 + - f40352b97c4c4547530fa7786b4ede37 status: code: 200 message: OK @@ -232,16 +244,16 @@ interactions: response: body: string: !!binary | - 
H4sIAAAAAAAAA0SOwWrCQBRF9/2K4a4nIWpjdLbFTSGrumlrkXHyNKPJvCHzLILka/on/bIitHZ7 - 4J57rvANDFxrxfWxy6olP9d13b6+PMX12SbffV5WvJofZm9yhAbvjuTkd5E77mNH4jlAww1khRqY - yXwxLatpVTxq9NxQB4NDlGyWl5mchx1nxayYQOOc7IFgrogD91G2wicKCaZcavyr73gy1xAW291J - VY4armXvKMG8X9FT+nMO3BEMbEo+iQ1yK+QgFG7165ZUHLwjxXslLakd80ltUFPjxd5e0wbKJ/X9 - NS3zxTLHqLH3wad2O5BNHGCQhCM0fGjoAlOMH+PDDwAAAP//AwBNSnfkTwEAAA== + H4sIAAAAAAAAA0SOwUrDQBRF935FuOtJSFLTprNTitCF4MKNWpHp5LUZm8wbMq9UW/I1/olfJgWt + 2wP33HOCa6BhWyO2D106uzk8LBeb5/5zcdj1rfdheqzvllt6us2PUOD1O1n5XWSW+9CROPZQsAMZ + oQa6mNZlfV0WVanQc0MdNLZB0klWpbIf1pzmk7yAwj6aLUGfEAbug7wJ78hH6Gqu8K++4GKqICym + u5BZNSrYlp2lCP1yQk/xzzlwR9AwMbooxsu5kL2QP9c/tpSEwVlKeJNIS8maeZescE+NE3N+jSsk + LibfX2WV1fMMo8LGeRfbt4FMZA+NKByg4HxDH9D5+Dpe/QAAAP//AwDvavKuTwEAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7be49aca5c1cced1-SJC + - 7bfe4d1cfbdbcee1-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -251,7 +263,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 27 Apr 2023 05:18:27 GMT + - Sun, 30 Apr 2023 08:09:13 GMT Server: - cloudflare access-control-allow-origin: @@ -263,43 +275,37 @@ interactions: openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '2637' + - '1372' openai-version: - '2020-10-01' strict-transport-security: - max-age=15724800; includeSubDomains x-ratelimit-limit-requests: - '3500' + x-ratelimit-limit-tokens: + - '90000' x-ratelimit-remaining-requests: - '3499' + x-ratelimit-remaining-tokens: + - '89929' x-ratelimit-reset-requests: - 17ms + x-ratelimit-reset-tokens: + - 47ms x-request-id: - - 1b0d10e2a3a5eb7feff786645a7a4856 + - 80012336eac7da800f35e4e1dc53be35 status: code: 200 message: OK - request: - body: '{"input": ["Assistant Reply: { \"thoughts\": { \"text\": \"I - will use the browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html - and find the price of the book.\", \"reasoning\": \"The task requires - me to find the price of the book, and the browse_website command is the best - option to achieve this.\", \"plan\": \"- Use the browse_website command - to visit http://books.toscrape.com/catalogue/meditations_33/index.html\\n- Find - the price of the book\\n- Write the price to a file named browse_website.txt\", \"criticism\": - \"I need to ensure that I am using the correct URL and question to find the - price of the book.\", \"speak\": \"I will use the browse_website command - to visit the website and find the price of the book.\" }, \"command\": - { \"name\": \"browse_website\", \"args\": { \"url\": - \"http://books.toscrape.com/catalogue/meditations_33/index.html\", \"question\": - \"What is the price of the book?\" } } } Result: Command browse_website - returned: (''Answer gathered from website: The price of the book \"Meditations\" - is \u00a325.89. 
\\n \\n Links: [\\''Books to Scrape (http://books.toscrape.com/index.html)\\'', - \\''Home (http://books.toscrape.com/index.html)\\'', \\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\'', - \\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\'', - \\'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\'']'', - ) Human - Feedback: "], "model": "text-embedding-ada-002", "encoding_format": "base64"}' + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key and potentially important information + to remember.\n\n\nYou will receive the current summary and the latest development. + Combine them, adding relevant key information from the latest development in + 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\nI + was created.\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}], + "temperature": 0, "max_tokens": null}' headers: Accept: - '*/*' @@ -308,448 +314,7 @@ interactions: Connection: - keep-alive Content-Length: - - '1758' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/embeddings - response: - body: - string: !!binary | - H4sIAAAAAAAAA1SaS8+7Opfl5++nODpT+lWAEGzOjHu4xSYhIdBqtSAQEkhCuNiAS/XdS+TfqlZN - nkFA4gFvr71+a/s//vXXX3+3eV3exr//+evv13MY//5f629FNmZ///PX//7XX3/99dd//P7+jzvL - d14WxfNT/W7/XXx+inL++5+/+P/+5f/f9M9ff2tSLJKtsZsNVkekgmHwGGgwdXw/hFMDYf+wM4q1 - yvFF/XiP4bzYNdXi4u4v/h3eoAd2PjVqTsvn/TipMBy5DMFI30TLNqoROIJ+TyinvHq2++SN3Elm - T3H/GgwCLqIH7EdypftGvPasVZIQLDF7UOMTVmBB5ocHwyc/EMFwr/1QBAqSny4+UyylNpjDpZBk - tWtd6nyhasxbQBLY6w9CdXKo+hE73wnehQRgjbaWP5eGrSuGxd+wj4e5npj71JUGNCkB2Xjv53ex - HYCY0gt27lT0l8WSHBjG7wc2ikyvR6c2z8oO+zW2v/Len9SDpv6eh81HOQF2VOsntKQXRXy0qxnh - duobajHVUPWavWgB7vcNXt0joqh6s57UtdNAKUQd9gz47qdq4k2lF95H9C6hB7bn5plAgi8CRiZ/ - Y6xp5go+HGEgojxH+USjvoVYMgRshp2Yj0GqD3D4pAd8BkUGWLsMbzjz8IXNdDn7o30MpN07Tk2s - fvmbP+f7LIGC672pX5u+ITi1GcNFQpi6YubXRB40CMcwgbTMpovPLqh8Q6uxJGzfuz4fpwV68Hi2 - 94jnRS8faFR3ANvBDqPaW6LvySwyqEXihuysue2X0/F9g5nhvSjqG9qzljMJPAidh+DU8TWztEWC - 9HGNiOAggwl72WxkVOgGdQr/ms9d50og7O2EWmF/Ykv7yp+Si8se+3bD2KJ8lDd8S+oOJ9Lpnk9q - WjRQDm461UjQ+JO0ac9QlEIN3xpgG6R3sxs84jmm1hLtmQD2UifvAjNDU/Ud+tdH7kQQ1uBNNse3 - 7k+kq2UY84DiYGfObBI+bgNZzOloeTdCPUzzpoWUu4f0sPmOxvy2DVWRU7bHhtUr/WxyIYRKcI0w - Zhs3Esrtiwdcyu+ov8d6zzfL6a1c9KdJiPf55iy89tOuvckJ1kq+qwfmdiqkXBGi3X061UstCyeI - R7aj+vtRswnTqdrFzdBia0SA9e071BW7CVO0OeC4b4HNOzBtkoU6fX5ii3tQVEj1T0VYfLPzxfpq - 3Yb/VDONZaL1Uxxen0Aqzh+M8txgvN61bxlH8Ra7Ar0BEihnVUkfzwkpTJki4omQAKkZHviwGThA - rjDoZLGSHCI9wzqfh5dSgbcb2dhc62UBF9GBhS5SevCvG2O5Kb0HFfd+pObnjI02OtcylN23imYS - mIbIgJnIaiNZZMecwWB754R2S72744sUJP5kG0CEsFkibAtfoaaXVBxgXUGRnuj30S8zkzoICn0m - QqSc/EW39AUkIDfw4Sw69bwTdh58fJIEG9/tXHf7k8DDdB63FLdRAHj3ljXw8WjPWCU7xxDvo46U - 7eO2RdzOhTUZ8W4C5WdA1M/GTT16pfGEk75o1DJsi/H6nZfg+bGPkahclrwrTe8Jf98radyoZmHt - y1BzXwaR9so9p3cCPFBGaYnTY2Hlk3C7yHAu3j12h2aMlg05y5CdbZXasqr5AieZplJ8zm/s3Acz - n6OH+lSunXimQX832XIFSQC80Mixfhm/OVE94w0LbvdGkv3gItI/9g4oZ+hR6/5FjMU950HV9k7U - PRaviFLqhRBljkaN9+tkDME1naAZpz517jQ25pOkILDqD2E198g7JXRkIFTjh7z8qjZI1y8iOKf0 - Rg/tMWILjVMTXJqripYitY1ZjsIJPHT5gVVzKwAqW7QFx7O5x9HhmkWUOUUFSK/eaYnpxx+E0D3D - x+d1x+54GtmIz+IZfEPPoAcXlAZzXrsJTOHeJnNpd/7iTEUCW70rqM+kVz9LDXGAsDgXrJUuWPWy - aYDEcTeK57zrxzG7JLAH8YXuu69pCKU4ZwDbaEe2z1PSM3K6O2DffQLqpWACixKqMiw7dkWndT8y - 
uJFj+JqJTfivN9YTjqwMxmEqIfG5ufXL5RsuELr7L5Hixq75hw7ecF1vGl0PUk1vm7P96ycYb3rb - Z+fJe4pd9OYxfhco3z6/cgz8olZRuOoVq66yDJ/hB2Ejb0xjar1dBrMiqfBR/kbGdPimAVz7PbYa - uzSY704DbLnxTD1iwnoGzmjK5Ycgwl80jT35pFFh4TAZbRvxWk8A5Nwu66oSOxX/zNkhDFVl9Qfr - 9z+AQZ0KEWy5N08tg39HbIk0BOdu+FI9OWfGeDIqqEyhZdPk8XyCcU+aEjZN+kGMl+79FL5MEZAl - SZEgtKk/P3fmGX5Dx/jVazSp04X/7Y8/+3GBnMCDttjfcSAWD4M9RSSD3/5VI+HI5mZTd/LekhHW - eX7Ol/2+GKD7sB+ITxfeYJfD04MvADWcrnpK1v0kj9GJw/twKurFv/OlQqLuTbqO9tEsi2MDX0V6 - wQ4ncgaN798GutL3gQ8p987nIa042IC5IkJqTcbXjA+DfIshR/VtMhmLbX9DKAR6izGnvOpBCLUY - iAQH2Ch9ng3HQ+IADpQEOxIe++Xwjia4+hVqyH3gixE5viEHeoFa8vjyJ54TT3CO4gkj7+NGAxs4 - ERA361DH3LZervuGh5mtf6irfjswSb5aKf3DzKhGZLNeriBEypGrXSQs8a4mSMoR6Bu/xm65qXKa - YcGEw43zqOmEO3/wZfKGIg5PZFcEjM1oWxJg4WSHXeS/wMQGUfzpK2Le9O3JHmgmLG0zxfftsWVD - 7Z5kOMy9S00TfNjiOzyCVzd9/97HmKnr6OAZWDHVCXgYvYdHDoTgeCebnZf6y/r+4IrdDqMd7+ed - XTyeSh6/YuoXKGCE0SZUuo4bsYmMVz2N2hDIolQ4WCX+p/8mCV3gVTBDJN1p7L+LfIvg0MQmPWgl - NqaNk7V/9AggbjbmIrNPMIlEgwjGNvGpm6Ut5BpboObh8jKWCy/JwHqoFD3Pm5ZNrw+RZNBkPZHF - vDWGHc+3sOWeB8SK7NnPnNGc4AW3GjY27As6YqYtNCVDpbYh8fVyfzxN5e4+POzh1ojmk+QscN6g - iiy10rC5Kx8T6AIkE8l+lBGloBT/1LOMZJ0JcndUgZdFMVFSzo6YX0kc0AuDp4eaN/vlIrzO8Ebi - LVlSPevZezsmsg7Yh9p+c8znAF4reI6WI9UMnTHyCvgFeha7073QnQz6PC02dKUyoyr9tP0SkbSB - DyBjIty/BCyf6GZDRbjoZCmlhM34zMVyF99E6k1tXn+lejfI15HEGNc7nLPPxdIVJaIHtAQvv1/9 - WwONjXakh4wm/cBzmgi//Faj5nLqo7k47IgsCPoVcTvbY/QhZCV0ojfAmkfCmlXNvvzpP0b7y5ct - 3l3VoSpYAXaS7OUv41vWIZG8gu4/16YecRXf4Lc4qWiqhjLvkpeZwS03A/rr/2xKPB2eiRoQaTzO - UXd/lhW0M7Ug7ReqPsuGKlDyqDoguFtO+TJ/TShLYdDRfOWLRZbiCkLDKclmt/9E3aN1M/BJO0I9 - V4gBsYvvE3Ax3GJTHpx+3mlpDOvgxtHzjmuNeUSqqMzVV6Bqe/lGX7JVn8oh3VQICMT0R9ucYqjh - 654Grkuj4bnNEwC47UitVX/aC+5tUGRBi4+RSCJ20T0VRmPXrP4NAiYtbIAr/9BDcFd8ZmmyDLsO - jgiqQxUN58W/Qab3J6xmDPls8I0E7uK6QJz8UuqRjSz++Wtsy+rDH1f/AWX88Omvv0x4/wqhMi4a - 3jsRqtfvheDncaA42SubqCuWzQ2cQm9D3Vuu9bN9e+lQq6BCJm7z9FkmOWcZgKmhZ8MB/hg9eKjU - QcmhqbTGfki8Zyi/wijBNmwqf9llXgVfXR1hpN37ntyU2lPmqhdQE0GhnzAIO8g9UpmakV/nA7fd - v+Wye4dYqzrXWL468EDxeYx4/zxJPdHa6xumXDdSX+0NNkcPnoNr/yPLixn51MtJAg9n9ULxXMJo - yorYASuvIHF+G0C0hBLJa/2SjSns8sb9LIOy8gE2kX2NtoH3cpSxa7fUTM2onhqOOrCwyEwESls2 - fPmxgkJc9LjYDBxr0SnhoDJuHKrOxd5Y6tp5wyCmIsVn72Esj1YQAd6gD5J3ugSWml48QIXWoJdI - ORnbpXzEihJcIurZ1asm/Vl+w4xXHtSe3wYTpgU6MGkMDxt0xGA2qjkDcfdlaD6caM0CLnzCGqD5 - x6v1/AyDFv78pc17OuMxPJjQCqotVbfx1p/Pm/MTOsAH6PXdefX2cdgjyOFGwYd3fomWlwBLqGNC - qbHyx1R82zOAtlph175//SVQzjpYr2MfNM+ITJtkgWK0eEhpke4PgDIebkJ/RJvgIRp072TBj1ex - /nX4fpJPt1D+di+LjEW0rRc8AFNGhWpQW56jaBnzVIJxdxtonnUjm+unaQN+XkZsW72WCzv9sICn - YFnUZmzrTx9iI6hz5ZPqX0sHgoKDDJ7rTYI1+Rv5S4G7Fl70ykRL5Hb5OLxcE74+3yNWxVfDyKqP - 4MRvFbTbczljLFI6UKdviG3lpPbbZka2/ON/6XTIjZmv9y00lxD++KWeVv6CR/zG2OK3NmC1gzJI - 8TOm2uGE++HA72L4403z5zc7a3orsQUF6kun2mCfy0GFL8BpKJyrBExq454giZ8t1e6vGxumV2/D - 3Ujwz8/7c5voN+VP3mO9FzD9/CnXmAJ145fh89UEbXgkZ526qcxHs1xOkqL0lk3Knd2BQUFYguXM - eRTrxyyf6oESWDVXAaNtOOQTplIF136HYHECPlnrVTE+34TuazU1JgXtZTAL/oDm7ZXV8+bh82Dv - KFdqfKJTPX3ZzgTN1X5ilznDqsdt8MufqIoX1WfP8oQg93E5bDHNiSiVuwyqrnpGAjKfPvvx4bOo - TJqrvQEYjJcWfqTsgZR2w0ezuqAFrH6PDLWzi5ZhOTmwThtI905E+mWxJkfxXcMlLceG6Cse5RAa - oZnQUyTf6pGPgQ3ObnXBTrPlfRYsUgJ/+YVW2p6xnMJFV1Z+Q69qHti6PjrgZ4UnghRIftsoMpFl - PUXU5TmvH1jVneDXkvb0TrZ9P9MKDmDth9Q6xI+e/PoxaD4LdkLS5tMDRQMQ9csNk+L4zNf1LMFZ - v9YYpZsnm1EvNtBNAcE+5zyiX54BjE+fENE5LT59ZZYov3QY0fATFqufcQKIXRNgK5LVfNK0SlYm - XXGoxeli/uMJ5SO9Woy/2phP19HrIE03KUZnOQZsPpQxKDAcqAky11jMqxeA97goNNi0Yc0c5V0C - FO4PiGST4M+PTDrLXuEmSOHAUE+mcr79yS+sSK7y5bNf3bKBrtQRU+rP0aXKlCQeLGrw+0M+9XQg - kOnfE9q00cBYdV3kX16Cf36E3z+iRCkMQ6QOkr5gPmLXBudKwvhwOrn1r97BvAkqqm3PGlsu6tEG - 
VvDcYsvOr8Y0G94AkbU9UvOiaWCycA6BaSNzre8gJ4Z81eXqqk5I+iSasdgnN4N7e3+h/juF0Zxq - 9AlaPGU4WXliWfkYcnqh4n2SOvXiJC0BQvisyOZFD0CsNm4LuUfAMNqMedQ5SUX+7F9VbsV+eZcQ - QVPSVKpuo1e0jB9ygyvPYW0zT2zRboaunPEpJed7u+Sf/fk9wbETOPrjmfnMVTZgKW/SDLd1Ph31 - 4QY7N5b++K1h4+5NuDfUAIngHrH57KknmZLQ/fW/vG03KYKDxRKqR9sOTLdrKwOl39toKp17zhT+ - FMDdgvY4ELTK72fh8Ybp5w5+/i6fb9PJ/PEEdewyqOmBDSJQu21INqb58ae1/sHdrT0i1wbqx93h - JUOx94w1L6L+wHPcCegSKcliGGG08snwh6+c8Tjn0zRvOrltTieM5mID2NIpElC7zl3fvzSedDye - oaHDjEaR9ImGWOom+OM3w5T7nF2dbwwvt41Nal44gO0l5Qa5waRFwlB868lhmQi3TbyjPx7eJXV+ - g2LvGDi1nxn47Vd4snYGth+VyxZ5cCFY1wPbzV7uZyZjWVY3jrrWxy2f6jPQf/VE9eTYRuQ+6gHk - UnFHuDyv2WL6TxOyzPBxUGA3nyTfqeBZOjlIXvlhebRuAl78jLG+9iOyiSsECLFaGiRekS9NoJ6V - lYfoTw8mfnkMCpWeFv75zXHzMER5zReoHpegXlJ9UKGHxQhr8H6s2XC9Q6AFhUVzwnZ1y21xA3lL - hmjHubJBgpnJEG5UhI1SNGrmGmkGei4v6eHYfI1lvKEz+OBs+dO/J9F8oF9eR6azQlkTmzqnqIFV - IJ6SIF++Kcvktb+RZc3z5vh4U2EUZV8CdoYQPaPdTOAxhi5VvU6PxIt2qH58T91GmvpxC0gGXvPu - gLXLEIBp/X8BsIQNNsutaghSPZPf+mLt/oJsiKXnBMzwpuJffshemcXDoNffaHsKynoqtIsNV/6j - qruJ+nlz6QdIRumBf3zIwDNbtt8iVKl6ODK/PR5CD56c2aJ6rTRgdgVjAtfoluOLchbY3NKN99NL - widxZKz6TWAtlCO1r9GdLTVph9/74r0SE7D6dQleo/5Jdjy3isfdiKGVtgJ20t2V/fI/kDnSlx7I - fjHIffQQNKOHSw9zOvf8Ok+B0bjtEbCYVk/JxjPhEGpnipJAzJdi2d7gyitIMNxt3720W/fLw9Fx - mgx/PL6VBRTOLKOt/xrBn/XMO9kmvP9M8t88Rc5s9fPLc/3pELxt8OwEF8E5EYzh+owhpM3h8su/ - a5Zm1aTc7ZtGDcTyfKgvtIRN5vZY82+zv+aDC3xw05Hu50+Vk5XnwOpvfnmsL+jbuYOy26gIos2h - Zy899eDOPsc05DXb+OM31c+yR1vcbvrWcGAHLtbrgD1i3mom6FUIn12r07MT7gwW7Tch+ERSR2Ze - 1/ut+yYIDldkY/uXj3/i9Cmv8wu6zpcMPrcvElzzRuykjtUvu/fVhh9QdOSx6u/a/85gU5g1vqXK - F4yvtk9gTUxGuP3FBaMxWhxoHnFJD4m/A6TyHiK4VnNF8ems19P2qSB4iKUrmYJ66UehlZ8wDnMJ - VXr4MtgUPCT4DlyHFs+krpvxEJzhfmMZePVL/bt4P2Vo2EaNJuLv+yly/UUui/JEtVTjfLaIpQ7V - qLiiRzVmYAvjpQPzh/9Qg1OVfKKOX0EiFE8avLOEzY98WGAkHF5EgHVdszUfUK5Rma/zjW/OmjTN - IBcQQC1QbiO6y9ntlz9SVXs2NYPXjwnDoB5Qvz2IYOisqVHsq5P/5gk5s/XPGbop+ZAtqKaIJYrm - 7XpwvtBs5Y8p3XCVLLjOm6Zo945mqZ4H+JGSB/aI1oHvj4/X+QHVqlFmv34Eo075Uo+Trv6qZzqU - ooghcShtoxaQmil2c6+wIfeD8af+1Ua28J88P4/eHDRmcqDuWTPzaUiaEhaP5okPker59Oevf/3T - 4rdvNucRgfLiljeyRPKtZ3ewH+BmZk/Cj4dNPnQny4OXz0Gn2pkb8qlRFgLNsN9TVHunaDlu3hAe - wOeC9XL/7hdJucbwOVIfTZvx2f/JF3LB09fnd37vZsdOSfT8QsRrtGHspR8d5ee39mu9z/AdtHCJ - hoR8r5FRf7qdIUP1M+0pystL3XoFViHfJYwiTD/GACjgZUW46qgO3q3B4JWa8JAuKeK0u98TPhl0 - +DROLj1M/dFnD1VuQZVZuzVfMdf8RFfhDb9MQoOyiX7zRtDcdxQHtfXpFyLNNxh1yxMNAq16lklq - rKz9FsdD+fan9flg5QlcujDrJ3+3OIrcGAa2uo9vCM/Q7KCOB0q9Znfwl2qjtXDIbIf6r9bzxcr7 - ivAZZntqWyTyF7g8TnDVc1Jf1Fc+l/edo3TEELF3Zswfb80Ogl5ojvS44Atb9d2EXqjl2BfvYr7y - Qad4V3TAhrHn+kW5XW6QfXyX4tonYOKX7yCrrn6mSJ19g1/1Bf72MyZyEU0zevLgN7+Cv7w67kUP - xp+mw+t8M1rn6SWwueeZukGS5sJ+lHQYADxiNA2HnmHZ9GDuFsmfvObP/G+b2Qph94/ApuMhdKD4 - sDBRIgOwWUqRDk9dJlOXs51oTJLPBFY/SsOv4bLpqE268vfvVMB//uuvv/7P74TBuy3K13owYCzn - 8d//fVTg31mR/ZvnxX9T8c9JBDJkVfn3P//vEMLf3759f8f/O7ZN+Rn+/ucvSZD+nDf4e2zH7PU/ - Lvxrfd5//uu/AAAA//8DAP7X1ErlIAAA - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7be49adbe81cced1-SJC - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Thu, 27 Apr 2023 05:18:27 GMT - Server: - - cloudflare - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '65' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3000' - x-ratelimit-remaining-requests: - - '2999' - x-ratelimit-reset-requests: - - 20ms - x-request-id: - - 
42dff468686feebf028abb07e86d8a68 - status: - code: 200 - message: OK -- request: - body: '{"input": ["[{''role'': ''user'', ''content'': ''Determine which next command - to use, and respond using the format specified above:''}, {''role'': ''assistant'', - ''content'': ''{\\n \"thoughts\": {\\n \"text\": \"I will use the - browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html - and find the price of the book.\",\\n \"reasoning\": \"The task requires - me to find the price of the book, and the browse_website command is the best - option to achieve this.\",\\n \"plan\": \"- Use the browse_website command - to visit http://books.toscrape.com/catalogue/meditations_33/index.html\\\\n- - Find the price of the book\\\\n- Write the price to a file named browse_website.txt\",\\n \"criticism\": - \"I need to ensure that I am using the correct URL and question to find the - price of the book.\",\\n \"speak\": \"I will use the browse_website command - to visit the website and find the price of the book.\"\\n },\\n \"command\": - {\\n \"name\": \"browse_website\",\\n \"args\": {\\n \"url\": - \"http://books.toscrape.com/catalogue/meditations_33/index.html\",\\n \"question\": - \"What is the price of the book?\"\\n }\\n }\\n}''}, {''role'': ''system'', - ''content'': ''Command browse_website returned: (\\''Answer gathered from website: - The price of the book \"Meditations\" is \u00a325.89. \\\\n \\\\n Links: [\\\\\\''Books - to Scrape (http://books.toscrape.com/index.html)\\\\\\'', \\\\\\''Home (http://books.toscrape.com/index.html)\\\\\\'', - \\\\\\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\\\\\'', - \\\\\\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\\\\\'', - \\\\\\'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\\\\\'']\\'', - )''}]"], - "model": "text-embedding-ada-002", "encoding_format": "base64"}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '1982' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/embeddings - response: - body: - string: !!binary | - H4sIAAAAAAAAA1Sayw6zupat+/spllaXsxUggM3ucQt3MIFASKl0BCQhISFcbcClevej5N+qo+qk - AZaIwR7zG2P6v/7x119/d2Vzq+a///XX3+/nNP/9f77XrsVc/P2vv/7jH3/99ddf//X7/V8jb215 - u16fn/o3/Hfz+bne1r//9Rf7P1f+/6B//fV3wZ1HLE2MRdm3Nr7AxzQICv1PMVJLllgYjfSJNwDU - ca/ukAYvzGKR5F19yi4NXAEeR8Egca3EIx4XZZO4Q5uGK74JzeKvJ146iUMTwjf/KtfqHuTQa0cm - lDq+KEmWdpNk+OGJhNPdSqb9PnlJZ4tmJLhY0Nt6zCggvA4E6WvsjWvcRTUs5dOdnPj7ZRx0R4hg - I5gcFncpO04fdYPwWu5b/GgXFUzylk7QAKkb7h1nAWtZUyxrhydC4RaXdO5ymslnpTWI3xNPX+ND - MMFwRi7SKk8b2d02LCDW+g0ZLfsuxz1DJwic7o0yUvqURPxNE9PjaUJ2e8QeFeVDAc6CqYWyl3HN - yi0nEwDHI1g8Sw99jZxakw4eqxBFNKZk8ahnQ52+VxToV0wH9tNG8g7DhCh7Z/C2Yk80YJTNKdzs - XKGriC4T2NPlQlCh+IAW0XWBRpmIyGiXsNk+ND9B9v0w0F2Je70fLkohL9HkI//MvJr5s88WuAv7 - D1Jo1JQb+8ERnKTUIfrsMvrWxmIt2ccpIAU7Mc3ksmIFBW3PEE+jL7A8DYOHpnZ5Ed/VcDI8uCGE - TwFh5MHw0KwK4Tf4wUqMUj8PRnqOIYbJClzi8nI9LqexPUHl/dkT57JO48KK0hNOLiNg9sQKCeW0 - KIcjrT1yvhqKvr8GDAscoa1CoJHQ49jNLcB3PPFujaGz5NoxwLO2Bpn36UGX+l3FcCghQPe1u42L - yaUKjEAqE8SxT7CeXkD4rR9UueZbJ68pZ8H9mT6RQ9q+WbuLqIF8r2Bk3EhRTt/vD9tbuyDV32MP - q4OrQcfxPWJ5nq9P0unKQnxJTWQNQkf7jS6TfBYMjRx1/aCPh7t2ksU4qTF/ZJ8llWHPAgbvV2RM - Y0S5TJ8q8Fu//S49jcvdbDY55F4JMtHWgmW8hiyY9ExFfrPE+njHz0n2rIeNHyG3jaPFLE/oRIOI - u86PkmW3PRepP0nPUPzMGsBnoA8ys7+luDPKCryz5tACccvW8AELoq/eFE0ysa+QmM1jpViwHu3u - 
ZZ8ikjivolnLh1jA3LpoyD26K6XVsXuCOa5t5J5V2ZtbAeUwkN83Eqympi/ztRQAcYozptQXywlw - WwY+B+4Ucnm4gq0s1Fg8PWCJCqI03jLcOFcajJji/jxKyWy7xIWxV91J4FQW2Hgz4yFQtwQz1YNr - lhlyGgxwdAsF+06bZbe+TRDHYoAyj+sT7OqGKaODi9HhZWT64sWvE/Tl3UYOt8NV74UgO0mVL0nh - ytSqR6+kzCSsShdiMNVeX56mHcLC9Lmwv7awmd3+3gF+ur2w7OSrtz3YqYa6gepw1AjWl48qMRA/ - MML0XX2SVjfNFrqCb4ecXxnlZr7MG/zqB1HuoUTXRmpP8P05BeHwsPVxXWgzyPppytDN3HfJ/JSL - DFJGPeD7uvj64D7foZRcdi3SxGHXTO1LMeCG4gaZ18FsqKFceHhwxTfxXglfrmz8iGTsbGPYKU/e - o4CJXNlnhABp5sWiM2ZPEWR2rYZMneX0qXlyJ5AUkoqF17lOlgQXmfRmHncSsgGj0/isMrIrhDbR - HsxeX6q+r0UF5y+Emt27oZPp1XBnPiTiBKBL6ImTbrA1gy6UPE8As+/qLdg+vkV813x77/tujWXL - zXOSS3bbrLthG/aGYX6QW8FdiaPHUQGHsiiRrvd5Sfcyo0nrbrJRvFvWZkbW1EHoREdyF+d+JBGf - KTA6vSLM5bE9LtOu16SvHpDQ4GZ9yZ02kuYiL4jFkEe5PA9aLVv9kYb9V3/IiV9sOHqHFVkXSxv3 - RXTd4LRVe6L5TuFt03Q24F2aBnI+7Th9AtklhH77OCDkHh+AKvWtFg8DKTHPZcSjQp3HoGgjE2la - uuqr5V9v0FmCE46pFCSb0XUbvNLVJYcVcuOWVaUrnR7HkrhBqtL91D4zOD2FilzP/gVsY/iKwaXN - d3jrwWnEOVQzqHL6BYUwjEu2cGgGv/UTOUjWx1ULSQbzvYaRtqes/rrY8gBfOz5EarYt+rzzjjWM - 9TZAjpxcyuVi7SGMThkXFoeIp9/12EGhrB7EjmoL0EcYQVksvC0EdUGa7ZmkJnysg4fMr54tM9id - YL3djJCiOxxpdtxp4ORqe6KLyNFpOzxrkAxVhSnnbcnsD89cLgOVkqMG9s3ECX4oebvogdBZksZB - OoAYnHfMCZn3a+ktZz+O4XnvfEL46XNKif56wbsED+HKN/o4d2e5A6kYI2QvgTFu/lOK4cWKnBC+ - P3JDn/4wACIqOtJGGYFFFaYX3NPtQgzVNxvWoXQD4iDo6MRUZ4+OlsiDcNoRpMzumqxviRbyZtk8 - uq9TmtCXtynyo1c/yJTPTbms8FJB84kjpPdp6K0z77YwHlKJoHGuEgwMH8L7xue4Tm8dXc1QX+A9 - 0Z7IYjvg4eS2mfC0PnnkP7a5wRcQTXCxUz2EFOuAFfLFhkKSseSQnc2SFfknA+96OBHzTF7liuPd - Iuns0w+/9aAh8b7eoLid1u//PyaLum8FGMEoR36hKB416qWTRSnQSchEQYODOozhei8rPDjBKdk+ - BA9AElmCVBeOlKg7pEB9BB3eueYK5rAXNxiEKEAa4flmPh74DD4caUX6Gp7AhnawktJjayEjFjN9 - fHmSAr56gkr3AUYiX20MTU0oiLp0arO1a8XA970ziKPy12Tl1ssGHCIL/74fX2EBeuMyEV9um3FL - mS6CS/3UkLleu5JeEnWTl91eDdM87prNtC4nGJeKhKpgf0vol1cBmx77EPyezz3WpwzU5BqKVzx4 - Mx+GDOQn5BCknYzkq+9PyVo7GO43zkz2nvhu4Xc9YHbo03JbRlrD734glu6e6aZZUJK6w1QS9UaN - L+jsczivl4a4+lXz6B2pL1kNjjsS5lcGbH1UtOB59FukGWWTbKwd2DCtx4k4ZKgTakLZht5ON5Gp - w2ezGPxhgJ/Qr4hTkqO+DMwtFpvdziKu5+V0q/bQhI8Hn4as8OD0RZFiQSoP3oOYnqjo63q81eK1 - OlXkFGguWFBsL1Dh9nH4Nq9euSLrNcAqjXVijjdU0rnhTzBMlytxmuVdTpcu22D1+uhECa6nhsiX - uwanUXFIANSXt16VB4SB+cqQ+WjmkgZ55sJJ23tYIkqjbylTR5CGHYtXF45gejduDtlZ8vHK3ydA - wr05yNmS9chFiZksz9fYwVR4I6I8ToG3Xh/2Ahwn9DB3uqiUXUtpgOyBhCG11gPdtklp5U4OL19e - kxKacQ4LFd6YkWbnNaVU0zMouvwWip7oA955lU/Y8pFIvOph02W4yS741Rft5nz0TuDmGAQc8yEm - vTfN9tufjmreSODdGm/96g308eVMAmm7llS/7l6/+k+89liAdWKRD1LHZ8j9za86vaHNh75J0Jf3 - /WTJ3ZER5dp9EHeSjh49J1IB6ngIv/qKxm2a7qZ0uWGeBP1hppS7oT+8jcGzVsfF2keFLA6STg6U - icAUEdDCaUMVQWvHNK/+0dswZ8Y3CaBAxq0UnwVoLVtHZVFmzZc/MpEczfq7vz3An+ShBUoW7fF6 - o0bJwZ5I4KFpd2SKiKNbk0aVvOw+LVKXiwQmJz5H0LsHEWaE1Pvy4VqANsO3sMvOibfMVRZKP/6T - jykeiabZrrQh3UZa6HHJXO1eIXw4wooyetdHrLq1AIO0B8TdVljSmJsLWPf7Gq8xvpTb8f7EIDR9 - jVRIFBuyahTKP97TOascp/pdRdJ2Lo5ISU53sGziuwJyXuXIe1YuXftzoMHDMJfEcKEwruymFZLQ - uANR7tdHQ81aUcDjcvX/8PtW7VkTLgrKUPAJn+UmN8AF9uV1JyjWSUmJhiWoHsoZi319bjbOcXng - 42HD4FkNdPpowgDqIcuQ3aYG2A8H/gbXj/XGu5mG449H5VvjlcjvqUrZTjE6+VYyNn5Zlt0sV6vK - QZGuEQlD9g2oKAc5PILAQHFmVPo65BqG+/JdEFtwNZ0ez1ILd3tDJdl3vdLgUvLA0uIRBVQhYMlH - dwM/fcoeH0Pnz8XEQ3bu1m89acvxeN7aP35McS5XSvtOf0FUfUqkj0ncTP56YiHNDs+wuV/ODY7h - mf/VA2Tn10OyKrSr4He+3/m7lPK17sPtE1p//NqaaXgB8u5aI/16dyinLToPDY8R0ZfvKQ0lIYOf - LHPJ4WVPYKpftxc4HYmCwtfTAVyl5ZFYhNnzW99f43u42IUUyljBjL2548xMNQOeq1YhaxrlZimG - 2AZubZZ4eQcU0CtOO4n3yyTE+ZWh9HB3MxAZW4HFfb7XqXcxF6ix5UB++3+TjViDS1eckH55+iU3 - p+MGujTiQ+loAo9a8sZDxmJ7vEiM5H39UC091s5DKW/oOvYMZ4EvO4uQsbUbnT6qBH/vGwXHyztZ - lcQZfnyP0EN2E3Kf3xrIreEaAiZSwAYf+w2WnL8hD4dC8kdfriefkEhiCp2uGmB+eQo61B5KpvDo - 
mXDpXBHz+xNtZm6pTPj1d0iLjp9mJSfgwi5o55CXE7+hKWvY8iXjX5hj1Hpccw3wQKeflbhpaI/L - fl+2cN8wDQkWhh0XXlQF2ThlGuZO6ej92U8Kf7JJxFkY0McsmfBAzRfR5G3W6WBCGw7NpSeqkAnl - ZPLFBL3bjsMQjI9yCbbiCdksNJAqnSD46oEPv/lLKLN7kMwzr7WyjPYsMa9D29BAlAbYXfMGy9Im - J1MauBKoBxMQW3lmOtUefQQ1tj+gMO5TMKV+4AM5v+XE1xyrIfp13/7hafQRpmQR990AX5FyCeUr - 6hvy6VcBfN7Cm6jt8gDT8+A+gXaIWCwiWW84LtoYeNsIH0K5bZrVdw5/eCpcDvXZ+/rZSH6jvCfm - /pQ0Y7FKEbRi40GydO712joFLTi44B2y0sfy+FhOa1hgN8cUHF/JAjkVy3f9ZuERx0TfRtQzUHQ/ - Z1yrnyjZYqYMwSfYqXh93e9gkxvqwvK1O5MfP27AMCCgWSYTQ2qWZPgQ3MFh8jOUUmYBS6f4HbCd - 1EC4fb29jbqr/dufSO+P53KdOUOAPz4+MvVD33KonuDOpCnmBtfWKasWWMz2ck0sNXL/7X+bPN6F - Tzui5doKqIBfvx3mOH6WG6rLE3x2YxJy2us1buZncWU1FRqUC+7TW1MzqiBfSQ5xrayjn/FTQDhr - akwcUL+aLRfOA3jzfYAOouyAL98PYNqCCjPKmiWb/4AvePeyCzlMB7dcz2+FkU+9MyO7s8VxgadB - gh4SInJQWsVb19QOYdSACD8JnzWLfr66QHubwy//SlgnOUEZr/FAVFNIvS2DIgMOSe0gNZZrD3+i - oIb1VhmkApXUbPtuVSB+3FhkndUlob09VL/3SUz5rJd8WTgRvODkiKxaXLwpnSYNZt1QY1BlAp1+ - +QJ4vBwSzyxIvn5gAwFHfPTlQ7od666AX7+HDlod0PWwRBL8+WcrsZ2Re8Cqg1yANeSAKgDrTeYk - SdSYmqjw9gbrnPU3GHnyFN5+eaq4rztQsc4NKV4p6lssX58/v0hs930Y9+0w1NBWZBjucwFQqt7f - EgDO8Cb+uLYlvYZsDTvOmJCO7lVD8YtV4CctCS67sqFk1u9PMGt6TNQbmcfucB5CGLTuSkyVjxoq - X84aNCVXDoFg5h798gAM2V7447eXdH7gX55CNC09evSYLzEUPMnEsxXfKJXA3YQBBz/h56xGyR+9 - ye+Wi9evP1hPLypBu3clzLR1WE7ykREAUJcE99rhpa8r+8jg6ZGUKMynw7g/x3ACX37Deyt+l+vP - v1S+IJG8iETQK7SuJDaFPclP6R4Q8fqKIbHJOVy8UvR+ecgf/TZC7l7ye7vw4VzYu5CQNdHpph4G - GLSFH3K1h0rxZFwN+OUDZHV8MLIR5gQ4FU2CgnO0NOQemCH45hnI6PgabFzfdxAIa/nNf12PF9qD - Bge/qDFcJ7v5+XFYXhcDGTxWyhUnXAUjX7DQrRgGb5mhrEk0lBBy79qxJKvKxGCKLy0Klfk+zvqx - K4B0FP2Qqd5+ua1Om8NiFmTkHs1S3yRzgXJ1UDoUUMMcx9Y6Y/Dj/1++s9bXyYa7vakif/ey9eU4 - oxdomUElaj7HydTd5wXiY3AJ15PwTGhnfkz41fNwtRmrXHON8tLdvTAkNMSGbtg/nODej4pwZCKF - ss93n4G7dEXI1uqiWX/5vPjYAyyXJ6tcd27CA+0Qs+FuhI23VI47QcoXIfFOnabTbLvy8JDEGPnc - Q2iWJAA1NIBFQv5dsfosBWMBKoMGyNpW1JCvH4aPHmrI+hpdeluOGvzyWbi97Frf2iPl4TfPQKqu - v70Jnp4SPCQRRo4FSbLqq/OEdwkPv/mXm9Yfc8nbFwjZ8VPXp2ArarA7x17IfNfftFvfBhyg5ofs - wXl5W73Pn5B/WdW3npNk0+ytle9uyeC2fLEJbT0nl77zCfuvP/7lb1AsLgFSn9PobdIBRFBh/IJY - z/1pXIb7xsIsUmWkyVugb6ert4H7aCOEQrdrJjv1Yhi+M4FohSqO/J1LXflb/9A3T/cox7Qd7LiT - RXz56YPlx/sldwuI8sh1MKJ89H/5Vvi5MQH46oEmNUpaYWlbq5J+eVf61gvk92T0iPI8ChAuS0R0 - EMzj179G0rpPFaSVO0bHPToJELHHF3El3kpaqN06GPmSFYKoksGEGaGAogZrFIyW4339HQ/V943B - 0qgfxoVhLyYckylF5XCxS37zHhtMRFphYQi98etfbj8/Qexv3rjko7bJTr60SNuxdkL5hy0Aam5P - /M7DIyW2rIVwqeMPUaznlmzPjfHhOIDPn+81BsXl+dMLZE2ETSZ130pQ0lqfuNIWNWzRVxW8+nWL - 0tojJZV29gYtt8hDuTFZujWnYgFSnGsoXWE6Lg9zYaF2qBHROI6MZJfjQmIQkxFV24XJdO0WSc5y - Z8D8mRgJZWvAgrvEHMJD4fPJVAyFC9ngXaOw7ht9fZdxLBXX/klOFTc1aycWNqS8S0Pp6B7Bkk4v - DQTpCLCchmkyt5cGQnqrBgwDwHnUuewKePB4BUtn9erxmgUF0Lb6jfz8/XaSny3weHAh6tB3zfr1 - i1B/Rev3+/T68jKOAvj6V6SGawm22UOdFLG5SfTjG4Oerv4E21uakkNYdHT51l+gV22H5VGPyyXx - ZRZWB60jdpX5+uCr6fJnP/JpvyVbdU5jGA+f9Msjg745B8GEcNmir5/vKK396wukvcJiobPFpjvO - qJUOzzf+5W06jdJ9CO1HrZM79668LYZ3/peHEke89eUc2mksb9mdR3YRXehCQc1LvJG0IVP3ur5x - 3fjNi6oXMVXpU2JGazBkbNNAqqr2lJCXnsOXVXLh7g4wXTTxGUOOZVoUPOyopGOXd/BlXThi2NI1 - mWp9v/3ya2IxozsuIWmfcHe3r99+mdKQ6YW/+axVhGwt6sny5VNo5+IdfefvUaYsckhnOiAfiZdx - e8WvG1Rl3gyZtsbJqkfDBEs5u+OtNY2Rd94XH4I+UL79gqzE0ZVkoJ9ShFSTvZVktET23/29uTuC - 5U9/jUwmpp/Co2t6iBcYS+yCF3pIEoLyMYRLpPsEpcKu3Ih71uA3b8KrtsNle7mwPhSfS0b0+xUn - y601BPhdv0jrd2NJ3/2LhU/RdTDJNbvchE4pZDl6meh4Yr2Rrvra/d43uoDaGNd6p3Q/fUTmeQya - vXxVMDznsY+p8yrGtQGTAqRCE4mLoV9y3/7Dn/6EP85knG5vFP76b+HXT5QT5JxJ9s5WgxyHFmDT - zfAF1PkcEcuK38mYr28f/PFTdpSUv/7uH3/8J39Eoqb88nNiknPm0V0VGVBw5ZEoH0/45oG7AnBc - qBL3m4etn2wPod3nKTIOatYs986OIehvJdKKqE42aNrZn/7nxdkfx/3c8BnkXtcFWZ30KueyBhhm - 
+11NlHeQgK1bn9pv//34jy6Dcpx+vBkytyaivGLPTzj39x2xBjb2aB4bJlQ59UKcuTvotPsEEvz7 - dyrgv//x11//+Tth0HbX2/t7MGC+rfM//+eowD+La/FPluX/Sfg/JxHwVNS3v//170MIf/dj1/bz - /5271+0z/f2vvwRZ+HPe4O+5m4v3/7rxj+/z/vsf/w8AAP//AwD4jVCt5SAAAA== - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7be49ae11e2dced1-SJC - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Thu, 27 Apr 2023 05:18:28 GMT - Server: - - cloudflare - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '64' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3000' - x-ratelimit-remaining-requests: - - '2999' - x-ratelimit-reset-requests: - - 20ms - x-request-id: - - 73e42f26050de7075473636113591bef - status: - code: 200 - message: OK -- request: - body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You - are browse_website-GPT, an AI designed to use the browse_website command to - visit http://books.toscrape.com/catalogue/meditations_33/index.html, answer - the question ''What is the price of the book?'' and write the price to a file - named \"browse_website.txt\", and use the task_complete command to complete - the task.\nYour decisions must always be made independently without seeking - user assistance. Play to your strengths as an LLM and pursue simple strategies - with no legal complications.\n\nGOALS:\n\n1. Use the browse_website command - to visit http://books.toscrape.com/catalogue/meditations_33/index.html and answer - the question ''What is the price of the book?''\n2. Write the price of the book - to a file named \"browse_website.txt\".\n3. Use the task_complete command to - complete the task.\n4. Do not use any other commands.\n\n\nConstraints:\n1. - ~4000 word limit for short term memory. Your short term memory is short, so - immediately save important information to files.\n2. If you are unsure how you - previously did something or want to recall past events, thinking about similar - events will help you remember.\n3. No user assistance\n4. Exclusively use the - commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: - Append to file, args: \"filename\": \"\", \"text\": \"\"\n2. - delete_file: Delete file, args: \"filename\": \"\"\n3. read_file: - Read file, args: \"filename\": \"\"\n4. search_files: Search Files, - args: \"directory\": \"\"\n5. write_to_file: Write to file, args: - \"filename\": \"\", \"text\": \"\"\n6. browse_website: Browse - Website, args: \"url\": \"\", \"question\": \"\"\n7. - delete_agent: Delete GPT Agent, args: \"key\": \"\"\n8. get_hyperlinks: - Get text summary, args: \"url\": \"\"\n9. get_text_summary: Get text summary, - args: \"url\": \"\", \"question\": \"\"\n10. list_agents: List - GPT Agents, args: () -> str\n11. message_agent: Message GPT Agent, args: \"key\": - \"\", \"message\": \"\"\n12. start_agent: Start GPT Agent, args: - \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n13. - Task Complete (Shutdown): \"task_complete\", args: \"reason\": \"\"\n\nResources:\n1. - Internet access for searches and information gathering.\n2. Long Term memory - management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File - output.\n\nPerformance Evaluation:\n1. 
Continuously review and analyze your - actions to ensure you are performing to the best of your abilities.\n2. Constructively - self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions - and strategies to refine your approach.\n4. Every command has a cost, so be - smart and efficient. Aim to complete tasks in the least number of steps.\n5. - Write all code to a file.\n\nYou should only respond in JSON format as described - below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": - \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- - long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": - \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": - \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} - \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", - "content": "The current time and date is Tue Jan 01 00:00:00 2000"}, {"role": - "system", "content": "This reminds you of these events from your past:\n[''Assistant - Reply: {\\n \"thoughts\": {\\n \"text\": \"I will use the browse_website - command to visit http://books.toscrape.com/catalogue/meditations_33/index.html - and find the price of the book.\",\\n \"reasoning\": \"The task requires - me to find the price of the book, and the browse_website command is the best - option to achieve this.\",\\n \"plan\": \"- Use the browse_website command - to visit http://books.toscrape.com/catalogue/meditations_33/index.html\\\\n- - Find the price of the book\\\\n- Write the price to a file named browse_website.txt\",\\n \"criticism\": - \"I need to ensure that I am using the correct URL and question to find the - price of the book.\",\\n \"speak\": \"I will use the browse_website command - to visit the website and find the price of the book.\"\\n },\\n \"command\": - {\\n \"name\": \"browse_website\",\\n \"args\": {\\n \"url\": - \"http://books.toscrape.com/catalogue/meditations_33/index.html\",\\n \"question\": - \"What is the price of the book?\"\\n }\\n }\\n} \\nResult: Command - browse_website returned: (\\''Answer gathered from website: The price of the - book \"Meditations\" is \u00a325.89. 
\\\\n \\\\n Links: [\\\\\\''Books to Scrape - (http://books.toscrape.com/index.html)\\\\\\'', \\\\\\''Home (http://books.toscrape.com/index.html)\\\\\\'', - \\\\\\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\\\\\'', - \\\\\\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\\\\\'', - \\\\\\'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\\\\\'']\\'', - ) - \\nHuman Feedback: '']\n\n"}, {"role": "user", "content": "Determine which - next command to use, and respond using the format specified above:"}, {"role": - "assistant", "content": "{\n \"thoughts\": {\n \"text\": \"I will - use the browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html - and find the price of the book.\",\n \"reasoning\": \"The task requires - me to find the price of the book, and the browse_website command is the best - option to achieve this.\",\n \"plan\": \"- Use the browse_website command - to visit http://books.toscrape.com/catalogue/meditations_33/index.html\\n- Find - the price of the book\\n- Write the price to a file named browse_website.txt\",\n \"criticism\": - \"I need to ensure that I am using the correct URL and question to find the - price of the book.\",\n \"speak\": \"I will use the browse_website command - to visit the website and find the price of the book.\"\n },\n \"command\": - {\n \"name\": \"browse_website\",\n \"args\": {\n \"url\": - \"http://books.toscrape.com/catalogue/meditations_33/index.html\",\n \"question\": - \"What is the price of the book?\"\n }\n }\n}"}, {"role": "system", - "content": "Command browse_website returned: (''Answer gathered from website: - The price of the book \"Meditations\" is \u00a325.89. 
\\n \\n Links: [\\''Books - to Scrape (http://books.toscrape.com/index.html)\\'', \\''Home (http://books.toscrape.com/index.html)\\'', - \\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\'', - \\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\'', - \\'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\'']'', - )"}, - {"role": "user", "content": "Determine which next command to use, and respond - using the format specified above:"}], "temperature": 0, "max_tokens": 2233}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '7471' + - '600' Content-Type: - application/json method: POST @@ -757,21 +322,16 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA7xTzW7bMAy+7ykEnpUgcZem8W0bsKEtemsPw1wEsszYWmzRE+kmQ5Cn2ZvsyQbZ - SbumG9BTb5Iofj/Upx24AlKwlRHbtPVovqArrPKr4madvP/SXv/8+vCx/fyhu7v+xBPQQPl3tHLo - GFtq2hrFkQcNNqARLCCdnl8ks3kyTxINDRVYQwplK6Oz8WwkXchpNDmbTEFDx6ZESHfQBmpaWQqt - 0TOk0/lspuEJ/LGQTKcahMTUT3cX5+d7DbYiZ5Eh/baDBvkIHKhGSMEwOxbjJcokL+ijhV3mlVIq - A6moKyvhDFJ1ODwUcCvxMINLtXF1rTpGJRWqTXCCS6HlytWoLDWN8YUSGgr9lTY4i4pW/SYnWsey - UX2DNw0WKg+0YVxuMGcnOJatjDPQf/MHNEze+XIQcVuhEsNrFfBH5wKyavDVpFr1Ev8r3vHQhCyK - 2jj2vtdWDh8iuONTdW1t/CBspO7edjA2OHHWcXN8HY/Y06DnLkQeI+pSmUZ17HzZU1kKAa30RJFn - mAdu5RXy4jr2nergFs36jRMy8O/1Mb0H7BfhjRCDtGdaThyYUJ7mfigcxzRAvFTyDOf0u9z+016W - ZXCDhRMT48VxG2P3+1cyG18sHq319g4uM7+HvYaV846r5fAfIAUWakGD8wVuIZ3s7/fv/gAAAP// - AwC5hinnygQAAA== + H4sIAAAAAAAAA0SOX0vDMBRH3/0U4fecjnZ/dMuboAMRxKeBiIwsubbZ2tzQ3FF19LvLYLrXA+dw + TggeBq6x4rrUFnf3w+uzX2826xz2tD4c5eGFH3/e+ln9PUCDd3tycjEmjrvUkgSO0HA9WSEPU90u + p8v5tFrMNTr21MKgTlLMJotCjv2Oi3JWVtA4ZlsTzAmp5y7JVvhAMcOsKo1r+oo1hMW2/6Aqy1HD + NRwcZZj3EzrKf82eW4KBzTlksVHOhxyF4vn+SQ02q8uxstGryNKEWKtIg2psVo1NiSL5CUaNzxBD + brY92cwRBlk4QSNET18w5fgx3vwCAAD//wMAlok3zUkBAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7be49b389e94ced1-SJC + - 7bfe4d2a38b9cee1-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -781,7 +341,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 27 Apr 2023 05:19:03 GMT + - Sun, 30 Apr 2023 08:09:15 GMT Server: - cloudflare access-control-allow-origin: @@ -793,398 +353,25 @@ interactions: openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '21222' + - '928' openai-version: - '2020-10-01' strict-transport-security: - max-age=15724800; includeSubDomains x-ratelimit-limit-requests: - '3500' + x-ratelimit-limit-tokens: + - '90000' x-ratelimit-remaining-requests: - '3499' + x-ratelimit-remaining-tokens: + - '89866' x-ratelimit-reset-requests: - 17ms + x-ratelimit-reset-tokens: + - 88ms x-request-id: - - dd3e3770c095157c99a11ad526d4ca9c - status: - code: 200 - message: OK -- request: - body: '{"input": ["Assistant Reply: { \"thoughts\": { \"text\": \"I - will use the write_to_file command to write the price of the book to a file - named browse_website.txt.\", \"reasoning\": \"The task requires me to - write the price of the book to a file, and the write_to_file command is the - best option to achieve this.\", \"plan\": \"- Use the write_to_file - command to write the price of the book to a file named browse_website.txt.\", \"criticism\": - \"I need to ensure that I am using the correct filename and text to write the - price of the book to the file.\", \"speak\": \"I will use the write_to_file - command to write the price of the book to a file named browse_website.txt.\" }, \"command\": - { 
\"name\": \"write_to_file\", \"args\": { \"filename\": - \"browse_website.txt\", \"text\": \"The price of the book \\\"Meditations\\\" - is \u00a325.89.\" } } } Result: Command write_to_file returned: - File written to successfully. Human Feedback: "], "model": "text-embedding-ada-002", - "encoding_format": "base64"}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '1110' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/embeddings - response: - body: - string: !!binary | - H4sIAAAAAAAAA1SaXQ+6Spfl759PcXJumScCIlWcO95FwCoERZxMJoCKgIi8VAHV6e8+wX+nJ33j - BZIIZe21f2vt+o9//fXX321WPfLx73/++vtdDuPf/2u9dk/H9O9//vrf//rrr7/++o/f5/+489Fk - j/u9/BS/239flp/7Y/77n7/4/77y/2/656+/haghBHDuh80zuQ6gPiGVqtfQ8tpjrwTwxkyVJl7m - gS0HHA5qrIyp9hwKVitvo4VTxBcEdvEzY35ax/LSFg2ZY1ut+MdWgHB67rZ475UvwAa+1MHN9RbE - Xb4kG/YnIsp30fSovcsoYNOn8+Hkmzq9N4caDJHjmDD39yqSXeeUTSnrXLk8h1d6tJdbttjAXqBX - yxG16pb144BoAVMUqNSwh4hNnvlRIdywJ/Z2mRoNt8RalE8e7rHGBI5N3KSUcLffXAifpq+eTZ/S - h6h3Mpwr/s6b5RIF8CMnDfa45F59C+PuQ4/PELbQXIE54UII26N4wH63KbP56XBQPtf4gLbnQxLN - B09OIfeov1QvE8+YhpvOASU1D9Q9Ll02pFqSgKg2KgRKlmdkEipT2XuahgDMxYzJt3MO99/2jX0D - x9Gy337PAF0HifoXdI3+rI/rQ4zP+oi9WTeBBKbkkOIT6pRs4ue7DbtS85Con8ZoKu97DoiWO6Hv - 3dIMJuwGH/rvTYC4z6RG20t+sEG9bTSqqZdLtfA7UZaG4BLQ/OpyPd1GfgxT6bnFx7ebMjo0QIT6 - 43jC+A0SsFwFIMJW90yMq80pmzb+jYOFcuzIbnxo/fKA8hkeHfVKc9kzvC1822eAP/IDzXeRMya/ - 8BIIb7cHeaO5YsuS7xIQz85C9/aOA+yamhNQh/uXehCp1RbjlocNLk1sNkw3FuVwkMHFDgL8e96F - E3YS7CTpRI9JZfbT5dU6cKtJDCd+WIOhOEotNJTdQq1LvrBZ3R0gOFy5Du1229KgXTu5MHslFppP - 93s/s1bggH3gTARjKzBm5LcTVN6hi70XOTJ2MNRa4WLRp3jSe2OOhgAqU3XJsLZ3Lv3SXxYJ2jC7 - Y4zUoBLkQG6h4V8mMrm6B7YRLy8wi5GJjco5AtEMIhMmyXvCatT6BuN93ofosD+jzXLeRfPXeaZg - ZM8ztcNzw2a9UgaQGb2ERGp/WO+D8qEcuHNPBI+r+vm5rQJoOKqIPX65R5OmvRxFsbsGKUUWZZOy - zOJmEqMd9aifA5biQwiJLW2xrihDNJ/iOIaNfFSxmT0nj9SHsFUi0e6oBauMDUF99UGgf1LCifDl - 0YPd6LBtXxZW2/SYTcPN5eB2MkK8D48BmLXtsYX39srRwy1rDWoj04a00wF1AjeqvnJph7J9IiWS - eV2u5ld51cEc1BX2yJ5Ec9VVD1iUWoez7n7JZtLiBuqIxARe0DaiNJtiUBjAo6jCpF8MnusgPPUa - WlT1wKZQPolgyx4C9jD1wSjwHxFmE1BJSFszIr96C4nTUPclGEAcFc2BIsedsTG+94bgSl0Kh2ph - hBT2p2pTU+FBqKoOYdbHipYq75rf/qCY3xmMTz5SCN58bOJjFZv9xI2BrTzrNMDaxB+MoWZuA+kk - 6xTp7NwPpvcYwBJMBxzbh9ybkLt14VjBGw6Erqmmzy4lUNO4Mz0I6jdj4WWCStMRa91vQk+VE5fC - df9R421eslmy1EXuvP6Aj8Ll3c97481BgTt9sV18EZs/96qF+3HzpAcZBtUYbPMHnPq9RPW99qxm - et62sHFaDqOxjvs5fHQTpJ0KqAOMppq/78mUM+EoU41oDljsL3NhkmsRaYXLxRuufiSBYnYimu0R - MJb99nWGclY/sO98XsZ4rGIHPqd7ix2z7Vmft98YtDfVxdnuGxszBM0ZbuTth6Lp8PLm1s0GuNzj - N5K8rAdDWm1aWMxuRK3wG/YzZywiyPeLjS1m+T17kjlXmnnzIJvP181404sHkMvHJ+Ky96VnarjJ - wZCDgepk7nsyGbczzAQsU7t5i8bM3kkJJnPcUlPYlICNyzjAbVFe6aGwt95iKYYtb+i3od68uRtz - ibENPffzJe/N42gsNLUJBHmfIUFPjGwWeMqDMvmc0MDgppq/zjWFc/XSaOo6p2jKlSuRtdMdI4FZ - Qz++TkYCHSN3sRZFaibU16gA33boSZfgql+kYWvD5QB0fLy3kTF+twaBaz1iz7R4Y8znzIF5wyC2 - siT1hm1zqmHCbWZyCwa1Eh1h1GV0JRI9wE6PmJ55EoTg8SZZrubRHBp2Aq3tTcKH0sU940FbQOcr - etgfl7JnDEkldPI9R001K1Y9dBr4vPmYHs+7vqfjKzThzeIl+lzsql8e1HTBU6IuIkuG1/7X28Cb - 7DP2XC3L5lM4ySCF6ZNMxXSO5lNtFtAajlsi1tGeLWl25MHj0x2xP4Avm+7eEYLGt194v9H7aNk5 - WQF+9aqVowjm+q0NypA0C4F7TmGzozoldDpaYl2OWET7/aJDA88RAqueL9dTEirZ1cqwM9/tSCyp - XEB+6gKsfp8Z6IU2qKGhgAWB6GaB5R64MfR0wSeLey/ZzIlmIwe8l5NdMBTVxMIsgDTyBKwu6rGi - 1kJbIGrjEauWFxnzgTNdkJRSSg85arOZLkG8C1rTJMorHLxlVA4uPGthT63x8M5GKDkyKN5ciDXU - KdEw33YxMF58hiM+iI1lIZYKA20wqPduYD9c/UyCjdJW9FBJL2MON60MzkBpqDEln2juX5sHOH/a - AZ/J5QCGbyjzAJlLSo8oDyOa7ysEb4Dy+IjyJWKXXLPhLHI11nrU9vOAPiV0BN1HfMC/siny3iWM - 
aq3C8Yso2aCed7Yc5ZFFvdhvDKacxBTemK1iO2iEqCWGSH79gEynmlXkZE46INJjR7XoFXvsW/SD - PG6cG9bVrV6NOEk7uOo3NZzUqiZP02qFvD4M74syYazFgQk9eOip9hBa72slC1Ju5zyn/iluK+ay - hw60Wlqwn7FXvxzNQwtO+3qDta87GYt6T2XgP/yFWgUn97RrJQfmIdzTU7JTGVOnwIXkUmQYc+OO - DdX7nIJINDtyu8zU+FMfHyvT0I/32I3VInSr+ITV1LKiDhPlDH1pR1b9tKvtEt4h7LpgxM5FehvU - aBcbwkHQUIUQqMaGe03wrAU9tevXvWLZ0efAxh+dHy8w3ng0HdyiOybCU2fGcNviBljgXNDgOJhg - IEuvgkn2X0S6TRGYd7M0ye1Nd6lpFTQafnzIeZ1Hvf5MPNJ9XVFyufK08qBQkR/fXXRxpEe7Nnre - jioH3uXMInM+0570l0VW9HQmRLaDsmLXjwzBbve6UpxfpH64LGABhfdtMUqLj7c8v2Wq+K8uxki0 - nGx+PXY1XC57A7GX3/QL1xoEjPV2ovjUfaul0pC/OzQ1wp7fvNkkRYcOvif5hXFbGNkyolyEZAwl - 7PJ6Wi3c7hpDSXqkSJRiLWOfstVBqYo8tbpaZiS7tgnkmsseScXxaCxxXSDoPQyf2kKlGYKxSW1g - yr1ApMPT7OdzTnK4E9Ad2x3GxhwGsIanR26h3XfYZvTORwiGme/QmAk0mr/fSymNtTBhJElOtTwz - 3genfbOh1qydvZlsZRskfpmgenycPDYLQQo//azTWD8do3n0XBte4MGnjuRcso7fcfJvvxDhYCxR - 3zLWwbg79VSrM6ear1M9wd7cfKm+8stCckOHWCx0fHq1bbRcjdL8038AuO57cQtfNXx1j4XwsDuB - LUNSAayTtsVWtfPAPAyqr6j2vsWez2f9/IzfDtC/txDjypEq6m/2MnQ+0QbJ8esEZoH/8OCxSTpq - W6CJ5vT9DWCuZgDbmOZZ4V5F7vd+OAxON2O0dlMMNlzkUguLSzacecmGsH4/0dvjjJ49orsNkutw - o3FY5v3UBWKhOOb1hYAoCqBLocFBYstbvM+lDaBe+JTkpJpnfLhljjeQ6IWUHw/uNr3kLY4RBdAh - noW2welbzcPg+HB2RAu9q53HfnwJ85DbY80UPxnzCmSDlaeoJrYbb1CTuISY2U96GB+var4+bw7Q - +U9ExOJLGKufyhncKTYR4JJ7v5w9x5FXfqHm+Ji9SfDnQpmeYIu91tsbfG3GgfLQAh/vV7/YnWk/ - wDm5qFRX0ab/ugfRhF/FeGPzfnyxjsbfEAK1Nmki+iObVz2DPt/w1M+njTFupo4H0uRLCITl6LFK - BQXMla9Ok/28MCYkxIagsDfU/cyesYh410C6t+/UjIStsYivow+vc5xgJ7RlNo6JFMLG6Tgkm75r - dB9cLnBvD3t83x35rB26uAZczPt0j6otm22rCyBVj1dqxHTMlh9PrP8v2mqWwwSrO4qQx5saTR4X - GfMitQ/g7GeTmqcdBXNRZzEgxZBTPc8rMG8R5sCkdCbWii+Xtc1djGHlHiICL6rWf+8PA8n2IJXY - +lQ1Y/tvkSurnyasWxowvTx4BvoU3H76Es37JQ0gj+0jGcWUN9gm1XmlUW85VYcQGn3s7FwIddfG - 6PJFGe9pWgN/vCDhKO1HY+4lkE40pT+94bsZdjBT7jn+5Q3zRXJF2ToiDZtvPjFGYxOaUO3rEGNp - Xqq6ePohWPMLIuz3HRgB5R9wPjobfIV72WOOU8Ywyk8WEazbLZo/fqLDoZoYTU7aEA354y7Jly8i - 2FXLk8G64d7A1e+RzXs0PLb/tg+YhcsLG88Hz1Z/5yr81Ab0+AoHY773uwQa12pG59MOg/laig08 - 0HFPDV4vMuaNfKiYvSNT/H3PgKSHxVfMBSTUGN8fj42v1IY/P64RrQXz9HRSqJzED7WkdXUkeQOh - /uoE6q08NFVX4sOXac9UpcdjxuqxL+H9ORzWPEgDQuIffKC2DkUiFzo9E9qgUYZcvSBBkHw2410w - AXuQSzSf8Ictzf5xBitvksLd1dWAdn0O2Tv8oqnagei7SEX+hzcxGAxG0oPsw5fQMsJvWMKIU99S - SNNSRUsGTmCyLAjBt4CI5nf69ZbiQ0wAtHtGCNo31SQmVQonqhZI0oc6mtf6kk8xqqjq7dNsMvdz - oKTpJaFuvg+zdq3v3/rStX4A/eU3K0+v/kbzhJk8CVjrix7QPjZoxC+LwlJls/oxq2JNe1jA6T0+ - KJquMZjjepEAS+0Wq3E2ZDMTaxfmz7Kifu3r0fSsGxVw5a6m+6rfsM7UB2l3qJwex81VzbalfQ/g - CV9yXMZilQ3Z5qxC/W0m+ILFMJo7YZ/A0vIIdpL+ndGROiGg70GjrpK+PfZ4eQGcz7s7dTYLMcY7 - jXkoi6eUbNRS64W1PiF5vRk91taQMVfzAwhAsEG8VdBssiREwKI7b3wf+6HvNuWXg2v+RJH5aqI5 - 32gxsOpnSpGkYTDHy8v81TfVT84MpjydHjA0rzoS843AfnoKg1hXqD7XPevAizXw88mdH5/0DDUx - gu3bKrG58uuykKMOPz3TqX91uWruYquBN5m/Y/SZimibvLkWisc0JPwt2/YkrHYiWOufKLsjHw3L - VVFl4yVmRDlcDTZrrEVQf28othz/Vo233tDBmhdSrLReNtHTIP3JZ7YDuGWz2mcLyGLfpJYdhMbE - TUIJ7Re+r3kJYMsXphOQm+WJj6kQMjHtphiOyZKQ+vp5RVOTBjJ8T9KL7lX7AfigfvoyYMYdCXfb - Y5NelBy8Xa6MGn7IZwvXegQm6jaiWPtMxribpwl+kssTH+6RyOi4jAQKPv0gvmG6NweNyiu2ZFeo - evlNxXhQFBD1brbyRNsvKfQgHMEg4tVPV7QNlQJaVW1Q/9kH0XDokSR/0/0evVpL6wdx9yphMQ6U - mm9e8mbeDBOgeyHDXuKM/ThzOoLzZwuQ9HiCjA56Ev74mea1X2bTNGrBbs33iGgfcoP1t2ML+v7a - kV0aqkCYyZUAk9559Hb8XUUu28n98R09mmJoTA73EEEbTC3W89xgpAymAZ7e9IE1lH/6lU9E2X8r - AfbLx9Sv/R+BTRYPVAeu703JRwr+6O/Pr3W5GeRKOp5EInJh289HbimAZp/Q2q8wEKDkSMDeCCp2 - Vn5kPugecPUfdOVDT97sjzV0jIeL95995X2FwA+gSeo7/unTePZSHnTK0fsv/vXe6QPYm62K4J67 - g2X1PzC6ch71QXCq1n4I5d/7WplbR2w3DAm0i7ONT8s0ZdOaZ0AFyQ4RH+BurPlYDm3eA6hZ13Ox - AZpkURwWer0CPaIPXcuVUPm+yMoLxrqfoNJ53wN51y2r1jylA18EbHp0it5gsz2YcM378HEprWiK - 
n5UIHVX90CC13tm89keIxHOONSV7RvSUpQTarSIjZn3e2ZARJ4XoyaV0n1rPah493YaSdwzJLkdt - tOzjHsFj44pkXnl1POfkAQUnHYnoQBhRNi6xnJxD8Td/AIvQNBCu/oQMkKuysW4/LjyFTKV78bWr - mKWHPuzGIaDej8dk+4ngIUuPWNesJJv50KlBtPT7X/4ZCZLlLNA8RzwpBrPJhvLWFXI8UoSCkh8A - /ULHhEiMczLdFd5g22IpoOtzmGLvGGTrfEOFhyvssBd9QDU6p7Pz41Oy/RpdP/zq97O0B3zQ3yhj - +vFOgI6GGJ+Hm16Jh8kLoRokIZKxqDPRzk0O9oQdKQbyrlq+MJxg7nzv2PXB3DP7eR1gr3aUVJHF - Z/PeGDnIN5cG/dZvcs+3Eug3+Up2q99nv/welfKOfCPh0wt2LMgweg4t9e0djaajD3KQXMmNmh9p - 6lnTapMSVY6PzY8UVNPX8ju5SpYt9buNnvVGK5t/6pFf82B2Tf0FZp5ukCXyu2y6LGyCq7/Bh922 - 9OY2+8RAbC1ENf6hezPMhwKOx+ZKxG8SR3Me3NUfb5JNl7vVkkypDbuwCPAjewv9VL3zFNx79kK1 - i0UwihTZoB4S9Q/vb4vsVUKefBzEsbD1BsU8luBxv1Q/vQXj0+E4WIfWlYBchVG97l9wbZwr1o+B - YbDIRg8QaXuLWi+BRRN3AgQo7qZAi5iZPYs+ZgNpGDzWPH7w5mI5lIBvrg31LV5l24Ph1ODnR1b9 - 9db8lkDTXHaIxzaupshRbZCXdx1HfnjOxtiAE5RC+4bRpnf6Gu1vBMbCfaBaG20j+gY3Agd2ItSW - 4lc27JGWwpkZkEygGQF1nC4Gz/B6QPBYP6KFiyUTvsrhQsNGrz16GsgZtkf+QKajLFcDnRoCPTug - 2FBatZ+zizgBi9kcxSs/8nQiBIpJExH5cEr6rVcgEwBuKNH2CvRsW6PvAK2jr2GPcB9j9VMhPH+6 - gYA1r/tCQGK4P8kONr+NnonvqyoqU9YoZGsq16prSnmA+uxNhBvjgk2lVMeQfT8V2sQvfZ0XnaDy - yYM92XBP0/vxD7x29xN1g7dbbSXSErjmYTTejg9jzX+JvOZpq/95GtPuAQrghBsL2/ppzPo17wXp - CT2p/SL3iAWk66ARdyl1Purotb/5FPKL7eoHDz176NpDuZvlATvpZ6imRRR52IVlgPiuD9hcGHcE - yXd7ocatPFad+LIQfKLdm2yHserHIycXPz2mGKlTxVApLb9+R6pbOfZrfuZDl0ojxWv+SZZ8l4J1 - XkrV5mEB/rN3uD/zTAzkW7XcyGWCad3qOAieb6PLqzSEYfl8oWojE0BbxlooZ80Du9fErYbIG8uf - v0FbzluqGUqqpBxUdKTO0Bx7YVe79U7ThR4f1/nqsmAPQYOkW4ofhmss5fnh/PIdVKzzqF//B9z8 - ctBsivtIONxtFa75N9roj5Y1X/egQ8sSROobWMxGYXZTeJHo908eMUW3bIBpuBfJzTtf2JpnmPA3 - /9gXZ8Pjs3nkwfVjEMQ/wimbYDTpiqMCDh+W49cbnVPuyPJzl+BTnTl9K0Vaq6z++sfTBh8UuwWu - +R21Nwctmz9k+6d/ow2GW8ASmxehnLgAca2AvSmUb+LPnyDWLTaY1nmVAkTOwvrtW/Vs1TM5aG3z - x1tsSk2Bh8r3bSKR7O9gBl4gK1aS6tjY7QlY5M/lDPX4wf/mb4DFQJd+/Y56LzKyNR9w/+h5H90s - xj5loUL64GX849HFMbIQHhUQoR1NW8aOojrA7d1mdM2bM6FUWAoltD77gVn9/DaN5jePowjtm36R - 9d0Cnd3FpgfRurL54bIQ2Pd5h6SV50diFbHiv9qYnrfA9ZZdfBj+8J2lpOdq5QWi/P07FfCf//rr - r//zO2HQtPfHez0YMD7m8d//fVTg3+k9/TfPi/+m4p+TCGRIi8ff//zXIYS/v33bfMf/O7b14zP8 - /c9f4nb757zB32M7pu//8cW/1t/7z3/9PwAAAP//AwBjM9wq5SAAAA== - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7be49bdebeebced1-SJC - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Thu, 27 Apr 2023 05:19:08 GMT - Server: - - cloudflare - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '86' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3000' - x-ratelimit-remaining-requests: - - '2999' - x-ratelimit-reset-requests: - - 20ms - x-request-id: - - 42e0557e070e90e99b138d49ebb07b3b - status: - code: 200 - message: OK -- request: - body: '{"input": ["[{''role'': ''user'', ''content'': ''Determine which next command - to use, and respond using the format specified above:''}, {''role'': ''assistant'', - ''content'': ''{\\n \"thoughts\": {\\n \"text\": \"I will use the - browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html - and find the price of the book.\",\\n \"reasoning\": \"The task requires - me to find the price of the book, and the browse_website command is the best - option to achieve this.\",\\n \"plan\": \"- Use the browse_website command - to visit http://books.toscrape.com/catalogue/meditations_33/index.html\\\\n- - Find the price of the book\\\\n- 
Write the price to a file named browse_website.txt\",\\n \"criticism\": - \"I need to ensure that I am using the correct URL and question to find the - price of the book.\",\\n \"speak\": \"I will use the browse_website command - to visit the website and find the price of the book.\"\\n },\\n \"command\": - {\\n \"name\": \"browse_website\",\\n \"args\": {\\n \"url\": - \"http://books.toscrape.com/catalogue/meditations_33/index.html\",\\n \"question\": - \"What is the price of the book?\"\\n }\\n }\\n}''}, {''role'': ''system'', - ''content'': ''Command browse_website returned: (\\''Answer gathered from website: - The price of the book \"Meditations\" is \u00a325.89. \\\\n \\\\n Links: [\\\\\\''Books - to Scrape (http://books.toscrape.com/index.html)\\\\\\'', \\\\\\''Home (http://books.toscrape.com/index.html)\\\\\\'', - \\\\\\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\\\\\'', - \\\\\\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\\\\\'', - \\\\\\'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\\\\\'']\\'', - )''}, - {''role'': ''user'', ''content'': ''Determine which next command to use, and - respond using the format specified above:''}, {''role'': ''assistant'', ''content'': - ''{\\n \"thoughts\": {\\n \"text\": \"I will use the write_to_file - command to write the price of the book to a file named browse_website.txt.\",\\n \"reasoning\": - \"The task requires me to write the price of the book to a file, and the write_to_file - command is the best option to achieve this.\",\\n \"plan\": \"- Use the - write_to_file command to write the price of the book to a file named browse_website.txt.\",\\n \"criticism\": - \"I need to ensure that I am using the correct filename and text to write the - price of the book to the file.\",\\n \"speak\": \"I will use the write_to_file - command to write the price of the book to a file named browse_website.txt.\"\\n },\\n \"command\": - {\\n \"name\": \"write_to_file\",\\n \"args\": {\\n \"filename\": - \"browse_website.txt\",\\n \"text\": \"The price of the book \\\\\"Meditations\\\\\" - is \u00a325.89.\"\\n }\\n }\\n}''}, {''role'': ''system'', ''content'': - ''Command write_to_file returned: File written to successfully.''}]"], "model": - "text-embedding-ada-002", "encoding_format": "base64"}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '3189' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/embeddings - response: - body: - string: !!binary | - H4sIAAAAAAAAA1SaWw+yzJat79ev+PLdulfkoNRk3SEgyqkKBRU7nR1RRECQUxVQnf7vO/qu9E7f - mKBEcVLM8Ywx67/+8ddff3+SIr0Pf//rr7/feT/8/X++7z1uw+3vf/31H//466+//vqv3+v/OjOt - kvTxyOvsd/rvw7x+pNPf//pL+J93/v9J3+/e0SMjyssw5Av9nFfBVMZEH7w2mQepX8FcE51O3k1F - 32MFar83WGAUJW/rLSr/HB/Rpen6hbzQlA6PPcG+1od8qYx7JKQrjbi7zcflcr424To+GNFexivh - l/4QA+6dE3Ona9v1pLJyxfZ3LcNxrSF5+SoAiIYfZBfxa8JP9z1AfS/f7BwJVkc3IwNkmmlOFSet - Eta2VxPy4XPFtJ0z90N3yxXKB7PFMpXLbr60BVUX3c4lmxtd8SGOXq0qYH9me93bFBOpwYOVQ07E - 2ApaJz+jnYZq3U+IeZdi3tdz8wF6NI/kvg+jZBDaT4wmV+vJTrpPyUxt8/z7PWKPN2TwHsYU1g/B - Y+Zt2BjzSlz36DTmJR2NW2PMUySt4Hn1z8QS2OzWw+sUqWP3BLqeli7izWWi8KJdjKd9cS2GlWtH - qJ68O7OkSEd8PvclZL3gk9Q6PPhkHU0TiuBtkLt6aEK2PS4quN6vNdFL3ePcGD8fCHfajfge091J - 2+MMFv55R+cm3XDR09VUqV+jy6I6JMWI2zKGXe3FLNWLUzJmw2YBUzBumDY1hI9Gd6nAV92IYE8O - ijleHnVQrMwhiWanLsfqQ4d4dcmYMddHl7d9pkGbBBE7FdKUcLwsFjAad59yV4CQv1OzRJtlidmZ - 
yR3nNX1oKJENgruwPndSlK4dxY6rkDltZ4fCzds5ay5ab7I5y1HB/VZtYfRtmyS389sdu4B/QFy+ - B0ZS10z4JfQ9iLexTKKkeIblxo0/aDRSn+isvoY8aLQW5KpXiX8aDd5fLlcBJCevifOOA3fGFycH - 712KzDs8Duhb3xL0z9Ejm89476pS0UDVP4HHHrEuJ59VFqxUwe4Ttr/GOZqMwQ8Q7ciT7Jv+ykWr - 2USwteQ7ztRr5s7+9ZCq23FnY9RbUzK/TroHJ19zCfmcDU4vm5emRp+FiRX5xov3Eh8taGTPZ2aa - 2MUorY096lbalewVViSDLJi6OjUI0zU9GAnNtERZP/JOwnXSGaHQhXqpXs6JRCWIUt63p0O+rB/7 - iCWfeM852rgWLJv8QYw36w1+30cp1Mo6pusFlVz6WngrMLxzyCx91jkfH9cYKn5e01V+NTt+8N85 - kh+6hpXEFNDIh6BCo9+vyFEXajQfctGE5Bq4bHcXNy67LsZZ7abhzHbmueqmtLdm6LV6heXntePc - 5pcUHp/EwW9Znwv+aWRnPW3FHYnnISnG8FFSlUVPjxiLix6OLv/s0WYdukyXm5fR9kfNgtwuj2Rv - TzjhW23tIB0UnelrJBTTYWW2EI7BDsNxXXF2O8RYMZZCx7SlwNxpGyUa2LFW0lLLFGPKTl0KB+qr - eLESCl7e+V1Ci/E44bV83Lrj4FgSaEfBYVYYT+F4vn5GuFm+gdd3q3SnUjLP6seKN+QGiVgMnaos - IBXpgs6sXoftJazuysupD4TM52XBDuijoYOWKsSUFNOYxhEd4TLyDTN35JNMq9tSgMXmMBIsbcVk - no9sAc6Wf7B0UxdGk/VZD/iSP4mvBuCW3/oozeM20dXtcue8qLUzKkllMNd2lvzPehyuC4PZGluG - k/C53lHfqjohG/Fd8Pv+fodusuvv+muK8bF3MbDVK6RLc3d1R7I8UiCREDJPPdgJH1bHSn3v9gVL - d8s2nFlml+vPVjoS66YuXFrTRYAsmt/IRjblrk+3cFQOlofJk89RMV0DdwTFec7Ma6BPagsSD/C9 - N2l+2adoPEhnQLJojGz7OlrFTGeirWW0yvHH9Ytk8q5zrxrJXOI5i8qkXxavD2zvmw3R8xVz54m6 - FnS5IDBiLQuXE2SZAKMWsYulTGHnDWYF90oLyN4ytmiqTV9BcZMuKIonIZkU3a+Q7TdXQu4VLzhj - qQSSvVjh6fHYhaP0lBZwRMVEVfddJhMPiz26uYsLs/ZIKWa0PJ1B1OMrO/jSjPptN+WwbzYFlcMm - 6UZtNWGIC9YSR9uQTjrKxQ3pvS8QmxhKMuRLzVKFq9MT3DZl2BtmJ4HHxIx449UI570jaZAbB0K0 - NDqiOW9WGaSX2KWNOAjh1JV0RLdMe7It3YxoENosVlWSlnhezzGfcrxoURL3d+IV52s3vvODp0ay - VOGFdVDR7AjHFh5y2DPTC5yQdx/dQb9+1dy4U7RROu3VTFQTFvhy5g45THsoh9OOaOfViw/5x5HQ - t1/hKR+zbuxxT5Fr35546QxuMQqfuYfcfylYokfkfuJm8oDEEyI4r6EbdkR0gFfYwOsHXvPpeduc - f88LVtL5kYwxWnwggblgxmXYh8J8Ds9oaScLsjP9FtF+hSQ0mHNKdmdCwzkdjZv6oPuUBLmehPNK - nHo1PJ18on/Xz3Q93nrA4+wRU5n2yZcfMNgDttnmlCwL2oSqCVU66FhyeVlMT03cw70tauIqh7Gj - 0rs5wjI6r8hXz/l0qzfar5/Q17UyXOl6mO6g5guV7etCd0X/FEnAqHRmfj3XyYhOGK+zGF9wjSrK - B/lhzTBWizfRJO6H4zu/evBp0z3xDtHZZV/9VXfjqWJb8Mbk/ZhRBQ1iJZ3Ys+XTYyxb+PbfXz2L - gcqZqTZ3UWdWoOrJaDxW+k9vSPSWbwWNVdeC4xRaxMztozub2zFAcXNfEOtCTl2vXB6Ook17lVz3 - U+3OZ2ZXkODLgQqjK3LWaJDCvdIDyk1LMGYUvypoHvHE9g/LDadw/3DQYe3ITBMvcTKednYM7/fj - THzLZsl4OemKivWzQrfxoeRce9QRRHmzIZGklF+9uqcoiiJMRf7Yoh+/qOfVzsCSocTd6DhuDpqk - HNgmtD1j6mXLQWFnvehSiXeh0OOSQkWEjPn5p0Hz9VUDYIEJbJ/wA5+Xaz1FX36lgpgSY87q3QdQ - 3mfMbTrdHWV/ytTFKGV/eFZsG69F3/VLcOYNxQgNZJAbIWH62S75pO+zu7oGZ/njiYI/I6KjrXUY - 6KeIFwXzTVtRTgrJmNtymXN8bo7Q5YcthttJ5lO6DTWFV57BHrOlF1xA3RmRYZmx7ff/z4uqM8FP - pReejFzuGNoYFlyPtGObdesks2LpHqTWoDLvuLYQrfv6DM9PV7NNdRoSyrU8V7mQScyYUymZ3DQo - 4XHNP7g43og7V3snh3S5bOhK917FvPc9QFG/7imkHriD5jzuUO8WjE7qC3ezk1ip2oR7i5y3/myw - 7VEqQTCWO7pq+isaC7IWlGsnrij0eMknw7rmoBZ5j5VVeQi5ss0W6ktTTuR7f/gkHCIPrGIzEytf - bjn/bG9n9NP7Lx91wjtzjrAVj5TtFWaE/66Phh8YPdCBs3DujwiEomPmNFYFS8jmCObVitl3fSGu - PVgEJ8XPvnr4TMZ5mWoQJm3APCRGLlfD/I4kWL/Yg10Eg3+a5R6JG6llXqLsix//ottp+2Z6EubJ - tMhpui7nfMAwtBGazHYRrSWZAfOJMxmzy2CGy52usb6eY8RwPGL10AYntsnrErHIbirAYlgw+zB/ - 0CdXbzGoTaCx80P8JKNQFTO6EbT8o9dzqewXClhzTRwanF3pkKsW+vEIKi4Wko3x06I4Cq8Ex3XG - 5+niC/C7P+lOwh0P5z5Am9sipku5ifkUPo83dNt2Ee6WZYN4vXsd0e5+SJk/W3o3f27NCn35h4Rb - Ieuykzmn0C6jhByv8pD04ZN/0D5It8x0rb6bqJxZqp8KL2bJ4R3N9osf0bQNY7YZFMWgXRXd1SF/ - 5cQ1nAL102O//3O9fs+bHy/jn5/Ea2N8G4MtIAWksBGZmRTLhJ+1QVL+6C3ergxu8+cdwXt7I5s1 - r4wRbxoKu6POCen9j/tHb/W9O1DRvUhoFmVKQR2mC3Ne7a7g50tnIRFZZyqCIRSt83jc0BBhRoy1 - abvihs8WHFrJpCPflojvFDNQ5WDZEC/ytFBORFFTm/4QUrTb7I2Z5kEG52YOGGkV2s153ZQwhI1G - bnGz5RzetYL60/Rg/kL0OF/tHguYVnxktv7oOJfzyUINzgnZHbYjmtTajFC8WhP29GyNy93BU0C/ - TwWzwpdvcByvMChteWC3FF0NbhcZQEGRSPSnUxWDuzVjYORQUb7tSt7npYXh9DYQMeSJFfRG8gyS - 
MJOpLIWnYva68wrYVkN46Ui7ZAwUEJBXOSX56a3sTnkK6pxgYqE0C8fXZjyDawNm2uHuIXbShRF9 - /ShxmGghgVRWhrgrrQhZdlnRLPHNRFn1cvC4TzqXEYQtdJQrm2ysg2WI+lTsYZa6kM7hTUpGeU4j - tEZ1gNUuY5z/vp+UQUmFXWwa8t7revjyBtsGccx5HEhHgLO3Ijv/sk8kMZgX6J4FQFfErDtelNMN - +IK/KDjy0h13YAN6B3JGPJvZ4Ugi2wTnreyItrIqTt/38wqCywhkz/aUz8wSb5Bf0BYjT3dcNjyt - G6rHz0D0+Vl0M83jHHaXeUd0UszGjJaPCGz43Fj6flZ87l+LSnGHtPr6Mc/teOMuQBjmHH/60jQ4 - 23cWaIq4Ju7Nb8L5xxdCeI0xMpyCz9G2BJUzZUmh2n7cyX5ILfIugc7Cu2y5844kyh89MLecJ/Nh - PUdqc9lROpFjjSZ1YVfQJoLBYvH9RqMmtUfwoX6zrx6H88lINSjYo2SOfl90fe89RlDet54WWMyT - +SjeU0gehOPVVnFcfn2XJbrsigiLabEymD4sZ5ArFzOjqcZwDIVmAeWgSLSXunMxh+OiQo2/yJh1 - AAjZ3eliqMd2wPL8HozKNF4R5Jf1lunmbhOO6a2tkF3fRaZ365GP+zXSf36E+K2pcErnnYZ0f71h - hitA0mq2gJFqs4CK2NqGMtsXFthxGdKPf06MWV7tP3B5KU9aowqjeYEOsSqESUxXF/C7+e19YsDj - 6LHIC9qkby+fu7IsJpu4Q24loqCkK5TF3oUZuK+K6WwesOrTYKZUXbzCn/9SzXw60cnILwVXwzZF - t5uwptM3X5gY1zF8+Kb+Xu89GUs3BNg0ksI2m92n6KVAu6tt279JsBFXyag93xrMta+T+hmM7qS9 - blT5Pg9EU4MymZvBDuAmeiYJxbNWTL/n69SdF/R939vG5Nh5AJa66Kj6zTOoYjkeLO3rAsN4NRLB - 5Zmj1lWY0+LNepcLlapBZ+YbLLBN7P7RL7DGmqRJtEvGrEWa8qDUZ8ZV2/A52qEVuHDaM2M06nB+ - e9kNtev3jmyEzOOD5NQ3VBYfl/3yj6mUvAhOiTIw/XjMuv5yOUjq16+SHZnlZPz6ReDvNqLysX11 - 7HPbtwCjHlEk7kJjVj+zDqdXqxP3cCpC8RVGmeqxw8T0YCCI4Q+l6JenbWWXhrm5XQWgdHuNHUTY - cFHff+5KxaM1MQ9FUfDSPgfwwG/KdF3YIfGpqXvYetVISBaBO6DlogIWrh/MWWhWOLlb8wbkFOvs - pti6IXMizygangnZX2OdC4MSH2E9rSI8Q1sYU3QTchjw48Z+fk/Izl7/qxfZOHsnGe/Ozfn5I+YZ - CXVngyoLcGt2xC9+Zm6/A3vxxx86+NQW480jDqTVcWB+D1tXVpX0m3dOJsGNz8LpnkgRwqiqvv6t - /ZOXIe/ch8TuRRyOWSiOcMbiGf/yyx4dl0f0VGWPbc6yUDS27Ejw5VW2v5Kd8dMnNAnBlRg6nJNp - yNczNMgWiXk6eO4olpEE7ZMAI+10RFy2ewVliDKqOHVujOl9ytVvfXEV58xoPmydgjDDnkpf/ZgV - Ax2Rpshr2sbNm39u3m4PTxojPAufJBynh+bAHK8IOW6I7HLKhR5up92besKuQ+NFu1dwufdrdgIv - SHrGu7syXMFg0XUfJP1ZL3K4BRXG0h4pX797xcrLdTU8y83LlWqj/PaDaMQspH747ddn9KSLFqMp - wGjdVmoPx3wREkOL3U74dIIA26xdEStQ85AmfljBZyscifZcF90krZoVtGsjpvDNS+RMtyt02zYR - My5SaYw2x3hdFq2LR6MoEbXesgfm9bMjySf+oPm02VlI0UbpTx5KhVMgAepV90++Upblx/nDK+3m - YXZcPGYfSA/bPcHX6sZ/xyrV282f/L9rlV2FrvL5xfTskRn8tVRK0EexI7/8lQMMjqKHVs0sJneo - f23GSIV3Y2Pxm7/w0dxiCJBsYPTM8u6bb0Rrt7YLRm5RZnBNKEuwbUenyywCY5zPyRldatMk+4fV - hVxQ56P64sKC2d88/Ov/bkoGxxSP33rxV3m5/3iS6fmKGFw3VetPHmuUDgrHRFZ6eI9xQKxH3xeD - tHop6JefGJK34GwlBndIjuoeq3picaF0kwX6+jG8er5u4bQUfQmd3u+WbO6bOemqRGxB260D4le+ - VEy2wBXwjdOaWZuzj+bfE7jcpmey5VHrDufOBHT1X1sshfGU/OEp07znZHeOHXcuPCsF+xgG5Ouv - wh9Pq4tNOGJ0fwKf73uRKmi/NrG0eTbFuD2+I+Ct3REj3xTdnPhJCdDFj68fvKA5WAQeLFvLI/v8 - ahbf/n2E+l698SI72WG/spMWjNPjxazXTXRFseCVajn3kK4muun4Kg09OO0CmW26Kf/ytOOBYuUO - c5xaN758aYJu4i1dNum1+64vAQn5QaFQPE8J1/xRQOMuEbEkubvvfMHKADZGSFfRtunmtlUitH5I - HhaWaMvn62F9h+aypUxfPZ9do9+G1c9fY+VNonCwuvgI782VEHxP3y59DkIKJZCGfrZK6/bK2JzB - r5eMuBuKkfit3595i3G8MZeqB2GG477es+1bI8b886+lLTC8eOqM9wvFBDQ1lyduN1WWMI+FOfBN - 5X/nFbMxB7AJ1NI+aERv0hcfPst6AdkBRVg2z1YxWEfTgsUrJuwaPEkht6d2hNP9fifxjxfF4Erh - 0hk9Fup795uHOcoOCfg3r0FjZS1MZa2vQ7rKgw+iD6E5omZEDtstNguDfXlZ/fofCtPravT1/PpA - 2OYzXn71cBZxqcMkSHviP7yIz3Mn7JUAiQZLfzwxd7cVPBzrQteybScTWkoVWE4a4qNkjuGXP2Ng - i6CmBfpwxFUlxb98Ba/XZdxJSRkLqNUuCfO+vDsts2emaH5bMnu8JcY4bQYdCjd5YMmFLJxYGq6g - Rs/dv+eH0vt1VKZFYVIlRWujJcGjB9/+ACNT9gonVYsr9NVjvMq62J2OxRiohNMvr40no/duIcBp - uRCwhFItlPJmlUM/Oh+24WdijP27WcDX7zDSnB58YncTYGIWpxcf9wUrAsNCX39FTDFlxre/Kj89 - YCdlTFwOb6b8eIVtovu761PjfVZXDjeJxd9ROD+EJkDffAjzO3XCMUlpDssoWjH7gdeIUclt0fEx - D8Tb2VLBQneVQeHIFAuCfjP4N6+Bw/OSEBdJm2QshfMHLspDZYb2qsOvXjlgTp38nX/dk3kgkwOp - 9zxQfpcrox+UIFC3ovAg21sf8uk3H7jdpPWPTxJOjm4MbwHVZPflnV/eCujCK3xBrC3+5D3xChGa - DbPbyY0m3OE7n2XGAlvd9MsLV9etTJyaL90af6oeLr50obPke+4vn0Bl7Oa4ijqHj8zJRliMwcSI - 
uVu7w+bpzlCf5QPThQ9K5lJWdMBIk2lNUGj0weq9gt5SEBW2wIzpKIo97PniQdz7E9B8Pqkx+uod - lvh91X35NVUnQdiTe4UL1Nvc8kAtsp5csaklY3CY9+oVMotE3/4h5Orthr48Qb/3J+FlnqdoKHYH - 5n79lfDZBfpvvkan3TXhZRaKM3znc8SJM8edB7/11LfSIuLLQeZOJFiAMvtJxTwhqAu2LJQVAuW4 - JPY1OyTjY29gCC7EJw4kYjc9DdOEo1za7P7lYa709x5VsUoZRjEtpkYPFDis+4r99P7PPDIJcxkv - f+u/Ypcc8jgViFGGF2POg/IMK9VdMHPxDJB02hAL/erh+7KM2GnrUhAwmZmdx3JCTeN1Rr95o33Z - 5gZ/y8EIjYx9rPCChOJh5bV/+DXt0QXxZ7TTofEhY+YWiPEnb/n7tyvgv//x11//+dthUH0e6fu7 - MWBIp+Gf/7NV4J+3x+2fgiD9k0l/diLQ/palf//r35sQ/m66T9UM/3f4lGnd//2vvxCgP/sN/h4+ - w+39vz74x/f3/vsf/w8AAP//AwBCQNw65SAAAA== - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7be49be3dc34ced1-SJC - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Thu, 27 Apr 2023 05:19:09 GMT - Server: - - cloudflare - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '76' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3000' - x-ratelimit-remaining-requests: - - '2999' - x-ratelimit-reset-requests: - - 20ms - x-request-id: - - 006edc24ffa2e03bf3534ac502e0e6e7 + - bdf544a1c9ea2ee0bb022ee726a80ed4 status: code: 200 message: OK @@ -1234,53 +421,21 @@ interactions: \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", "content": "The current time and date is Tue Jan 01 00:00:00 2000"}, {"role": - "system", "content": "This reminds you of these events from your past:\n[''Assistant - Reply: {\\n \"thoughts\": {\\n \"text\": \"I will use the browse_website - command to visit http://books.toscrape.com/catalogue/meditations_33/index.html - and find the price of the book.\",\\n \"reasoning\": \"The task requires - me to find the price of the book, and the browse_website command is the best - option to achieve this.\",\\n \"plan\": \"- Use the browse_website command - to visit http://books.toscrape.com/catalogue/meditations_33/index.html\\\\n- - Find the price of the book\\\\n- Write the price to a file named browse_website.txt\",\\n \"criticism\": - \"I need to ensure that I am using the correct URL and question to find the - price of the book.\",\\n \"speak\": \"I will use the browse_website command - to visit the website and find the price of the book.\"\\n },\\n \"command\": - {\\n \"name\": \"browse_website\",\\n \"args\": {\\n \"url\": - \"http://books.toscrape.com/catalogue/meditations_33/index.html\",\\n \"question\": - \"What is the price of the book?\"\\n }\\n }\\n} \\nResult: Command - browse_website returned: (\\''Answer gathered from website: The price of the - book \"Meditations\" is \u00a325.89. 
\\\\n \\\\n Links: [\\\\\\''Books to Scrape - (http://books.toscrape.com/index.html)\\\\\\'', \\\\\\''Home (http://books.toscrape.com/index.html)\\\\\\'', - \\\\\\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\\\\\'', - \\\\\\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\\\\\'', - \\\\\\'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\\\\\'']\\'', - ) - \\nHuman Feedback: '', ''Assistant Reply: {\\n \"thoughts\": {\\n \"text\": - \"I will use the write_to_file command to write the price of the book to a file - named browse_website.txt.\",\\n \"reasoning\": \"The task requires me - to write the price of the book to a file, and the write_to_file command is the - best option to achieve this.\",\\n \"plan\": \"- Use the write_to_file - command to write the price of the book to a file named browse_website.txt.\",\\n \"criticism\": - \"I need to ensure that I am using the correct filename and text to write the - price of the book to the file.\",\\n \"speak\": \"I will use the write_to_file - command to write the price of the book to a file named browse_website.txt.\"\\n },\\n \"command\": - {\\n \"name\": \"write_to_file\",\\n \"args\": {\\n \"filename\": - \"browse_website.txt\",\\n \"text\": \"The price of the book \\\\\"Meditations\\\\\" - is \u00a325.89.\"\\n }\\n }\\n} \\nResult: Command write_to_file returned: - File written to successfully. \\nHuman Feedback: '']\n\n"}, {"role": "user", - "content": "Determine which next command to use, and respond using the format - specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + "system", "content": "This reminds you of these events from your past: \nI was + created and nothing new has happened."}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}, + {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": \"I will use the browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html and find the price of the book.\",\n \"reasoning\": \"The task requires - me to find the price of the book, and the browse_website command is the best - option to achieve this.\",\n \"plan\": \"- Use the browse_website command - to visit http://books.toscrape.com/catalogue/meditations_33/index.html\\n- Find - the price of the book\\n- Write the price to a file named browse_website.txt\",\n \"criticism\": - \"I need to ensure that I am using the correct URL and question to find the - price of the book.\",\n \"speak\": \"I will use the browse_website command - to visit the website and find the price of the book.\"\n },\n \"command\": - {\n \"name\": \"browse_website\",\n \"args\": {\n \"url\": - \"http://books.toscrape.com/catalogue/meditations_33/index.html\",\n \"question\": + me to visit a website and find the price of a book. 
The browse_website command + is the best option for this task.\",\n \"plan\": \"- Use the browse_website + command to visit the website\\n- Find the price of the book\\n- Write the price + to a file named ''browse_website.txt''\\n- Use the task_complete command to + complete the task\",\n \"criticism\": \"I need to ensure that I am accurately + finding the price of the book and writing it to the correct file.\",\n \"speak\": + \"I will use the browse_website command to visit the website and find the price + of the book.\"\n },\n \"command\": {\n \"name\": \"browse_website\",\n \"args\": + {\n \"url\": \"http://books.toscrape.com/catalogue/meditations_33/index.html\",\n \"question\": \"What is the price of the book?\"\n }\n }\n}"}, {"role": "system", "content": "Command browse_website returned: (''Answer gathered from website: The price of the book \"Meditations\" is \u00a325.89. \\n \\n Links: [\\''Books @@ -1288,23 +443,9 @@ interactions: \\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\'', \\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\'', \\'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\'']'', - )"}, + )"}, {"role": "user", "content": "Determine which next command to use, and respond - using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": - {\n \"text\": \"I will use the write_to_file command to write the price - of the book to a file named browse_website.txt.\",\n \"reasoning\": \"The - task requires me to write the price of the book to a file, and the write_to_file - command is the best option to achieve this.\",\n \"plan\": \"- Use the - write_to_file command to write the price of the book to a file named browse_website.txt.\",\n \"criticism\": - \"I need to ensure that I am using the correct filename and text to write the - price of the book to the file.\",\n \"speak\": \"I will use the write_to_file - command to write the price of the book to a file named browse_website.txt.\"\n },\n \"command\": - {\n \"name\": \"write_to_file\",\n \"args\": {\n \"filename\": - \"browse_website.txt\",\n \"text\": \"The price of the book \\\"Meditations\\\" - is \u00a325.89.\"\n }\n }\n}"}, {"role": "system", "content": "Command - write_to_file returned: File written to successfully."}, {"role": "user", "content": - "Determine which next command to use, and respond using the format specified - above:"}], "temperature": 0, "max_tokens": 1711}' + using the format specified above:"}], "temperature": 0, "max_tokens": 2201}' headers: Accept: - '*/*' @@ -1313,7 +454,7 @@ interactions: Connection: - keep-alive Content-Length: - - '9732' + - '5772' Content-Type: - application/json method: POST @@ -1321,20 +462,21 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA5RSTY/TMBC98ytGc3arNkvb3Ry5wAqOLItEUOU408TU8UT2pF1U5b+jNOkHRQvi - 6nnzPub5gLbAFE2lxdSNm6we+ONqVX/w28X+af61XX3RzO/l0/L5+d0OFXL+g4yMG1PDdeNILHtU - aAJpoQLT+fI+WayS1WKusOaCHKZYNjK5my4m0oacJ7O72RwVtlGXhOkBm8B1I2vhLfmIaZIs7xVe - yM+D+SJRKCzaXbBvk1mn0FRsDUVMvx2wpngiDuwIU9Qx2ijaS2+TvZDvIxwyDwCQoVTclpXEDFMY - H8cBvUj/mOEj7K1z0EYCqQhEx+16NEhguK61L0AYzm8n1DRDdU0ZSEf21pcD7+cRBpWOkBP5M0MB - sTWGYty0zv1UcBR4VdrG4zCnKMBNf7TeDV3t3BppnPaDhwk8/SvW34hMsGKNjfXpUp5oXIpt6Im1 - wCPoGprAO1tYX4KGnXa2gOEasOHQS/ST11RiQ3r7f13cmB7YOnVqfUT+UbrXNQ1CvzHf+NGhvP0v - 1wVf2m2CNQS8Gfphvqr6umDYBytCx9p65MY6gjzwPtJ6T3m0QlN5kXOOY5YxUuY77BRurLexWg8G - MMUo3KBC6wt6wXTWfe/e/AIAAP//AwAYXmcn7wMAAA== + 
H4sIAAAAAAAAA7RTzW7aQBC+9ylGc+FiLDChpb5VjVRR9e+QnuoKrdcD3mDvuLtDSYN4mrxJn6xa + 20AhUZVLj56Z72c+z+7QFJiiLpXouqmGr95sv3y4vV6X0/ef395f/5zn9G5elHKvPv1KMELOb0lL + j4g1101FYthihNqREiowHb+cJbOrZDydRlhzQRWmuGpkOImnQ9m4nIejyWiMEW68WhGmO2wc140s + hNdkPabjyTiJ8ER+bCRJEqGwqOo0O51c7SPUJRtNHtNvO6zJH4gdV4QpKu+NF2Ul2GQrZMMKu8wC + AGQoJW9WpfgMU+iLfYPuJBQznMPWVBVsnRECKQkaZzQBL9uPnHkNwqBgaSoCq2oqYJA73npabCn3 + RiiWOxnEGUZ/CzhSnq2xq07lpiQQ5dfg6MfGOPJQU+B9lmwMAd6OLoQXrRPNda1sAcZ3APIC3IRM + YckOpAwN5deXvppK2c7SEL76Tvpp5ue6+0coWWZPKsHNov/1ZyrH2mHqwrJ2Row2vj78MEvUAsn6 + jQswJTAHVbd+jV21RJqdIy1g7JJdrdpkhNtW67oVD8hjxXjIKcD7ewdjz5gq1i3NZaS+IbU+O6bN + /w42zrBzsI8Op96zP7r0QNKZO3NzsYNyq8tH0jXC7InisZUznsu3dfPkgoOPVBhpo/SDkPrvh2Qa + z14fl2oX6/fL7B73ES6NNb5cdK8KU/TCDUZobEF3mI723/cv/gAAAP//AwBJZ4NT8QQAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7be49bed8f47ced1-SJC + - 7bfe4d310f1acee1-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -1344,7 +486,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 27 Apr 2023 05:19:22 GMT + - Sun, 30 Apr 2023 08:09:27 GMT Server: - cloudflare access-control-allow-origin: @@ -1356,19 +498,263 @@ interactions: openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '11504' + - '12183' openai-version: - '2020-10-01' strict-transport-security: - max-age=15724800; includeSubDomains x-ratelimit-limit-requests: - '3500' + x-ratelimit-limit-tokens: + - '90000' x-ratelimit-remaining-requests: - '3499' + x-ratelimit-remaining-tokens: + - '86494' x-ratelimit-reset-requests: - 17ms + x-ratelimit-reset-tokens: + - 2.337s x-request-id: - - cd010e2aab478b33c13c4cd5be5292c8 + - ea952acfdafdc0ae3345943425fec326 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key and potentially important information + to remember.\n\n\nYou will receive the current summary and the latest development. 
+        Combine them, adding relevant key information from the latest development in
+        1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\n{''role'':
+        ''system'', ''content'': ''This reminds you of these events from your past:
+        \\nI was created and nothing new has happened.''}\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing
+        new happened.\n\"\"\"\n"}], "temperature": 0, "max_tokens": null}'
+    headers:
+      Accept:
+      - '*/*'
+      Accept-Encoding:
+      - gzip, deflate
+      Connection:
+      - keep-alive
+      Content-Length:
+      - '714'
+      Content-Type:
+      - application/json
+    method: POST
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    body:
+      string: !!binary |
+        H4sIAAAAAAAAA0SPTUvDQBRF9/6K4a4npemnzq6gouDCvUiZTp7J2Pky74VYSv67VJRuD9zDuWf4
+        zzAnGNicQxYb5XzIUSie75/UYLO6HCsbvYosTYi1ijSoxmbV2JQokp9g1PgMMeRm25PNHGGQhRM0
+        QvT0BVOOH+PNLwAAAP//AwB8vP+2ZgEAAA==
+    headers:
+      CF-Cache-Status:
+      - DYNAMIC
+      CF-RAY:
+      - 7bfe4d901cdecee1-SJC
+      Cache-Control:
+      - no-cache, must-revalidate
+      Connection:
+      - keep-alive
+      Content-Encoding:
+      - gzip
+      Content-Type:
+      - application/json
+      Date:
+      - Sun, 30 Apr 2023 08:09:31 GMT
+      Server:
+      - cloudflare
+      access-control-allow-origin:
+      - '*'
+      alt-svc:
+      - h3=":443"; ma=86400, h3-29=":443"; ma=86400
+      openai-model:
+      - gpt-3.5-turbo-0301
+      openai-organization:
+      - user-adtx4fhfg1qsiyzdoaxciooj
+      openai-processing-ms:
+      - '791'
+      openai-version:
+      - '2020-10-01'
+      strict-transport-security:
+      - max-age=15724800; includeSubDomains
+      x-ratelimit-limit-requests:
+      - '3500'
+      x-ratelimit-limit-tokens:
+      - '90000'
+      x-ratelimit-remaining-requests:
+      - '3499'
+      x-ratelimit-remaining-tokens:
+      - '89839'
+      x-ratelimit-reset-requests:
+      - 17ms
+      x-ratelimit-reset-tokens:
+      - 107ms
+      x-request-id:
+      - 782df5c2a8ff131c4a94e1db54cf90ad
+    status:
+      code: 200
+      message: OK
+- request:
+    body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
+        are browse_website-GPT, an AI designed to use the browse_website command to
+        visit http://books.toscrape.com/catalogue/meditations_33/index.html, answer
+        the question ''What is the price of the book?'' and write the price to a file
+        named \"browse_website.txt\", and use the task_complete command to complete
+        the task.\nYour decisions must always be made independently without seeking
+        user assistance. Play to your strengths as an LLM and pursue simple strategies
+        with no legal complications.\n\nGOALS:\n\n1. Use the browse_website command
+        to visit http://books.toscrape.com/catalogue/meditations_33/index.html and answer
+        the question ''What is the price of the book?''\n2. Write the price of the book
+        to a file named \"browse_website.txt\".\n3. Use the task_complete command to
+        complete the task.\n4. Do not use any other commands.\n\n\nConstraints:\n1.
+        ~4000 word limit for short term memory. Your short term memory is short, so
+        immediately save important information to files.\n2. If you are unsure how you
+        previously did something or want to recall past events, thinking about similar
+        events will help you remember.\n3. No user assistance\n4. Exclusively use the
+        commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file:
+        Append to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n2.
+        delete_file: Delete file, args: \"filename\": \"<filename>\"\n3. 
read_file:
+        Read file, args: \"filename\": \"<filename>\"\n4. search_files: Search Files,
+        args: \"directory\": \"<directory>\"\n5. write_to_file: Write to file, args:
+        \"filename\": \"<filename>\", \"text\": \"<text>\"\n6. browse_website: Browse
+        Website, args: \"url\": \"<url>\", \"question\": \"<what_you_want_to_find_on_website>\"\n7.
+        delete_agent: Delete GPT Agent, args: \"key\": \"<key>\"\n8. get_hyperlinks:
+        Get text summary, args: \"url\": \"<url>\"\n9. get_text_summary: Get text summary,
+        args: \"url\": \"<url>\", \"question\": \"<question>\"\n10. list_agents: List
+        GPT Agents, args: () -> str\n11. message_agent: Message GPT Agent, args: \"key\":
+        \"<key>\", \"message\": \"<message>\"\n12. start_agent: Start GPT Agent, args:
+        \"name\": \"<name>\", \"task\": \"<short_task_desc>\", \"prompt\": \"<prompt>\"\n13.
+        Task Complete (Shutdown): \"task_complete\", args: \"reason\": \"<reason>\"\n\nResources:\n1.
+        Internet access for searches and information gathering.\n2. Long Term memory
+        management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File
+        output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your
+        actions to ensure you are performing to the best of your abilities.\n2. Constructively
+        self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions
+        and strategies to refine your approach.\n4. Every command has a cost, so be
+        smart and efficient. Aim to complete tasks in the least number of steps.\n5.
+        Write all code to a file.\n\nYou should only respond in JSON format as described
+        below \nResponse Format: \n{\n    \"thoughts\": {\n        \"text\": \"thought\",\n        \"reasoning\":
+        \"reasoning\",\n        \"plan\": \"- short bulleted\\n- list that conveys\\n-
+        long-term plan\",\n        \"criticism\": \"constructive self-criticism\",\n        \"speak\":
+        \"thoughts summary to say to user\"\n    },\n    \"command\": {\n        \"name\":
+        \"command name\",\n        \"args\": {\n            \"arg name\": \"value\"\n        }\n    }\n}
+        \nEnsure the response can be parsed by Python json.loads"}, {"role": "system",
+        "content": "The current time and date is Tue Jan 01 00:00:00 2000"}, {"role":
+        "system", "content": "This reminds you of these events from your past: \nAs
+        a system, I recall that nothing new has happened since my creation."}, {"role":
+        "user", "content": "Determine which next command to use, and respond using the
+        format specified above:"}, {"role": "assistant", "content": "{\n    \"thoughts\":
+        {\n        \"text\": \"I will use the browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html
+        and find the price of the book.\",\n        \"reasoning\": \"The task requires
+        me to visit a website and find the price of a book. The browse_website command
+        is the best option for this task.\",\n        \"plan\": \"- Use the browse_website
+        command to visit the website\\n- Find the price of the book\\n- Write the price
+        to a file named ''browse_website.txt''\\n- Use the task_complete command to
+        complete the task\",\n        \"criticism\": \"I need to ensure that I am accurately
+        finding the price of the book and writing it to the correct file.\",\n        \"speak\":
+        \"I will use the browse_website command to visit the website and find the price
+        of the book.\"\n    },\n    \"command\": {\n        \"name\": \"browse_website\",\n        \"args\":
+        {\n            \"url\": \"http://books.toscrape.com/catalogue/meditations_33/index.html\",\n            \"question\":
+        \"What is the price of the book?\"\n        }\n    }\n}"}, {"role": "system",
+        "content": "Command browse_website returned: (''Answer gathered from website:
+        The price of the book \"Meditations\" is \u00a325.89. 
\\n \\n Links: [\\''Books + to Scrape (http://books.toscrape.com/index.html)\\'', \\''Home (http://books.toscrape.com/index.html)\\'', + \\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\'', + \\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\'', + \\'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\'']'', + )"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": + {\n \"text\": \"I will write the price of the book to a file named ''browse_website.txt''.\",\n \"reasoning\": + \"The task requires me to write the price of the book to a file. The write_to_file + command is the best option for this task.\",\n \"plan\": \"- Use the + write_to_file command to write the price of the book to a file named ''browse_website.txt''\\n- + Use the task_complete command to complete the task\",\n \"criticism\": + \"I need to ensure that I am writing the correct information to the file and + that the file is being created in the correct location.\",\n \"speak\": + \"I will use the write_to_file command to write the price of the book to a file + named ''browse_website.txt''.\"\n },\n \"command\": {\n \"name\": + \"write_to_file\",\n \"args\": {\n \"filename\": \"browse_website.txt\",\n \"text\": + \"The price of the book ''Meditations'' is \u00a325.89.\"\n }\n }\n}"}, + {"role": "system", "content": "Command write_to_file returned: File written + to successfully."}, {"role": "user", "content": "Determine which next command + to use, and respond using the format specified above:"}], "temperature": 0, + "max_tokens": 1928}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '7026' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA6RSTW/TQBC98ytWc+nFieI0bYJvSFxKhUAoVEg1itbrsb3E3jE7Y6Vt5P+O/JEP + gkAgrvPx5r03bw82hQhMocVUdTlZvtl9TO53z/fr8OEDfXr48tZ8fzd7Wb/e3dELBEDJNzQybkwN + VXWJYslBAMajFkwhCm9X89ViHi7DACpKsYQI8lom19ObiTQ+ocnsehZCAA3rHCHaQ+2pqmUjtEXH + EIU3q3kAJ/BT43YegJDo8lRaLhZtAKYga5AhetxDhXwA9lQiRKCZLYt20tEkJ+g6CfvYKaVUDFJQ + kxfCMURqLI4NfJKuGMOd2tmyVA2jkgKVaN5uRoKoDFWVdqkSUsfaYWoaQ3AO6VEzOevyAXc9jqlC + s0oQ3REhVdwYg8xZU5bPqsf/7WXLfTNBFkV159lfkalL7QYeE/X5n6VdgBlvxRrL1cExh9gvouPG + d2ta/iyhYUyVIe/RyFHy2VI3k6B1+ZlJGfm+P66pwd9LoVyj3v7/JwfINjgEZxz/JTdOVzhc+wn+ + gpT2+WXkzjNyCkjtrUFF2fBjoq26eo+pFd09mq9O2cmocWnv285bEexToFVmSzyy7xWMQmLXQhtA + Zp3lYjOchQhYqIYArEvxCaJZ+7V99QMAAP//AwBD34ZLKAQAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfe4d9589e6cee1-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 08:09:41 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '9223' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86491' + x-ratelimit-reset-requests: + - 17ms + 
x-ratelimit-reset-tokens: + - 2.339s + x-request-id: + - 236ab87ae664fc82d42c2ea35a36b68d status: code: 200 message: OK diff --git a/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml b/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml index 7d21c210..0e9cab5a 100644 --- a/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml +++ b/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml @@ -39,10 +39,9 @@ interactions: to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", "content": "The current - time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This - reminds you of these events from your past:\n\n\n"}, {"role": "user", "content": - "Determine which next command to use, and respond using the format specified - above:"}], "temperature": 0, "max_tokens": 3223}' + time and date is Tue Jan 01 00:00:00 2000"}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}], + "temperature": 0, "max_tokens": 2738}' headers: Accept: - '*/*' @@ -51,7 +50,7 @@ interactions: Connection: - keep-alive Content-Length: - - '3490' + - '3401' Content-Type: - application/json method: POST @@ -59,20 +58,20 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA7yTT2/TQBDF73yK0VxycSI3f0jjWwUHisQFAQVhFG3tib3NesfdHSutIn93tLbT - tEY9gbjOG7/3m9nxEXWOCWalkqyqzXS9ubt53H+68u9+FPeON2vzcb654m/v4/vP3zFCvr2jTIYv - ZhlXtSHRbDHCzJESyjG5eHs5X62Wi/llhBXnZDDBopbpYraaSuNueRov4guMsPGqIEyOWDuuatkK - 78l6TNbrZYRn76f6PI4jFBZlnkqb9bKNMCtZZ+Qx+XnEivzJ1rEhTFB5r70oKwGSrZANAxxTCwCQ - opTcFKX4FBMYioNADxKKKV7DQRsDjSeQkuDgtNBWeLvThiDjqlI2B+FegMkHMobhhp3JJ6CtMCjo - Wq2qKIdJGfTtIegzeZDJLMXoebAj5dlqW/TpX0qCgpUB7f8iJIIO8lX8YF4SVOwFVF07rp1WQrBj - B1IGVfn9mLQ2yvaQU/j6n7aTOS060746vY0l6vzJ+sYFBiVwDaqCxmtbdEwZO0eZnHOGZSjp5PDS - YQGBUMie2s3jONvXpPav30Sg+HdT98FtdDrUwfSPOw0OPdOL1Y/QlSvGJ94LofdsMeJ4YTL+LZ7N - dKLtiAfw1LbYRrjTVvty2x81JuiFa4xQ25weMInbX+2b3wAAAP//AwAxpIznhgQAAA== + H4sIAAAAAAAAA7yTT2/bMAzF7/sUBC+5KEHSLG3q21a0a6/DiqGoh0C1GVuLLHoSDRsL/N0H2/nT + ethp2K581Hs/StQeTYoRJrmWpCjt9OpDfdd8+ni7+Hz3ePOzWa+emqf77Cb1tdzWqJBfvlMihxOz + hIvSkhh2qDDxpIVSjBaX64v1+8Xqaqmw4JQsRpiVMl3OVlOp/AtP58v5AhVWQWeE0R5Lz0UpG+Ed + uYDR1epa4dn7VL+YLxQKi7an0vXlvFWY5GwSChg977GgcLT1bAkj1CGYINpJB8lOyHUD7GMHABCj + 5FxluYQYIzgUDwI10hVjfABHlIIwVIFAcoLaG6GN8GZrLEHCRaFd39ALMLknaxm+srfpBIwTBg19 + q9MFpTDJO31Td/pMGpnMYlSvsz3pwM64bAD4khOIDjvw9KMyngIU9BdpCnraP85hQi8WHAR0WXou + vdFCsGUPkneqDrsxcmm1G2in8PifrinxRkxiQjF+J3Kh8h2DFng4vVrC3lMi54jhHqiRM5Nw39q1 + jNNCSXp3TKqNtf9yHYbgVh3X9GD625Z2DgPTG4gRuvbZeMEHoes9W4w43piMP8WrmY60PfEBPHYt + tgq3xpmQb4Z9xgiDcIkKjUupwWjefmvf/QIAAP//AwCaXwR3hAQAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7be2da273c20ce54-SJC + - 7bfe3ef73b52fac2-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -82,7 +81,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 27 Apr 2023 00:12:22 GMT + - Sun, 30 Apr 2023 07:59:46 GMT Server: - cloudflare access-control-allow-origin: @@ -94,19 +93,103 @@ interactions: openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '14714' + - '13160' openai-version: - '2020-10-01' strict-transport-security: - max-age=15724800; includeSubDomains x-ratelimit-limit-requests: - '3500' + x-ratelimit-limit-tokens: + - '90000' x-ratelimit-remaining-requests: - 
'3499' + x-ratelimit-remaining-tokens: + - '86497' x-ratelimit-reset-requests: - 17ms + x-ratelimit-reset-tokens: + - 2.335s x-request-id: - - 23366aac1ddcc141fc735b4a1a8b268c + - f665162ae22af897be24f632a031d434 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key and potentially important information + to remember.\n\n\nYou will receive the current summary and the latest development. + Combine them, adding relevant key information from the latest development in + 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\nI + was created.\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}], + "temperature": 0, "max_tokens": null}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '600' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA0SOQUsDMRQG7/6K8J2zZdNa3eZmeygiPSqISEmTZze6mxc2r1Qt+9+lUPU6MMOc + EAMsfOvE97mrbu+O69X3lJrdks2D2Sy/5o/rp+fgu9ViAw3evZOXizHx3OeOJHKChh/ICQVYc9NM + m2szbxqNngN1sNhnqWaTeSWHYcdVPasNNA7F7Qn2hDxwn2Ur/EGpwC6Mxn/6H2sIi+v+gKnrUcO3 + HD0V2JcTeiq/zYE7goUrJRZxSc6HnITS+f5eHV1Rl2PlUlCJpY1prxIdVeuKal3OlChMMGq8xRRL + ux3IFU6wKMIZGjEF+oStx9fx6gcAAP//AwCGJ6JPSQEAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfe3f5a5fb3fac2-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 07:59:49 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '634' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89866' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 88ms + x-request-id: + - 0bc738f58569b4b3635fe0c8384ebcfb status: code: 200 message: OK @@ -151,23 +234,23 @@ interactions: {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", "content": "The current time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This - reminds you of these events from your past:\nNone\n\n"}, {"role": "user", "content": - "Determine which next command to use, and respond using the format specified - above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + reminds you of these events from your past: \nI was created and nothing new + has happened."}, {"role": "user", "content": "Determine which next command to + use, and respond using the format specified above:"}, {"role": "assistant", + "content": "{\n \"thoughts\": {\n \"text\": \"I need to use the write_to_file + command to write ''Hello World'' into a file named ''hello_world.txt''.\",\n \"reasoning\": + \"The task requires me to write ''Hello World'' into a file named ''hello_world.txt'', + and the write_to_file command is the most appropriate for this task.\",\n 
\"plan\": + \"- Use the write_to_file command to write ''Hello World'' into a file named + ''hello_world.txt''.\",\n \"criticism\": \"I need to ensure that I use + the correct file name and text to write to the file.\",\n \"speak\": \"I will use the write_to_file command to write ''Hello World'' into a file - named ''hello_world.txt''.\",\n \"reasoning\": \"The goal is to write - ''Hello World'' into a file named ''hello_world.txt'', and the write_to_file - command is the most appropriate for this task.\",\n \"plan\": \"- Use - the write_to_file command to write ''Hello World'' into a file named ''hello_world.txt''.\",\n \"criticism\": - \"I need to ensure that I am using the correct file name and that the text is - written correctly.\",\n \"speak\": \"I will use the write to file command - to write ''Hello World'' into a file named ''hello_world.txt''.\"\n },\n \"command\": - {\n \"name\": \"write_to_file\",\n \"args\": {\n \"filename\": - \"hello_world.txt\",\n \"text\": \"Hello World\"\n }\n }\n}"}, - {"role": "system", "content": "Command write_to_file returned: File written - to successfully."}, {"role": "user", "content": "Determine which next command - to use, and respond using the format specified above:"}], "temperature": 0, - "max_tokens": 2970}' + named ''hello_world.txt''.\"\n },\n \"command\": {\n \"name\": + \"write_to_file\",\n \"args\": {\n \"filename\": \"hello_world.txt\",\n \"text\": + \"Hello World\"\n }\n }\n}"}, {"role": "system", "content": "Command + write_to_file returned: File written to successfully."}, {"role": "user", "content": + "Determine which next command to use, and respond using the format specified + above:"}], "temperature": 0, "max_tokens": 2486}' headers: Accept: - '*/*' @@ -176,7 +259,7 @@ interactions: Connection: - keep-alive Content-Length: - - '4612' + - '4646' Content-Type: - application/json method: POST @@ -184,19 +267,21 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA6SSQW8TMRCF7/yK0ZydKkuSttkbAiGFA+JQBIKtItc7Wbvx2pZnFoqi/e9om00T - QishcZ3x++bNPO/Q1ViisVpMm/zkann/9b3JZnm1XMnHmzfLd7b4IrL6xB++vUWF8e6ejIyKCxPb - 5ElcDKjQZNJCNZbF5fXrxWI+my8UtrEmjyU2SSazi8VEunwXJ9PZtECFHeuGsNxhyrFNspa4pcBY - FtPiWuERfmzMLhVKFO2PpWIx7xUaG50hxvL7DlviAzhHT1iiZnYsOshgMwahMKywqwIAQIViY9dY - 4QpLGItjgx5kKFa4gp/Oe+iYQCyBaN6uR4MEJratDjVIhKfa4dVFheoUmUlzDC40e+6NJWii9uD4 - WbmCR/CLIweZJWgjC+iUckzZaSHYxAxih+4zHpLXYT9+Ap//eyOTnTjjuD1cKhA9KilwlwedFliB - 1T/oCXZcCbgzhpg3nfe/ztGcSG9fDAD+1e4e2atD3uPzv+IOuqX9tD+ucWZK5+b8p5xGO+Z66q8+ - X/Ko7EdvVeixV7hxwbFd70lYIktMqNCFmh6wnPa3/avfAAAA//8DAKZ6myqyAwAA + H4sIAAAAAAAAA7SSTW/TQBCG7/yK0VxycaIkJST4VqmijYSEQEWIYhRt1xN7yXp3uztOgiL/d7Sx + TUpJOYC4zuf7zLwHVDmmKEvBsnJ6OL/cXV+9mX2+0+aag7u6vHlvH+7myw/bd2+XmKC9/0aSu46R + tJXTxMoaTFB6Ekw5ppNXi+ni5WS2eJ1gZXPSmGLheHgxmg259vd2OL4YTzDBOoiCMD2g87ZyvGK7 + IRMwnYyn8wRPw0+JxSJBtiz0KTSdzJoEZWmVpIDplwNWFPrB3mrCFEUIKrAwHGVaw2QiwiEzAAAZ + cmnrouSQYQpdsEvQnmMwwyWUYksQaikphHWt9XfYecVMBgY3pLWFT9brfADKsAUBa6UJjKgoh0EZ + 86tdzI94z4MR3JYEhvYMgcmBCsAW6kDAJQGLsFl18ATSVpUweSz4GeurRhkmj+V6EsEaZYpW821X + Bp4eauUpQEVn54BYM/kjjzLFOZ5Y+SeiBI4an1UfCUuCygYG4Zy3zivBBGvrgcuYPYPjtDAtyRA+ + /vNxZKSTKlT9Qw3RsZNMqH3sEwxLcN5uVU4gYCu0yqG96VFob0hTPLskOBKbfsFOaf13X21HNklv + 0K78N3/Gb7Tbfhn/RJTwxVNrP/bL/zB4j3DE6Ggy02CT4FoZFcpVuxtTDGwdJqhMTntMx83X5sUP + AAAA//8DAK0qY5KVBAAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7be2da943bd5ce54-SJC + - 7bfe3f5edbd9fac2-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -206,7 +291,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 27 Apr 2023 
00:12:36 GMT + - Sun, 30 Apr 2023 08:00:00 GMT Server: - cloudflare access-control-allow-origin: @@ -218,19 +303,25 @@ interactions: openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '10505' + - '10898' openai-version: - '2020-10-01' strict-transport-security: - max-age=15724800; includeSubDomains x-ratelimit-limit-requests: - '3500' + x-ratelimit-limit-tokens: + - '90000' x-ratelimit-remaining-requests: - '3499' + x-ratelimit-remaining-tokens: + - '86482' x-ratelimit-reset-requests: - 17ms + x-ratelimit-reset-tokens: + - 2.345s x-request-id: - - f593fff7d415793b53e5124283ad25c9 + - f294799d2564196ef4852c84fdc8fb17 status: code: 200 message: OK diff --git a/tests/unit/test_chat.py b/tests/unit/test_chat.py index 9cb7c625..77ff94d7 100644 --- a/tests/unit/test_chat.py +++ b/tests/unit/test_chat.py @@ -33,18 +33,14 @@ def test_generate_context_empty_inputs(mocker): # Assert expected_result = ( -1, - 47, - 3, + 32, + 2, [ {"role": "system", "content": ""}, { "role": "system", "content": f"The current time and date is {time.strftime('%c')}", }, - { - "role": "system", - "content": f"This reminds you of these events from your past:\n\n\n", - }, ], ) assert result == expected_result @@ -78,5 +74,5 @@ def test_generate_context_valid_inputs(): assert result[0] >= 0 assert result[2] >= 0 assert result[1] >= 0 - assert len(result[3]) >= 3 # current_context should have at least 3 messages + assert len(result[3]) >= 2 # current_context should have at least 2 messages assert result[1] <= 2048 # token limit for GPT-3.5-turbo-0301 is 2048 tokens diff --git a/tests/vcr/openai_filter.py b/tests/vcr/openai_filter.py index b18b694b..15a4b862 100644 --- a/tests/vcr/openai_filter.py +++ b/tests/vcr/openai_filter.py @@ -41,7 +41,10 @@ def before_record_request(request): def filter_hostnames(request): - allowed_hostnames = ["api.openai.com"] # List of hostnames you want to allow + allowed_hostnames = [ + "api.openai.com", + "localhost:50337", + ] # List of hostnames you want to allow if any(hostname in request.url for hostname in allowed_hostnames): return request From aab79fdf6d9cb71d02c81849aa1624358b50b0b3 Mon Sep 17 00:00:00 2001 From: k-boikov <64261260+k-boikov@users.noreply.github.com> Date: Sun, 30 Apr 2023 12:41:45 +0300 Subject: [PATCH 202/222] added tests for clone_repository (#3558) Co-authored-by: Nicholas Tindle Co-authored-by: Richard Beales --- tests/integration/test_git_commands.py | 42 ++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 tests/integration/test_git_commands.py diff --git a/tests/integration/test_git_commands.py b/tests/integration/test_git_commands.py new file mode 100644 index 00000000..d942b1b7 --- /dev/null +++ b/tests/integration/test_git_commands.py @@ -0,0 +1,42 @@ +import pytest +from git.exc import GitCommandError +from git.repo.base import Repo + +from autogpt.commands.git_operations import clone_repository + + +@pytest.fixture +def mock_clone_from(mocker): + return mocker.patch.object(Repo, "clone_from") + + +def test_clone_auto_gpt_repository(workspace, mock_clone_from, config): + mock_clone_from.return_value = None + + repo = "github.com/Significant-Gravitas/Auto-GPT.git" + scheme = "https://" + url = scheme + repo + clone_path = str(workspace.get_path("auto-gpt-repo")) + + expected_output = f"Cloned {url} to {clone_path}" + + clone_result = clone_repository(url=url, clone_path=clone_path) + + assert clone_result == expected_output + mock_clone_from.assert_called_once_with( + 
url=f"{scheme}{config.github_username}:{config.github_api_key}@{repo}", + to_path=clone_path, + ) + + +def test_clone_repository_error(workspace, mock_clone_from): + url = "https://github.com/this-repository/does-not-exist.git" + clone_path = str(workspace.get_path("does-not-exist")) + + mock_clone_from.side_effect = GitCommandError( + "clone", "fatal: repository not found", "" + ) + + result = clone_repository(url=url, clone_path=clone_path) + + assert "Error: " in result From 6d2c0c4242160cb422ac09077271752f68675878 Mon Sep 17 00:00:00 2001 From: WladBlank Date: Sun, 30 Apr 2023 18:43:01 +0200 Subject: [PATCH 203/222] add report method to typewriter_log & load report plugins into logger (#3582) * add report method to typewriter_log & load report plugins into logger * more clear log and comment * isort and black --- autogpt/logs.py | 4 ++++ autogpt/main.py | 7 +++++++ 2 files changed, 11 insertions(+) diff --git a/autogpt/logs.py b/autogpt/logs.py index fcad9efa..1cbb784d 100644 --- a/autogpt/logs.py +++ b/autogpt/logs.py @@ -75,6 +75,7 @@ class Logger(metaclass=Singleton): self.logger.setLevel(logging.DEBUG) self.speak_mode = False + self.chat_plugins = [] def typewriter_log( self, title="", title_color="", content="", speak_text=False, level=logging.INFO @@ -82,6 +83,9 @@ class Logger(metaclass=Singleton): if speak_text and self.speak_mode: say_text(f"{title}. {content}") + for plugin in self.chat_plugins: + plugin.report(f"{title}. {content}") + if content: if isinstance(content, list): content = " ".join(content) diff --git a/autogpt/main.py b/autogpt/main.py index 848a22a0..fca8b47d 100644 --- a/autogpt/main.py +++ b/autogpt/main.py @@ -125,6 +125,13 @@ def run_auto_gpt( full_message_history = [] next_action_count = 0 + # add chat plugins capable of report to logger + if cfg.chat_messages_enabled: + for plugin in cfg.plugins: + if hasattr(plugin, "can_handle_report") and plugin.can_handle_report(): + logger.info(f"Loaded plugin into logger: {plugin.__class__.__name__}") + logger.chat_plugins.append(plugin) + # Initialize memory and make sure it is empty. 
# this is particularly important for indexing and referencing pinecone memory memory = get_memory(cfg, init=True) From abd6115aeaec9ba221f1a4117f7f1abc8aed8550 Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Mon, 1 May 2023 08:35:42 +1200 Subject: [PATCH 204/222] Add website to README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 16f3d05a..8ab51669 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,5 @@ # Auto-GPT: An Autonomous GPT-4 Experiment +[![Official Website](https://img.shields.io/badge/Official%20Website-agpt.co-blue?style=flat&logo=world&logoColor=white)](https://agpt.co) [![Unit Tests](https://img.shields.io/github/actions/workflow/status/Significant-Gravitas/Auto-GPT/ci.yml?label=unit%20tests)](https://github.com/Significant-Gravitas/Auto-GPT/actions/workflows/ci.yml) [![Discord Follow](https://dcbadge.vercel.app/api/server/autogpt?style=flat)](https://discord.gg/autogpt) [![GitHub Repo stars](https://img.shields.io/github/stars/Significant-Gravitas/auto-gpt?style=social)](https://github.com/Significant-Gravitas/Auto-GPT/stargazers) From c1329c92fde62bd09084f00dead1cf07845eea1e Mon Sep 17 00:00:00 2001 From: Richard Beales Date: Sun, 30 Apr 2023 22:14:53 +0100 Subject: [PATCH 205/222] rename search_files to list_files (#3595) --- autogpt/commands/file_operations.py | 6 +++--- tests/unit/test_file_operations.py | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py index e9afdaea..c54fb054 100644 --- a/autogpt/commands/file_operations.py +++ b/autogpt/commands/file_operations.py @@ -200,9 +200,9 @@ def delete_file(filename: str) -> str: return f"Error: {str(e)}" -@command("search_files", "Search Files", '"directory": ""') -def search_files(directory: str) -> list[str]: - """Search for files in a directory +@command("list_files", "List Files in Directory", '"directory": ""') +def list_files(directory: str) -> list[str]: + """lists files in a directory recursively Args: directory (str): The directory to search in diff --git a/tests/unit/test_file_operations.py b/tests/unit/test_file_operations.py index f324193a..e7ba2f71 100644 --- a/tests/unit/test_file_operations.py +++ b/tests/unit/test_file_operations.py @@ -13,9 +13,9 @@ from autogpt.commands.file_operations import ( check_duplicate_operation, delete_file, download_file, + list_files, log_operation, read_file, - search_files, split_file, write_to_file, ) @@ -113,7 +113,7 @@ def test_delete_missing_file(test_file): assert True, "Failed to test delete_file" -def test_search_files(config, workspace, test_directory): +def test_list_files(config, workspace, test_directory): # Case 1: Create files A and B, search for A, and ensure we don't return A and B file_a = workspace.get_path("file_a.txt") file_b = workspace.get_path("file_b.txt") @@ -131,7 +131,7 @@ def test_search_files(config, workspace, test_directory): with open(os.path.join(test_directory, file_a.name), "w") as f: f.write("This is file A in the subdirectory.") - files = search_files(str(workspace.root)) + files = list_files(str(workspace.root)) assert file_a.name in files assert file_b.name in files assert os.path.join(Path(test_directory).name, file_a.name) in files @@ -144,7 +144,7 @@ def test_search_files(config, workspace, test_directory): # Case 2: Search for a file that does not exist and make sure we don't throw non_existent_file = "non_existent_file.txt" - files = search_files("") + files = list_files("") assert 
non_existent_file not in files From c317cf0e75d70c3b38eebe4b1a4855f9a28789d9 Mon Sep 17 00:00:00 2001 From: zyt600 <61863235+zyt600@users.noreply.github.com> Date: Mon, 1 May 2023 05:24:07 +0800 Subject: [PATCH 206/222] fix bug #3455 (#3591) Co-authored-by: Nicholas Tindle --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 98530511..11e95115 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,7 +14,7 @@ duckduckgo-search google-api-python-client #(https://developers.google.com/custom-search/v1/overview) pinecone-client==2.2.1 redis -orjson +orjson==3.8.10 Pillow selenium==4.1.4 webdriver-manager From 6ae90a3ea2a46f8df781aa1225bee6b1e02ca4dc Mon Sep 17 00:00:00 2001 From: Valay Dave Date: Sun, 30 Apr 2023 22:57:16 -0700 Subject: [PATCH 207/222] =?UTF-8?q?[bug]=20`list=5Ffiles`=20api=20signatur?= =?UTF-8?q?e=20change=20in=20`data=5Fingestion.py`=20and=20lo=E2=80=A6=20(?= =?UTF-8?q?#3601)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- data_ingestion.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/data_ingestion.py b/data_ingestion.py index d400dc96..c907cedf 100644 --- a/data_ingestion.py +++ b/data_ingestion.py @@ -1,7 +1,7 @@ import argparse import logging -from autogpt.commands.file_operations import ingest_file, search_files +from autogpt.commands.file_operations import ingest_file, list_files from autogpt.config import Config from autogpt.memory import get_memory @@ -10,12 +10,11 @@ cfg = Config() def configure_logging(): logging.basicConfig( - filemode="a", format="%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s", datefmt="%H:%M:%S", level=logging.DEBUG, handlers=[ - logging.FileHandler(filename="log-ingestion.txt"), + logging.FileHandler(filename="log-ingestion.txt", mode="a"), logging.StreamHandler(), ], ) @@ -31,7 +30,7 @@ def ingest_directory(directory, memory, args): """ global logger try: - files = search_files(directory) + files = list_files(directory) for file in files: ingest_file(file, memory, args.max_length, args.overlap) except Exception as e: @@ -68,7 +67,6 @@ def main() -> None: help="The max_length of each chunk when ingesting files (default: 4000)", default=4000, ) - args = parser.parse_args() # Initialize memory From d8968ae8990b4e50022c7d76fd37fad708f73814 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Mon, 1 May 2023 14:01:13 +0200 Subject: [PATCH 208/222] Update documentation URLs to docs.agpt.co (#3621) --- CONTRIBUTING.md | 4 ++-- README.md | 16 ++++++++-------- autogpt/llm/llm_utils.py | 4 ++-- docs/index.md | 2 +- mkdocs.yml | 2 +- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9af38b29..c410d076 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -8,7 +8,7 @@ This document provides guidelines and best practices to help you contribute effe By participating in this project, you agree to abide by our [Code of Conduct]. Please read it to understand the expectations we have for everyone who contributes to this project. -[Code of Conduct]: https://significant-gravitas.github.io/Auto-GPT/code-of-conduct.md +[Code of Conduct]: https://docs.agpt.co/code-of-conduct/ ## 📢 A Quick Word Right now we will not be accepting any Contributions that add non-essential commands to Auto-GPT. 
@@ -101,7 +101,7 @@ https://github.com/Significant-Gravitas/Auto-GPT/pulls?q=is%3Apr+is%3Aopen+-labe If you add or change code, make sure the updated code is covered by tests. To increase coverage if necessary, [write tests using pytest]. -For more info on running tests, please refer to ["Running tests"](https://significant-gravitas.github.io/Auto-GPT/testing/). +For more info on running tests, please refer to ["Running tests"](https://docs.agpt.co/testing/). [write tests using pytest]: https://realpython.com/pytest-python-testing/ diff --git a/README.md b/README.md index 8ab51669..312e4d9f 100644 --- a/README.md +++ b/README.md @@ -100,21 +100,21 @@ Your support is greatly appreciated. Development of this free, open-source proje Please see the [documentation][docs] for full setup instructions and configuration options. -[docs]: https://significant-gravitas.github.io/Auto-GPT/ +[docs]: https://docs.agpt.co/ ## 📖 Documentation * [⚙️ Setup][docs/setup] * [💻 Usage][docs/usage] * [🔌 Plugins][docs/plugins] * Configuration - * [🔍 Web Search](https://significant-gravitas.github.io/Auto-GPT/configuration/search/) - * [🧠 Memory](https://significant-gravitas.github.io/Auto-GPT/configuration/memory/) - * [🗣️ Voice (TTS)](https://significant-gravitas.github.io/Auto-GPT/configuration/voice/) - * [🖼️ Image Generation](https://significant-gravitas.github.io/Auto-GPT/configuration/imagegen/) + * [🔍 Web Search](https://docs.agpt.co/configuration/search/) + * [🧠 Memory](https://docs.agpt.co/configuration/memory/) + * [🗣️ Voice (TTS)](https://docs.agpt.co/configuration/voice/) + * [🖼️ Image Generation](https://docs.agpt.co/configuration/imagegen/) -[docs/setup]: https://significant-gravitas.github.io/Auto-GPT/setup/ -[docs/usage]: https://significant-gravitas.github.io/Auto-GPT/usage/ -[docs/plugins]: https://significant-gravitas.github.io/Auto-GPT/plugins/ +[docs/setup]: https://docs.agpt.co/setup/ +[docs/usage]: https://docs.agpt.co/usage/ +[docs/plugins]: https://docs.agpt.co/plugins/ ## ⚠️ Limitations diff --git a/autogpt/llm/llm_utils.py b/autogpt/llm/llm_utils.py index cc4048d6..a1654c9f 100644 --- a/autogpt/llm/llm_utils.py +++ b/autogpt/llm/llm_utils.py @@ -30,7 +30,7 @@ def retry_openai_api( api_key_error_msg = ( f"Please double check that you have setup a " f"{Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. You can " - f"read more here: {Fore.CYAN}https://significant-gravitas.github.io/Auto-GPT/setup/#getting-an-api-key{Fore.RESET}" + f"read more here: {Fore.CYAN}https://docs.agpt.co/setup/#getting-an-api-key{Fore.RESET}" ) backoff_msg = ( f"{Fore.RED}Error: API Bad gateway. Waiting {{backoff}} seconds...{Fore.RESET}" @@ -174,7 +174,7 @@ def create_chat_completion( if not warned_user: logger.double_check( f"Please double check that you have setup a {Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. " - + f"You can read more here: {Fore.CYAN}https://significant-gravitas.github.io/Auto-GPT/setup/#getting-an-api-key{Fore.RESET}" + + f"You can read more here: {Fore.CYAN}https://docs.agpt.co/setup/#getting-an-api-key{Fore.RESET}" ) warned_user = True except (APIError, Timeout) as e: diff --git a/docs/index.md b/docs/index.md index b9487118..fb63ea14 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,5 +1,5 @@ # Auto-GPT -Welcome to Auto-GPT. Please follow the [Installation](https://significant-gravitas.github.io/Auto-GPT/setup/) guide to get started. +Welcome to Auto-GPT. Please follow the [Installation](/setup/) guide to get started. 
It is recommended to use a virtual machine for tasks that require high security measures to prevent any potential harm to the main computer's system and data. diff --git a/mkdocs.yml b/mkdocs.yml index 856a9d62..1294a840 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -1,5 +1,5 @@ site_name: Auto-GPT -site_url: https://significantgravitas.github.io/Auto-GPT/ +site_url: https://docs.agpt.co/ repo_url: https://github.com/Significant-Gravitas/Auto-GPT nav: - Home: index.md From 34261a15835390c5c464cef88c4a42b52a88b739 Mon Sep 17 00:00:00 2001 From: AbTrax <45964236+AbTrax@users.noreply.github.com> Date: Mon, 1 May 2023 23:16:26 +1000 Subject: [PATCH 209/222] Fix side effects on message history (#3619) Co-authored-by: Reinier van der Leer --- autogpt/memory_management/summary_memory.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/autogpt/memory_management/summary_memory.py b/autogpt/memory_management/summary_memory.py index 9e612637..754c09ba 100644 --- a/autogpt/memory_management/summary_memory.py +++ b/autogpt/memory_management/summary_memory.py @@ -1,3 +1,4 @@ +import copy import json from typing import Dict, List, Tuple @@ -44,7 +45,9 @@ def get_newly_trimmed_messages( return new_messages_not_in_context, new_index -def update_running_summary(current_memory: str, new_events: List[Dict]) -> str: +def update_running_summary( + current_memory: str, new_events: List[Dict[str, str]] +) -> str: """ This function takes a list of dictionaries representing new events and combines them with the current summary, focusing on key and potentially important information to remember. The updated summary is returned in a message @@ -61,17 +64,23 @@ def update_running_summary(current_memory: str, new_events: List[Dict]) -> str: update_running_summary(new_events) # Returns: "This reminds you of these events from your past: \nI entered the kitchen and found a scrawled note saying 7." """ + # Create a copy of the new_events list to prevent modifying the original list + new_events = copy.deepcopy(new_events) + # Replace "assistant" with "you". This produces much better first person past tense results. for event in new_events: if event["role"].lower() == "assistant": event["role"] = "you" + # Remove "thoughts" dictionary from "content" content_dict = json.loads(event["content"]) if "thoughts" in content_dict: del content_dict["thoughts"] event["content"] = json.dumps(content_dict) + elif event["role"].lower() == "system": event["role"] = "your computer" + # Delete all user messages elif event["role"] == "user": new_events.remove(event) From 9c56b1beef1362725cea0f2b98f0a46a4d728874 Mon Sep 17 00:00:00 2001 From: Ashutosh Kataria Date: Mon, 1 May 2023 20:01:28 +0530 Subject: [PATCH 210/222] Message about Pinecone initializing (#1194) Co-authored-by: Nicholas Tindle Co-authored-by: Richard Beales --- autogpt/memory/pinecone.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/autogpt/memory/pinecone.py b/autogpt/memory/pinecone.py index a0bd162a..7c027956 100644 --- a/autogpt/memory/pinecone.py +++ b/autogpt/memory/pinecone.py @@ -38,6 +38,9 @@ class PineconeMemory(MemoryProviderSingleton): exit(1) if table_name not in pinecone.list_indexes(): + logger.typewriter_log( + "Connecting Pinecone. 
This may take some time...", Fore.MAGENTA, "" + ) pinecone.create_index( table_name, dimension=dimension, metric=metric, pod_type=pod_type ) From 94ec4a4ea5103b1a51fe4eb3dd3ba7d3329d3e5a Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 1 May 2023 11:37:30 -0400 Subject: [PATCH 211/222] Fix file operations logger (#3489) Co-authored-by: Reinier van der Leer --- autogpt/commands/file_operations.py | 137 +++++++++++---- tests/unit/test_file_operations.py | 259 +++++++++++++++++++++------- 2 files changed, 301 insertions(+), 95 deletions(-) diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py index c54fb054..9563883e 100644 --- a/autogpt/commands/file_operations.py +++ b/autogpt/commands/file_operations.py @@ -1,9 +1,10 @@ """File operations for AutoGPT""" from __future__ import annotations +import hashlib import os import os.path -from typing import Generator +from typing import Dict, Generator, Literal, Tuple import requests from colorama import Back, Fore @@ -17,31 +18,96 @@ from autogpt.utils import readable_file_size CFG = Config() +Operation = Literal["write", "append", "delete"] -def check_duplicate_operation(operation: str, filename: str) -> bool: - """Check if the operation has already been performed on the given file - Args: - operation (str): The operation to check for - filename (str): The name of the file to check for +def text_checksum(text: str) -> str: + """Get the hex checksum for the given text.""" + return hashlib.md5(text.encode("utf-8")).hexdigest() + + +def operations_from_log(log_path: str) -> Generator[Tuple[Operation, str, str | None]]: + """Parse the file operations log and return a tuple containing the log entries""" + try: + log = open(log_path, "r", encoding="utf-8") + except FileNotFoundError: + return + + for line in log: + line = line.replace("File Operation Logger", "").strip() + if not line: + continue + operation, tail = line.split(": ", maxsplit=1) + operation = operation.strip() + if operation in ("write", "append"): + try: + path, checksum = (x.strip() for x in tail.rsplit(" #", maxsplit=1)) + except ValueError: + path, checksum = tail.strip(), None + yield (operation, path, checksum) + elif operation == "delete": + yield (operation, tail.strip(), None) + + log.close() + + +def file_operations_state(log_path: str) -> Dict: + """Iterates over the operations log and returns the expected state. + + Parses a log file at CFG.file_logger_path to construct a dictionary that maps + each file path written or appended to its checksum. Deleted files are removed + from the dictionary. Returns: - bool: True if the operation has already been performed on the file + A dictionary mapping file paths to their checksums. + + Raises: + FileNotFoundError: If CFG.file_logger_path is not found. + ValueError: If the log file content is not in the expected format. 
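+
+    Example (illustrative entries, not from a real log):
+        a log of "write: a.txt #c1" followed by "delete: a.txt" yields an
+        empty state, since the delete removes the tracked checksum entry.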
""" - log_content = read_file(CFG.file_logger_path) - log_entry = f"{operation}: {filename}\n" - return log_entry in log_content + state = {} + for operation, path, checksum in operations_from_log(log_path): + if operation in ("write", "append"): + state[path] = checksum + elif operation == "delete": + del state[path] + return state -def log_operation(operation: str, filename: str) -> None: +def is_duplicate_operation( + operation: Operation, filename: str, checksum: str | None = None +) -> bool: + """Check if the operation has already been performed + + Args: + operation: The operation to check for + filename: The name of the file to check for + checksum: The checksum of the contents to be written + + Returns: + True if the operation has already been performed on the file + """ + state = file_operations_state(CFG.file_logger_path) + if operation == "delete" and filename not in state: + return True + if operation == "write" and state.get(filename) == checksum: + return True + return False + + +def log_operation(operation: str, filename: str, checksum: str | None = None) -> None: """Log the file operation to the file_logger.txt Args: - operation (str): The operation to log - filename (str): The name of the file the operation was performed on + operation: The operation to log + filename: The name of the file the operation was performed on + checksum: The checksum of the contents to be written """ - log_entry = f"{operation}: {filename}\n" - append_to_file(CFG.file_logger_path, log_entry, should_log=False) + log_entry = f"{operation}: {filename}" + if checksum is not None: + log_entry += f" #{checksum}" + logger.debug(f"Logging file operation: {log_entry}") + append_to_file(CFG.file_logger_path, f"{log_entry}\n", should_log=False) def split_file( @@ -90,8 +156,8 @@ def read_file(filename: str) -> str: with open(filename, "r", encoding="utf-8") as f: content = f.read() return content - except Exception as e: - return f"Error: {str(e)}" + except Exception as err: + return f"Error: {err}" def ingest_file( @@ -124,8 +190,8 @@ def ingest_file( memory.add(memory_to_add) logger.info(f"Done ingesting {num_chunks} chunks from {filename}.") - except Exception as e: - logger.info(f"Error while ingesting file '{filename}': {str(e)}") + except Exception as err: + logger.info(f"Error while ingesting file '{filename}': {err}") @command("write_to_file", "Write to file", '"filename": "", "text": ""') @@ -139,17 +205,18 @@ def write_to_file(filename: str, text: str) -> str: Returns: str: A message indicating success or failure """ - if check_duplicate_operation("write", filename): + checksum = text_checksum(text) + if is_duplicate_operation("write", filename, checksum): return "Error: File has already been updated." try: directory = os.path.dirname(filename) os.makedirs(directory, exist_ok=True) with open(filename, "w", encoding="utf-8") as f: f.write(text) - log_operation("write", filename) + log_operation("write", filename, checksum) return "File written to successfully." 
- except Exception as e: - return f"Error: {str(e)}" + except Exception as err: + return f"Error: {err}" @command( @@ -169,15 +236,17 @@ def append_to_file(filename: str, text: str, should_log: bool = True) -> str: try: directory = os.path.dirname(filename) os.makedirs(directory, exist_ok=True) - with open(filename, "a") as f: + with open(filename, "a", encoding="utf-8") as f: f.write(text) if should_log: - log_operation("append", filename) + with open(filename, "r", encoding="utf-8") as f: + checksum = text_checksum(f.read()) + log_operation("append", filename, checksum=checksum) return "Text appended successfully." - except Exception as e: - return f"Error: {str(e)}" + except Exception as err: + return f"Error: {err}" @command("delete_file", "Delete file", '"filename": ""') @@ -190,14 +259,14 @@ def delete_file(filename: str) -> str: Returns: str: A message indicating success or failure """ - if check_duplicate_operation("delete", filename): + if is_duplicate_operation("delete", filename): return "Error: File has already been deleted." try: os.remove(filename) log_operation("delete", filename) return "File deleted successfully." - except Exception as e: - return f"Error: {str(e)}" + except Exception as err: + return f"Error: {err}" @command("list_files", "List Files in Directory", '"directory": ""') @@ -266,7 +335,7 @@ def download_file(url, filename): spinner.update_message(f"{message} {progress}") return f'Successfully downloaded and locally stored file: "{filename}"! (Size: {readable_file_size(downloaded_size)})' - except requests.HTTPError as e: - return f"Got an HTTP Error whilst trying to download file: {e}" - except Exception as e: - return "Error: " + str(e) + except requests.HTTPError as err: + return f"Got an HTTP Error whilst trying to download file: {err}" + except Exception as err: + return f"Error: {err}" diff --git a/tests/unit/test_file_operations.py b/tests/unit/test_file_operations.py index e7ba2f71..fb8300d4 100644 --- a/tests/unit/test_file_operations.py +++ b/tests/unit/test_file_operations.py @@ -2,25 +2,19 @@ This set of unit tests is designed to test the file operations that autoGPT has access to. 
""" +import hashlib import os +import re +from io import TextIOWrapper from pathlib import Path -from tempfile import gettempdir import pytest +from pytest_mock import MockerFixture -from autogpt.commands.file_operations import ( - append_to_file, - check_duplicate_operation, - delete_file, - download_file, - list_files, - log_operation, - read_file, - split_file, - write_to_file, -) +import autogpt.commands.file_operations as file_ops from autogpt.config import Config from autogpt.utils import readable_file_size +from autogpt.workspace import Workspace @pytest.fixture() @@ -29,66 +23,186 @@ def file_content(): @pytest.fixture() -def test_file(workspace, file_content): - test_file = str(workspace.get_path("test_file.txt")) - with open(test_file, "w") as f: - f.write(file_content) - return test_file +def test_file_path(config, workspace: Workspace): + return workspace.get_path("test_file.txt") @pytest.fixture() -def test_directory(workspace): - return str(workspace.get_path("test_directory")) +def test_file(test_file_path: Path): + file = open(test_file_path, "w") + yield file + if not file.closed: + file.close() @pytest.fixture() -def test_nested_file(workspace): - return str(workspace.get_path("nested/test_file.txt")) +def test_file_with_content_path(test_file: TextIOWrapper, file_content): + test_file.write(file_content) + test_file.close() + file_ops.log_operation( + "write", test_file.name, file_ops.text_checksum(file_content) + ) + return Path(test_file.name) -def test_check_duplicate_operation(config, test_file): - log_operation("write", test_file) - assert check_duplicate_operation("write", test_file) is True +@pytest.fixture() +def test_directory(config, workspace: Workspace): + return workspace.get_path("test_directory") + + +@pytest.fixture() +def test_nested_file(config, workspace: Workspace): + return workspace.get_path("nested/test_file.txt") + + +def test_file_operations_log(test_file: TextIOWrapper): + log_file_content = ( + "File Operation Logger\n" + "write: path/to/file1.txt #checksum1\n" + "write: path/to/file2.txt #checksum2\n" + "write: path/to/file3.txt #checksum3\n" + "append: path/to/file2.txt #checksum4\n" + "delete: path/to/file3.txt\n" + ) + test_file.write(log_file_content) + test_file.close() + + expected = [ + ("write", "path/to/file1.txt", "checksum1"), + ("write", "path/to/file2.txt", "checksum2"), + ("write", "path/to/file3.txt", "checksum3"), + ("append", "path/to/file2.txt", "checksum4"), + ("delete", "path/to/file3.txt", None), + ] + assert list(file_ops.operations_from_log(test_file.name)) == expected + + +def test_file_operations_state(test_file: TextIOWrapper): + # Prepare a fake log file + log_file_content = ( + "File Operation Logger\n" + "write: path/to/file1.txt #checksum1\n" + "write: path/to/file2.txt #checksum2\n" + "write: path/to/file3.txt #checksum3\n" + "append: path/to/file2.txt #checksum4\n" + "delete: path/to/file3.txt\n" + ) + test_file.write(log_file_content) + test_file.close() + + # Call the function and check the returned dictionary + expected_state = { + "path/to/file1.txt": "checksum1", + "path/to/file2.txt": "checksum4", + } + assert file_ops.file_operations_state(test_file.name) == expected_state + + +def test_is_duplicate_operation(config, mocker: MockerFixture): + # Prepare a fake state dictionary for the function to use + state = { + "path/to/file1.txt": "checksum1", + "path/to/file2.txt": "checksum2", + } + mocker.patch.object(file_ops, "file_operations_state", lambda _: state) + + # Test cases with write operations + assert 
( + file_ops.is_duplicate_operation("write", "path/to/file1.txt", "checksum1") + is True + ) + assert ( + file_ops.is_duplicate_operation("write", "path/to/file1.txt", "checksum2") + is False + ) + assert ( + file_ops.is_duplicate_operation("write", "path/to/file3.txt", "checksum3") + is False + ) + # Test cases with append operations + assert ( + file_ops.is_duplicate_operation("append", "path/to/file1.txt", "checksum1") + is False + ) + # Test cases with delete operations + assert file_ops.is_duplicate_operation("delete", "path/to/file1.txt") is False + assert file_ops.is_duplicate_operation("delete", "path/to/file3.txt") is True # Test logging a file operation -def test_log_operation(test_file, config): - file_logger_name = config.file_logger_path - if os.path.exists(file_logger_name): - os.remove(file_logger_name) - - log_operation("log_test", test_file) - with open(config.file_logger_path, "r") as f: +def test_log_operation(config: Config): + file_ops.log_operation("log_test", "path/to/test") + with open(config.file_logger_path, "r", encoding="utf-8") as f: content = f.read() - assert f"log_test: {test_file}" in content + assert f"log_test: path/to/test\n" in content + + +def test_text_checksum(file_content: str): + checksum = file_ops.text_checksum(file_content) + different_checksum = file_ops.text_checksum("other content") + assert re.match(r"^[a-fA-F0-9]+$", checksum) is not None + assert checksum != different_checksum + + +def test_log_operation_with_checksum(config: Config): + file_ops.log_operation("log_test", "path/to/test", checksum="ABCDEF") + with open(config.file_logger_path, "r", encoding="utf-8") as f: + content = f.read() + assert f"log_test: path/to/test #ABCDEF\n" in content # Test splitting a file into chunks def test_split_file(): content = "abcdefghij" - chunks = list(split_file(content, max_length=4, overlap=1)) + chunks = list(file_ops.split_file(content, max_length=4, overlap=1)) expected = ["abcd", "defg", "ghij"] assert chunks == expected -def test_read_file(test_file, file_content): - content = read_file(test_file) +def test_read_file(test_file_with_content_path: Path, file_content): + content = file_ops.read_file(test_file_with_content_path) assert content == file_content -def test_write_to_file(config, test_nested_file): +def test_write_to_file(test_file_path: Path): new_content = "This is new content.\n" - write_to_file(test_nested_file, new_content) - with open(test_nested_file, "r") as f: + file_ops.write_to_file(str(test_file_path), new_content) + with open(test_file_path, "r", encoding="utf-8") as f: content = f.read() assert content == new_content -def test_append_to_file(test_nested_file): - append_text = "This is appended text.\n" - write_to_file(test_nested_file, append_text) +def test_write_file_logs_checksum(config: Config, test_file_path: Path): + new_content = "This is new content.\n" + new_checksum = file_ops.text_checksum(new_content) + file_ops.write_to_file(str(test_file_path), new_content) + with open(config.file_logger_path, "r", encoding="utf-8") as f: + log_entry = f.read() + assert log_entry == f"write: {test_file_path} #{new_checksum}\n" - append_to_file(test_nested_file, append_text) + +def test_write_file_fails_if_content_exists(test_file_path: Path): + new_content = "This is new content.\n" + file_ops.log_operation( + "write", + str(test_file_path), + checksum=file_ops.text_checksum(new_content), + ) + result = file_ops.write_to_file(str(test_file_path), new_content) + assert result == "Error: File has already been updated." 
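+
+# The two write tests above and below exercise both branches of the checksum
+# guard: identical content to an already-logged path is rejected as a
+# duplicate, while different content yields a new checksum and is written.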
+ + +def test_write_file_succeeds_if_content_different(test_file_with_content_path: Path): + new_content = "This is different content.\n" + result = file_ops.write_to_file(str(test_file_with_content_path), new_content) + assert result == "File written to successfully." + + +def test_append_to_file(test_nested_file: Path): + append_text = "This is appended text.\n" + file_ops.write_to_file(test_nested_file, append_text) + + file_ops.append_to_file(test_nested_file, append_text) with open(test_nested_file, "r") as f: content_after = f.read() @@ -96,24 +210,45 @@ def test_append_to_file(test_nested_file): assert content_after == append_text + append_text -def test_delete_file(config, test_file): - delete_file(test_file) - assert os.path.exists(test_file) is False - assert delete_file(test_file) == "Error: File has already been deleted." +def test_append_to_file_uses_checksum_from_appended_file( + config: Config, test_file_path: Path +): + append_text = "This is appended text.\n" + file_ops.append_to_file(test_file_path, append_text) + file_ops.append_to_file(test_file_path, append_text) + with open(config.file_logger_path, "r", encoding="utf-8") as f: + log_contents = f.read() + + digest = hashlib.md5() + digest.update(append_text.encode("utf-8")) + checksum1 = digest.hexdigest() + digest.update(append_text.encode("utf-8")) + checksum2 = digest.hexdigest() + assert log_contents == ( + f"append: {test_file_path} #{checksum1}\n" + f"append: {test_file_path} #{checksum2}\n" + ) -def test_delete_missing_file(test_file): - os.remove(test_file) +def test_delete_file(test_file_with_content_path: Path): + result = file_ops.delete_file(str(test_file_with_content_path)) + assert result == "File deleted successfully." + assert os.path.exists(test_file_with_content_path) is False + + +def test_delete_missing_file(config): + filename = "path/to/file/which/does/not/exist" + # confuse the log + file_ops.log_operation("write", filename, checksum="fake") try: - os.remove(test_file) - except FileNotFoundError as e: - error_string = str(e) - assert error_string in delete_file(test_file) + os.remove(filename) + except FileNotFoundError as err: + assert str(err) in file_ops.delete_file(filename) return - assert True, "Failed to test delete_file" + assert False, f"Failed to test delete_file; {filename} not expected to exist" -def test_list_files(config, workspace, test_directory): +def test_list_files(workspace: Workspace, test_directory: Path): # Case 1: Create files A and B, search for A, and ensure we don't return A and B file_a = workspace.get_path("file_a.txt") file_b = workspace.get_path("file_b.txt") @@ -131,7 +266,7 @@ def test_list_files(config, workspace, test_directory): with open(os.path.join(test_directory, file_a.name), "w") as f: f.write("This is file A in the subdirectory.") - files = list_files(str(workspace.root)) + files = file_ops.list_files(str(workspace.root)) assert file_a.name in files assert file_b.name in files assert os.path.join(Path(test_directory).name, file_a.name) in files @@ -144,26 +279,28 @@ def test_list_files(config, workspace, test_directory): # Case 2: Search for a file that does not exist and make sure we don't throw non_existent_file = "non_existent_file.txt" - files = list_files("") + files = file_ops.list_files("") assert non_existent_file not in files -def test_download_file(): +def test_download_file(config, workspace: Workspace): url = "https://github.com/Significant-Gravitas/Auto-GPT/archive/refs/tags/v0.2.2.tar.gz" - local_name = os.path.join(gettempdir(), 
"auto-gpt.tar.gz") + local_name = workspace.get_path("auto-gpt.tar.gz") size = 365023 readable_size = readable_file_size(size) assert ( - download_file(url, local_name) + file_ops.download_file(url, local_name) == f'Successfully downloaded and locally stored file: "{local_name}"! (Size: {readable_size})' ) assert os.path.isfile(local_name) is True assert os.path.getsize(local_name) == size url = "https://github.com/Significant-Gravitas/Auto-GPT/archive/refs/tags/v0.0.0.tar.gz" - assert "Got an HTTP Error whilst trying to download file" in download_file( + assert "Got an HTTP Error whilst trying to download file" in file_ops.download_file( url, local_name ) url = "https://thiswebsiteiswrong.hmm/v0.0.0.tar.gz" - assert "Failed to establish a new connection:" in download_file(url, local_name) + assert "Failed to establish a new connection:" in file_ops.download_file( + url, local_name + ) From 7fc6f2abfc8d4795937839f66e017b4e93a986f4 Mon Sep 17 00:00:00 2001 From: non-adjective Date: Mon, 1 May 2023 08:45:52 -0700 Subject: [PATCH 212/222] update web_selenium.py to use try-with for headers (#2988) Co-authored-by: Richard Beales --- autogpt/commands/web_selenium.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py index 79a8ffd9..0f5764b5 100644 --- a/autogpt/commands/web_selenium.py +++ b/autogpt/commands/web_selenium.py @@ -175,4 +175,9 @@ def add_header(driver: WebDriver) -> None: Returns: None """ - driver.execute_script(open(f"{FILE_DIR}/js/overlay.js", "r").read()) + try: + with open(f"{FILE_DIR}/js/overlay.js", "r") as overlay_file: + overlay_script = overlay_file.read() + driver.execute_script(overlay_script) + except Exception as e: + print(f"Error executing overlay.js: {e}") From a5f856328d1331129d3b578157242c8716c08870 Mon Sep 17 00:00:00 2001 From: sidewaysthought <92705102+sidewaysthought@users.noreply.github.com> Date: Mon, 1 May 2023 12:50:50 -0500 Subject: [PATCH 213/222] Fix multi-byte character handling in `read_file` (#3173) Co-authored-by: Reinier van der Leer --- autogpt/commands/file_operations.py | 8 +++++--- requirements.txt | 1 + 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py index 9563883e..28198eba 100644 --- a/autogpt/commands/file_operations.py +++ b/autogpt/commands/file_operations.py @@ -6,6 +6,7 @@ import os import os.path from typing import Dict, Generator, Literal, Tuple +import charset_normalizer import requests from colorama import Back, Fore from requests.adapters import HTTPAdapter, Retry @@ -153,9 +154,10 @@ def read_file(filename: str) -> str: str: The contents of the file """ try: - with open(filename, "r", encoding="utf-8") as f: - content = f.read() - return content + charset_match = charset_normalizer.from_path(filename).best() + encoding = charset_match.encoding + logger.debug(f"Read file '{filename}' with encoding '{encoding}'") + return str(charset_match) except Exception as err: return f"Error: {err}" diff --git a/requirements.txt b/requirements.txt index 11e95115..e1900ef5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -21,6 +21,7 @@ webdriver-manager jsonschema tweepy click +charset-normalizer>=3.1.0 spacy>=3.0.0,<4.0.0 en-core-web-sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.5.0/en_core_web_sm-3.5.0-py3-none-any.whl From 0ef6f064626732b9fb7a3e073256e69d00e1e136 Mon Sep 17 00:00:00 2001 From: k-boikov 
<64261260+k-boikov@users.noreply.github.com> Date: Mon, 1 May 2023 21:06:22 +0300 Subject: [PATCH 214/222] Fix `validate_json` scheme path (#3631) Co-authored-by: Reinier van der Leer --- autogpt/json_utils/utilities.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/autogpt/json_utils/utilities.py b/autogpt/json_utils/utilities.py index 58f5f8e4..7fcd2c0a 100644 --- a/autogpt/json_utils/utilities.py +++ b/autogpt/json_utils/utilities.py @@ -1,5 +1,6 @@ """Utilities for the json_fixes package.""" import json +import os import re from jsonschema import Draft7Validator @@ -35,7 +36,8 @@ def validate_json(json_object: object, schema_name: str) -> dict | None: :param schema_name: str :type json_object: object """ - with open(f"autogpt/json_utils/{schema_name}.json", "r") as f: + scheme_file = os.path.join(os.path.dirname(__file__), f"{schema_name}.json") + with open(scheme_file, "r") as f: schema = json.load(f) validator = Draft7Validator(schema) From 4767fe63d36e0eece256ddcc2d8dc49812215277 Mon Sep 17 00:00:00 2001 From: kinance Date: Tue, 2 May 2023 03:13:24 +0900 Subject: [PATCH 215/222] Fix the maximum context length issue by chunking (#3222) Co-authored-by: Reinier van der Leer --- .env.template | 8 + autogpt/config/config.py | 15 + autogpt/llm/__init__.py | 2 + autogpt/llm/llm_utils.py | 63 +- autogpt/llm/modelsinfo.py | 3 + .../test_get_ada_embedding.yaml | 168 ++ .../test_get_ada_embedding_large_context.yaml | 1484 +++++++++++++++++ tests/integration/test_llm_utils.py | 43 +- tests/{ => unit}/test_llm_utils.py | 58 +- 9 files changed, 1801 insertions(+), 43 deletions(-) create mode 100644 tests/integration/cassettes/test_llm_utils/test_get_ada_embedding.yaml create mode 100644 tests/integration/cassettes/test_llm_utils/test_get_ada_embedding_large_context.yaml rename tests/{ => unit}/test_llm_utils.py (73%) diff --git a/.env.template b/.env.template index 4d65c0b5..c0093507 100644 --- a/.env.template +++ b/.env.template @@ -49,6 +49,14 @@ OPENAI_API_KEY=your-openai-api-key # FAST_TOKEN_LIMIT=4000 # SMART_TOKEN_LIMIT=8000 +### EMBEDDINGS +## EMBEDDING_MODEL - Model to use for creating embeddings +## EMBEDDING_TOKENIZER - Tokenizer to use for chunking large inputs +## EMBEDDING_TOKEN_LIMIT - Chunk size limit for large inputs +# EMBEDDING_MODEL=text-embedding-ada-002 +# EMBEDDING_TOKENIZER=cl100k_base +# EMBEDDING_TOKEN_LIMIT=8191 + ################################################################################ ### MEMORY ################################################################################ diff --git a/autogpt/config/config.py b/autogpt/config/config.py index 67e735c9..324d284c 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -35,6 +35,9 @@ class Config(metaclass=Singleton): self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4") self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000)) self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000)) + self.embedding_model = os.getenv("EMBEDDING_MODEL", "text-embedding-ada-002") + self.embedding_tokenizer = os.getenv("EMBEDDING_TOKENIZER", "cl100k_base") + self.embedding_token_limit = int(os.getenv("EMBEDDING_TOKEN_LIMIT", 8191)) self.browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", 3000)) self.browse_spacy_language_model = os.getenv( "BROWSE_SPACY_LANGUAGE_MODEL", "en_core_web_sm" @@ -216,6 +219,18 @@ class Config(metaclass=Singleton): """Set the smart token limit value.""" self.smart_token_limit = value + def set_embedding_model(self, value: str) -> 
None: + """Set the model to use for creating embeddings.""" + self.embedding_model = value + + def set_embedding_tokenizer(self, value: str) -> None: + """Set the tokenizer to use when creating embeddings.""" + self.embedding_tokenizer = value + + def set_embedding_token_limit(self, value: int) -> None: + """Set the token limit for creating embeddings.""" + self.embedding_token_limit = value + def set_browse_chunk_max_length(self, value: int) -> None: """Set the browse_website command chunk max length value.""" self.browse_chunk_max_length = value diff --git a/autogpt/llm/__init__.py b/autogpt/llm/__init__.py index 2a6f0b8f..76eaf3b7 100644 --- a/autogpt/llm/__init__.py +++ b/autogpt/llm/__init__.py @@ -11,6 +11,7 @@ from autogpt.llm.base import ( from autogpt.llm.chat import chat_with_ai, create_chat_message, generate_context from autogpt.llm.llm_utils import ( call_ai_function, + chunked_tokens, create_chat_completion, get_ada_embedding, ) @@ -32,6 +33,7 @@ __all__ = [ "call_ai_function", "create_chat_completion", "get_ada_embedding", + "chunked_tokens", "COSTS", "count_message_tokens", "count_string_tokens", diff --git a/autogpt/llm/llm_utils.py b/autogpt/llm/llm_utils.py index a1654c9f..a77bccbc 100644 --- a/autogpt/llm/llm_utils.py +++ b/autogpt/llm/llm_utils.py @@ -2,9 +2,12 @@ from __future__ import annotations import functools import time +from itertools import islice from typing import List, Optional +import numpy as np import openai +import tiktoken from colorama import Fore, Style from openai.error import APIError, RateLimitError, Timeout @@ -207,6 +210,23 @@ def create_chat_completion( return resp +def batched(iterable, n): + """Batch data into tuples of length n. The last batch may be shorter.""" + # batched('ABCDEFG', 3) --> ABC DEF G + if n < 1: + raise ValueError("n must be at least one") + it = iter(iterable) + while batch := tuple(islice(it, n)): + yield batch + + +def chunked_tokens(text, tokenizer_name, chunk_length): + tokenizer = tiktoken.get_encoding(tokenizer_name) + tokens = tokenizer.encode(text) + chunks_iterator = batched(tokens, chunk_length) + yield from chunks_iterator + + def get_ada_embedding(text: str) -> List[float]: """Get an embedding from the ada model. @@ -217,7 +237,7 @@ def get_ada_embedding(text: str) -> List[float]: List[float]: The embedding. """ cfg = Config() - model = "text-embedding-ada-002" + model = cfg.embedding_model text = text.replace("\n", " ") if cfg.use_azure: @@ -226,13 +246,7 @@ def get_ada_embedding(text: str) -> List[float]: kwargs = {"model": model} embedding = create_embedding(text, **kwargs) - api_manager = ApiManager() - api_manager.update_cost( - prompt_tokens=embedding.usage.prompt_tokens, - completion_tokens=0, - model=model, - ) - return embedding["data"][0]["embedding"] + return embedding @retry_openai_api() @@ -251,8 +265,31 @@ def create_embedding( openai.Embedding: The embedding object. 
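+        In practice this is a length-weighted average of the per-chunk
+        embeddings, normalized to unit length and returned as a list of
+        floats.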
""" cfg = Config() - return openai.Embedding.create( - input=[text], - api_key=cfg.openai_api_key, - **kwargs, - ) + chunk_embeddings = [] + chunk_lengths = [] + for chunk in chunked_tokens( + text, + tokenizer_name=cfg.embedding_tokenizer, + chunk_length=cfg.embedding_token_limit, + ): + embedding = openai.Embedding.create( + input=[chunk], + api_key=cfg.openai_api_key, + **kwargs, + ) + api_manager = ApiManager() + api_manager.update_cost( + prompt_tokens=embedding.usage.prompt_tokens, + completion_tokens=0, + model=cfg.embedding_model, + ) + chunk_embeddings.append(embedding["data"][0]["embedding"]) + chunk_lengths.append(len(chunk)) + + # do weighted avg + chunk_embeddings = np.average(chunk_embeddings, axis=0, weights=chunk_lengths) + chunk_embeddings = chunk_embeddings / np.linalg.norm( + chunk_embeddings + ) # normalize the length to one + chunk_embeddings = chunk_embeddings.tolist() + return chunk_embeddings diff --git a/autogpt/llm/modelsinfo.py b/autogpt/llm/modelsinfo.py index 4326c0b1..1bebcbf9 100644 --- a/autogpt/llm/modelsinfo.py +++ b/autogpt/llm/modelsinfo.py @@ -3,5 +3,8 @@ COSTS = { "gpt-3.5-turbo-0301": {"prompt": 0.002, "completion": 0.002}, "gpt-4-0314": {"prompt": 0.03, "completion": 0.06}, "gpt-4": {"prompt": 0.03, "completion": 0.06}, + "gpt-4-0314": {"prompt": 0.03, "completion": 0.06}, + "gpt-4-32k": {"prompt": 0.06, "completion": 0.12}, + "gpt-4-32k-0314": {"prompt": 0.06, "completion": 0.12}, "text-embedding-ada-002": {"prompt": 0.0004, "completion": 0.0}, } diff --git a/tests/integration/cassettes/test_llm_utils/test_get_ada_embedding.yaml b/tests/integration/cassettes/test_llm_utils/test_get_ada_embedding.yaml new file mode 100644 index 00000000..ec4636e0 --- /dev/null +++ b/tests/integration/cassettes/test_llm_utils/test_get_ada_embedding.yaml @@ -0,0 +1,168 @@ +interactions: +- request: + body: '{"input": [[1985]], "model": "text-embedding-ada-002", "encoding_format": + "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '83' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SaSxO6Orvl5++n2LWn9FsiIgl7xl3kkiAgYldXlyAiKHJNgJw6371L/6dOd08c + QAqV5HnWWr/kP/71119/t1ld5NPf//z197sap7//x/fa/Tbd/v7nr//5r7/++uuv//h9/n8jiyYr + 7vfqU/6G/25Wn3ux/P3PX/x/X/m/g/756++DcanIKGZXd/X5SZTuBpdQW6+lYQqkfQ5NFN8pSgIw + zBGFBLyF+0x9ZATRzFVcC+xx3aJPfw3cJaBFA2RnibBlW4EuBNKhlLJ5dbH/QRJYDniFELe0Ryw1 + zy4l2M8lVX4IVLvccrDkD26WikG50PuxCQFdnkwEiSTcsFJvgcv2m6mB50DfUpu5FLAPV1ZQMZ8b + IvW7azZ//KsHo/R9nYA/OPXa3M0WXkbEI3Dus2z9lNsEjEngUv+4PdWrN6EKdpdVoMba1vqqHZkH + b+fNG4mbTQRmWGsFxIdzQM3Kfkbt1AUcBI3doc1xk9ZLJVoC3ClDSm3yBtl4AC8Bvk47CzunCbFl + aqkGQtGR0A74sT4HY8DDq8puaE3xHC1C9H7BD749CHUSFM03CxuwDd2YQD5eskks+B4idSLUZd1V + n+fOt2FzPT2pkptdNtu6EQD6Cg2sRFSr1/NdKvcmSu74EM8ioDerUyT9vHfIWElpvUwkz0F2c2e0 + vXpNNvuXsQe81EdofRp6LfDVTYMNUUx6PtQEjL//y79kHiuu1mRzc3xWsDg6TwQepu6OOhkEae88 + TKzIbykanVhZ5SOLI3zWZCmjogx6OCXDhAT9yuoulKYevMWdRl2+GOtu9W6a9CHZBxvBramHc8Up + MNsImCJ0jBgThDiA13A6U8UU5IxJ8keQGG8I2At5RecPahjD4yJBrGZgAl2aDgjGwQth47BVXR5l + eQyPQVnRMISPjMUvNgNJeRm4GC/BsMh0SsDUKRt6PB+LelHlIJCl9SYjYfewsjbe5Rb8MNvDONKd + gclhbEAMywgba6u762TdAkjVIkJrd3Pc9antBBgEgUIEXZPZdDzHL/hCTxEtOGkHtg5XBQiuvsUe + DR76a+oCCDe720rWtGncxT69JHi49BTbp/jo8vxe5mEjlx02733NVuNIb4BDao0PgNnDnOWLBoNH + YlB8ebb6ulyVUS5at8MW8c5DeVb2IdQv8RN7R9F02VEWRxBM2Yka77HVZzqLMbwdpBSJVyNzRymr + rY2zKwusPUQpIrNlFYC+AgOHmmCxbfbkS3mMeR47t0UB/ImJHvBP64C4GV/rebiFCKKw66h78mRA + 
twSKwOJ5DtvXng4ru62tlPsgx+63Hpep/Sgwm2cXH55jxNZzfL4BkkeYHPqNMVBkCSFkaL8isbq4 + gPmml0AnmldsPYNbvYiZBwEcZhcHqV9my/bSWVDZRwOaUSyxWs4WT17k15keClcZmCRTHua8lmGn + 2QTR9CDbEtabDUaSqLHoMsfUkgoo+WRNL5+oc6ruJqnc3qeeHIwRldNjBXb81aCHeE7ZOnKrAMs6 + JeRZtn00qXIQwho5Cek+J21Y5/NJg547drRQwStaLreWg+M2irB7ZM96em4kDSiX65bsDjAeRv5o + FxDt+JEqUVmzWUkwhD23rthXls0wLRvTAWgnjGhV9/nQ77ZKLpUhxNTYmEs0LU5pyNKTK4gEBjmb + 9N1DA/zhpmPNutz0JdZ8A3Kq4OOD8HnUtBma8M99fXOpaxLfHF56VUxFwtozfS10vYc5jhl671zD + 7ayTOErIrC4EGB9xmPbVK4aBW2dorrdAXwN9HGExwJCikPu4zMx2BF6OmU6k0DCGuXWjXLKi/EVj + 6cPYlD+EGSr704CPt/NnWBRijjBqDzLaXMIuYrvNkgIm5DYRWWXWs7XbOlC0e4/aBlcNzNn0PcyX + 4YiA8UmHlRVVDBsOKvQU8Za+RmkqgcwZPIy2xgiWNNzOsBncC9Wt/D6Q7a0RwFsKrlhP44ixuK1z + mJ9QT93w2LqzFJocrO+tRA9G1NdLW18R7N2qIuLuRoalfTwrYMvjTOZvf1qorhBgHHuB6tK+cef9 + dXFgBIUnqTt7x2b1/kkgi/oLavuDDKZdJdrSSqWeatz5zOa38EHAy7BPzYDYYAG9ToDs9yrhv/O7 + 3kQ7Br4WHLD71bdOeCgF5OhVRnJYTWDmzWCG7gXK2MxhmlH95s9gPrsaPvrPul6GduWhpZUdPp5a + N2PcoxThd/6pJrx1sN5EJYGnk8OhrdsLw7AUhgfdNssRF36WqOtJGUDH9I5Ym54LYx6gDjjipiH8 + GzRgILEkwqnjJHqA2qGeuYfFwfO6a5Akth82c+RUwlZrZ3o+eHt3GRvZAiS7xPiwOw86I0WqASsq + XiiYHiqYL1AeAa34CTufxHKX61vUwIDHiR6bOM3owxV4MCXdhM3g4ej8RRs8OBlsR80VvrMlLdIW + 3nfBgJXJ9tz3tXReEIKAYteXjtHylLwGevfXA59j+VMvt9vZhtKaythJKk0XnCFuwFefscdLJhtP + bPZkJd16JMolO2PGpChw674Q4QXqsDnzm1y69MuduvxWi3jyiRA8bz2Rekfxra/2W4JwSk0DR8G5 + 1ufr6Qkh+qQ+eX/1tputOYUtvzHJOnU2W80EImmTcBHa8Ks0LPYlTOC2O26o/yjs73qiyR9/5CVW + XM/LPJbgFFUB2QUiZatq6RUsJPVB5u/7FOIks+HhaliIPWSZjUXjJHB95C69U3DRp+zJV0BYbJ6c + 0rAG7KffB6kosW/K7bCcmBVAk5Idtk/1h43h6CuSu79cUNTZF0DuYPSAWRkOmWt5dafuPqxwDdID + RuVRdZmcuCvciMlEXSW9slV1Zw74/rPCSF6O7uortgAJ6Z9oEndrNKLXksPMs2qqHhRNHxkALyhe + vC1GZr4b5lrqCYyO1pZaqcTrHXgVAbwcNiN267qsV2z2OZzsV0ldheJh1nrowBRr7/96fxetRvBU + azySmk2Qje1JEeHWbRD1YL9j1FPzFiaf2id9EunZ+pATGy59ZKNdIGLwez5wwnOGjbJ86Gy6lTG0 + 3vHtT723vJnOcCPGE3a//WnZVbMDY+7IYeWtGtkCWMtDn/VP9OaAoDMuSBLAKamAtdi26+UdVCv0 + nxHGB0/P63arZ8p+P8aAjKfdFgxTIvYSSooQqy0NdRbWqgDezcajWno5RLO5X0W407WeOob7yGbX + OzcSnYH01e8SzHSeEyjPUkidgdcH5sqjBEs+7mgx4339HtpVgKfH1FBNSEyXP1qAg+vmiqj9XHYZ + M7o0gWP4qAhI1MWdw9HUYErUiXqqvrB5TIpv/QoT1qT7yBbnagRAfUgmthscg4UF6igp/aTR42ab + DvOYezYYNchh/bW/6qPJm7lUJCeGkjXjs7EWmAIN1/Cxsk+ygcwukCCIA4aNoHtHdMCnAOyPTwfj + G8uHpRh8Afh+XSGiOfXPn/HAj5CGBKa2jKJE4aExFh+qtJH4/X1eCEP9HpE9pYrO25u4grz+vmHn + 5o4ZScclgGVVMNTZgLqtsWYljPJUo97xibOlPro3aXEDhUbcbR+tGZgVyNXeh6r5rEbjKh17OJ+P + GrX7gg0lW4sc7PjMoLb9GdksWoIEHTUryV5528OuhwUH6xzX1ASdWn/rPQbVkLwRj/kFLP6wjlB7 + 4hrJqDfA1m1OhmQJwZlG5XrK/ughamObmvrwZrPjDC0wN35GcQsUIDiLpsHwfD5SDfcO4B2nbqF0 + 0mqql+sSMRSebHghLwsHXVC79OfHp1ueYPu+J1EHl9oCxFsHqj9Tk9EPM3t4etAGH+73rT7LzeqA + 0UjP3/mANT2MPJKm4XbCasRb7uI3fgjuZ/FEHfG0uuNmjGYQcy6HXpL6yKbaPdrwlzd1IWuHpZxp + DrFcINJ882XHrLEBh0tLv/mnGlbFPoYgDcQc65L7za9bQ4GOESJqQU8DW+tc3sCoPj/Y3Yk70FWH + aw4DeTeTfWVe69mSqwamD67E5tfv7OxeKCC+kopsK/uZLbOcIVDySUd21/eoz3TQJCg81itWlxxF + 8x4VIQzoWiNxF16GOS9bAukzS4i8v1tgvVw3CkRvTsAq92n1dc3nVN5fNx15g04dhL0g3eDY2io1 + Y/kzrKNRxrK6hCV2HU3Jfs8D4oGcsKY9Fn1BtL8BdFYAdeAxc8k3/8GnlgmoV/rRnRtzEX71RK83 + q/35j1jKK/tNfdS/wHxvFQu8zWYlUXDW9bksHg7sbo6N3SRt2PrJsARffL9DrDcrdyBp38DPfCoQ + f38r2Swmai5/x1Nlu22jKaDFC7KovRDh5FTunBp3WwqCUKEqunLR3KVlKC+39YX4aUjBKD+NEX7X + P1r22UOfYzbeoDffTtS5uV7GErZW0h+9KQJDX0JBhkDfLQo1N09nmL/5Bhiu5VPjmqjD7tUYLSik + EpA6ZsqwO2VVDlKiT+h1KIaoS4ugBd/5or/1O3qBGcD0yV/oY5tifW0rGoKNadyRYFuBy0javwD4 + NMo3f94zFtZH/ud/aELW/pvXRgluwfP0x68vh2tg/PG/1tpHOvXUuP/lETKpQslI5AkatGqgI5k/ + 3cGiawuE9VPSyfztZ+xZ1go8v8cYeyipavbzb/GjD6minXuXFLrbg7g01m8+7Fxat9oN4g+j1AIX + nH3rrwVj66gYJY9b1L8uLw1WtpqSLd6fM4YsLtj7fJNjJMIzWPL3ywMavozUupb20AfSoYILtDn6 + 
84vELv0S5kpIkbjrpuwPfwmGjYQku8uBcFtvEiSkfRIQHludJa8PhEuyAei1trU708ER4SSHB3zU + W06fV9Ks8BxeD1SP0rSe57UP4VooPFbf1gHM6LUUcvp4GGT51gfbA1mE9f1YYLuRlWi9kgGB4C5O + 9GAvtvu5yjoPz5pYY4zugst2+tOBumSH1LxHyJ0jyo9wauua7O3XNVtMO2tg7+V3IontAcxD/Jqh + ubwIvbc6ijrTqCXotpJIFc5FbHYvcw4vnNFj5z5u6mWWIw8qkt/Qg3cLszXnPR7IhVVS1XluMxI/ + xBBAKeapW5ApY5301mBNDlck2h8PrMn7JIJFbs7UiJ8kG03B9cDxWdzx8aPVbOVUmUBouguxfbPP + mKjWNxhGYEWQj0/ZrJOa/62fnz7U6zPUUjgEbfdnfuf8VDgwy94HrMe+566L/+bhKPgIl7zI6U8h + mhrw3oHDL78z9s3nIqGvHDvGEjG25ftY+vYDrGsvvp6PKioBMFaMxML29UXfXRTw43nuLaBgvb6R + Bb/8B9FI74f5+RbKXx6jaJtVQ0f3Q7k/12cNB3nc6lRO1QoWAxeixi1BtPiNGUL+teGpbQvD0Pey + IoLp824JTO59NpNJ6SFVFEjawWh1VlbtDF/K3qUG94CMvqdRA6p7fP78l7twyyPYJ3ZlYr+ePjW5 + vi0L1iVnozJc02hOHa2Rv36ILOUprpcj/tjw+rY+2Dm/d4xOJ92Tf/xJfTAB0N96JpxrUttQ02ht + xgDJ19gZ8c//rBN7QMDrJkedOVSzMWzPMdyOyoT2xn2pxy9fg21cNtP+vHPcHnGuBiEkFlW2mDHm + OfsZdpYAsMKLhT5eT08OjALpCfzsNLaquO1hcZgcwhwJuIvbXA1gb5wDko4XO1vEArbAvXAyteFh + cNnpLGvQng2dPkprqic+WlJZGe0r2fNmw358FBRJxMii6Zcf38x/+QXndfvSf/4QvJ6XHmvRvhtY + 4D0k6W7A5Ntv+2H83oe2m2Y0g9D55rVAg/0oQiKxswB6Im0SiWxMheL9Z2LdMPYV+OWH42mjusTM + ux7MZltgMy0k1ke58gLf+aFq7nhgTjiQ/3nf2mVZh+HKGgIDeTtj7CQkI6qSWFAUP5i8u+d9oD1M + uP03X9JDFdpRb5XXGG4dZ0fN4NG7bMcXJehasqBebD9gXiW1lW9pvKO6ewrAt7+uwB1Ch2rlNAHW + 8bsGfip0Ii/8Zjq7bo4N+PJq+s1/YHs+vnK4+YjoD+9dvFeWAF0cAT7l12Egx3qL4ErFnhpxiIf9 + GV9FuM5Cg60mr4cF7oIS6s3WJFs327G1GVMEI3nOqbbfpdGojXsR7urHEYlfXracuM4B37z8zW9P + 0FOtWIF9D05fvnZnzO65HIIXDfHXr331UirBTn1H1OCEsP7pgXQqh4rI85Vn6+/9LE5ikr3/rIcv + b7Z+/AEf4aq7c+ebKQDGjIkgVLk+7697B2qCpJGBuqCeOHbWwEswEI6mZpuRn14GhG/JJtKd+udX + 4I9H+8dmZfP97At/+MzmxuAw4c/Og/sxAdTyzoQxJeBz8PE39pdv7/W3U3UpLDUlxt4NfMD4q9d3 + lxT4y1N0iryBQLe95tRNPu9h0TYOAvBS6/T40XS2rHYfSycGKbVhcmRbAroQLgN5YG+gA+sfFkhg + 2PYJEoT9AkZwfcdgE79V6pq+r/M6fwqg3qEL9peDEs1uqCtw95JK8tRufs1AUNnAOJxe1Do5mr47 + inog/fR9zmPb/fILBRKXn7G3T/WMbu61DS89uyMAETeQjU57sG4yRIZZoe46n68KtNj1g621Z+54 + VK0S2l6c4JsalD+9QeCb12mmcChbrdsg/vIZxpxfRayMCg++L0WLoDFCl11lXQDvs5lQnz/J7Mej + gZvUzc+/RuyRqSncZ1eFfv25y3/rWT6cT3eMkiCrd+mIE7BjoMX6+/OqWXWTX/CrF4QT4ZbNh+3m + 9ocXWf1QZXMfbC3ocXBPvTPl2Ki3XAo43q7oob/OP55syGopqPSX10lTAQPoUS1SWynf7penoD9+ + 6dSdrXoXk1SERg71n37UfSY4DfzuT2C9I162Oz1CBxo599O/dlhF5hB4E5cEF8MCAG2WbIR5sb38 + /HJEj2pbwN4r7ljV9B2YuQeCkmJtEsRzjxysxZ4UkJ6klSoXdacT64RmcNw8O3pQjIFR7TlDuM2s + FStqumEL3KUV7EygIMsbXwPtD3tJEh6BjP0xnPSfvsBbAkpSfCIuY0HoJ0AXLh2av3x6Fpw1he5B + 1bC2zPMwvgXqwaOPY4pbfxvN8f4kyE8zeFFzIyqR8Ov/H1+2qdL2ZOgVWw3+jDfubyXaYWfiwIdc + P9SrpHR4PbeqAdqoGagenHWXLP7E//whPpyTKWNfvyh/88mf/YWV2h4C1s4LsDMgqtP4YSXQo/mZ + BhMph13Udx7cbZMNgS/ryabuo4jwiFyMcXRTsuWbp6BUPg/kQ1EdLQFNGrgxrTs2pE8E+tsaSvAy + te6Pp4AJLoMB9XiDsdtMhb5uu30KN9GxR2y6JMP84rcB1IVzh7oqx9my9ZAnhfvXif78ycKUjQPS + zb7+7S8w8kQ3AkVJawjvbIp6XuUlgbzURmQP19plV9aMsBssHZtA93Xy88O/vK+ViaiP0r0U5K8f + psahcKNZcKQbbFwUY6x2oT6/KBbAb/9HxyWuZ+mZVjByKo/65oV3R/zwNPjjZUyITsMo908e3LVD + S/g4pDWTkOj94Y+XdHOpZ16wbLgRhSOST/CarSMnCWDevp/426/An/xvc+1Av+uVzXIj2dI+Fy5Y + iY7IpZddj8D+qhK0+er7aqR7CAso+kRsmr6epQu7yTW2Ttj/wCaaP5z1gostEqpc0hHQIuo18OXZ + ZJGO/cDsXsjB8T4w0hqqGHV0k3I//4rV6D5n9P1CEjjs9SO27mrGGA6iFGZw80Q722U1WY6cLe2U + LsV2RdRhXvkcwfAyZjTcbnBGfzx4t26m335a9NvvBV8egY+k58A01GYKv/yY+rMj6N3LA9ZPn7AD + d80wbMWbAr/9AvuqoLBp3r1n+M1zROhj7+tPrQpu1BNP3nZaRUtyaEZ4xK+G/Pj1ck2mAv78uTek + gt5utrMo/fTLFHiSrVn/GeUfn4y/ej0XVdjCe5xLGD0kTWeKb8ywAsz4+jPObVM8a/CsSTVG/srp + 0zBWJfyUq0Qkv/m4q+ynrz/1AGrPYaszxC8Q5I8EzdrZ0dmLditYrhcfW5X/YqOPDhW8XVf05XOb + aC3sxYITjD2qLM1xYCu1R2guDUFSv9tnP14n9/dbivFxkw5ffbaBwCqLKi/j5tIHbsr9ZVOpWHck + oLPrPnZk8zsT6BNx0dgdzyOA1tum+uodwWrdakn8+3cq4D//9ddf/+t3wqBp78X7ezBgKpbp3/99 + 
VODft/vt3zwv/JsKf04ikPFWFn//81+HEP7uhrbppv89ta/iM/79z1/bP6cN/p7a6fb+fy7/6/td + //mv/wMAAP//AwDOXgQl4SAAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c09bf823fb50b70-AMS + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Mon, 01 May 2023 17:29:41 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-kd1j0bcill5flig1m29wdaof + openai-processing-ms: + - '69' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - 555d4ffdb6ceac9f62f60bb64d87170d + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integration/cassettes/test_llm_utils/test_get_ada_embedding_large_context.yaml b/tests/integration/cassettes/test_llm_utils/test_get_ada_embedding_large_context.yaml new file mode 100644 index 00000000..b6d6a75b --- /dev/null +++ b/tests/integration/cassettes/test_llm_utils/test_get_ada_embedding_large_context.yaml @@ -0,0 +1,1484 @@ +interactions: +- request: + body: '{"input": [[5289, 564, 71, 773, 89, 2332, 19747, 87, 664, 71, 8311, 85, + 392, 25299, 11032, 940, 89, 85, 398, 80, 73, 5481, 48558, 86, 1919, 1634, 8890, + 5346, 44185, 53060, 450, 392, 68273, 64145, 511, 77, 80, 3696, 71, 2042, 28990, + 44490, 19456, 89, 21807, 44490, 6331, 3180, 23361, 71, 455, 76, 583, 87, 21436, + 34695, 352, 70, 953, 278, 59524, 2857, 93000, 641, 6043, 73, 70, 36463, 7141, + 21807, 391, 1273, 8536, 566, 71, 7513, 580, 263, 936, 10333, 86, 983, 822, 9700, + 73, 3316, 80, 1320, 365, 27278, 564, 67, 12591, 3576, 36851, 2465, 1739, 657, + 6375, 89, 23361, 6885, 352, 88, 14046, 292, 70, 32204, 648, 300, 73, 32547, + 869, 20854, 28679, 89, 11805, 70, 9081, 80, 11251, 87, 28679, 6194, 85, 906, + 64, 300, 16501, 70494, 6511, 51622, 87, 80, 8462, 295, 13536, 49463, 604, 64, + 333, 79, 302, 5469, 36068, 267, 5289, 19870, 911, 71, 587, 4912, 32868, 64087, + 22650, 89, 3368, 83, 50798, 391, 273, 365, 72, 1233, 604, 3858, 48380, 710, + 6375, 93000, 3013, 427, 7796, 66, 11160, 77, 10910, 85, 4777, 5969, 276, 7717, + 87, 71, 21325, 80, 73389, 3576, 86, 29238, 544, 1026, 31019, 20218, 3524, 64, + 7270, 7783, 80, 1498, 77, 1739, 5979, 327, 49450, 1791, 36409, 86, 582, 894, + 73, 85, 89, 2174, 87, 40494, 20824, 40078, 89, 80, 4317, 1609, 85, 3601, 73, + 86, 36068, 894, 56871, 89, 4680, 67, 2562, 1494, 6331, 16017, 2332, 21046, 86, + 11823, 18854, 85, 10616, 85, 23913, 73, 86682, 441, 1494, 360, 1978, 3013, 2562, + 511, 86, 427, 5298, 81, 2580, 89835, 14694, 86, 3059, 26738, 65736, 84, 1394, + 4458, 677, 85, 3368, 71, 15479, 391, 713, 35531, 566, 94869, 325, 14029, 52360, + 80, 406, 1290, 10616, 79, 773, 3315, 365, 89, 73, 2111, 13045, 73, 89, 14694, + 710, 95210, 1804, 79, 20824, 80, 4086, 19555, 936, 1091, 301, 936, 1494, 73, + 10784, 1029, 77, 1302, 1310, 86, 80, 24752, 89, 73, 89, 1302, 343, 38977, 324, + 25457, 9902, 88, 4168, 67, 3841, 953, 96518, 4777, 3696, 80, 61057, 90563, 80, + 20127, 86, 3418, 24672, 80, 796, 88, 85, 1026, 80, 8910, 285, 7864, 406, 14694, + 454, 361, 85245, 83, 664, 43210, 3368, 27278, 9832, 89, 5010, 89, 762, 86, 19747, + 299, 23808, 69057, 80, 4922, 20211, 71, 73, 404, 89, 1395, 830, 939, 5298, 402, + 83, 80, 70, 1631, 97582, 3306, 7387, 73, 89, 2034, 66, 3906, 53060, 339, 343, + 582, 2465, 80, 52926, 2642, 1739, 69057, 
3906, 13649, 85, 3178, 291, 66, 267, + 67910, 24672, 2201, 2933, 34175, 4777, 23361, 10784, 73, 764, 1517, 87, 12825, + 49755, 24752, 89, 265, 307, 85, 80, 689, 332, 1331, 76, 1031, 1658, 325, 2303, + 36851, 87, 8892, 365, 87, 402, 17606, 80, 12407, 4341, 85, 78498, 370, 3074, + 455, 44183, 79224, 830, 20811, 906, 361, 73, 2580, 80, 359, 258, 10753, 2329, + 37406, 50139, 67702, 10616, 56029, 7513, 2438, 402, 3258, 80, 22491, 4748, 359, + 41129, 2034, 11032, 1658, 65, 3714, 65, 1919, 40153, 363, 2580, 52333, 73, 87, + 80, 45015, 4086, 3141, 69, 392, 76, 19747, 21486, 708, 88, 1273, 21486, 85, + 764, 67, 16373, 4223, 641, 53060, 784, 8977, 73, 85, 1739, 6043, 321, 89, 18571, + 49450, 32589, 450, 88, 2303, 62497, 49595, 80, 49755, 1494, 80, 302, 2689, 87, + 29238, 89, 1551, 8088, 76, 20926, 87, 2034, 66, 785, 22873, 53060, 8480, 708, + 88, 906, 73, 4680, 80, 22119, 89, 398, 78498, 1919, 67, 64087, 318, 24409, 869, + 754, 20990, 1498, 23760, 648, 728, 89, 22619, 29224, 1216, 2933, 85, 1386, 8587, + 7737, 87, 17043, 24990, 70561, 47171, 6375, 67, 1073, 85, 89835, 3933, 85407, + 564, 38359, 3647, 60338, 89, 17008, 10079, 80, 385, 1394, 32837, 1247, 14170, + 74, 936, 1658, 87, 73, 365, 73, 21807, 76, 50139, 2629, 20568, 80, 1609, 5241, + 21206, 96518, 301, 36722, 2249, 74, 581, 75, 641, 1494, 31019, 6780, 967, 361, + 288, 77, 49755, 1631, 73, 11160, 20990, 876, 4511, 69, 675, 78, 352, 15809, + 363, 65, 9110, 20990, 2034, 82, 544, 28546, 939, 73, 37227, 266, 70, 3013, 7911, + 365, 14625, 65, 3841, 62559, 69, 2658, 76, 2302, 41922, 383, 347, 75, 3172, + 784, 634, 89, 26554, 74, 75406, 544, 71, 1146, 20926, 10784, 32589, 71, 894, + 64, 2319, 10784, 509, 15720, 327, 2701, 45015, 89, 344, 9810, 71, 344, 29700, + 2194, 5308, 303, 85, 23361, 402, 86, 939, 283, 6870, 2005, 73, 1919, 5308, 858, + 18854, 74, 11032, 911, 65, 80, 10784, 3576, 4223, 5289, 69, 2249, 37406, 44508, + 89, 2739, 80, 70, 80, 10462, 80, 23001, 31019, 80, 40429, 9092, 19072, 80, 56102, + 15444, 598, 30986, 85, 23866, 3933, 67, 1494, 86, 21325, 73, 2580, 18571, 675, + 19239, 876, 352, 24990, 87, 1336, 14029, 78, 762, 9258, 2319, 9081, 87, 24244, + 50536, 15720, 668, 85, 359, 86, 652, 70, 73, 70, 675, 355, 1725, 3696, 65941, + 295, 93797, 9081, 1273, 49891, 830, 581, 360, 302, 89, 14878, 87, 80, 73, 44067, + 86, 80, 5544, 49450, 7141, 65, 2465, 70, 344, 49463, 303, 10616, 3172, 906, + 74, 1658, 9832, 88564, 65, 80, 1216, 69, 89, 58153, 9832, 85, 4168, 32204, 2067, + 2465, 773, 73, 28774, 89, 5308, 18571, 49463, 300, 664, 14694, 3178, 71, 38844, + 80, 876, 13423, 65, 1146, 87, 34175, 327, 1394, 68916, 302, 74, 78498, 22016, + 450, 65, 2005, 7141, 80, 352, 31764, 73, 49865, 641, 16684, 17692, 2067, 9697, + 28679, 73, 85514, 80, 337, 1804, 939, 84, 80, 906, 15618, 69, 8671, 894, 385, + 89, 318, 13028, 18458, 70391, 89, 12328, 89, 5010, 49404, 285, 764, 32868, 3423, + 70, 5192, 73, 70, 80, 24244, 566, 3172, 346, 31764, 89, 1273, 75, 3427, 87, + 80, 38361, 1631, 4714, 78930, 21807, 24990, 40078, 86, 3906, 1310, 70, 406, + 80, 360, 1100, 87, 2032, 73, 49865, 668, 37406, 6358, 24244, 6634, 265, 1100, + 64, 16711, 822, 80, 52926, 8910, 3059, 1247, 4922, 87, 24244, 16400, 1729, 70, + 1320, 427, 85, 355, 22524, 42908, 1729, 8510, 80, 89, 26929, 1897, 5230, 363, + 80, 858, 89, 80, 64087, 637, 80, 48480, 69, 18577, 2303, 307, 818, 41129, 953, + 66, 2912, 2067, 73, 822, 8671, 1247, 22619, 1073, 383, 67665, 8212, 752, 10759, + 23866, 71, 1026, 89, 26929, 50139, 71, 16274, 8462, 82, 764, 391, 318, 74, 2465, + 9832, 3013, 713, 16400, 89, 1065, 86, 
39911, 74, 773, 81, 2042, 77, 1029, 9701, + 22524, 80, 24672, 71, 22619, 76, 65653, 2034, 73, 581, 4748, 17008, 1320, 6634, + 1498, 85, 80, 73, 85, 402, 67800, 637, 77, 820, 302, 12082, 4657, 831, 437, + 74, 31881, 89, 11328, 9117, 80, 6358, 343, 363, 5964, 65, 546, 85, 32478, 1498, + 87, 84, 454, 295, 3933, 88, 762, 1394, 69, 677, 96518, 88, 25299, 1098, 764, + 3919, 447, 82649, 3423, 70, 566, 2249, 89, 302, 2201, 88, 983, 1498, 1320, 333, + 3368, 34175, 84, 392, 87, 71, 16400, 93000, 8186, 27752, 74, 2465, 2642, 7141, + 74, 11823, 11927, 80, 18499, 454, 85, 9907, 1662, 4171, 12273, 2706, 36325, + 89, 7387, 65, 4922, 20926, 89, 23576, 73, 27515, 80, 86611, 9991, 333, 343, + 2881, 20990, 3423, 540, 74, 796, 3013, 89, 85, 36068, 14485, 10056, 14878, 27515, + 2465, 1026, 89, 2332, 303, 76, 28927, 43210, 3306, 81, 1658, 3576, 327, 292, + 76627, 3415, 351, 75, 447, 89, 2302, 291, 75, 4086, 53289, 21703, 89, 392, 2042, + 677, 3013, 70, 73, 784, 89, 14878, 6358, 81, 17156, 78, 582, 24752, 9996, 68054, + 2312, 18571, 906, 830, 76, 344, 263, 87, 4680, 83, 12498, 65019, 4912, 65, 346, + 10644, 75, 9902, 45228, 564, 73, 13423, 6331, 39911, 24235, 774, 74, 6789, 85, + 1919, 441, 4748, 273, 28679, 25299, 41922, 1073, 68475, 18571, 3978, 89, 675, + 66, 1897, 361, 37406, 9810, 53835, 66467, 87, 74, 1729, 894, 61057, 564, 81, + 27515, 728, 86, 2562, 652, 73, 1336, 2739, 675, 71, 1897, 869, 677, 675, 86, + 41796, 73, 637, 80, 24752, 2319, 1498, 8671, 28996, 13671, 9110, 375, 450, 1347, + 69, 689, 72, 1100, 51318, 1609, 2042, 69, 303, 3933, 69, 1347, 8318, 86, 3647, + 74, 710, 85, 4680, 24012, 278, 89, 70, 788, 7911, 28679, 70226, 41796, 2174, + 57208, 11328, 292, 5308, 21888, 13536, 5308, 982, 65, 375, 820, 564, 32962, + 8998, 2438, 74, 15479, 51622, 8587, 283, 2303, 67, 2857, 347, 372, 11823, 65, + 303, 631, 77118, 92216, 3696, 65, 80, 2329, 4341, 74, 6200, 73, 70, 12591, 87, + 80, 39911, 9902, 1320, 7387, 838, 56871, 61318, 86, 34175, 12328, 46082, 73, + 87, 4341, 75406, 36825, 80, 16284, 12940, 13752, 70, 73, 88, 5544, 67, 75092, + 30683, 80, 566, 65, 675, 84, 70678, 375, 511, 70, 74, 50900, 2465, 88, 9081, + 70, 324, 80546, 7387, 24672, 7501, 87, 4511, 38977, 69, 73, 27278, 303, 2025, + 5979, 4317, 915, 14170, 80, 9978, 12825, 32204, 1310, 14878, 89, 13423, 11032, + 27515, 13536, 80, 7369, 86, 339, 14878, 89, 28531, 2303, 8587, 73, 85514, 501, + 80, 501, 427, 4777, 45228, 939, 85, 774, 87, 6789, 4867, 19456, 71, 6885, 455, + 75, 728, 65, 303, 675, 81, 3172, 72, 20629, 72, 587, 302, 86, 89, 3696, 86, + 8480, 49463, 869, 295, 73, 385, 80, 73, 1658, 13183, 87, 94908, 8480, 3601, + 1546, 99355, 66, 6194, 5192, 77, 376, 73, 88, 1729, 85, 329, 19285, 31268, 406, + 641, 1463, 76, 392, 27780, 10470, 347, 1029, 22524, 376, 69, 22016, 86, 784, + 288, 36851, 85407, 10056, 87, 6870, 9092, 292, 84, 1354, 70, 604, 723, 876, + 80, 51759, 1320, 4922, 2171, 66, 22119, 85590, 85, 365, 65, 12328, 69, 283, + 73, 86, 67800, 713, 309, 5308, 8892, 84, 554, 2329, 60933, 80, 74, 332, 86, + 5308, 2042, 83, 652, 39152, 275, 69, 8783, 73, 28774, 24672, 87, 3893, 370, + 79, 1662, 81, 24752, 3178, 20824, 71, 31019, 80, 78, 80, 21659, 1026, 301, 675, + 2701, 76, 8671, 51622, 4214, 8311, 87, 80, 295, 12825, 89, 303, 5964, 92512, + 784, 74, 99066, 31019, 97523, 454, 93000, 8671, 581, 50922, 80, 634, 77, 3893, + 18854, 19239, 18631, 75, 50139, 7488, 455, 41129, 7453, 80, 4171, 86, 16618, + 25105, 4341, 10776, 5683, 360, 9810, 89835, 1791, 31881, 70, 7050, 34106, 12825, + 17134, 983, 27515, 74, 34175, 8050, 7488, 10910, 84, 354, 299, 
2332, 87, 18244, + 85, 441, 710, 13423, 2332, 70, 2032, 68, 2319, 73, 89, 764, 1604, 35555, 39152, + 40078, 8050, 73, 2234, 65, 575, 1800, 38361, 73, 10616, 14878, 49450, 3919, + 20369, 6375, 1725, 86, 8462, 325, 49755, 347, 83, 73, 87, 7513, 89, 830, 12328, + 71, 8890, 97523, 303, 1631, 83, 70391, 29684, 1031, 87, 752, 31881, 1320, 4239, + 1347, 784, 49450, 3576, 6591, 70494, 664, 2203, 2329, 86, 80, 36708, 80, 70981, + 85, 1764, 80, 7047, 796, 78, 1073, 263, 7501, 450, 375, 16820, 73, 1310, 25457, + 80, 7488, 65019, 1026, 355, 73, 14606, 89, 325, 295, 447, 73, 291, 69, 89, 36238, + 911, 66, 14092, 3013, 344, 77, 398, 4086, 88, 12412, 53835, 7387, 66, 1551, + 87, 73, 604, 73873, 1631, 1517, 318, 13009, 74, 376, 89, 8050, 664, 4193, 2000, + 87, 74, 4086, 454, 8890, 73, 89, 32547, 3919, 10333, 554, 63643, 89, 327, 80, + 784, 88, 3988, 13884, 664, 73, 88, 22119, 10196, 80, 8088, 41796, 343, 85, 28679, + 65, 8998, 11032, 1662, 361, 56029, 3368, 36851, 72245, 2034, 14878, 87, 2319, + 69, 73, 4942, 80, 70, 34097, 67, 406, 62497, 17156, 869, 1546, 7141, 10616, + 80, 1729, 87, 68156, 1662, 10630, 4922, 15930, 11160, 55109, 74, 25299, 73, + 59524, 579, 12825, 86, 822, 454, 77, 4171, 58305, 1517, 12498, 77584, 5192, + 31764, 3716, 675, 383, 21486, 291, 8998, 11823, 321, 19639, 8587, 309, 66565, + 3635, 3013, 89, 906, 3418, 7759, 18894, 89, 762, 275, 1658, 3933, 398, 22016, + 337, 747, 2249, 56029, 566, 94869, 2249, 4458, 670, 65, 16220, 89, 8050, 8587, + 85, 80, 301, 74, 511, 266, 79, 6194, 331, 87, 540, 6410, 1494, 1273, 4484, 4341, + 1073, 283, 11377, 84, 3935, 89470, 80, 70, 292, 76, 1105, 2642, 20824, 87, 32837, + 303, 12825, 4942, 406, 80, 74901, 44067, 89, 21206, 15618, 1065, 77, 2445, 2580, + 73384, 5010, 38181, 1065, 354, 77, 5972, 664, 82999, 689, 2302, 1910, 74, 1791, + 258, 1810, 71, 7006, 85, 953, 8587, 80, 501, 377, 77, 2739, 361, 25123, 54761, + 2005, 307, 96518, 22619, 1494, 76, 1146, 18442, 14878, 2201, 30148, 11949, 85, + 13092, 728, 1065, 30506, 29684, 14878, 13183, 14878, 65, 8088, 86, 19870, 86, + 544, 85, 11160, 1498, 69, 89, 5104, 71137, 28774, 331, 1031, 79, 91410, 80, + 29224, 49450, 93000, 5964, 1394, 8890, 84, 80, 42908, 10176, 953, 11251, 24672, + 87, 355, 55233, 25100, 74, 4511, 16400, 20211, 17137, 3306, 20990, 372, 13855, + 309, 89, 47689, 2701, 1910, 20127, 363, 80, 354, 637, 9117, 329, 50864, 427, + 80, 278, 89, 708, 343, 62369, 1302, 16400, 73, 20191, 3576, 16618, 70, 302, + 69, 392, 8480, 764, 65, 80, 321, 377, 675, 6489, 3059, 84, 80, 85514, 4223, + 69, 12328, 74, 6634, 6358, 17043, 1739, 89, 21807, 641, 67492, 1336, 49595, + 4341, 17008, 710, 9701, 87, 797, 92869, 5418, 2469, 60013, 377, 564, 28679, + 1897, 50139, 418, 87, 11253, 329, 78498, 265, 4239, 377, 2329, 20926, 49463, + 2580, 8212, 283, 3841, 46894, 762, 86, 7047, 462, 80, 47928, 80, 1331, 85, 451, + 20568, 80, 3172, 27093, 43210, 2445, 40078, 1026, 89, 80, 359, 75, 360, 7501, + 5469, 710, 6358, 677, 74, 5469, 80, 5192, 71, 14029, 94055, 325, 8890, 383, + 52333, 85, 2008, 74, 7488, 36415, 12328, 74, 713, 80, 2302, 41622, 11253, 80, + 784, 89, 501, 4937, 21206, 89, 3013, 74, 710, 86, 564, 1065, 81, 27455, 359, + 81, 708, 347, 70, 13183, 2554, 88472, 28679, 80, 1494, 82, 8480, 7439, 86, 652, + 34175, 74, 13423, 39911, 455, 21807, 953, 66, 4223, 14460, 81, 38359, 93000, + 14736, 66, 7759, 68, 3524, 336, 71, 61952, 329, 7369, 28927, 581, 77, 303, 42818, + 331, 14606, 76, 3172, 1026, 11089, 52971, 80, 20824, 73, 16276, 6194, 89, 18577, + 677, 70, 331, 1551, 87, 86, 7387, 773, 37406, 2411, 355, 355, 4912, 85, 
80, + 1273, 39911, 31721, 9907, 376, 20191, 406, 2249, 869, 14170, 1336, 2438, 1551, + 1347, 8088, 86626, 80, 2303, 80, 10616, 10776, 65019, 21659, 41196, 333, 989, + 8050, 10011, 581, 509, 67, 3714, 1026, 80, 69, 1804, 75, 13565, 31881, 89, 352, + 87, 68054, 363, 84, 3178, 2727, 710, 42848, 77827, 2303, 72, 332, 784, 273, + 45015, 5230, 3601, 292, 454, 36722, 566, 343, 318, 1864, 2042, 2554, 333, 44508, + 87, 85, 48029, 20127, 3172, 953, 71, 29684, 1310, 69, 657, 12825, 34175, 329, + 3178, 66, 2465, 80, 34175, 73, 360, 283, 73, 20211, 830, 8088, 85, 80, 17851, + 73, 11377, 1030, 69, 1498, 85, 4086, 8050, 67, 10616, 45283, 86, 5481, 80, 38359, + 22580, 84, 7453, 581, 830, 70, 4168, 675, 89, 70, 1919, 75, 604, 74, 664, 12591, + 41625, 31603, 675, 52926, 35401, 17008, 39152, 581, 30407, 9802, 89, 38977, + 6151, 86, 39749, 52767, 24672, 89, 12082, 73, 88, 4341, 73, 1026, 16400, 12825, + 16373, 564, 4657, 80, 354, 8462, 14029, 2003, 53060, 80, 17708, 1609, 87, 959, + 69, 754, 89, 2957, 544, 19456, 57208, 20211, 9991, 71, 18244, 74, 4355, 33863, + 16400, 81, 10784, 69, 575, 7783, 1804, 89, 4010, 798, 5544, 1273, 69, 19456, + 89, 7047, 4835, 65, 80, 19747, 455, 6194, 1336, 76, 75406, 3055, 587, 70, 80, + 4086, 10776, 85, 11828, 73, 451, 42908, 2249, 73, 50139, 1336, 79, 5735, 566, + 3906, 73, 614, 87, 86, 1302, 352, 70, 80, 302, 83, 14912, 80, 70, 20926, 23866, + 2562, 83, 16468, 277, 10002, 2562, 15930, 73, 6780, 648, 4458, 70678, 22619, + 76, 579, 74, 19072, 84, 566, 89, 784, 12591, 363, 47689, 383, 1263, 86, 58305, + 371, 76, 2200, 2203, 5010, 3919, 67, 13028, 346, 582, 5241, 71, 2042, 4239, + 73, 7783, 1729, 70391, 2249, 15930, 2727, 2005, 4912, 46341, 1216, 75406, 14736, + 69, 332, 18596, 347, 75, 3933, 3055, 27960, 86, 89, 265, 1100, 9810, 1100, 869, + 36238, 1804, 85, 12328, 12825, 80, 32962, 39911, 8385, 80, 93797, 14029, 77827, + 418, 2303, 361, 65019, 954, 86746, 953, 52971, 540, 3716, 363, 89, 4680, 80, + 8385, 318, 544, 5683, 1791, 80, 1498, 79, 316, 10196, 80, 14088, 347, 3696, + 80, 4991, 83, 2174, 50226, 65, 89044, 17692, 7911, 80, 5104, 4381, 752, 32204, + 1331, 66, 3306, 21486, 73, 88, 1320, 75, 20824, 87, 11823, 88, 2701, 5010, 87, + 86, 5192, 32962, 784, 3423, 4055, 1498, 3178, 301, 80, 3716, 1216, 818, 48380, + 8035, 285, 588, 65, 540, 15770, 83, 7387, 785, 86, 3576, 2302, 52097, 7910, + 88, 38359, 45015, 13536, 3059, 66443, 10462, 80, 1498, 21325, 73, 1729, 4777, + 22619, 73, 28990, 89, 29700, 3427, 728, 344, 11805, 267, 86656, 392, 7513, 831, + 7910, 391, 33539, 9117, 300, 4341, 8783, 869, 2203, 2249, 85, 2042, 1631, 37406, + 2329, 70, 80, 3447, 1662, 13223, 57208, 11089, 65019, 73, 10776, 81, 302, 4168, + 1411, 450, 49712, 80, 8050, 3251, 3423, 53016, 70, 447, 86, 2117, 352, 48045, + 17063, 86611, 1310, 89, 587, 652, 32962, 60013, 3933, 14170, 87, 1031, 97523, + 675, 44067, 5104, 77, 45015, 333, 8050, 70, 80, 525, 404, 87, 392, 41796, 69, + 24752, 3696, 441, 30986, 49463, 21436, 67, 7387, 44508, 87, 3055, 675, 14606, + 69, 894, 2715, 84, 83438, 73, 5673, 6358, 354, 2445, 34854, 32547, 73, 23361, + 5308, 4086, 1631, 1551, 68467, 77, 45015, 73, 89, 70, 6733, 447, 333, 3647, + 66, 22119, 535, 73, 86, 1498, 86, 4080, 88, 73822, 365, 41982, 454, 20127, 80, + 1336, 392, 830, 75, 10776, 6723, 80, 74, 2580, 51924, 40261, 2042, 19275, 983, + 86, 869, 82, 62497, 86611, 822, 64, 5171, 73, 6292, 19571, 1302, 27093, 370, + 79, 14773, 447, 61057, 89, 80, 70, 83256, 80, 28546, 89, 80, 85369, 28679, 74, + 3418, 283, 74, 89, 3368, 85, 953, 87, 352, 15618, 23265, 3576, 625, 784, 65, + 318, 
1197, 359, 88, 4171, 44567, 71, 46341, 2032, 16373, 354, 88, 78498, 10910, + 3906, 85, 354, 64087, 2445, 85, 17134, 6511, 6375, 89, 19747, 65, 80, 581, 87, + 1289, 31721, 8934, 752, 12591, 11949, 80, 1634, 12082, 333, 89, 33863, 24990, + 5308, 67, 3841, 12328, 44970, 73, 25457, 85, 764, 8671, 35401, 2067, 73, 42848, + 454, 80, 89, 1347, 4835, 89, 19239, 80, 31695, 511, 8480, 309, 5308, 77, 2329, + 22119, 73, 8890, 1411, 1273, 69, 2302, 61828, 75, 13856, 5230, 33721, 1030, + 327, 20191, 16276, 52926, 24244, 71, 876, 76, 1658, 52971, 74, 402, 69, 418, + 42848, 73, 6358, 38828, 28774, 302, 79566, 49463, 79, 1791, 85, 8998, 52097, + 80, 30506, 564, 89, 641, 375, 84, 2580, 41221, 4991, 36851, 8050, 796, 2332, + 87, 74, 18571, 906, 288, 360, 6634, 85, 80, 73, 9112, 1910, 74, 10056, 86, 85, + 12940, 51622, 1130, 86, 16618, 344, 73, 11823, 351, 3315, 2319, 22580, 37762, + 80, 7521, 81, 376, 79, 24286, 337, 71, 9907, 8480, 94055, 3906, 1395, 80, 69, + 9117, 85, 41796, 18442, 71, 44508, 339, 38359, 33367, 8587, 77, 7501, 5298, + 5241, 5701, 86, 14668, 5455, 45015, 37975, 71, 632, 8890, 27515, 28679, 2580, + 50536, 1347, 6200, 26397, 376, 8462, 73, 998, 295, 346, 88, 20990, 18442, 87, + 1336, 74, 17134, 87, 73, 70, 80, 14170, 87624, 85, 3696, 69, 4239, 86, 85, 40924, + 74, 818, 69, 75406, 24244, 566, 32962, 80, 675, 73, 88, 68054, 2249, 65, 344, + 2465, 652, 3306, 82999, 78, 7453, 363, 27278, 41287, 427, 89, 324, 1331, 484, + 69, 2034, 3418, 652, 32962, 70, 10470, 71, 3906, 28679, 45062, 2465, 20926, + 17008, 876, 1631, 544, 87, 14912, 27093, 18287, 85, 32204, 35734, 69, 1031, + 73, 566, 80, 2676, 72, 5308, 42528, 17008, 3906, 60013, 33367, 28774, 85, 3315, + 64, 316, 89835, 89, 4171, 76, 1551, 20990, 89, 316, 713, 80, 1100, 20824, 19456, + 80, 70, 21620, 77, 8892, 12407, 455, 86, 93000, 34152, 275, 73, 10036, 9490, + 352, 83, 6632, 278, 377, 65, 2554, 337, 309, 3919, 74901, 18442, 22506, 2580, + 7501, 74, 80, 40229, 12825, 86, 5972, 85, 8212, 336, 3576, 6151, 80, 4942, 74, + 87, 80, 25337, 71, 2034, 80, 3933, 5972, 13536, 11285, 38361, 89, 80, 38844, + 89, 2562, 89, 89687, 1551, 14694, 14606, 59524, 1662, 81, 764, 278, 59524, 652, + 3013, 747, 454, 25299, 87, 1551, 2465, 9081, 67800, 73873, 450, 339, 76627, + 53450, 89, 65116, 511, 747, 360, 89, 677, 86, 49568, 6511, 8587, 8587, 5781, + 9081, 89, 4168, 83, 80, 23001, 55282, 831, 10616, 406, 3633, 2249, 18854, 830, + 80, 74, 501, 363, 3696, 1791, 85, 28927, 17207, 73, 16820, 80, 28927, 441, 89, + 15479, 86, 327, 76, 1897, 575, 69243, 4991, 657, 70, 32589, 83079, 36463, 89, + 2249, 677, 86, 1919, 20510, 85, 383, 20103, 352, 73, 85, 8783, 78, 80, 46341, + 6358, 764, 78, 28679, 85, 89, 28774, 69, 33863, 20211, 93141, 2201, 71, 869, + 76, 447, 8311, 1897, 85, 89, 80, 23361, 72801, 4748, 11949, 544, 637, 86746, + 1910, 20211, 6200, 86, 437, 52552, 66, 60933, 1872, 14088, 501, 59524, 80, 70, + 10056, 582, 14170, 69, 20926, 752, 1411, 91943, 14170, 89, 3172, 13423, 59524, + 564, 343, 3172, 67702, 25299, 2034, 73, 1100, 80546, 1658, 265, 773, 65, 16400, + 86, 830, 87, 20926, 509, 44183, 73, 299, 71, 78512, 9112, 1804, 80546, 19747, + 80, 634, 80, 1082, 85, 5972, 13423, 266, 15618, 86, 16400, 64087, 321, 1551, + 277, 80, 3893, 68054, 708, 68, 354, 23361, 65, 18577, 79, 2303, 73, 57208, 1910, + 89, 17043, 1910, 302, 17063, 73, 377, 12407, 84, 7488, 55233, 50798, 398, 73, + 10776, 49595, 80, 3632, 5308, 16276, 50676, 454, 44970, 2329, 22119, 72801, + 2319, 70, 785, 71, 18244, 74, 28679, 87, 1072, 383, 89, 15720, 85, 1286, 8587, + 57208, 27278, 668, 20720, 546, 
3647, 73, 361, 9490, 1919, 95328, 85, 10084, + 67, 1026, 20824, 25457, 17008, 85, 5683, 347, 343, 784, 544, 998, 2303, 25453, + 81, 2171, 86, 441, 85, 89, 8910, 30407, 73, 3251, 70, 5469, 73, 11377, 355, + 392, 83, 73, 6194, 87, 80, 604, 983, 3178, 87, 587, 86, 664, 47689, 269, 75, + 80, 13637, 283, 73, 9701, 73, 9902, 4835, 41796, 4171, 74, 16400, 79939, 45015, + 43505, 80, 17063, 85, 9112, 88, 39911, 5721, 89833, 73, 4748, 85, 5298, 14029, + 376, 13694, 65, 4341, 2067, 76423, 89, 3251, 85407, 418, 9701, 300, 13009, 85, + 40185, 754, 1169, 61057, 9832, 77, 1215, 87, 18571, 89, 22580, 42583, 2329, + 85, 6331, 67, 4223, 69013, 376, 8480, 86, 1331, 56201, 5104, 983, 77, 1517, + 65116, 45228, 983, 77, 337, 80, 81606, 65, 336, 83, 1336, 77, 564, 21068, 80, + 9810, 85, 1336, 2562, 346, 1463, 85, 30407, 1216, 28927, 266, 58153, 7501, 869, + 87, 44508, 94869, 302, 77, 936, 26738, 20103, 50798, 9112, 71, 1725, 4575, 33963, + 2562, 343, 67, 3576, 17692, 76, 16400, 2465, 9907, 283, 27093, 74, 939, 339, + 73, 89, 1725, 31881, 74, 1800, 87, 2203, 70, 3306, 785, 74, 89, 42908, 11032, + 2857, 69, 406, 65019, 73, 613, 427, 62497, 70, 7501, 67, 4010, 89, 8998, 75, + 2857, 84, 1509, 36825, 1631, 88, 85, 14029, 23001, 7737, 80, 5010, 41796, 10176, + 20127, 19239, 74, 2303, 4341, 11823, 329, 77, 1031, 80, 4777, 406, 1677, 87, + 1065, 69, 73, 283, 9907, 89, 21206, 88, 73, 22119, 23866, 80, 3251, 73, 49865, + 652, 728, 55442, 392, 86746, 7737, 85, 49755, 351, 14172, 83470, 3423, 74, 3919, + 23048, 1889, 80, 70, 20553, 89, 3601, 307, 49463, 2067, 509, 53060, 773, 10002, + 86, 32589, 544, 75, 59524, 11823, 486, 88, 73, 1662, 301, 7387, 344, 360, 86, + 24468, 677, 42528, 360, 85, 1310, 70, 74, 2438, 1897, 89, 278, 41796, 3696, + 86746, 22619, 85, 2067, 69, 73, 88, 23361, 84, 8587, 983, 2332, 80, 450, 27093, + 88823, 8783, 73, 47689, 540, 14460, 73, 511, 90906, 69, 1026, 331, 28927, 11949, + 30797, 1791, 76, 28927, 5298, 4777, 258, 76, 4168, 70, 540, 11032, 344, 87, + 80, 301, 1662, 49595, 17134, 87, 80, 11160, 398, 80, 89, 6331, 77, 43686, 51622, + 831, 1247, 88, 8892, 19639, 42426, 87, 6870, 427, 329, 3919, 65, 4080, 24752, + 69, 1804, 3601, 3906, 1216, 23001, 7787, 36851, 20127, 1098, 316, 3433, 51622, + 2445, 70, 5308, 7521, 85, 1029, 11927, 1091, 86, 85, 21659, 11823, 2438, 86, + 561, 80, 10333, 7369, 331, 16276, 45228, 34175, 604, 6358, 17134, 75, 1273, + 85, 5308, 71, 4168, 689, 21206, 2332, 19747, 20127, 10056, 1146, 2933, 49865, + 86, 1029, 12940, 708, 5104, 73, 911, 418, 69, 2411, 88, 5308, 21888, 3906, 70391, + 668, 46790, 80, 581, 18854, 93366, 8050, 544, 25457, 18571, 39152, 86, 1791, + 710, 15444, 80, 4086, 13183, 329, 1631, 80, 22016, 1100, 1302, 9092, 1100, 11823, + 689, 2580, 370, 501, 28927, 10910, 1631, 69, 5308, 69, 575, 7501, 39911, 88, + 376, 2302, 3205, 40617, 450, 67, 33863, 511, 16820, 365, 295, 269, 677, 2329, + 67039, 73, 7141, 4777, 7907, 3178, 58345, 2329, 56871, 273, 73, 20854, 9701, + 48380, 86, 876, 8890, 1551, 89, 325, 23361, 49568, 86, 80, 3172, 1631, 89, 70, + 73, 86, 8480, 89, 5192, 44508, 2392, 11823, 454, 87, 307, 2303, 75, 24990, 1551, + 27455, 7047, 5981, 80, 1100, 869, 71, 67139, 13044, 45015, 3906, 70, 73, 9117, + 88, 8462, 32868, 31695, 86, 80, 24244, 74, 89, 17319, 377, 69, 4511, 89, 5701, + 48380, 876, 92562, 7387, 318, 1494, 87, 17008, 641, 7488, 71, 42510, 4922, 75, + 49537, 8783, 42441, 24672, 66, 375, 79, 302, 42792, 6331, 80, 2302, 86, 637, + 29238, 76736, 541, 1336, 12255, 581, 87, 427, 78498, 53904, 3919, 1347, 7387, + 23361, 8998, 71, 43505, 80, 33495, 2302, 84, 
12328, 346, 652, 89, 80, 74, 33863, + 31881, 9117, 773, 13536, 80, 4680, 68, 295, 1725, 648, 71, 57647, 85407, 1609, + 67, 34175, 85, 40924, 8998, 867, 5418, 710, 634, 48372, 1662, 38181, 3013, 17063, + 19239, 73, 4239, 80, 18577, 81, 7488, 80, 1247, 70, 45015, 269, 86, 11823, 26397, + 89, 7783, 70, 52097, 73, 876, 28990, 89, 329, 77, 80, 376, 74, 5308, 7907, 337, + 2580, 19072, 728, 5972, 71, 402, 28679, 87, 540, 3576, 85514, 13423, 74, 2641, + 3251, 65, 391, 32868, 89, 80, 8998, 23361, 263, 87, 7513, 87, 9092, 4010, 65, + 5192, 266, 67175, 300, 2933, 455, 22580, 38359, 13545, 42908, 80, 85514, 6200, + 773, 1718, 77, 2329, 7521, 3601, 17319, 73, 53289, 491, 55911, 402, 283, 89, + 3933, 8212, 76, 675, 86, 89, 12591, 337, 4458, 347, 76, 23361, 3141, 86, 93471, + 44183, 8587, 77, 36068, 511, 20824, 2192, 38181, 70, 14088, 81, 7501, 5485, + 4922, 73, 70, 26397, 1072, 80, 1662, 708, 2580, 32547, 12183, 80, 20191, 89, + 9117, 2026, 76, 2303, 86, 85, 89, 587, 73, 88, 752, 74, 1739, 318, 87, 25105, + 9786, 89835, 1216, 1729, 87, 307, 28679, 324, 764, 89, 12407, 324, 21659, 67039, + 23866, 73, 418, 336, 53289, 6009, 67, 3423, 4239, 564, 4763, 2689, 28844, 11949, + 4341, 10616, 77, 23866, 1791, 65, 1662, 3628, 278, 89835, 377, 70391, 89, 14244, + 1072, 85, 5010, 303, 1804, 6723, 80, 88, 73, 85, 604, 24752, 46224, 418, 3601, + 771, 10616, 983, 1394, 1631, 83, 302, 38977, 74, 11328, 75, 1498, 89, 24705, + 24244, 2067, 89, 455, 31721, 65, 7910, 86, 316, 86, 80, 17063, 5683, 1031, 27515, + 71, 9117, 77, 1725, 2067, 32837, 2329, 85, 2438, 23001, 1136, 75611, 728, 10647, + 79, 713, 74, 84995, 365, 361, 74452, 65019, 89, 12825, 4168, 74, 32204, 73, + 8311, 1551, 1551, 44970, 1517, 1551, 62369, 89, 28679, 4942, 24823, 78604, 3172, + 8783, 14029, 67, 417, 2067, 80, 73, 13536, 71, 27782, 2808, 38359, 74, 664, + 73, 332, 764, 78604, 2312, 31268, 2739, 85, 360, 2026, 4086, 46640, 28073, 544, + 14878, 14878, 73, 86, 70494, 27455, 8212, 19072, 73, 87, 71, 2249, 69, 4168, + 61057, 277, 4223, 11805, 79, 39911, 3919, 83, 39911, 4341, 29700, 1729, 339, + 86, 876, 44606, 299, 64, 3141, 83, 4942, 10776, 81, 24244, 18442, 9832, 9490, + 80, 22016, 10616, 3315, 316, 8671, 67, 2580, 417, 31881, 15618, 278, 8088, 4168, + 4171, 500, 89, 85, 1026, 14088, 71, 10616, 21807, 10784, 81, 2727, 14878, 52253, + 10616, 87, 13855, 1336, 822, 336, 1247, 58305, 8318, 359, 299, 73, 87, 876, + 38828, 1072, 84159, 10784, 96304, 455, 3906, 80, 876, 76, 8587, 31721, 5701, + 73, 25734, 89, 752, 64, 458, 9907, 48380, 346, 45102, 12591, 62155, 20926, 93797, + 69, 1072, 351, 87, 7141, 288, 73, 8050, 78, 831, 288, 11823, 73, 22580, 3696, + 764, 10470, 86, 21807, 451, 5979, 324, 74, 11377, 86, 7821, 318, 299, 73, 501, + 668, 36851, 351, 89, 80, 1804, 14606, 73, 7387, 89, 80, 21206, 17008, 89687, + 70, 20824, 89835, 71, 1146, 2319, 1725, 22016, 21659, 80, 89, 3368, 316, 347, + 501, 7783, 85, 70678, 894, 32868, 1498, 86, 2249, 8910, 31881, 85, 9813, 375, + 24012, 80, 265, 1026, 21620, 303, 8207, 2641, 1029, 70, 4010, 664, 6194, 365, + 39911, 318, 713, 89, 39911, 3933, 84, 4317, 80, 5010, 31172, 80, 24672, 68, + 4341, 339, 69, 2857, 67, 2453, 24244, 587, 80, 38977, 53835, 31695, 351, 89, + 455, 44067, 86, 5683, 69, 14029, 327, 2868, 69, 1065, 64, 309, 80546, 1791, + 7141, 66, 1631, 21796, 86, 63958, 3172, 2249, 8587, 44183, 1264, 80, 17043, + 50798, 784, 87, 22119, 39911, 10462, 80, 677, 5469, 80, 797, 2739, 7387, 80, + 29684, 71, 2329, 31695, 789, 87, 3447, 87, 1215, 73, 1065, 19555, 89835, 32478, + 86, 80, 3524, 822, 8311, 11823, 1029, 30243, 
78498, 1662, 540, 19456, 20553, + 2026, 80, 2438, 83079, 12498, 3696, 10188, 258, 5104, 846, 303, 70, 1919, 21807, + 19639, 35531, 418, 21206, 3055, 4223, 351, 4010, 73, 301, 81, 73, 1517, 911, + 36851, 337, 89, 80, 36238, 285, 46640, 20824, 1658, 21796, 42050, 2067, 263, + 1658, 261, 641, 34175, 2580, 566, 65116, 27278, 34664, 73, 61057, 80, 9112, + 325, 73, 5861, 2249, 7047, 48480, 10470, 2701, 79, 7513, 87, 398, 71, 2329, + 7521, 29224, 80, 31721, 60933, 80, 581, 1631, 18894, 6375, 89, 85, 564, 86, + 641, 89, 32547, 295, 14736, 8318, 90752, 18244, 87, 71137, 83079, 5298, 641, + 8536, 89, 7501, 2249, 39911, 18894, 36996, 12591, 3172, 69, 488, 1347, 3368, + 12082, 70, 80, 25457, 69, 1551, 544, 56029, 1919, 78, 80, 383, 23913, 402, 299, + 797, 8212, 85, 1157, 7453, 73, 95945, 22473, 85, 8480, 6194, 76, 575, 365, 773, + 91410, 32589, 89, 70, 73, 283, 71830, 20218, 9700, 1725, 344, 89, 2034, 7783, + 2302, 74, 2642, 89, 4657, 773, 8998, 65, 7737, 269, 4575, 75593, 74, 728, 4942, + 1347, 75, 3841, 581, 13536, 60013, 509, 46790, 80, 1073, 79, 60933, 73, 17007, + 372, 89, 1517, 89, 383, 40952, 1002, 316, 87, 32962, 74, 392, 46341, 10616, + 69, 3919, 31764, 4317, 773, 69, 12825, 3576, 2249, 339, 20926, 894, 12183, 89, + 73, 15618, 8207, 76, 588, 7387, 86, 7141, 79, 11089, 8998, 84, 352, 16618, 56871, + 89, 29443, 31764, 906, 73, 70, 44490, 450, 77, 1729, 19260, 675, 39911, 1919, + 7513, 69, 70494, 392, 10616, 32868, 2067, 17553, 61057, 418, 5418, 80, 2784, + 8318, 511, 77, 51622, 87, 14878, 76, 89, 2880, 64087, 9902, 1230, 80, 3576, + 454, 2303, 41129, 3524, 89, 13752, 441, 6358, 73, 10056, 12498, 3251, 32868, + 14736, 566, 70, 80, 4458, 752, 677, 343, 67, 376, 664, 2880, 4777, 85, 10470, + 86, 1100, 89, 9258, 288, 48380, 16400, 668, 8512, 634, 83, 8186, 5682, 43505, + 664, 12082, 540, 85, 26738, 80, 1725, 6009, 3141, 796, 7521, 42930, 47689, 537, + 1302, 12329, 79, 8910, 5010, 27515, 346, 80, 89, 39911, 74, 80, 336, 7910, 80, + 723, 462, 11160, 3919, 1073, 87, 73, 347, 637, 88, 1498, 70, 21486, 1031, 15444, + 73, 21796, 60013, 11949, 89, 3368, 88, 69, 2329, 1551, 349, 18442, 901, 85, + 336, 3251, 582, 2438, 48872, 450, 9081, 303, 1065, 76, 6200, 66, 1273, 581, + 85, 22532, 1347, 76, 55506, 44970, 73, 87, 8050, 52253, 28990, 86, 6358, 48872, + 87, 1739, 74, 89, 80, 44970, 80, 2042, 7501, 72801, 67455, 427, 16017, 85, 324, + 31764, 1729, 78, 380, 8671, 57367, 2319, 82, 1336, 89, 15444, 73, 8376, 2315, + 64, 454, 46640, 69057, 383, 73, 361, 80, 3059, 85, 316, 587, 86, 587, 637, 5308, + 5298, 70, 1394, 6733, 80, 11032, 80, 8998, 8212, 73, 87, 462, 71, 45015, 86, + 89, 8998, 34175, 69, 89, 6200, 14606, 85, 2332, 79, 876, 894, 68566, 8783, 86, + 11345, 65, 1631, 1302, 89, 9081, 8480, 73, 5972, 8587, 80, 2445, 2026, 1017, + 377, 88, 451, 277, 18244, 581, 336, 604, 8311, 85, 7737, 86, 89, 1031, 79, 3415, + 73, 1609, 79, 45015, 17319, 87, 86, 13536, 939, 89, 14088, 77, 1100, 21068, + 85, 89, 93366, 86, 4168, 2933, 73, 21206, 509, 28774, 71, 11949, 566, 6200, + 22619, 38041, 28204, 10647, 96518, 22619, 3326, 67919, 23361, 52971, 292, 65, + 713, 5308, 295, 76, 4867, 1568, 1729, 89, 10188, 88, 936, 300, 336, 3306, 3368, + 1065, 9326, 65, 7387, 2465, 869, 66, 9555, 83079, 1494, 87, 3423, 652, 11805, + 3906, 73, 70, 1910, 87, 10097, 72, 4171, 86, 8892, 24409, 80, 5230, 2554, 73, + 52097, 80, 2026, 38977, 365, 4080, 89, 32868, 16711, 80, 352, 84, 80, 23361, + 74, 80, 89, 66777, 773, 87, 21620, 1551, 34856, 462, 402, 27455, 8480, 55568, + 1919, 67, 39911, 87, 3178, 67665, 80, 3601, 4080, 89, 764, 17704, 80, 
324, 74, + 9996, 80, 333, 87, 1919, 8385, 1739, 302, 4922, 14406, 830, 76, 96416, 89, 1910, + 89, 85, 5010, 67800, 45015, 77, 16373, 89, 1804, 76, 14029, 376, 77, 23577, + 4970, 73873, 11823, 1983, 74, 80, 3601, 67, 9092, 385, 22575, 672, 69057, 13045, + 354, 38361, 8212, 86, 89, 4991, 8088, 71, 7197, 19239, 38041, 64087, 875, 657, + 66, 773, 24244, 74, 4680, 9832, 74, 4890, 85, 332, 19555, 86, 2106, 77, 6151, + 1065, 346, 5308, 969, 5104, 28679, 74, 13536, 85, 1031, 509, 75, 3172, 15802, + 80, 5010, 75, 49450, 3933, 87, 1658, 87, 327, 47171, 21543, 34175, 87, 49358, + 398, 31764, 55233, 392, 44508, 73, 302, 73, 36722, 12940, 71, 73, 7488, 12825, + 89189, 37406, 52971, 3427, 4341, 3141, 876, 70, 5010, 324, 7047, 73, 2438, 3368, + 46224, 762, 385, 73, 11949, 2203, 31764, 66, 46640, 86, 5010, 65, 2580, 89, + 3153, 4664, 78930, 88, 1546, 398, 2034, 86, 5237, 44490, 784, 84, 263, 391, + 2580, 85, 346, 14468, 79, 2067, 85, 42818, 18442, 8385, 677, 1662, 2067, 86, + 383, 6723, 1073, 64145, 1026, 587, 2629, 383, 80, 8671, 40078, 953, 24990, 2026, + 66867, 1568, 66, 1551, 8088, 38041, 74, 764, 70, 32478, 49450, 32204, 15555, + 31146, 15809, 546, 15618, 689, 78498, 5308, 361, 73, 8462, 24738, 266, 26039, + 71, 7910, 80, 875, 53835, 88, 28679, 73640, 17008, 70, 5308, 34146, 71, 710, + 85, 8910, 8311, 2562, 42818, 6511, 796, 22119, 74, 87, 1394, 6200, 70, 46640, + 47154, 89, 6151, 581, 3906, 1273, 10470, 25299, 80, 30407, 299, 6292, 266, 73, + 63885, 1336, 20553, 1631, 86, 372, 752, 41796, 85, 336, 74, 1065, 66, 16499, + 69, 6358, 38385, 10776, 71, 23913, 276, 80, 73, 13855, 77, 764, 68054, 23866, + 796, 80, 1725, 4239, 71, 544, 2480, 89, 657, 86, 80, 78, 343, 73, 2603, 42511, + 69, 69057, 5964, 7369, 8462, 14029, 261, 12989, 11949, 49463, 73, 87, 4010, + 90231, 365, 450, 79, 29238, 85, 427, 82, 65167, 41129, 31019, 68411, 1347, 47689, + 70678, 1551, 87, 22119, 3418, 728, 65, 80, 657, 80, 73, 10820, 1029, 80, 30506, + 42908, 54899, 677, 451, 80, 20990, 85, 10036, 71, 2445, 20962, 708, 69, 764, + 652, 796, 7501, 80, 1498, 78930, 5010, 10196, 80, 1072, 295, 87, 23361, 70, + 45015, 70, 351, 67800, 86626, 66, 634, 1631, 9092, 73, 797, 87, 19866, 12498, + 74, 3172, 70, 2303, 74, 12498, 7737, 65, 3601, 2727, 80, 360, 2034, 32204, 2727, + 65, 1609, 5972, 89, 4680, 5981, 42818, 7501, 1302, 38977, 65019, 88, 797, 80574, + 300, 1658, 73, 4657, 309, 97715, 5192, 6009, 24027, 10333, 84, 80, 14029, 40924, + 2111, 67, 728, 372, 85, 365, 18854, 86, 5289, 276, 9902, 80, 359, 20553, 1804, + 339, 8311, 1065, 370, 869, 73, 10776, 77, 80, 1764, 10333, 76, 1910, 8311, 437, + 80, 4777, 77, 657, 74, 93000, 447, 85514, 6375, 4912, 36851, 42908, 7047, 25299, + 74, 1273, 21436, 76731, 1658, 85, 953, 88, 9700, 5104, 36825, 544, 7513, 73, + 438, 89, 11253, 10784, 73, 2026, 70, 74, 8207, 309, 25772, 4239, 38181, 3251, + 65, 831, 377, 8480, 87, 22873, 83, 73, 94817, 637, 5746, 3423, 953, 65, 427, + 41129, 89, 46224, 73, 1247, 370, 87, 300, 8248, 86, 2249, 8910, 85, 2221, 710, + 303, 1247, 87, 86, 1331, 87, 18577, 85, 26397, 16276, 3458, 80, 2857, 377, 11251, + 89, 7387, 42908, 302, 39911, 77, 385, 39911, 42050, 710, 89, 40078, 89835, 70, + 5289, 344, 11328, 80, 1339, 355, 31721, 41523, 344, 2438, 73, 7501, 333, 2203, + 540, 375, 79, 521, 5981, 588, 80, 89, 85, 10796, 87479, 8050, 73, 4239, 9988, + 76, 1631, 81, 8088, 79, 1394, 82, 2912, 70, 73, 52513, 822, 511, 9188, 450, + 10176, 46640, 87, 86, 398, 333, 86, 8385, 80, 300, 1897, 55773, 303, 1072, 2319, + 89, 9996, 664, 31268, 13855, 939, 69, 27457, 39911, 911, 55748, 347, 83, 
73, + 86, 581, 6331, 370, 70, 73, 9907, 3105, 752, 78, 70391, 5298, 74, 28679, 65, + 22524, 74, 64087, 80, 78, 3251, 28400, 1116, 80, 74, 796, 24459, 723, 85, 89, + 3418, 344, 27515, 40078, 93797, 85, 1658, 6194, 73, 501, 2933, 71, 95143, 11206, + 71, 73, 5079, 1073, 74, 3178, 5683, 3431, 81, 40078, 89, 1354, 73, 86, 3013, + 303, 89, 454, 70, 80, 784, 80, 42818, 3368, 71, 7501, 67800, 85, 73445, 73976, + 86, 5650, 6194, 79, 318, 66, 318, 12328, 65, 2200, 13536, 31019, 3059, 689, + 3251, 6511, 80546, 564, 77, 1517, 28774, 4080, 41129, 292, 81, 783, 65, 70678, + 24752, 73, 89, 894, 939, 39152, 566, 85, 77827, 80, 4511, 72, 1130, 5964, 20811, + 263, 74, 339, 8910, 71, 8566, 604, 332, 1320, 16400, 307, 4763, 67665, 69, 73, + 12591, 42050, 19072, 80, 3906, 89, 392, 73, 7488, 89, 1122, 418, 20103, 80, + 56871, 4880, 65, 80, 74, 427, 566, 796, 77, 5701, 80, 74, 27455, 25817, 1551, + 309, 540, 354, 94908, 8998, 4223, 14878, 8050, 23001, 87, 1065, 65, 9907, 1494, + 1910, 581, 85514, 365, 88, 21888, 85, 5298, 998, 402, 9991, 85, 303, 299, 2496, + 45015, 3427, 352, 74, 80, 797, 23707, 87, 13441, 365, 49891, 66, 450, 343, 26397, + 1273, 455, 318, 267, 80, 7047, 1590, 318, 71, 20127, 564, 75, 303, 710, 5469, + 3251, 8207, 318, 75, 10784, 2438, 79, 87348, 4168, 74, 34870, 302, 80, 3458, + 80, 88, 50153, 1233, 86, 344, 23048, 370, 7521, 10002, 1100, 9700, 337, 20553, + 87, 363, 9451, 6194, 73, 63643, 93000, 6511, 9110, 73, 2649, 80, 89, 80, 70, + 9490, 86804, 87, 18577, 74, 3415, 7910, 6388, 1498, 27457, 1073, 70, 80, 69, + 18442, 359, 58153, 73, 9907, 540, 89, 3251, 18458, 365, 5192, 74, 412, 327, + 509, 78, 343, 73, 20127, 18442, 85, 19072, 10616, 391, 85, 80, 11160, 10056, + 911, 24752, 4922, 86, 85, 42908, 1631, 343, 318, 89, 641, 2784, 1729, 1030, + 5972, 15479, 1900, 2933, 43505, 2439, 9509, 728, 3978, 13536, 89, 32868, 6455, + 2249, 8998, 89, 56871, 10784, 784, 5308, 49712, 89, 22524, 52097, 509, 45015, + 8910, 2329, 73, 3418, 677, 4912, 80, 69, 24752, 11377, 10776, 391, 21325, 6511, + 552, 2319, 1658, 49712, 9700, 70, 73, 8998, 80, 285, 365, 64305, 89, 61057, + 1729, 86, 5298, 71, 544, 307, 798, 23913, 70, 2303, 263, 76, 16400, 89, 48380, + 33323, 80, 24244, 48372, 18926, 7910, 3418, 85, 1551, 4341, 677, 44067, 3601, + 3696, 48610, 3427, 89, 710, 10470, 587, 36722, 49463, 45015, 13009, 9700, 79, + 372, 30987, 81964, 21796, 48380, 588, 371, 2201, 3059, 89, 55418, 47154, 73, + 53289, 13722, 3524, 2642, 18854, 1764, 68, 16618, 80, 764, 80, 5241, 1604, 5972, + 88, 352, 1662, 64087, 80, 4991, 10910, 953, 80, 303, 80, 1498, 376, 377, 88, + 6634, 80, 1466, 7453, 94912, 9490, 8462, 15618, 65, 710, 89833, 75, 80, 88, + 437, 73873, 89, 2312, 318, 66, 359, 9832, 3601, 8207, 23361, 74, 418, 67, 34175, + 86, 80, 25457, 66, 333, 85, 3178, 1725, 268, 80, 4484, 604, 84, 4341, 5469, + 89, 78498, 89, 35531, 67800, 4852, 441, 65, 93000, 33721, 318, 74, 604, 36851, + 64145, 93797, 2465, 80, 61057, 302, 3316, 2191, 31019, 19456, 20824, 637, 8826, + 21486, 351, 79, 6414, 3178, 1725, 80, 89, 42818, 80, 404, 1498, 86, 85, 588, + 74, 634, 14088, 87, 23361, 80, 47689, 30506, 1029, 12119, 80381, 5964, 66, 1498, + 73, 6292, 291, 89, 6256, 7835, 1498, 66, 7453, 73, 87, 19239, 582, 5308, 93529, + 1634, 88, 329, 10616, 86746, 20191, 402, 4080, 86, 2005, 6511, 361, 74, 5308, + 450, 66, 8462, 6780, 363, 4719, 85, 9810, 80, 29238, 80, 1910, 13855, 309, 69, + 1498, 9092, 4922, 5418, 830, 79, 1100, 12682, 83, 73, 87, 73, 318, 66, 3423, + 65, 80, 2332, 3906, 370, 710, 327, 28546, 87, 7488, 86, 12940, 773, 6043, 80, + 10847, 84506, 
352, 830, 65, 10616, 329, 22119, 4458, 64, 427, 79, 372, 21659, + 31881, 3423, 44970, 1662, 1233, 8510, 31577, 81, 1031, 2308, 87, 318, 12498, + 18299, 88, 869, 4657, 773, 339, 3841, 72, 4912, 3518, 406, 73, 3251, 65019, + 88, 7047, 10630, 9092, 53060, 9902, 82999, 4625, 309, 38361, 67800, 16848, 70, + 675, 42848, 87, 8671, 28990, 25604, 74470, 1662, 79, 7737, 418, 2690, 12328, + 3893, 24672, 398, 53016, 32759, 3181, 72, 906, 652, 80, 13075, 31019, 48872, + 96304, 5104, 637, 4381, 683, 70, 604, 1073, 625, 74, 58598, 28990, 418, 501, + 80, 1216, 53016, 87, 43723, 66, 352, 85, 12940, 747, 404, 2026, 14170, 80, 8088, + 1031, 967, 86, 911, 45228, 73873, 88, 1331, 581, 906, 85, 70391, 17008, 80, + 275, 22619, 2739, 85, 16400, 4317, 80, 509, 372, 343, 21659, 2176, 80, 333, + 1065, 9780, 73, 375, 20372, 1677, 71, 10056, 9902, 89, 5697, 336, 675, 71, 641, + 29684, 1030, 39152, 911, 19747, 80, 32204, 1662, 1065, 4484, 89388, 1609, 3647, + 94869, 6036, 1764, 15258, 1551, 16373, 5289, 85393, 5469, 2411, 77, 1551, 85, + 47154, 365, 69, 26397, 76, 1026, 5544, 1494, 15618, 52971, 11089, 51622, 70391, + 4991, 83079, 7737, 71, 450, 65, 16276, 84, 5308, 939, 24244, 9289, 89, 19072, + 462, 1729, 11805, 36851, 329, 4010, 307, 333, 1662, 38041, 11160, 1658, 796, + 5237, 45228, 8050, 70, 80, 10776, 9092, 1631, 363, 34175, 2070, 36068, 68054, + 14949, 1031, 3906, 73, 13536, 71, 10470, 83448, 5010, 9092, 77, 7488, 84, 300, + 25770, 26636, 544, 86, 4010, 22473, 8536, 3251, 76, 1286, 261, 17704, 4341, + 70, 27455, 11872, 55568, 351, 74, 10056, 80, 12498, 70, 73, 46790, 566, 377, + 398, 67, 1100, 376, 38361, 69, 24012, 580, 85, 80, 11851, 32589, 377, 22619, + 316, 3576, 652, 70, 2329, 3251, 343, 2319, 82, 441, 36851, 79, 20127, 71, 1609, + 29700, 89, 92216, 87, 398, 1517, 73, 637, 67, 5346, 71, 7501, 7141, 96518, 6098, + 81, 418, 85, 10470, 83, 544, 41796, 89, 89835, 6358, 85, 4777, 564, 35804, 604, + 80, 38181, 302, 95947, 70, 12723, 412, 80546, 395, 1273, 1029, 300, 87, 3368, + 1273, 454, 74, 5972, 74, 1030, 12748, 80, 450, 20127, 5964, 69, 351, 75, 9907, + 9092, 77827, 7063, 84, 2319, 266, 303, 365, 18577, 1073, 675, 3647, 69, 454, + 87, 8587, 73, 85, 3893, 28679, 80, 25299, 1029, 57208, 9092, 9117, 2034, 89, + 2689, 5972, 9991, 79, 12498, 74, 89, 29684, 2302, 29684, 76064, 89, 14694, 288, + 98667, 7501, 80, 39172, 2005, 73, 3479, 14244, 73, 13183, 31019, 344, 360, 22016, + 1936, 9081, 1725, 30506, 11160, 1919, 81, 42908, 80, 89, 4223, 86, 2302, 418, + 3172, 87, 1729, 303, 80, 2329, 566, 85, 1336, 13536, 85, 44970, 9832, 73, 86, + 302, 73, 1220, 18571, 774, 46224, 10784, 5544, 73, 4912, 1371, 12940, 1073, + 73, 12082, 80, 1394, 53060, 437, 6043, 511, 17137, 85, 1065, 88, 4010, 7044, + 32204, 451, 88, 7387, 6511, 1320, 283, 327, 581, 70, 33863, 23913, 89, 20824, + 22016, 1394, 40924, 344, 89, 754, 7453, 73, 85514, 80, 2668, 575, 67, 1725, + 675, 69, 73, 15720, 67782, 418, 69, 41796, 87, 302, 73, 54909, 10616, 2312, + 74901, 74, 36238, 2580, 18854, 2042, 89, 399, 89, 31268, 74, 45015, 38041, 5469, + 12498, 13391, 14029, 652, 66565, 3178, 35949, 1631, 4341, 5469, 49938, 65, 1897, + 295, 2111, 344, 47154, 73, 2580, 73, 65019, 10333, 8462, 78, 1065, 6634, 71, + 1804, 267, 66, 40078, 3013, 74, 6331, 355, 1729, 80, 24672, 540, 35531, 2445, + 15008, 89, 5104, 1273, 88, 69, 1494, 1091, 2249, 88, 37975, 2857, 9832, 25299, + 710, 70, 80, 89, 97523, 23866, 261, 89, 70678, 764, 87, 18577, 21486, 8480, + 69468, 3841, 20926, 80, 352, 12498, 73, 42528, 36825, 301, 80, 418, 89, 45788, + 337, 9832, 89, 85, 12498, 76, 1030, 1394, 
11377, 73, 869, 728, 5241, 773, 88, + 3340, 85, 1310, 80, 637, 4239, 30407, 3415, 68, 71696, 762, 302, 19239, 89, + 1320, 1394, 764, 93000, 4680, 71, 2249, 69057, 501, 23760, 6200, 91998, 18400, + 85, 20926, 7501, 73976, 79, 46390, 80, 318, 370, 18577, 343, 1725, 70, 1658, + 4991, 96605, 73, 1029, 9832, 376, 67, 8536, 301, 89, 331, 85, 1347, 1931, 73, + 6632, 301, 278, 46341, 784, 1725, 63643, 60013, 32589, 89, 1310, 89, 24990, + 86, 18854, 4937, 4680, 87, 71, 483, 355, 455, 25299, 664, 67702, 1073, 9195, + 73, 6634, 59524, 74, 80, 2739, 18854, 74, 501, 1331, 86, 26738, 391, 1073, 64145, + 70, 2067, 359, 47875, 3251, 1498, 3933, 6780, 1739, 4912, 4223, 418, 79, 1897, + 664, 65, 10188, 80, 983, 11823, 266, 48320, 5192, 36851, 74, 8311, 876, 15444, + 3906, 87, 2332, 75, 80, 14878, 2465, 4835, 983, 76, 8462, 784, 5469, 575, 3013, + 59524, 375, 12328, 73, 20127, 89, 983, 9112, 713, 324, 86, 11253, 7819, 1092, + 86, 89, 339, 74, 15959, 295, 51924, 1073, 89, 31721, 325, 1331, 309, 85, 12407, + 8671, 59524, 20211, 4484, 998, 283, 8318, 88, 73, 2312, 1036, 21888, 7004, 2026, + 5241, 80, 2332, 36171, 87, 2303, 89, 26929, 1366, 20127, 71, 70678, 1551, 84, + 773, 3806, 21206, 278, 81, 9664, 76, 38359, 85, 1394, 773, 869, 3906, 953, 347, + 76, 1978, 383, 41622, 509, 339, 80, 17156, 266, 9117, 87, 84, 12748, 88467, + 708, 15618, 73, 46341, 447, 48380, 80, 7208, 74, 87, 70, 73, 70, 1290, 88, 324, + 302, 78, 336, 3306, 86, 361, 4922, 89, 73, 764, 307, 31172, 8671, 70, 283, 96518, + 31422, 88, 73, 2159, 332, 2067, 710, 668, 8462, 31764, 70, 3013, 19271, 86, + 657, 79, 316, 74, 447, 263, 44970, 5481, 44490, 17043, 32973, 4168, 2302, 74, + 7501, 9832, 451, 1247, 575, 344, 2042, 1031, 81, 1739, 69417, 273, 11289, 81, + 2445, 307, 28679, 1273, 8385, 648, 78, 511, 2445, 7047, 76188, 441, 2192, 80, + 54803, 9700, 5308, 68054, 71, 398, 3415, 953, 88, 73, 10462, 363, 31172, 370, + 89, 9810, 71, 1517, 52719, 84, 587, 3418, 82, 3423, 1729, 89, 876, 82, 3415, + 73, 85, 2642, 22524, 377, 4942, 73, 564, 9700, 87, 2106, 8318, 88, 80, 22619, + 752, 70, 1394, 75, 3178, 4835, 89, 657, 18571, 1729, 8207, 86, 89, 92216, 3172, + 4942, 3933, 332, 939, 57208, 9991, 363, 19239, 4511, 89, 73, 347, 86, 9081, + 70, 8587, 2580, 80, 325, 9490, 3906, 19747, 5481, 70678, 5544, 67, 34175, 29229, + 13616, 73, 12130, 36851, 85, 80, 36068, 6200, 427, 17704, 80, 3251, 85, 31695, + 402, 3516, 869, 74, 383, 2067, 72801, 79, 1791, 73, 939, 70, 1604, 3368, 86746, + 87, 392, 75, 543, 4835, 88, 80, 4086, 94055, 16400, 71, 75406, 339, 625, 1729, + 31764, 15479, 511, 1551, 5598, 398, 1320, 346, 11377, 437, 901, 85, 80, 38041, + 73, 59448, 4223, 85, 3601, 283, 16820, 41796, 85, 4381, 336, 55498, 283, 22119, + 14878, 44183, 455, 3059, 21538, 16017, 73, 5683, 89, 372, 708, 28927, 9832, + 66, 98667, 752, 80, 69, 89, 1231, 8088, 11222, 1394, 3306, 71, 1517, 911, 85, + 17134, 2302, 85, 818, 7141, 65, 5298, 70, 3601, 7521, 4355, 3223, 84, 347, 70, + 4080, 61057, 46341, 1804, 28927, 46341, 47154, 16400, 8462, 12119, 32547, 1658, + 12591, 20127, 12825, 84, 80, 275, 3172, 10470, 9117, 417, 73, 894, 34967, 80, + 7488, 86, 80, 953, 2312, 17134, 86, 288, 77, 343, 10616, 25105, 29238, 74, 9110, + 39152, 86, 1310, 76, 385, 1610, 74, 89, 9509, 61318, 77, 78498, 73, 88, 5418, + 56178, 89, 9197, 398, 347, 55655, 25299, 5289, 3893, 25105, 675, 43210, 398, + 65, 2329, 376, 2203, 17134, 75, 333, 89, 10910, 1336, 87, 32837, 75406, 80, + 4010, 80, 1026, 75, 3368, 68, 55334, 42818, 80, 89, 1551, 31695, 86, 10784, + 2329, 9701, 544, 81, 339, 20990, 10796, 75, 1094, 14606, 
85, 1336, 45228, 3576, + 336, 11949, 391, 86, 8088, 46224, 291, 36851, 424, 73, 86, 3415, 1609, 89, 4168, + 88, 47391, 8587, 28990, 29700, 80, 3055, 402, 14928, 67, 7488, 876, 76, 6780, + 88, 36825, 42908, 2312, 3306, 2739, 80, 25295, 30506, 501, 21066, 93000, 2562, + 7215, 337, 80, 88, 677, 69, 33863, 80, 16820, 572, 35278, 77, 80, 1065, 64, + 309, 77, 544, 1149, 68054, 784, 2332, 67, 2641, 73, 88, 9117, 87, 4922, 79, + 911, 83, 10616, 11823, 275, 73, 21620, 31268, 70678, 1029, 98496, 12328, 10910, + 79, 416, 85, 89, 664, 26397, 71, 73, 32962, 10056, 85, 17134, 1146, 80, 30463, + 1551, 24990, 70, 41670, 604, 67702, 11032, 7488, 385, 581, 80, 1247, 11823, + 87, 9701, 501, 1146, 352, 76, 44183, 21620, 936, 9700, 76, 1216, 76, 2396, 2249, + 2739, 73873, 87, 18664, 88, 1919, 86, 85, 2042, 5972, 28546, 73, 86, 11089, + 22619, 80, 99161, 1216, 45228, 2438, 70, 511, 79, 391, 25299, 939, 86, 1718, + 80, 70, 1764, 21796, 74, 47154, 67800, 9490, 28679, 67, 365, 406, 73, 43974, + 89, 19239, 652, 71, 73, 89, 3841, 60013, 2249, 69, 5584, 6194, 70, 80, 73, 21807, + 58305, 329, 1029, 74, 7453, 1216, 80, 3141, 97816, 4511, 17134, 60933, 45015, + 74, 5721, 664, 5010, 327, 74, 80, 295, 346, 89, 24527, 1331, 96250, 344, 89, + 80, 8088, 953, 82, 2727, 318, 352, 347, 8207, 73, 88, 8462, 75, 796, 14606, + 1662, 540, 17851, 36825, 1031, 80, 359, 1310, 11823, 566, 588, 73, 5038, 4942, + 49595, 8462, 4942, 32204, 85, 1029, 73, 4484, 309, 9902, 85, 2857, 10784, 82969, + 747, 88, 64087, 302, 86, 4080, 73, 87, 31764, 728, 91295, 95143, 7513, 869, + 359, 67020]], "model": "text-embedding-ada-002", "encoding_format": "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '45399' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1R6XROyvJbs/f4Vb723nF2CClnsOwREvkwQEHVq6hQoIqAiHwmQqfnvp/TZNafm + xgtIIQmdXt298l//+Ouvv5usyq/D3//66+9n2Q9//5/vtVs6pH//66//+Mdff/3113/9fv/XyPyV + 5bdb+S5+w383y/ctn/7+11/i/1z5/4P+9dffPK51Zlm5w7mkNi+wXlnNrHNYerPffK4Qe1bI3MOi + 6Oa2C3X4PNM120LZV5MsPW2kif1Edu7uzscyWyXImg93LLn2bIyrW9qg7N0kOGq7VTYt12WAzDg8 + E3PBlt1U3E82IsXZIM5dEHlzXQ81ii0W0PF9LSOuN5CiPksntvGEYzSqjViqj4e1YMbwWXq9J5xN + tFoEB2JK/JIxIZ80YLfwyUjp4W5QvaxH3VN1iLVRrx09zrquVp1AsFAkp2rQiJeC7OMZd69FUHFo + HBGm55ERzxE9Y7x0eopky1jjhm53fGLPtQLz3b1QMRm31SRsdhZyNQNjtTaJMeeofcFhffdpIbIh + o900UPCYVjH8jhNvitmbouydXSjnn7KafdkZ4bHvPYLf8dIrHseugd/8rNwNvFFT/RIJaSRh9dJF + FevM5gz7ZzGzVBEO3UD3+wD69epNdhyzaG78kIIRgkT26+UWjfdVYUHXuwbTjRUx+gopV3RenW40 + iaa4Y+nufkXHSqHM8+cPH8++ZymDOZRs+xDXEeV+eQVpfjpEOzRtNulX+oJ5v6vZVq4HPssX2159 + ulhi2jQssmEc7BRNqPiQqMGWNxrIwNCAtSPG7FYZLRcigGxt1kxHvdx9nDGagZ2Shorn3jCmwZpq + aJDkElK9hGq6+MOIeBrpbPdODbRiO0GAL54Yvqt2JTnsDkjLUo1s9o/amE/B9gXwfN7JqZaNTJp4 + bKKQvyX8ePVON662rYhMnVREZ2LOebB8J6gt5pztZEutBmFDTHhcaotsGs8xBoLUFoyWtmR/6XjF + QprG4DTFkRnuS4/G9ycQ1cg7tmwfBgmfHHYX0PEmjFRab+4ZD7c4hU14rvFKttSOa/rzBSS9bsnh + Ng/VuPaWBZgTw7S9xj5azckN5OZMUrKr5Sobda+qYbdiO7KnNzfiCS8KGIL3DQtZ4XaSrYGLhnVN + 2HmdxsaUXbVa1U+iTwKla7vZsG6hvOAvj2hPOUJDrp1EOGO9Z9viYkfz6rBuQFjjJ9GFdp3NEQqX + ixlEnxHjlkZj2MgxvKrKJFqRxhV3anmUF6ZZUK4KGHGnkgDmTotZlnlbg0OQ6mglxzFzy6D1GuPz + tkDq1zb7rqfHj/1Fg+dCjohX54E3LhPPRif7bdK50KKKP2a4omJ6x3/wzoVlsYQuMHwq3M5rPoXK + 2gXVOIS0Nzuj49q895GFP5TEHHnestTWS9g+Y4Ec9cmsRJTeCvCRajINDR2iSfFcI1F1Cqyk0j5j + +/BsI/nGB1q5ZWRMl+ch/X1vRubr2uBJCgpSLl6Ej0dQO/o2dymkZoEI/sQ6mtx80cIiWx6Jpx/v + xsii2USkNzSy/Y7v5NZzYbWlNXP5uzbG5BgpatKiA71NJ5k/FpeZotSMBMolWzdGCaMW7jmViO99 + 
SmNQS9KDOo8q84Nsgca6S21Ay3iicgm8+iDT9IFtD5w5N/NZjdfgdgU3tAws5ctLRaVscwVu9zti + a89PNw8brYbsfbmQrWvP3sCV6gXTp5/ocr7bnMufXQJ1KXCK8F7mfLdVcgjfwor2kUa68eztAjRa + y4jZr7YzxvLKbXRqU6DhTRdRJx2DK1ocu4x5+OB4Iv4sl8ruvjgQ7bp5opEzMYD4qrjM8XzBmM/a + w1Ky4ZgQ52Q3Hi21tQhyBQ0zCpYaTD4zXbkesEnMaH5x3mXXFL58zyJnZ3j8Zl0EGMd0Q8z7a46G + UBlt1cIdZbo9XI35g+YExpBishMHyvtHf29gEzpvZjDf7OgPL3Xmq8xL6NugTjgDIAgpc7qbhqTm + sFPQqOQGXQPaRNP5ZpnyD0+6w27ZsL/NLvLr3KeX4bDgFL26BNISU7b/4Vd8rmflu//Jt54aY+bs + BWjagFBppBtj0m/lGR6dt2E75eMbo+51LwTYfLHTkDyyWeo+M4z0ZVM+W7ExHtVehzTwZLKX1m9v + XG3LJQjmS8JSPZ+8KTPPI7hb1SO+hp8dbcRWRN4Nd1Rp7HXGr0chh1CeGdkchNJ4+EPeoPXGdNlG + ezce0+atD7/6oGQrXE2va5CgLJszst0wI5oPZtVD9egCQpq6NcZzpShwKhKFuG5dId71YwHmZ3Fm + Ptsb3vDR3i+QpbkgRhV/UK9+1gn68iXl8iNAY7gYYrgqpYgVbxHxZTc/SqjJUacVs4/RUPtSCY3/ + uuJptYiz7/xmVMnZFi+Mh1PVMkpsqNcd/4P3Sa2MAslna2B4ExmoTvU5gPA9KBgkp6jGFndrkI5u + hIUiffEJSx9AxvLUECOTVNSroVbCCbBIaR9uKj69ZwtJziYjmiHORqfL1x7FylHD5f1dd+PeXRQw + nhuPGWur4NP+KiQoFhsNw7wLq/EkRliNJPdItLi8cD4FbgBpH4wEa3sejTdtFKDrZOvLF7eOK5dW + BO2kC199lFRDcT+5kKx0h+ESvSPWHm0KT9G4MMfE947vzGWzbu2pI3r+iL2pC9wzqF1ofvGmG/Ne + O8QovnZ7shUWNzQFxtYH1O5lYvCPXk3ai2NY8NpjyWXaZNMru5bKtjMEDJdjhr76ylp3QeiQXVY6 + xmg9Ugtu/o7R5UdA1ZRl5qxsVYsTO7lsKnGRvxXoZnND17b2yqZnByIaw9uBeP7s8Pn0VlNYBgEw + 7VAjr6cIp+j7fYlvuTof3i5gtGenFT4FhpqNgb3EaNloEztFG4TaVscmLFztxpzHTcz6fDy/YLXt + a3bd+abx0Yh3VpVbeSSeJLXe/OP73/Nd0dSz0dTDP/uJ7Y6LtJqisNHBr/cEL6sPy2j/2Kfo8ra2 + zFrCi491VCbw3T/Ez9ftd33iAi7HcE/8/WPMeCS+NLi8JJtZlyE0ZgXzAlgjV8RhS1ZN82MBgGEO + 8WCkajZrTppCc85PZHOjecQi6SEqS1n8sE0x+tk8aOb442c65vnK4Pf6mCuPy8v6t96s8kMDJx8s + ZjnXpuKKjXs0iGRPF7reRnO6OmP46ge8YKVdrcry6sJXnzOsVHnGn0W6RHR4qFSNfTGbF9FeQZ6X + crqKNhma9cWegnTQMLstRDNiqZ2F8NkNW6ZNE+3GnMUzcPvmMl0Su27apkMAbGHdfvXPm26KacEO + pwmdW00zui8+FfEyDxSpgcXHSxS+lMDpOebOzjC6cZW84GywgUJPzGpKBIsinxaYbevlKxq++EQi + T7KffjbmpBjWoCN/QRzsAqLoVSW//UA2SaYZs1ckNoiGOGJxPa2N4TTQdK3CcoEXLlhZ31+JpVS8 + 3zLbv5nZ/JAfIZzyx4bsOXyqfqcdcjiBL9IFSo1oXA2zAPbgMqK/hNCbF51cooV88MhOdns+dQTX + 4FaNQKyPkHV/9Mnmen0xq6m8aloJEsBLioHs1iZEf/Ss+CgqukjFh0HtKguUynVPWL2wkn/12wv1 + zktmztLL+CBeWgCq7Wy2UdTI48ZnjYEu6plthYWKJnRsElhfBZP5MQzelF+MHIZATFlyjKvshx90 + CIQYL+d7gyi7+6bSOnpDp9VCzOY8CnOw33imPF4tsnnpPlxVNUpCn2ehq7h1zAGcsj6TS6Xp6Hmr + AlNFe00jbhV6Ga+at4h+7+cxue+mvaUnahyKNvGspR+Ni/y9Br26u1SN+DObQ49j+Pofoq+iFnFx + v3fRV7/Q5+xM3nT1mhBYgypaJ30SDYvL3EOBuoT53kf3uKQW9c9fYWHpsmqOI7cHd7vwiFk2WrYs + XKdV5M+nwV35eqBpM2AF6Vt+oup+1Xh8L/c5WK/2gRcg8YqGohdDuU7uTNucCtQ/DVeHmPY6w7dr + 441ejwRoW7EgmxgKNNW+WsIPv8KhEIw2FL0E9uy4Ysaymo15OqIQdmpnEswDzZsITLmK8pOAmbwS + OZfPTIOEBRHxPJIZ88Fga7TeWC5G98bweL7/XJXP00F4+cXTTx///CnZpOOim7H11NF59UqpGNFT + xWhovNQsq3K2a89lNVbKskG3M35jHhlaNh8nC8N3/sw+QFVN2dV+Qbi4M2LuloD+4PlVPUz25cfo + p9eABJ6F5eFwR09hQyzUpYn39aOjN9ZRm8D5OZbEaJlVTe1I/J8/ZJ4aWGiWL5qtjpYYsa8fjz5n + aaKQR/X+D99xeFsaejZejuW7IKJpGaQu+vppslnWdTROj1uAgnvGaXWT3G48megF0gs8ZkdZySc3 + XzVw3W59huGO0LhUT7kizUuLcmdXeePjWDVwu8Y9u4dRxD+PRRrAPEKIZT0tqu6dWD7Yh1DCfNOc + jCYfg1oNjIfMTGdWok8aSSGYTykivlyybLTZZCnivW8ZPl2zbNTd94hkqrdsFxi3bHwMUQ7Sa8DM + pNfum38sYrhkvsuCrFhHM3pVMRxRWxFstoeM37a5q8T1xSC7pU89thy0ECF/MrC4vXTVMPZm/cc/ + b9z70mBHY2vCULYhw2maeuPgtzU6d/ROtsOoZ3NfOhh1dhmx7aGiiGWvfAadnUxislXG6SM4JuAv + jR358edk1pMC/vkYE2ckeTc+hixH0bb9+dclGrXjiNXAoZy5/efZcTJmDVzuMSHeNlLR5LCTAMLa + fzJPPy686WU2JaSBIzOD2cesD7WHryqTazFnb2cdY5fMhMLevPASXWrOg9dO//E1XW/1XTW9Rc2F + VZWeyEYqK4NrxiNVv9+T2bKqV8t75wZwPz8QfUwbiU8+TXO4HnyTed35ns2QGNoff+ofBfxvffr1 + w8SbV17GPWOyIfeWJ2Lr2qXi2Tt8oXbvDsQJu9bjemifUVQlN7br48KgX/5Tzvfnm+mw3xkSU5se + kiX3aZMcw2gePgFF7BY8md2/mm7oiraH9RYTtrujFZoyIwa0VjyVmblXRn0ptgLSLT0mrnwPs7ka + 
gytsanwl+x8fSWrxUm8ntSVWwvVq9fWb8k3bS8xe50bUU2SdQTxmHZ6K/uZNxVofQexez5/+ifoY + +ynqTOfAtsqLeVPWFwr6+hNifhZ7Po7zdIZccmxSdNLBaLP4DsjbGRPBsk2zbmZODfrnsiN+erp5 + 3/cX0HYvbsk9utTZhP1cQyf9syOb+8FH43utJuv11ieYhSfDWxq7g6VOGwC69m91xu/PQwBPWU2Y + tRaP3ie2sxTw8nkm/uo18V64nHWonVZn33wj4ununqPXQXDYdrkvMl4duAiO+37SxVzsMp7uTlfE + Pm5B7HQOslHV+jOwjUfpZOsmHz/2GaMv3qlssx7xYNXZsJ/OA9m1Mc+aH57P9/ebLneyjJjKLgnU + 24OG10JyiIbNYK3hsace/chO2s2vY1eihavfvvlOmdEIpSJ4kf0hhqosvni1XXguUER++KNgvtYo + toQ9baM06Jhxjk2k3j2fedHYZKPSSksoGM9ooz0/Vfm+Iu3np5jVQ5qNOkf9z78R3eF9xZ87htGv + Hm1SkfIezfsEHiDIzFxLu0w8V7OClm4iEP8apd04B6IA3+/D9iR5Z81PnxNcu1RI09QYH/2pAU9R + cjyqT8VgTj2NcNOIhIdLx7ve67kA3zyCOOf3uxsSAffout35GK1iJ5rXWlxAPhkGXjrXpusgCDV0 + 2FoXKm1YlTFXlxKEi/UZM5BLPry/keUP//o3z5kWU1ugjYyudHHfM8RNawWg32aH7fbOgH71DPJb + Q5i+TWg1hMraRgJuV2w3JJuvP1oLIAT5iD8289HUrcMSZGksiNYmEx/sjxkodrZTvn7DiuYdRzXo + i3pD9Hnpc/apnz5c7gkhbnJm2RQeah8xSc/+5FOzjQwL/uz/uG47iq2nBt/9QdfJ5VH96jU6YJQz + 77bTu+VK9kHRlMv0yw/RnBLNAlSXMvFyp0L9PYccvs+nM2EOQvuPb0NhBxXZnu69Nyx0fY0OySem + q2614dImSRX0psoH52nuZKO1xiYs+bpnm3WxQeJUXkzg+ugyTf042XRKlBSOvrv+6tHZ+NVTmEch + JBrpezT1i1aD9C1TYhRMMYY7W10B7XUNw/Xk8DkKsQIEn/ZUmU2pmx09bdBSfvcYYth7Xz0Z//Ie + LEaz9W8//9X/1N1ujhHvr4GvXr3AIjfhIKGzs9JeylA23/qUOxGvL14MeL5FlN8mFFHyrGMQ98qM + +TXxMv5cYRG9a/GN1x4s+dhfiQnu4oDodJv3FaebKkCLR5VQvqvjqOm3mfXbz1T56s3p4j9HaNiC + MFyvajSd880a/erBKa7bqr/n4lX5+W3SFAof7Na2ALD1IpZRdN4wP1YCnNXyRLaaL6KhX5S62h/3 + K6aLFBlTtXRywIb/JEbBP9E7tbMADsFwItagOd0YOLOufPM5Kn79+bTtti/l6CHEDLyYvF++DC1k + GU70jcfp69gVyD1IFjOSdECsF20N1YEssF0txd5k7F0TlOcG//LqaHp2oggbajwISceJz1HVaco+ + uqnffP0ezQu4iOBc25Rsd7isppvif9cvQnRh3NJsDGbdhvd4exJ7Lp9cerzLXB1bYjEDaaMxppdX + iFy5Rn/8sZgMBlW/foicnzuoeH3elVCplUusK9O8poAVwCGAmAo6jfjqm2fLjpCHbDNtt97orOwX + 5NEp+KMnx3YJmvytb/QlK89uBpOu4T32JXGjdOwmBrWCNm5DMVrzBn3zFwGG5hSyrW+San4NWYzk + g8yJZh6CaP4gJUGqEYW/+p799APC+aqks8EXiB/jB1a//Ej0eVa77/yCP/npthNr3gSz6/74hVbD + sM3GZWK4MLGG/um3DLcqsFRiN3uWf/UV3ytFqH7zIlq/N8yYv3kgWphWQcxZdrh4QS1Fcfj0mM3r + nTcw6BVwG01j200XG/1jiK7Qxp8X0faen/WDqFC4rD8a2We4NHpap2eQCD4SM/f0TFqSJFE+K3+k + 4oJv+SSOuYCmjQDM+fIdpbtriPYbWWekKVI0G3Q4K835esISJ49qZN1HUf7kU0UqdkyXr1QxQduz + 0JL2nN/yzaj+9MC2OlmIbl9Bqpb2UsSz0J6zofbVAjV+fSVHLTANfj0u81/egR/RbZNJP783at2F + SlD23ayKVoFwO9y+fif0uteLFkj5nAMqxDmgofmgQsklz/7lnd702VRnOLVnYKZ4drLR+DATts9E + wHP90PjXn/hoPGcYT6JhoFExPgXMUXgnREqXaLbL+xXmepWy7XqziPhl7NI//Qv9/HkZc9wqJUyf + u0gnTzhmrGreS9S9sEUFfa4MfuyOPrzrd4tlEy8qHt1mXT1kWsIueWXyadVsBGS0fUuc7Pkyhv1N + cUE8Ng3D+mwYqxttLegH/4OX96Co/uRJrrbB7E/9DRfPBM671fzL+6vPdz3hXT9btjktR4PTZlHD + MvFlcrcy2Rvnph5hcfxkRFdJk3H64emfftVPz4vJMVOgiN2colWQfvPeWoOH0TrEgnUdzfgetGqj + lykxX+qC0/3idoXLw7WJLzlFx/eHawLeJp7o5anM2ZtN+hoUhG2qVl1htGIo2EqQRZR2j2LK/vDN + t/9Kh68eZYN8WsNnJ+jErno/G0IZYvTt9+GFEM98JJe8AFtgLeVJOvD25y/mp3X6o/f/5NtfvqXr + XWUbvXnBI8hUa//wQ/PtP4L6Qjp9bw6RN6ZpaaHVkF/Y9ix03RyoVxcc4RqSg1tto5meoIZ4DARy + DhQ9EwNb8OFCtiHbfOvFJBVzDuna7YiT36VuyLW7iG61VeJlwvVOfLmHEmjVr5nfjltvONHWB6cp + j0SfxmvWf/MJdaO4l3/rXxTXhfKZAk42fsarsfTuGnrs7xjL7SnIpmt5DcG92lucuKt1N1KEz8it + WoERKbc86bPpzmg17K9fvvhwqjlhCurD16m8OvRR8z58dKCu5xJ3soyKf+oBw+tz31B+k9xKggx0 + NBfsSKE6NtncB08L0iThdA4/l2hkkWL96Y/99NhXX5XyWryqbK+fr0a/rzob7dxNjOd5vlXT2xUx + /PpL9Syl1SzptyWKN1rAAn0OOO+M+gV//04F/Pc//vrrP38nDF7NLX9+DwYM+TT883+OCvwzvaX/ + FMXlP9nyz0kE2qdF/ve//n0I4e9P17w+w/8dmjp/93//6y+QVOnPgYO/h2ZIn//7zj++//jf//h/ + AAAA//8DADtB0HjnIAAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c09bf85ef231c7e-AMS + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Mon, 01 May 2023 17:29:42 GMT + 
Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-kd1j0bcill5flig1m29wdaof + openai-processing-ms: + - '406' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - 3c27a4a0263197ea84526b8aa83799d6 + status: + code: 200 + message: OK +- request: + body: '{"input": [[359, 74901, 1146, 437, 11949, 3933, 74901, 728, 86, 511, 73, + 5241, 65116, 4991, 375, 10333, 20191, 89, 383, 22650, 1551, 327, 1810, 1804, + 80, 347, 11089, 3013, 752, 1100, 73976, 303, 32547, 73, 3696, 3906, 77, 969, + 455, 62615, 6581, 66, 501, 67, 2649, 12825, 8536, 89, 2067, 85, 80, 70, 74, + 755, 74, 352, 73, 4086, 3178, 87, 80, 35401, 376, 15618, 655, 13423, 33863, + 3933, 911, 77, 32204, 2689, 454, 29200, 2176, 1739, 65, 8998, 38844, 4223, 953, + 14088, 21486, 1316, 12328, 1725, 21486, 24752, 86, 1394, 5308, 321, 31019, 89, + 80, 73, 5981, 88, 69, 2249, 19072, 23576, 9110, 47154, 876, 73, 2319, 1029, + 6292, 69057, 2857, 20191, 2329, 80, 6455, 46640, 351, 89, 5289, 876, 77584, + 73, 2026, 84, 361, 23361, 87, 85, 4680, 85, 80, 13009, 86, 1725, 73, 641, 80, + 45015, 85, 447, 1072, 86, 22374, 14029, 83, 28679, 73, 70, 5298, 7369, 79, 544, + 12591, 327, 9701, 80, 2465, 84, 455, 11949, 7881, 74, 4912, 44508, 80, 5298, + 17553, 31268, 85, 604, 77, 11032, 6331, 9490, 45444, 450, 6236, 1919, 67, 20990, + 34175, 1804, 73, 41101, 80, 277, 73, 89, 664, 723, 49755, 80, 2191, 28531, 8587, + 15555, 79, 675, 42818, 375, 3601, 82, 1334, 76, 89, 511, 87, 73, 318, 4080, + 85, 1146, 80, 983, 796, 12825, 67, 4086, 78, 336, 1897, 80, 70, 11032, 7501, + 85, 12825, 651, 79, 16017, 80, 268, 69, 4021, 15817, 3757, 16400, 652, 28679, + 5964, 73, 17063, 80, 9907, 67039, 848, 85, 5308, 76, 657, 65, 1216, 88, 40924, + 11949, 76, 377, 79, 1091, 258, 343, 4080, 13752, 70, 27515, 265, 1273, 85, 89, + 44970, 71, 708, 582, 637, 1395, 1662, 83, 27457, 5308, 8942, 1498, 360, 89, + 10002, 67, 4317, 269, 587, 86, 13616, 31268, 73873, 299, 80, 73, 14088, 455, + 8480, 1609, 372, 53060, 48380, 17008, 48380, 35401, 40078, 45854, 652, 80, 21796, + 1551, 89, 9832, 87, 74, 1029, 307, 61057, 85, 12498, 648, 541, 267, 89, 70, + 24752, 80, 18577, 43210, 74, 370, 80, 830, 4341, 87, 8890, 4890, 31218, 641, + 82, 6331, 85, 80, 14170, 71, 325, 48186, 16400, 68054, 346, 9991, 78930, 939, + 1310, 4239, 93000, 73, 2067, 86, 11032, 43210, 8311, 80, 38768, 86, 1494, 2312, + 92216, 41670, 13028, 48802, 89, 7369, 69, 2005, 73, 7783, 953, 85, 49450, 73, + 1029, 402, 84274, 1026, 76, 89, 85, 664, 8088, 75163, 376, 20127, 80, 10470, + 406, 13183, 10616, 36722, 511, 1394, 87, 60933, 1031, 76, 1540, 5308, 66, 939, + 634, 4991, 29682, 351, 332, 4086, 19639, 15618, 16017, 49450, 73, 87, 20191, + 10176, 80, 32204, 32589, 1116, 355, 36238, 10470, 74, 2312, 76, 60013, 375, + 71, 55506, 87, 8671, 89, 11089, 8910, 74, 9326, 3933, 23048, 86, 441, 718, 7047, + 75, 398, 455, 73, 85, 20211, 343, 1565, 2042, 14088, 69516, 1517, 82, 5697, + 80, 2554, 88, 2005, 65736, 17043, 677, 19747, 28927, 69, 332, 70, 17207, 80, + 74, 1729, 2249, 82, 911, 71, 983, 664, 80, 301, 450, 83, 2465, 3423, 307, 84, + 4317, 80, 88, 275, 3172, 1800, 69, 352, 762, 36255, 953, 3696, 3919, 10470, + 64087, 46640, 73, 2329, 81, 8050, 13009, 20191, 71, 73, 365, 23797, 14088, 78604, + 3055, 8783, 85, 12953, 53060, 398, 79, 391, 22619, 
540, 6043, 73, 85, 664, 77, + 2235, 86, 344, 79, 708, 89, 406, 73, 360, 2933, 41287, 9092, 88, 2026, 344, + 89, 56099, 2201, 1073, 359, 65, 35401, 86, 1897, 78989, 2130, 89, 43312, 67702, + 3906, 86611, 89, 894, 17692, 85, 30407, 86, 18854, 74, 710, 65, 93000, 13536, + 588, 764, 69722, 11949, 22650, 86, 501, 24244, 637, 418, 5298, 89, 450, 88593, + 919, 10036, 4341, 8480, 4341, 9907, 418, 87, 86, 80, 16276, 5104, 18854, 11328, + 14878, 80, 269, 5298, 84, 80, 68412, 89, 4777, 70114, 575, 67, 4355, 351, 32868, + 1146, 4341, 1331, 70, 5298, 77, 1498, 87, 2302, 18577, 998, 87, 664, 85, 1347, + 1729, 9700, 708, 11949, 73, 2302, 74, 89, 73, 906, 1220, 21620, 80, 69, 40078, + 5972, 372, 16916, 1215, 93797, 22016, 78772, 5289, 65, 7369, 12328, 9092, 67, + 2302, 70, 7387, 80, 81706, 444, 647, 3418, 80, 73, 1073, 28679, 77, 4223, 894, + 2701, 70, 6511, 10200, 4511, 77, 333, 27455, 509, 3368, 74, 17008, 93000, 18571, + 19747, 87, 85, 5972, 84, 70014, 24752, 876, 28927, 18287, 318, 27515, 14029, + 80, 30407, 1394, 82, 441, 266, 76, 45854, 1498, 582, 77, 24752, 74, 17134, 66, + 5981, 67, 1100, 2562, 325, 86, 295, 55655, 89, 1331, 85, 1302, 80, 1216, 32868, + 324, 35940, 1331, 9907, 2562, 9081, 4336, 83256, 46341, 303, 85, 2332, 70, 55418, + 1030, 370, 73, 3368, 5241, 263, 29238, 8998, 23866, 2641, 80, 764, 5289, 15306, + 1662, 28679, 77827, 8303, 20824, 35401, 23361, 9907, 85, 354, 86, 48738, 6993, + 80, 1026, 1146, 1910, 12498, 80, 8212, 12825, 65, 391, 22119, 87, 22873, 8088, + 24244, 915, 81, 11927, 6511, 307, 509, 52926, 1546, 4239, 47689, 27093, 28927, + 24037, 1546, 81, 4223, 1320, 5673, 283, 69, 418, 87, 71, 455, 85, 4080, 80, + 16618, 302, 911, 75, 2203, 48380, 80, 998, 67466, 14029, 1302, 72, 318, 51788, + 84, 1813, 89, 31764, 32204, 1302, 3601, 44970, 80, 22119, 71, 29684, 4942, 1094, + 318, 31722, 15008, 427, 79, 3178, 25506, 84, 355, 9092, 81, 4991, 55427, 73, + 7005, 7141, 49595, 3427, 87, 332, 86, 80, 831, 71, 9701, 76, 44183, 295, 277, + 363, 64087, 327, 53016, 85, 392, 24672, 8892, 3223, 83, 361, 89573, 69, 1029, + 82, 12940, 352, 6200, 73, 42848, 5469, 31695, 275, 64003, 581, 2739, 710, 360, + 40078, 581, 21659, 29238, 29700, 74, 77827, 12591, 79, 309, 587, 728, 9326, + 8934, 53835, 325, 3059, 450, 83, 80, 8783, 402, 79, 10616, 363, 80, 11949, 1631, + 352, 383, 21206, 1546, 35531, 74, 42818, 1029, 43505, 1791, 6511, 70678, 4922, + 564, 56749, 89, 11328, 404, 3306, 76, 89, 657, 462, 447, 79, 447, 87, 86611, + 8088, 71, 10036, 675, 27093, 84, 49450, 6331, 80, 69, 89, 70, 10616, 352, 354, + 324, 89, 18577, 1302, 337, 2303, 36722, 1517, 15930, 69, 8510, 18854, 67, 50798, + 1804, 77, 24752, 18571, 15720, 675, 1546, 71, 4912, 346, 28990, 24752, 544, + 36238, 87, 2034, 6583, 276, 69, 73, 88, 303, 69, 5701, 2642, 25947, 377, 511, + 77, 9117, 89, 3306, 65, 60933, 73, 31268, 39342, 511, 5701, 84, 360, 29229, + 72, 324, 12825, 4937, 65, 2580, 16618, 87, 71, 25013, 2642, 398, 19072, 50798, + 10056, 86, 85, 10056, 6292, 72, 540, 83, 3906, 266, 13009, 634, 3172, 31881, + 347, 8480, 46224, 4484, 8783, 2130, 74, 911, 9988, 67, 1331, 1910, 22016, 29238, + 4239, 47689, 38844, 22650, 89, 544, 22619, 71, 4680, 44067, 822, 88, 85, 2438, + 86, 3647, 4680, 39911, 11328, 27093, 69, 318, 318, 1994, 7141, 876, 7513, 21659, + 2319, 2076, 11328, 2298, 4223, 784, 85, 89, 80, 11823, 418, 1065, 300, 55233, + 441, 80, 74, 87, 24752, 44970, 67039, 89, 81, 73, 267, 23913, 1007, 9700, 65, + 27093, 28927, 77827, 3576, 39172, 1146, 953, 75721, 86, 455, 87, 86, 18854, + 80, 8318, 540, 5418, 327, 321, 21796, 24752, 1885, 
7737, 324, 66, 566, 84, 1739, + 1739, 22780, 16276, 84, 80, 285, 894, 86, 830, 42848, 418, 23913, 28073, 20811, + 86, 344, 3647, 86, 406, 2332, 70, 8910, 1551, 2308, 339, 14088, 69, 7488, 72, + 12583, 316, 15618, 65, 20926, 11032, 71, 12412, 50536, 44067, 13855, 728, 278, + 876, 581, 23913, 2727, 44970, 447, 8480, 540, 7047, 5132, 4341, 333, 27093, + 1320, 72801, 31721, 78930, 462, 309, 23576, 19639, 59270, 32962, 437, 648, 1247, + 1919, 9902, 1568, 4484, 983, 24752, 87, 418, 540, 89, 80, 34175, 14878, 12940, + 2201, 4748, 375, 65, 1919, 65023, 564, 43518, 1100, 89, 85, 10776, 8839, 72540, + 12498, 4215, 713, 36726, 73, 87, 10616, 77, 10616, 45015, 359, 65, 5298, 343, + 92216, 87, 85, 344, 86, 22650, 953, 23913, 70, 31764, 64305, 75, 876, 70, 31695, + 318, 876, 85, 28679, 31268, 52971, 78498, 12940, 73873, 6194, 77, 3601, 84, + 2249, 75, 1551, 88, 38977, 3696, 936, 80, 70, 20975, 5010, 9081, 80, 764, 12184, + 333, 637, 18854, 87, 7521, 3919, 634, 78, 1540, 89, 1662, 83, 39911, 8088, 4317, + 329, 70, 3527, 40924, 74, 894, 1065, 18577, 74, 2641, 80, 336, 83, 1662, 59524, + 5104, 1517, 70, 2554, 4168, 85, 39911, 73, 796, 37406, 89, 11099, 31172, 80, + 17134, 84, 2005, 71, 28774, 2034, 15809, 1216, 69, 26397, 1100, 588, 301, 4777, + 831, 2985, 1029, 94908, 9081, 1609, 8050, 73873, 20926, 275, 9832, 64519, 86611, + 2933, 73, 1394, 7387, 818, 70678, 89, 1739, 21486, 73, 540, 86, 2067, 3306, + 16276, 7369, 43813, 4341, 641, 588, 35531, 74, 6358, 84, 427, 40078, 94346, + 8207, 10169, 89, 31695, 13223, 333, 355, 9110, 81656, 399, 79, 7907, 4021, 33721, + 8942, 52971, 1394, 80, 325, 85, 796, 14736, 637, 6098, 40101, 74, 383, 441, + 80, 17008, 437, 85, 22619, 80, 398, 87, 80, 73, 88, 5544, 8892, 24244, 652, + 89, 23048, 80, 6200, 65, 3841, 43189, 1029, 9991, 303, 1026, 81, 302, 69, 418, + 87, 566, 784, 9902, 370, 76, 32589, 1302, 48380, 501, 2192, 80, 60013, 2034, + 1065, 41922, 3251, 9490, 1320, 69, 451, 321, 1336, 361, 29700, 16400, 77, 8671, + 5544, 657, 81, 73, 911, 2312, 454, 1347, 13855, 2562, 80, 1310, 68054, 2642, + 4942, 8207, 86, 331, 67702, 6034, 92216, 14170, 58781, 1609, 19239, 74, 50798, + 377, 1310, 89, 564, 8050, 1919, 2857, 6043, 39911, 77, 9117, 1347, 76, 751, + 41129, 89, 8212, 68054, 35531, 70678, 2857, 80, 9902, 67800, 89, 406, 17851, + 89, 3013, 72, 360, 24752, 9902, 4341, 88, 830, 4385, 72, 1094, 1108, 437, 7217, + 70, 3933, 88, 80, 2580, 9509, 73, 29700, 641, 894, 5964, 89, 85, 894, 540, 8783, + 9490, 67039, 363, 81, 6511, 83, 80, 20811, 392, 3315, 2303, 2642, 70, 3306, + 2076, 76, 89, 30142, 38041, 4937, 11253, 73, 4086, 581, 8376, 982, 402, 2319, + 20306, 23457, 89, 44490, 675, 383, 22619, 80, 70, 80, 69, 3415, 68, 8635, 31172, + 86, 11032, 447, 67, 4835, 65, 26397, 1962, 65, 5469, 343, 1029, 66, 1677, 87, + 318, 86, 85, 1072, 39152, 1302, 1336, 80, 12591, 321, 7521, 42488, 87, 1546, + 17082, 17851, 418, 5972, 23361, 69, 38253, 66, 60013, 680, 73, 34485, 989, 18244, + 87, 398, 5298, 327, 1072, 3145, 355, 3178, 1739, 532, 20926, 81964, 22619, 32478, + 73, 30275, 392, 10616, 87, 73, 86, 89, 21206, 80, 943, 80, 49938, 3576, 8376, + 7717, 10784, 1105, 5481, 1247, 494, 86746, 9996, 55767, 17704, 75406, 28990, + 77, 62604, 5481, 13028, 710, 1273, 53835, 77, 39911, 6375, 15618, 919, 307, + 773, 85, 24752, 87, 5104, 454, 89, 35531, 8934, 13855, 451, 71, 2642, 347, 3178, + 261, 87, 371, 2727, 402, 67020, 8462, 1100, 85, 360, 376, 40924, 84, 20127, + 28679, 26047, 39152, 23866, 10616, 44183, 28927, 32589, 5104, 69, 1146, 45015, + 351, 13183, 708, 1247, 85, 1310, 98714, 2191, 69, 
12407, 359, 544, 80, 74, 6870, + 84, 89835, 70, 8890, 352, 74901, 4385, 80, 3306, 86, 347, 86, 18244, 98493, + 3601, 316, 89, 501, 22650, 1273, 76, 24990, 511, 911, 77, 5544, 406, 32204, + 10616, 70, 376, 4744, 85, 3576, 84, 392, 71, 3178, 81, 20127, 3893, 80, 70, + 344, 87020, 303, 65, 66362, 41933, 45015, 71, 9110, 86, 383, 764, 75406, 2496, + 23913, 283, 80, 50139, 80, 56414, 581, 1302, 24990, 80, 7521, 77, 8480, 391, + 3315, 23866, 24752, 1347, 67665, 73, 18442, 85, 501, 74, 2641, 48836, 19260, + 588, 1662, 77827, 9451, 86, 540, 2319, 31881, 73, 6634, 564, 7513, 511, 1073, + 276, 79, 2933, 80, 89, 4171, 16276, 73, 3841, 89, 4937, 80, 7521, 566, 9794, + 6194, 80, 73, 10776, 14694, 84, 710, 10470, 67, 1302, 360, 73, 27093, 9490, + 1146, 36068, 9490, 76, 8671, 70, 60083, 333, 3055, 10333, 797, 12883, 73, 89, + 3013, 87, 21557, 80, 18577, 70, 1609, 14088, 75, 1791, 283, 73, 519, 11032, + 1336, 723, 51998, 20127, 53289, 88, 402, 677, 418, 588, 85, 325, 12498, 96518, + 4239, 6358, 4680, 87, 6133, 73, 86, 8376, 774, 303, 1546, 21807, 8637, 4680, + 450, 447, 20127, 86, 47154, 11089, 74, 11949, 5979, 316, 73, 4086, 28774, 70, + 21796, 80, 77152, 3178, 9902, 7369, 83, 73, 2562, 26738, 86, 11160, 455, 93797, + 8839, 1923, 85, 4381, 80, 5964, 347, 869, 85, 20926, 85, 1394, 343, 86, 29700, + 6194, 21206, 267, 1031, 14736, 67, 906, 80, 49755, 20127, 604, 1347, 277, 24752, + 74, 4239, 454, 80, 1498, 73, 9112, 35401, 1302, 73873, 8910, 2026, 4086, 87, + 86, 4341, 36238, 73, 89, 337, 266, 894, 20975, 3013, 89, 80, 61057, 46640, 318, + 1546, 32962, 822, 261, 952, 283, 88, 70828, 85, 7047, 93797, 5531, 1146, 1656, + 89, 70, 89835, 2034, 641, 4341, 3423, 85, 1031, 10616, 70, 713, 66, 4744, 94356, + 28400, 85, 6292, 80, 295, 7737, 81, 45015, 1073, 86, 455, 2319, 1609, 65, 2329, + 552, 7513, 1791, 69, 73, 1216, 13423, 87, 70, 19456, 1336, 352, 13009, 89, 5010, + 78930, 86, 13584, 604, 41196, 47154, 21659, 82969, 5893, 21796, 89, 80, 1494, + 31019, 87, 19639, 87, 73, 16499, 73, 10470, 894, 2580, 20211, 454, 9117, 324, + 80, 73, 2005, 337, 5308, 258, 76, 89, 3933, 8207, 80, 2445, 77, 80, 8318, 352, + 80, 652, 11328, 35531, 80, 7369, 8671, 6194, 89, 275, 4168, 69, 89, 398, 343, + 75, 4341, 752, 301, 94055, 10462, 46640, 267, 51622, 360, 96518, 22119, 73, + 86, 677, 2329, 12940, 86, 12806, 7783, 49450, 18287, 74, 1631, 352, 86, 6194, + 3340, 75, 10616, 302, 73, 376, 31172, 9832, 2642, 22016, 5289, 31172, 56266, + 14170, 81, 1919, 87, 1026, 80, 2642, 267, 1494, 447, 85, 1813, 3601, 80, 8910, + 76, 1072, 64, 37135, 391, 10776, 1073, 21301, 45228, 85, 16400, 74, 939, 70, + 1100, 96518, 299, 80, 441, 5481, 94908, 18287, 939, 78, 15769, 79, 5308, 14878, + 86, 8318, 23913, 398, 327, 5641, 501, 318, 28679, 17851, 2034, 5531, 564, 40078, + 49755, 28679, 71, 1551, 87, 35478, 9902, 302, 21206, 5010, 75, 8658, 20824, + 76, 8671, 332, 15720, 418, 7047, 79, 1273, 3972, 9700, 67702, 983, 7387, 86, + 89, 81, 73, 36722, 73, 47689, 70, 713, 276, 81, 2465, 20990, 45015, 831, 2303, + 764, 267, 511, 31695, 21888, 316, 11949, 494, 87, 25105, 82969, 4223, 352, 65, + 2880, 9902, 87, 20824, 5308, 796, 96518, 10333, 71, 398, 86, 4355, 3524, 50798, + 23913, 89, 699, 2329, 47342, 10196, 266, 67, 402, 40078, 2249, 34946, 17043, + 80, 372, 66, 634, 7501, 41796, 87, 74, 64003, 53835, 70, 784, 49993, 3415, 327, + 77, 10036, 441, 67665, 81, 273, 501, 86, 13536, 86, 4168, 14170, 45015, 24244, + 2319, 21659, 1310, 88, 80, 4239, 70, 1331, 291, 89, 67039, 73, 51924, 14878, + 2784, 42528, 7141, 76, 16017, 71, 12328, 81, 21486, 79, 1031, 392, 4484, 
588, + 58305, 901, 365, 329, 2249, 657, 1804, 447, 45015, 23866, 1729, 67, 4991, 1498, + 13536, 3978, 2005, 1336, 339, 85, 43210, 98714, 343, 92791, 74, 677, 76, 15618, + 876, 8462, 911, 14029, 9907, 70, 346, 3524, 89, 59064, 822, 40078, 2392, 25457, + 402, 9700, 81, 2554, 14966, 86, 16400, 89, 11328, 69, 4171, 79, 22537, 8480, + 18571, 44508, 5192, 710, 12812, 830, 501, 17704, 86, 12498, 2365, 19747, 1031, + 77, 38844, 80, 4080, 88, 8783, 2319, 1310, 70, 73, 57071, 1551, 94375, 89, 28990, + 89, 4175, 9081, 1056, 23866, 10784, 5192, 87, 4922, 64, 1658, 27093, 2034, 52877, + 27515, 4086, 81, 40617, 83079, 45015, 70, 80, 19239, 8088, 4942, 2933, 9832, + 359, 76, 19383, 9832, 86, 5192, 817, 73640, 45015, 38977, 324, 6713, 71, 1302, + 677, 35989, 9832, 266, 92110, 14736, 14029, 8892, 73, 87, 71, 94908, 406, 49463, + 33721, 300, 70, 52097, 302, 12841, 2249, 8088, 85, 10470, 40078, 71, 2642, 74, + 67039, 13997, 648, 39911, 19555, 26738, 84, 580, 18854, 80, 82999, 333, 93000, + 2661, 540, 85, 6151, 677, 5746, 19456, 73, 85, 4193, 14606, 75, 303, 94468, + 85, 2676, 544, 81, 4922, 4239, 79, 450, 1029, 14694, 1273, 1494, 1072, 11823, + 91311, 80, 4942, 675, 675, 10462, 29229, 18854, 65019, 85, 78498, 509, 383, + 336, 41796, 20990, 373, 12825, 80, 1273, 76, 581, 267, 80, 47689, 5254, 87, + 14485, 53060, 9700, 352, 94817, 1658, 710, 96518, 764, 80, 82999, 88, 321, 1065, + 1072, 14029, 38844, 7521, 69, 50798, 16373, 73, 70, 9996, 77, 3368, 307, 5972, + 351, 1003, 88, 936, 351, 372, 391, 34175, 303, 1273, 398, 75, 447, 21963, 63643, + 5683, 4355, 1662, 23576, 93000, 34175, 89, 8510, 332, 74, 8462, 77, 351, 87, + 1609, 87, 86, 1725, 65, 511, 8207, 7387, 509, 73, 31695, 85, 1031, 80, 93797, + 89, 9832, 73, 26929, 13028, 23866, 25658, 87, 72231, 1073, 75, 13045, 579, 5308, + 71, 73873, 22119, 77, 752, 78, 762, 68054, 365, 683, 27780, 31019, 1336, 67, + 2332, 2739, 86, 859, 14878, 2076, 710, 9701, 5981, 2465, 87, 365, 9258, 277, + 58152, 321, 404, 51622, 300, 14029, 40078, 5289, 1302, 69, 73, 7141, 41796, + 1468, 7141, 677, 87, 2192, 318, 94023, 66, 664, 80, 604, 18854, 10630, 3716, + 455, 540, 36851, 88, 1073, 65, 4239, 15809, 26804, 764, 1394, 4991, 80, 88, + 73, 7864, 385, 82999, 268, 18287, 60933, 86, 1138, 67, 3746, 21297, 74, 456, + 4835, 820, 19555, 80, 2203, 22016, 6733, 62355, 76, 1216, 5972, 11305, 675, + 41796, 4239, 911, 332, 9832, 88, 347, 10497, 71, 4171, 1897, 2067, 49755, 20127, + 33863, 17528, 1286, 21659, 3423, 9117, 1073, 14878, 81, 1320, 64, 1540, 1030, + 327, 7513, 7387, 69, 89, 4378, 29684, 4937, 23361, 6091, 85, 3315, 31935, 6292, + 1813, 19456, 79, 9117, 796, 26929, 86, 9081, 4341, 68, 1302, 2302, 6194, 48380, + 86, 1568, 1919, 359, 337, 19747, 6200, 375, 65, 15618, 86, 564, 69, 1026, 73, + 87, 8674, 773, 19639, 86, 455, 2739, 1273, 67, 402, 2439, 90625, 2438, 66565, + 363, 579, 318, 47154, 2329, 372, 12825, 76, 1751, 76, 906, 34175, 69, 95233, + 1216, 76, 2302, 1336, 72, 592, 9700, 73873, 441, 68991, 13536, 66565, 4575, + 406, 89, 588, 80, 1073, 77, 3919, 93000, 5104, 1609, 3906, 87, 14694, 817, 33686, + 7513, 325, 25105, 8311, 87, 25457, 5104, 73, 89, 53060, 894, 70, 16276, 604, + 21659, 710, 2445, 69, 41460, 7910, 5104, 28679, 266, 38041, 6375, 26929, 71, + 14029, 45015, 71, 50813, 773, 87, 321, 85, 89, 275, 54964, 73, 38361, 344, 6043, + 4843, 939, 73, 16400, 1320, 23960, 73, 52926, 19654, 3251, 73, 9117, 25105, + 80, 76166, 637, 76, 38359, 398, 21659, 55282, 81, 18565, 1631, 73, 354, 1302, + 14606, 876, 44508, 9112, 74, 80, 73, 87, 80, 392, 31695, 291, 2067, 66, 8462, + 17134, 
84, 5308, 1031, 79, 540, 316, 831, 80, 38361, 29238, 3145, 73873, 31695, + 581, 8747, 316, 339, 89, 784, 1100, 74, 75406, 1662, 81, 377, 81, 2174, 69, + 647, 7453, 73, 85, 27455, 80, 385, 89, 398, 1142, 75, 2554, 3418, 98667, 53289, + 73, 22619, 74, 8311, 1273, 54681, 1302, 2054, 10743, 74, 87, 70, 8998, 1394, + 28679, 57208, 20990, 983, 69, 3315, 53478, 266, 66, 86427, 80, 332, 77, 24752, + 65, 32345, 4151, 332, 87, 3306, 4681, 50798, 7501, 80, 13423, 28990, 74, 637, + 73873, 365, 86, 2249, 89, 85, 412, 86, 1072, 84, 3524, 27455, 4511, 10910, 21796, + 80, 33721, 588, 40429, 3576, 309, 74, 5289, 48872, 1494, 78498, 55506, 87, 35531, + 5972, 46341, 12825, 21620, 6511, 80, 38306, 17378, 354, 80, 69, 73, 85, 8587, + 80, 406, 747, 337, 51924, 85, 3423, 7047, 89, 27515, 1026, 85, 1029, 83, 641, + 77, 5544, 45015, 437, 13652, 764, 447, 351, 1103, 6733, 540, 80, 377, 544, 540, + 74, 70391, 10776, 87, 5308, 939, 87, 339, 3178, 1791, 1551, 80574, 1394, 75, + 12407, 60435, 80, 677, 81, 15720, 50139, 3919, 78, 581, 80, 89, 73, 581, 19072, + 80, 74, 80, 73, 32868, 69, 652, 376, 582, 354, 73, 361, 785, 21963, 87, 321, + 713, 52926, 73, 18854, 83079, 1919, 501, 3251, 92216, 450, 4942, 74, 544, 4285, + 1055, 392, 27515, 1546, 82, 2302, 354, 87, 80, 1910, 86, 10776, 87, 5230, 53835, + 71, 31695, 337, 4912, 1517, 69858, 392, 3906, 71, 2329, 544, 620, 80, 47689, + 5241, 80, 73, 86, 9112, 72, 454, 12825, 77468, 18571, 1273, 66, 71690, 15809, + 344, 6331, 372, 28990, 2933, 88, 7783, 80, 25604, 86, 85, 8671, 1073, 9490, + 1729, 88, 354, 17704, 73, 38041, 1031, 2143, 49712, 73, 634, 76, 36463, 652, + 302, 83, 73, 87, 10616, 15618, 377, 88, 73, 17247, 283, 4484, 1029, 2005, 1910, + 2034, 31268, 668, 73, 48372, 5010, 376, 74, 14878, 81, 26397, 86, 675, 8783, + 2642, 70, 80, 343, 80, 4777, 295, 13584, 86, 5964, 7141, 71, 8458, 2303, 8212, + 94908, 5308, 71, 13423, 85, 89, 3306, 848, 1072, 344, 67665, 2329, 87, 450, + 2868, 1073, 74, 17156, 80, 4223, 89835, 89, 6634, 21046, 73, 14244, 6200, 89, + 14694, 263, 79, 1031, 10776, 65019, 1551, 406, 76, 6358, 8088, 318, 71, 1804, + 8883, 10910, 23001, 24012, 80, 73, 95398, 2034, 3906, 80, 12341, 14606, 87, + 391, 73, 2329, 485, 75406, 24990, 46640, 64087, 754, 822, 80, 70, 376, 269, + 87, 27515, 86, 5308, 50139, 1395, 402, 3013, 86, 1026, 16400, 67702, 1764, 4922, + 19644, 65, 4341, 8587, 89835, 511, 91485, 80, 69, 73, 19456, 73, 8462, 728, + 1094, 31268, 87, 71, 5981, 89, 80, 372, 9700, 26397, 73, 21620, 51622, 1026, + 4223, 87, 4175, 2332, 76, 17912, 69, 10176, 80, 38359, 2727, 34175, 80, 9991, + 6414, 19747, 333, 8255, 352, 418, 677, 60933, 346, 70391, 80, 9112, 351, 89, + 540, 267, 49463, 1551, 2562, 4341, 3893, 501, 73, 1658, 13045, 285, 69, 754, + 80, 5701, 71, 39911, 81, 73, 953, 83214, 8311, 73, 19555, 86, 9112, 392, 40078, + 3251, 52971, 87, 28679, 67, 2438, 80, 285, 7521, 50798, 383, 22119, 604, 80, + 7387, 339, 28927, 41817, 28990, 10516, 1658, 2302, 91650, 26618, 1729, 3013, + 641, 4991, 73, 2312, 12940, 24620, 36068, 51622, 73, 6634, 29224, 372, 88, 11949, + 295, 20211, 17134, 5298, 87, 437, 402, 1347, 939, 63643, 20926, 89, 7141, 80, + 1273, 36825, 8783, 73, 4912, 5972, 85, 33863, 869, 303, 2042, 74, 1072, 455, + 664, 1394, 19072, 8385, 86, 1631, 81, 35063, 73, 1609, 88, 10616, 67, 64087, + 8480, 292, 41196, 31019, 316, 70, 939, 45015, 309, 89, 18577, 2554, 73, 1897, + 41129, 333, 71, 3893, 18442, 24752, 29811, 65, 33863, 3696, 18571, 7129, 80, + 2303, 668, 911, 87, 10056, 95233, 10910, 1320, 18577, 16276, 24752, 5298, 86, + 89, 2042, 16731, 38359, 6870, 80, 
20824, 1897, 80, 1347, 39911, 1517, 78498, + 796, 73, 21340, 86, 46224, 278, 1029, 82, 3251, 23866, 67702, 3251, 4659, 30506, + 1394, 398, 754, 80, 70, 1517, 82, 2933, 89, 43441, 5418, 67175, 1729, 28700, + 63848, 316, 273, 9117, 2739, 73, 1029, 72801, 3633, 73, 85, 8671, 316, 17156, + 12328, 12682, 4171, 42294, 5192, 73, 1216, 32204, 87, 70, 399, 21963, 89, 360, + 14736, 16400, 1813, 21206, 74, 427, 1494, 5104, 346, 444, 84, 5241, 23866, 2067, + 79, 10470, 11328, 74, 2465, 70, 73, 87, 2933, 88, 3696, 69, 9081, 17134, 1725, + 8212, 1031, 19866, 77, 3576, 7908, 31803, 773, 43686, 1498, 1658, 17156, 511, + 22619, 86, 7141, 2042, 77, 6194, 85, 6200, 1658, 5469, 2562, 84, 23361, 65, + 5104, 450, 67, 1725, 73, 11089, 67039, 343, 5469, 49755, 25658, 2465, 10470, + 73, 4838, 23866, 74, 66867, 911, 4341, 1658, 27106, 352, 41796, 91998, 16514, + 1031, 65, 8536, 80, 969, 869, 38181, 12116, 87, 73, 1919, 359, 85, 5308, 75, + 29700, 70, 80, 2026, 2302, 69, 10784, 89, 1725, 86, 4991, 79, 27515, 71, 7369, + 69, 418, 62585, 65, 376, 77, 10910, 266, 8385, 575, 29700, 1517, 566, 11748, + 85, 42818, 4484, 2580, 69, 1494, 8671, 65019, 9258, 316, 1791, 9081, 575, 87, + 1026, 81, 620, 589, 80, 332, 7387, 24672, 73, 38361, 89, 70, 876, 89, 4239, + 21888, 83448, 45775, 85, 455, 5241, 67, 796, 42908, 64123, 76, 1073, 79, 564, + 18894, 2067, 1725, 65, 80, 24244, 375, 45015, 1331, 953, 13009, 89, 32962, 7501, + 70678, 2329, 77, 39911, 73, 21807, 31881, 648, 57932, 70, 292, 77, 80, 2438, + 29238, 10616, 22524, 285, 26397, 74, 80, 1302, 911, 1195, 86, 89, 4239, 79, + 2319, 89, 2303, 37406, 27476, 73, 13536, 80, 2176, 1944, 441, 31141, 69, 1336, + 15802, 73822, 21888, 1962, 91299, 32204, 376, 53060, 447, 5298, 85, 634, 359, + 752, 7521, 3919, 87, 1910, 2445, 49450, 73, 27093, 7737, 2689, 8088, 22016, + 86, 336, 16094, 73, 728, 18244, 87, 10056, 18287, 1302, 69, 587, 81, 73, 13075, + 67665, 3427, 675, 80723, 80, 52926, 73, 1897, 20990, 301, 70, 3906, 80, 3696, + 73, 566, 587, 8892, 80, 427, 29443, 70118, 69, 73, 87, 85, 637, 77, 3601, 37406, + 86, 9188, 2933, 80, 73, 398, 1919, 65, 8587, 316, 3423, 85, 20191, 6194, 346, + 1100, 12821, 450, 77, 455, 82969, 75406, 4080, 32280, 43776, 49755, 2580, 76, + 299, 23770, 5701, 5241, 65, 1031, 85, 6511, 675, 24672, 77, 80, 1662, 8587, + 1065, 4223, 566, 1030, 80, 2026, 82, 713, 24752, 455, 46246, 540, 365, 93797, + 41101, 17043, 460, 71, 91849, 4991, 327, 73, 87, 1336, 2727, 10616, 18244, 31019, + 316, 402, 42818, 7129, 587, 89, 31764, 7513, 3306, 21206, 383, 73, 85, 501, + 80, 69, 3251, 1546, 51924, 80, 1100, 2042, 46051, 86, 49595, 89, 299, 321, 38639, + 24752, 66, 1729, 939, 1302, 402, 76, 9490, 60933, 4843, 67, 447, 84, 455, 31268, + 22016, 80, 269, 97523, 455, 491, 77, 31881, 3368, 9701, 915, 65, 939, 8050, + 67, 3368, 81, 2727, 41129, 309, 944, 76, 876, 2026, 83, 33863, 2857, 2933, 71, + 2852, 2005, 4168, 73, 87, 86, 34855, 18854, 427, 75, 1662, 41625, 1897, 3035, + 43505, 3696, 70828, 73, 3415, 587, 77, 46390, 455, 50139, 23361, 70, 2034, 3180, + 309, 66, 88183, 347, 1791, 1126, 4341, 73, 18442, 85, 89, 5192, 1336, 50226, + 35531, 2465, 75, 5192, 372, 21963, 2642, 3251, 6870, 1551, 7501, 94055, 11328, + 1897, 434, 69, 89, 73, 1551, 80, 936, 73, 346, 1065, 73, 89, 1100, 2642, 3675, + 73, 1395, 352, 73, 4341, 45775, 347, 76, 728, 80, 906, 2319, 88, 327, 1764, + 351, 1055, 283, 88, 89835, 2026, 85, 89, 59125, 876, 76, 12940, 6780, 53146, + 71, 360, 65, 41670, 53289, 1751, 858, 15444, 339, 710, 87, 437, 89, 73, 11253, + 2329, 76, 15809, 327, 80, 2026, 764, 2312, 4991, 18244, 
4317, 581, 77, 1331, + 65, 8910, 92216, 59448, 86, 3035, 73, 2857, 77, 3178, 664, 87, 46341, 70678, + 3647, 85, 42908, 11032, 587, 79, 346, 1411, 6511, 67702, 4341, 80, 78, 336, + 38361, 70678, 80, 7521, 80, 50139, 89, 27093, 604, 28990, 49461, 69, 3696, 69, + 3059, 1609, 44528, 12407, 9700, 87, 953, 7947, 87, 8510, 6511, 73, 70404, 65, + 2580, 80, 4086, 16115, 23866, 80, 1302, 29200, 73, 752, 87, 327, 86, 509, 86, + 3933, 321, 89, 454, 2465, 2249, 4680, 86, 1320, 31764, 31695, 73, 14736, 7783, + 87, 29684, 2562, 80, 70, 80, 10616, 7737, 377, 566, 329, 19285, 3906, 41922, + 1498, 35531, 89, 16017, 6634, 4511, 2465, 19239, 82969, 8839, 77, 564, 69, 1031, + 267, 73640, 87, 28679, 24990, 983, 1065, 7369, 89, 80, 344, 8462, 84, 648, 65, + 3415, 285, 44490, 75406, 7488, 14736, 67, 337, 16276, 718, 29684, 89, 3316, + 73, 4010, 36068, 4341, 25299, 708, 71, 4937, 69, 73, 22619, 2465, 876, 8587, + 74, 49755, 80, 1725, 86, 7501, 1026, 80, 4341, 85, 1029, 6885, 266, 1551, 564, + 87, 8510, 80, 8207, 2034, 2706, 50039, 41922, 80, 12591, 321, 63643, 85, 540, + 86, 89, 598, 79, 5697, 762, 23866, 89, 355, 14029, 19456, 17063, 20372, 764, + 31764, 73873, 27495, 648, 575, 81, 28927, 74, 2641, 8207, 89, 73, 56871, 1546, + 392, 67, 5981, 80, 5298, 4239, 35531, 89, 81, 360, 86, 7911, 511, 8998, 44183, + 266, 32204, 52719, 418, 86, 1725, 87, 39385, 73, 376, 9117, 347, 79, 1813, 869, + 74, 939, 87, 9779, 657, 5308, 82, 1609, 80, 15720, 73, 3415, 1065, 20811, 906, + 74, 87, 797, 69, 32589, 1302, 42051, 333, 27455, 1336, 13351, 73, 36825, 343, + 6780, 80, 292, 31019, 267, 3601, 5289, 1631, 566, 87, 35401, 796, 70, 8910, + 15809, 50798, 1517, 41796, 3178, 81, 8671, 5010, 2344, 86, 455, 30863, 318, + 3059, 11823, 73, 8376, 62220, 94908, 26738, 86, 4835, 1517, 87, 53016, 17063, + 85, 6200, 5981, 77, 14468, 1725, 10056, 8050, 94346, 2329, 73, 4175, 79, 24752, + 87, 5981, 2201, 447, 2642, 86, 9218, 20582, 77, 10776, 50139, 5346, 79, 361, + 1146, 1247, 29238, 1065, 26929, 97523, 89, 7369, 21620, 754, 68, 283, 50798, + 4744, 86, 346, 31422, 88, 352, 26474, 359, 16276, 71, 1302, 648, 1216, 89, 13752, + 604, 79, 911, 87, 84, 2826, 12825, 351, 876, 29443, 74, 6870, 80, 82969, 2154, + 65, 3696, 587, 40213, 12328, 588, 10784, 85, 2411, 9907, 6358, 85, 19747, 332, + 773, 80, 20553, 587, 76, 370, 547, 547, 4021, 79, 11032, 1725, 291, 77, 32204, + 54964, 370, 73370, 39911, 2438, 15618, 44183, 4223, 87, 2249, 7453, 89, 953, + 1347, 17704, 70, 7141, 88, 31278, 604, 67, 86682, 5721, 637, 27093, 50536, 911, + 12328, 86746, 9810, 73, 88, 69, 894, 7737, 53016, 73, 51622, 40375, 66353, 710, + 71, 4719, 1100, 24672, 32962, 21206, 61057, 764, 73, 50536, 2067, 5683, 47617, + 18244, 71, 4080, 1029, 17319, 1494, 16618, 17156, 80, 333, 363, 56871, 12328, + 75, 12591, 19239, 77, 19639, 89835, 5308, 5532, 73873, 31019, 361, 70, 32589, + 375, 5481, 344, 27515, 71, 5794, 47875, 385, 25299, 68151, 566, 14046, 44183, + 39911, 79, 391, 35478, 1347, 73, 3315, 21963, 74882, 89, 677, 1320, 351, 86, + 41796, 89, 63502, 87, 94908, 983, 51622, 17008, 8088, 5981, 77, 2034, 75, 1030, + 2701, 303, 70, 911, 17134, 89, 53835, 77, 1643, 64, 352, 83, 80, 74, 69057, + 9991, 16400, 31764, 73, 22619, 6331, 80, 22016, 87, 906, 604, 26397, 3368, 68, + 427, 3919, 73, 20824, 5192, 392, 80723, 88, 14736, 26474, 86, 85, 8212, 20191, + 80, 318, 5701, 39911, 8910, 18664, 86, 708, 71, 5104, 953, 80, 18442, 347, 11089, + 4341, 365, 6489, 80, 88, 9907, 2396, 70, 6358, 6331, 3172, 406, 4469, 3251, + 5010, 57504, 65, 80, 1320, 76, 1310, 37541, 953, 71, 7369, 278, 7488, 45015, 
+ 798, 318, 15930, 29700, 57671, 343, 32962, 89, 23361, 45062, 391, 12940, 96295, + 35203, 85, 1804, 23001, 50536, 2332, 65, 35401, 6200, 71, 652, 13752, 4010, + 5701, 85, 80, 8207, 89, 4239, 89, 48380, 648, 28679, 6194, 43210, 29684, 2562, + 74, 89, 7369, 4080, 41922, 80, 418, 70, 375, 65, 402, 76, 1007, 454, 74, 1065, + 81, 2005, 71, 11823, 588, 80, 833, 24672, 331, 85, 1331, 9832, 81, 566, 652, + 80, 5981, 1273, 71, 447, 4341, 70, 1494, 267, 83, 73, 32547, 710, 4912, 8512, + 24620, 89, 9700, 6200, 302, 258, 18577, 14833, 501, 73, 1347, 18577, 1517, 17008, + 587, 1336, 6194, 13545, 69, 332, 406, 89, 540, 911, 20824, 998, 2933, 30403, + 68003, 62497, 797, 98418, 70014, 15618, 74, 14029, 89, 7521, 65, 7488, 5192, + 85, 1072, 30863, 5469, 1146, 391, 454, 85, 2629, 80, 2176, 89, 27278, 509, 88, + 30506, 84, 1739, 87, 788, 1517, 77, 2438, 4239, 9451, 648, 830, 86, 346, 587, + 89, 332, 6438, 93000, 26397, 32589, 3516, 31695, 363, 85, 12328, 74, 3315, 337, + 1609, 327, 876, 352, 613, 8826, 2642, 1739, 66, 44970, 73, 85, 22619, 85, 1331, + 76, 634, 29700, 1031, 3576, 88, 6043, 80, 395, 5469, 69, 347, 87, 710, 80, 74, + 70391, 467, 5241, 370, 637, 69, 339, 3524, 44490, 73, 441, 36107, 4484, 73, + 70, 2642, 82999, 87, 6885, 1609, 17319, 86, 32478, 80, 64087, 7513, 4168, 9701, + 351, 6375, 86, 21206, 5469, 355, 1031, 67, 4171, 7737, 4080, 9701, 1394, 2642, + 2034, 8536, 324, 87, 32589, 42888, 15809, 351, 30506, 13136, 62327, 73, 983, + 2042, 52097, 20898, 16618, 59524, 796, 376, 9996, 3013, 38984, 4171, 87, 86, + 9991, 85, 89, 2249, 1286, 22119, 540, 2465, 14088, 20127, 89, 7369, 6511, 80, + 42818, 28990, 65, 80, 566, 18571, 85, 588, 39911, 2005, 372, 97523, 38844, 20926, + 80, 18894, 64087, 14736, 1494, 86, 1494, 7521, 65, 5104, 73, 70, 27093, 84, + 564, 20926, 74, 6194, 385, 80, 3919, 764, 87, 80, 491, 53060, 830, 822, 45015, + 89, 3306, 290, 37406, 675, 84, 7488, 71, 1331, 30407, 1310, 1026, 1026, 5469, + 894, 3933, 88, 634, 87, 14736, 31881, 89, 73005, 363, 67, 4223, 3316, 75, 8510, + 80, 18287, 911, 70, 582, 3178, 80897, 3418, 87, 2329, 86, 31019, 17008, 2026, + 74, 14170, 3893, 86, 355, 11377, 4010, 5964, 74, 80, 89, 359, 1804, 44490, 73, + 11949, 26205, 343, 89, 25299, 4086, 38041, 1725, 86, 377, 86, 332, 939, 339, + 36068, 67039, 333, 2862, 6370, 13536, 3524, 263, 456, 18854, 65116, 74, 354, + 4080, 33721, 86, 1764, 359, 6194, 269, 450, 66, 3055, 6672, 73, 3368, 18499, + 85, 2857, 69, 73, 10630, 85, 564, 85, 1729, 5972, 80, 6634, 87183, 5308, 65, + 19747, 74, 1394, 77, 2933, 23361, 953, 2933, 85, 17008, 300, 73, 354, 8207, + 52360, 2303, 604, 10470, 73078, 1639, 17063, 89, 652, 18287, 391, 66, 564, 45015, + 86746, 5469, 8503, 87, 8050, 70, 77827, 45015, 5469, 53289, 4922, 69, 4010, + 404, 1729, 447, 675, 333, 13536, 292, 86459, 337, 1336, 89, 8480, 6511, 94908, + 2249, 540, 876, 73, 784, 754, 1662, 69, 7488, 44970, 9197, 278, 1273, 73, 1910, + 363, 9902, 95780, 14606, 80, 754, 69, 729, 7141, 5192, 21888, 283, 1289, 1662, + 805, 66, 634, 6780, 352, 79, 1609, 69, 1498, 31019, 375, 4381, 89835, 12825, + 73, 4744, 78930, 318, 13423, 24990, 370, 19639, 73, 55233, 1394, 3427, 41796, + 69, 7387, 70, 52513, 2319, 87, 2445, 86, 347, 31695, 60013, 652, 3933, 80, 8892, + 5289, 21807, 657, 54532, 81, 20553, 28679, 9991, 86, 8318, 2005, 82999, 604, + 49712, 3368, 406, 7521, 16017, 728, 1331, 581, 7513, 73, 18894, 87, 75406, 80, + 89, 85, 2642, 5746, 7141, 88, 44490, 339, 1494, 27515, 23361, 14773, 31881, + 10916, 32962, 295, 9832, 89, 1517, 73, 85, 80, 25604, 21796, 321, 81, 8671, + 894, 23361, 43505, 
71, 73, 953, 41196, 2445, 318, 16618, 11805, 20127, 35940, + 372, 80, 2303, 85, 2445, 77, 45015, 71, 2438, 80, 51622, 344, 87, 4223, 86, + 16417, 34263, 80, 32204, 73, 88, 347, 48372, 3893, 39152, 399, 96518, 1072, + 299, 3576, 69, 98347, 6100, 22016, 15618, 16915, 8462, 73873, 17134, 77, 406, + 86, 96250, 344, 65, 764, 89, 21206, 7044, 78, 1662, 1727, 80, 30506, 43319, + 830, 2365, 3919, 29238, 73, 7783, 88, 1551, 69, 1146, 383, 641, 3172, 87, 278, + 5104, 258, 81, 73, 333, 6043, 73, 1631, 35531, 3315, 392, 9902, 80, 73, 27515, + 9081, 3919, 316, 581, 2203, 74, 398, 370, 17063, 42050, 2580, 73, 5298, 652, + 87, 71, 52097, 2465, 8890, 86, 4223, 8212, 97816, 1498, 346, 70, 73, 2580, 18854, + 78604, 1310, 84, 1512, 25457, 89, 1026, 8462, 2329, 88, 2396, 68, 1263, 75, + 3933, 75092, 8587, 3826, 80, 437, 4680, 81, 3933, 3988, 936, 10644, 70, 564, + 36722, 572, 8910, 710, 12214, 31019, 87, 3655, 796, 65, 80, 73, 70, 73, 267, + 344, 80, 3893, 327, 70, 31764, 1910, 36851, 2249, 604, 4991, 544, 70, 5298, + 90173, 953, 9991, 88, 60933, 2067, 5972, 42908, 604, 87, 10776, 21659, 9334, + 22619, 1394, 31268, 1310, 55895, 3576, 4991, 14122, 72, 801, 4239, 427, 50798, + 796, 40078, 74, 2580, 88, 1910, 74, 343, 354, 80, 444, 16373, 89, 73, 11328, + 28927, 73, 67910, 5230, 76, 10910, 22524, 44490, 99692, 3159, 14088, 5241, 80, + 1100, 7737, 74901, 4942, 11851, 10470, 7737, 2203, 86, 85, 80, 73, 87, 80, 418, + 85, 4922, 77, 10784, 818, 295, 35531, 89, 9700, 352, 87, 86, 292, 49755, 3601, + 29238, 652, 604, 36851, 4680, 78, 343, 36825, 36068, 80, 1336, 1729, 83, 58631, + 1302, 80, 295, 80, 73, 344, 69, 22016, 86, 657, 3368, 85, 25299, 53470, 454, + 8910, 80, 402, 10776, 79, 1662, 2203, 3919, 65, 5972, 35531, 789, 48380, 16915, + 69, 2329, 2329, 73, 2629, 12825, 713, 1030, 70391, 3646, 2470, 32547, 21878, + 5697, 80, 89, 22524, 370, 12498, 953, 67, 1302, 3059, 953, 1029, 6455, 2580, + 73, 2312, 21888, 80, 1320, 1517, 81, 713, 73, 8998, 8998, 22580, 89, 64145, + 73, 418, 24752, 318, 83, 27455, 80, 70, 266, 77, 80, 3172, 3458, 797, 9810, + 87, 2201, 2176, 2249, 1347, 87, 3251, 89, 6634, 71, 3423, 89, 80, 652, 95589, + 9490, 332, 10470, 8480, 21796, 46640, 9902, 35278, 12116, 67, 5972, 76, 4625, + 73, 63643, 1631, 509, 73, 36068, 41287, 2850, 20850, 581, 2203, 4777, 372, 80546, + 19456, 544, 81, 544, 7488, 77, 773, 76, 44906, 352, 5192, 89, 31019, 9117, 894, + 32589, 2347, 65, 1130, 58153, 370, 31268, 65, 418, 89, 16276, 1029, 68054, 2701, + 69, 295, 85, 28990, 80, 65019, 365, 28679, 76, 7387, 85, 1739, 46483, 72, 414, + 39911, 24990, 2826, 797, 4484, 60933, 86, 2042, 818, 392, 564, 73873, 48642, + 3919, 31881, 29238, 352, 24672, 77, 3906, 4922, 65, 24752, 5972, 3418, 637, + 35531, 42818, 4777, 327, 43505, 454, 70, 1804, 9996, 50798, 15720, 911, 1658, + 70, 1302, 307, 3520, 876, 437, 5964, 86, 3696, 51924, 31695, 652, 13856, 300, + 4168, 73, 2438, 80, 1494, 73, 89, 73, 462, 85407, 7369, 21206, 75, 11823, 437, + 19555, 17063, 74, 2635, 31695, 74, 9832, 67, 321, 73, 818, 6043, 16944, 84001, + 73, 5241, 49463, 89, 24738, 10630, 3601, 385, 28927, 80, 2933, 25105, 73, 939, + 13752, 73, 86, 4484, 1804, 10470, 5683, 15479, 70391, 55334, 80, 69, 3858, 67, + 427, 344, 309, 26397, 68054, 9092, 80, 56871, 20926, 93797, 70, 73, 87, 1149, + 3172, 447, 14088, 2319, 69, 8510, 73640, 43505, 404, 3576, 38149, 509, 564, + 325, 21796, 68054, 4223, 303, 15930, 385, 89, 42908, 1609, 65, 27093, 10616, + 17156, 12825, 2642, 32589, 333, 363, 15720, 89, 80, 321, 85, 267, 89, 22580, + 1517, 635, 1791, 52971, 7453, 19275, 483, 307, 1302, 
64, 940, 74, 1201, 70, + 754, 83079, 1919, 9700, 71, 2580, 302, 77, 3893, 4168, 32204, 74, 18442, 73, + 6511, 418, 10188, 906, 2249, 69, 28679, 52926, 7488, 80, 18571, 89, 14694, 66, + 6194, 7387, 86746, 4922, 83, 6410, 49755, 32893, 73, 3433, 64, 2176, 71, 34175, + 85, 998, 80, 3647, 89, 85, 9832, 80, 261, 80, 38361, 4680, 2701, 86611, 1056, + 72, 9490, 80, 3601, 89, 7513, 355, 383, 10616, 11032, 87, 1065, 76, 89649, 67800, + 1247, 74, 87, 20211, 8619, 20811, 86, 830, 65, 81913, 80, 26397, 74, 7737, 333, + 8088, 13028, 24037, 77827, 63502, 21486, 581, 5981, 3423, 55233, 321, 63643, + 85, 89, 3601, 86, 89, 1216, 86, 5981, 33863, 2249, 5308, 23707, 5544, 38977, + 723, 7141, 40924, 21206, 38844, 9110, 85, 581, 18854, 72455, 344, 9996, 72245, + 2249, 87, 49463, 3524, 87, 3696, 80, 911, 2562, 83, 40016, 80, 7513, 1498, 87, + 4937, 73, 70, 73, 10776, 4922, 1498, 23866, 50536, 20218, 587, 18287, 85, 302, + 3322, 14694, 10033, 24468, 648, 65, 21620, 4341, 2312, 327, 73, 18894, 8910, + 74, 7737, 3251, 831, 73, 85, 1739, 2933, 85, 2302, 664, 86, 89, 85, 406, 85, + 339, 74, 9451, 76, 10776, 87, 80, 31695, 316, 10616, 81, 16400, 548, 69, 351, + 72863, 31695, 80, 376, 17008, 2910, 587, 5893, 648, 269, 12828, 56871, 39911, + 7387, 17553, 5289, 39911, 4341, 2249, 6489, 728, 4835, 74, 37955, 86, 536, 564, + 45015, 65, 4086, 73, 10630, 325, 331, 301, 983, 74, 383, 71, 385, 2203, 77, + 876, 911, 5972, 89, 13752, 24990, 70678, 708, 70494, 89, 4835, 42818, 5683, + 73, 89, 652, 303, 4937, 86, 12825, 47689, 953, 88, 80, 64087, 4168, 25105, 89, + 70, 652, 74, 728, 359, 61345, 5683, 74, 3545, 2026, 79936, 11513, 28546, 831, + 1073, 20990, 85, 1394, 76, 21659, 372, 38844, 6200, 27093, 13855, 8998, 2203, + 89, 451, 953, 13423, 45015, 23760, 58153, 911, 82, 98347, 7387, 73, 52926, 9907, + 73, 88, 6703, 575, 4680, 88, 3841, 713, 437, 73, 59524, 1609, 43505, 6358, 93797, + 1073, 276, 12940, 31764, 7513, 18571, 16276, 53191, 915, 88, 41129, 818, 343, + 1910, 31268, 3601, 73, 1739, 80, 370, 303, 38181, 69, 447, 74, 1072, 575, 4484, + 8311, 1026, 88, 5308, 675, 86, 28990, 641, 92216, 88, 80, 70, 25457, 73, 70, + 5972, 9786, 80, 88, 509, 9991, 89, 5298, 450, 74, 1919, 372, 9092, 566, 939, + 77, 10176, 52926, 80, 69, 34750, 398, 65, 80, 21963, 302, 67, 6723, 569, 86, + 80897, 86, 12328, 73, 88, 80, 88, 85, 4937, 75, 375, 65116, 86, 316, 27780, + 71, 73, 70, 73, 88, 7737, 307, 377, 14088, 53060, 9810, 87, 74, 50039, 36851, + 86, 869, 42848, 23866, 77, 80, 337, 32962, 69, 6194, 17063, 67702, 329, 5964, + 911, 86746, 74, 5964, 40637, 80, 1347, 501, 47154, 331, 85, 14029, 8088, 85, + 3852, 634, 560, 39851, 1662, 14878, 40078, 764, 63913, 10784, 65, 7783, 404, + 2034, 35478, 391, 44508, 3696, 86, 3059, 85, 31695, 2067, 55911, 18244, 35531, + 80, 295, 3906, 7453, 269, 10910, 86, 16400, 38844, 869, 23361, 10056, 92216, + 18244, 2411, 544, 2857, 93797, 86, 316, 86, 383, 18287, 1026, 822, 32204, 87, + 818, 80, 7387, 575, 8892, 81, 5697, 1759, 65, 7737, 66, 3251, 12498, 437, 73, + 2302, 303, 88, 32478, 86, 80, 47689, 10056, 7521, 69, 344, 96518, 509, 796, + 87, 73, 89, 70, 73, 70, 19239, 344, 53289, 4657, 80, 634, 77, 6802, 582, 67139, + 728, 89, 59367, 3893, 27455, 70678, 501, 14606, 80, 15618, 360, 8088, 18442, + 455, 21888, 58305, 347, 89, 347, 74, 7047, 72, 292, 906, 78, 581, 14088, 360, + 375, 73, 27476, 20127, 764, 71, 28679, 936, 80, 89, 36825, 80, 88, 1729, 983, + 1065, 37406, 97523, 4680, 1897, 1347, 87, 89835, 31881, 70, 355, 55233, 22650, + 7349, 9081, 267, 18854, 40952, 337, 14736, 5964, 74, 60933, 375, 40924, 10616, + 
70, 3841, 337, 73, 85, 33367, 31764, 677, 80, 44183, 372, 71, 22873, 637, 88, + 80, 7488, 66, 2249, 70, 509, 94055, 19639, 81, 713, 21486, 85, 4777, 93797, + 1273, 18287, 80, 73, 2438, 38359, 3858, 9991, 9081, 89687, 85, 20824, 87, 41622, + 22016, 392, 797, 17008, 18571, 27515, 73, 82553, 86, 621, 19260, 1146, 89, 29684, + 324, 71, 48233, 80, 2067, 28546, 8998, 68467, 540, 6358, 73, 86, 2067, 87, 14878, + 784, 1791, 11032, 56871, 318, 1551, 4991, 63345, 796, 26397, 1100, 77, 324, + 89, 4922, 76, 754, 80, 12591, 533, 2026, 84, 1498, 1804, 73, 86, 418, 654, 74, + 8212, 675, 6200, 1320, 73, 392, 3251, 6375, 1756, 88, 351, 67, 22650, 73, 454, + 69, 634, 87, 6511, 34175, 80, 3172, 9112, 85, 15464, 799, 68, 21963, 38359, + 23797, 74, 6489, 70678, 19456, 71, 42818, 89, 553, 22119, 66867, 5469, 1764, + 31697, 953, 75, 462, 25105, 488, 65, 27322, 73, 67702, 25299, 80, 4010, 92216, + 4010, 74, 1910, 9907, 3172, 42528, 352, 302, 273, 75, 302, 68, 2113, 39911, + 76, 1625, 17851, 80, 4625, 32752, 784, 80, 637, 88, 1336, 6091, 40078, 80, 52926, + 25457, 418, 70, 4777, 92562, 11328, 11949, 46390, 10056, 74, 380, 73, 89, 641, + 87, 71, 728, 266, 5972, 1302, 70, 19747, 67, 796, 327, 70, 1100, 4748, 266, + 34175, 450, 361, 85, 8480, 32204, 69, 604, 1791, 447, 544, 85, 267, 437, 52971, + 876, 76, 12821, 647, 80, 61828, 2580, 894, 87, 40078, 8480, 325, 14736, 63502, + 46640, 544, 1331, 3172, 85260, 894, 69, 1302, 89, 728, 86, 85, 1073, 377, 8671, + 19456, 80, 784, 87, 84, 77827, 713, 94869, 8998, 11949, 1729, 637, 21620, 84712, + 66, 7387, 6043, 84, 329, 89, 309, 2201, 1498, 55233, 2332, 5192, 89, 38977, + 566, 27515, 80, 69, 677, 25105, 762, 73, 70, 74, 677, 1247, 71, 25457, 80, 78, + 300, 1725, 45228, 85, 6511, 74, 637, 45015, 2465, 983, 64087, 1302, 713, 11377, + 40924, 89, 46341, 983, 85, 41129, 1791, 7141, 41796, 969, 14898, 73, 10056, + 86, 511, 14736, 70678, 4086, 604, 71, 302, 78, 1662, 65, 5010, 79, 540, 2465, + 7910, 333, 87, 4010, 86, 5683, 85, 406, 876, 20990, 11328, 73, 637, 81, 418, + 295, 295, 86, 45015, 28774, 8892, 80, 9700, 53835, 25457, 13423, 42908, 77576, + 797, 85, 657, 9701, 96819, 2438, 70, 59194, 73, 441, 52971, 17134, 86, 40153, + 1494, 10056, 87, 73, 4937, 86, 375, 1804, 75, 90377, 73, 57504, 1791, 1897, + 23866, 273, 3036, 292, 402, 21963, 983, 38977, 295, 4080, 80, 29224, 89, 752, + 74, 80, 830, 8783, 2933, 45015, 7369, 77827, 1395, 73, 1394, 1394, 67, 5010, + 77, 10910, 2933, 15618, 19456, 32962, 85, 5972, 88, 40078, 8890, 80, 12654, + 12498, 80, 3306, 258, 71027, 13066, 85, 50798, 80, 939, 2302, 22374, 19239, + 1302, 87, 73, 3418, 4010, 89, 501, 92180, 73, 63643, 321, 93213, 84, 21659, + 303, 81, 73, 2438, 83227, 86, 8998, 65, 458, 1302, 14736, 45015, 20926, 5964, + 32204, 784, 25013, 80, 4239, 81, 747, 21659, 652, 604, 30506, 7387, 710, 9490, + 1494, 89, 1336, 73, 2933, 88, 15278, 2580, 1100, 275, 8658, 70, 74, 1739, 437, + 27752, 87, 73, 41139, 74, 5759, 7369, 3935, 12328, 17270, 354, 85, 14720, 292, + 9117, 31268, 2192, 88, 45854, 376, 764, 6511, 1030, 24470, 51622, 6358, 23866, + 10616, 3919, 70, 5010, 67800, 980, 21888, 581, 77, 4239, 375, 295, 852, 74, + 13183, 80, 11160, 77, 2312, 89, 3423, 65, 3427, 7453, 80, 74, 20176, 8747, 28679, + 80, 1625, 71, 1146, 6539, 78, 648, 79, 1331, 85, 16400, 69, 11949, 80, 11251, + 6043, 797, 652, 11032, 78930, 332, 74, 89, 392, 59524, 85, 267, 80, 3368, 24041, + 83470, 796, 41922, 292, 86, 2668, 82, 9902, 689, 1900, 80546, 19555, 77, 39911, + 81, 9786, 2319, 89, 745, 5701, 78, 336, 9907, 70, 3601, 88, 85, 5298, 80, 9672, + 13846, 70, 329, 
49755, 375, 85, 3013, 31881, 11377, 82999, 637, 4341, 398, 88, + 88548, 16400, 11377, 39152, 80, 1631, 45015, 6358, 85, 8998, 58305, 652, 78930, + 69, 73, 830, 70, 74, 3368, 657, 9490, 89, 752, 69, 26235, 86, 652, 3306, 72801, + 73, 2312, 5544, 69, 752, 20211, 392, 50397, 3601, 773, 3368, 43389, 1394, 1910, + 544, 8910, 81, 64087, 331, 85, 23361, 277, 2303, 332, 80, 73640, 2465, 1919, + 67039, 74083, 604, 1791, 13536, 74, 14088, 587, 1195, 269, 1751, 42528, 80, + 35401, 89, 3368, 94908, 728, 85, 89, 46224, 447, 87, 18442, 4341, 78604, 4922, + 12825, 331, 283, 309, 85, 12184, 2303, 14088, 406, 634, 346, 88, 31764, 26397, + 3368, 60013, 713, 70, 1739, 87, 587, 1029, 2933, 78, 42930, 2003, 86, 48380, + 71, 17008, 5683, 24470, 8998, 74, 80, 344, 5010, 93000, 501, 40762, 4922, 66565, + 80, 344, 76, 575, 2042, 76, 1026, 509, 73, 1804, 939, 7047, 5010, 92238, 292, + 73, 354, 84, 70391, 5972, 88, 846, 2465, 37406, 89, 14088, 100120, 14878, 20990, + 564, 31147, 983, 69, 664, 31019, 11823, 3418, 14170, 392, 49595, 7453, 40078, + 8998, 36851, 80, 38359, 20553, 67039, 1662, 14878, 80, 44067, 2423, 316, 80, + 13634, 24672, 66, 1029, 79, 3696, 24990, 80, 300, 17008, 80, 70, 901, 764, 1336, + 76, 20926, 74, 11089, 23361, 14029, 454, 1029, 637, 18571, 85, 9081, 87, 86611, + 509, 953, 80, 36825, 80, 354, 89, 7387, 44490, 4239, 7521, 77, 1910, 9907, 654, + 42050, 1029, 52000, 65, 1791, 73, 301, 70, 581, 88, 73, 1517, 8480, 73, 635, + 346, 20990, 24244, 85, 80, 74, 89, 16276, 93797, 86, 953, 87, 92241, 677, 85, + 36851, 998, 2580, 7488, 87, 44508, 5490, 14029, 85, 26738, 10033, 84, 7387, + 76, 2562, 66565, 728, 75, 30728, 5964, 74, 4239, 78, 292, 80, 88, 80, 4942, + 24672, 19239, 1146, 1394, 75406, 73, 89, 1302, 77, 5104, 9864, 87, 354, 53450, + 80, 73, 1247, 7047, 70, 2034, 77, 70391, 78498, 370, 6634, 20920, 9117, 79, + 10470, 5118, 9490, 392, 63643, 89, 540, 67, 4355, 80, 634, 85, 23361, 92715, + 2580, 85393, 80, 70, 32752, 46640, 2554, 2826, 4171, 4239, 12940, 2034, 88, + 9112, 88, 18126, 1091, 78, 1100, 377, 93797, 13045, 365, 23361, 71, 35035, 32547, + 13092, 85514, 1494, 73, 2727, 85, 14485, 87, 74, 1031, 7717, 80, 10333, 70, + 80, 23001, 74, 398, 87, 1302, 64123, 86, 2034, 5308, 85, 42818, 303, 5923, 88, + 5964, 7387, 13009, 51924, 87, 710, 69057, 325, 24752, 44508, 16510, 4937, 35531, + 9832, 36825, 1336, 339, 69, 66945, 73, 1247, 86746, 89, 5192, 71, 25299, 455, + 77, 455, 3315, 402, 1751, 14606, 819, 9117, 731, 30407, 28679, 2739, 2130, 331, + 4719, 92238, 79, 2249, 1804, 75, 2329, 575, 6043, 84, 327, 75384, 5104, 299, + 5717, 81, 466, 4752, 86, 89, 80, 1302, 652, 65, 4922, 544, 7141, 87, 46790, + 321, 32868, 69, 6511, 88, 911, 466, 75406, 3524, 444, 2933, 71, 29652, 20990, + 69, 7369, 17134, 69, 451, 372, 318, 953, 87, 501, 1302, 1804, 76, 365, 5972, + 2249, 28679, 80, 14088, 1029, 28990, 372, 100111, 6634, 63813, 2329, 2329, 331, + 27520, 80, 347, 8462, 46341, 44490, 575, 5010, 24470, 75, 3415, 3524, 454, 4381, + 566, 713, 4458, 64, 2319, 74, 3919, 86, 1026, 75, 11823, 89, 634, 14088, 69, + 1247, 88, 292, 321, 302, 80, 3988, 6634, 73, 41196, 2191, 88, 69, 7387, 1739, + 74, 27455, 4010, 84, 2249, 76, 2026, 85514, 752, 17008, 86, 329, 4484, 278, + 3172, 25100, 43505, 867, 81, 939, 27515, 64381, 70, 80, 22016, 86, 10776, 89, + 4908, 89, 70, 14092, 830, 25457, 427, 754, 75406, 20218, 1073, 5230, 85, 377, + 74, 4080, 1233, 19639, 12498, 39911, 2629, 12158, 13536, 582, 14170, 884, 85, + 38844, 79224, 4168, 87, 71, 2249, 73, 70, 80, 70, 19072, 953, 56029, 2641, 69, + 73, 2465, 36851, 73, 3059, 17008, 
20824, 911, 80, 69417, 18442, 87, 51071, 360, + 89, 8480, 268, 75, 3906, 89835, 1073, 75, 9117, 1919, 88, 360, 96518, 8462, + 28679, 29700, 70, 1302, 65, 7488, 4239, 65116, 74, 21088, 2465, 10784, 85, 936, + 38359, 3647, 27278, 89, 8480, 8480, 3906, 1631, 1494, 295, 80, 318, 1631, 268, + 8050, 89, 3919, 61057, 77827, 39911, 354, 416, 337, 89, 9700, 69605, 6200, 1919, + 69, 7513, 73, 26738]], "model": "text-embedding-ada-002", "encoding_format": + "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '44957' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SaWxOyOp/l799PsWvfOm+JICTsO04CAiYoiDg1NQWICKjIIQmkq7/7lD5dPdU3 + XhDkEJL/+q2V/Me//vrr7y5vymL6+5+//n7W4/T3//oeu2VT9vc/f/3vf/31119//cfv93+cWb7y + 8nar39Xv9F9j/b6V89///CX895H/f9I/f/0NxKClxgp4EdfwvQYlDBwa3usln99ibsHPRthT/bmU + YN7QvQGd6pBRa3NdIgZyzYUZLJ9km2bU5HVlJbCPzgNaz+LczCxxQ7g84QabxFUHhmxYKtWq3mAH + vdbDcgHvlUw3jkPUdbpv5tVkMxjHpYrY0DYmF2ZuwLN8V4gUaC6fywe31WovjtSYmevTdR8sYJ34 + EhJvzSun5/wUACO2a3pAVgqoEB8ruFRuhZEvSjkRVg9PlTQkIm7Gg0k3uHhBefPpSRs6SzQbpSfA + I2sDjOqK8slscwM0couwi0DKR/uVBsol2RLymo5mxI/uo4ftVTZwUEq7Zk7EPYFed68Qh1o8zO1m + pcDkNQ/Ub7rHwPlQi2COj5Qe/N3DXDbWnMK3xxhZT7eQTzu4bmGFrgv1QnLzZ80+JwAb9IEkYsjD + +EgHF949uKV3sbF9njz6FFqHUsDG9dNHizhVBqhGuODdDTyaKciPNWwXqSRsPVScry7bFG5hmhG0 + imROW0kW4PAWTWpJeBsxrLAACg25USw2L3/UkF3BLGkOSAblNMxhsl1gYxcuDXdP0iwPnEIgP6yW + KHIY5LyCWQvabWHgkzm+cuYLvgiIdgSo92Gez80tiyG9nq/U+LZ3xHcVcE/ICwmb6ymaZbEt4Il+ + GAGOa5qb4xEI8H33dtQ3/XXOr/6YwCx5HMhqPRlc0BCqFdGvR3wQT3E015+yhkqt5jjzg35Ypqtr + gO/7o1ezTD5zV5CBWHM6jM+9zYdhPZdw38wXethusMnNXBPUwNwZBLR6PdDTHJ7Uz21XIqZtM3Na + baMtXHa9Tk1tZs3YOLWmHmOxRKoLnGihWR4qyWGDqXW5PCLSypkIk3t+Qo9nooONddgTaI/LHQfA + DCPWP0QN2qukxm6TeLnkf9IOAFfhGC1oBGz3dkKo+6aJd0YrNNN8NGO4xOmC/SI9DVIzfzrFyayQ + FsbF8QneTiGkZhDiK3K0YTPEqQgk543QwqIajLphxNBiL0B3I94MU5ajEbLSrLF3OUoDu6iLslZx + aFCjeu74om+ZBe/EXmHUHKyIPcVOBNsz5kQRp3noay/WYPDoDHqLw6aZKx5mQFAVh2pv38iJrKU9 + DKTZpwcVtPlybswUon15xL/xvbBFEuDNen/IahcVnLtJZ8FojiXsxLszZ8nZOsF3Ne6pfu5avsSd + m0Cujw0OtMfDX86X1UnRr7OAz+en6vPbKSSQt8IH38aLHkn6uIdwRt6TLGDI/T6DgQvy/JAQOrIi + Z49mGwJNvmkYb5fJnCRWnGQNnk7f+rHlvLpCF5wlYpPv980XVjUlrObExbuNfvGXQ8xf6uFpQWxT + Zx9x/xP2ilxQhP1qjnJWRy8EJ8cYqNW4WTPn5iZTXSfTkXi/BjmPxjwG4bua6K65RZzMq2AF58dp + ROK3vizdZ4QwiIcdEdO9mi9AH7ZA259VtLg7J2dinDNwmEhDzQM8cQZqJ4VetM2wdo6Gpr/qexfG + 2q4jomhagGb2O4X4Kb2xIa0Rn0kOTxCXN0QGLXjzwU06G+7y3ZXMN02N2GXv2nDINjZaeif1Z7ON + NPklo4lqVDX5nGOvAmrS59iyFtGfbqt9/NMDqtGQ5ktuxJnylHc9DrbVGXBNNS2IwMWl1oWvIuZ3 + KwJi0r0RgG4V9Zr8dmEY7VPqkc5q5nJyEVz5cYudKwB8iQSrhnmpdfQ2y9HAz3mGIPDJDlvbj95Q + r9EK1Vt3Di0E4dOwqxclMBBUGTvf+kme60GDcXMgFJ12q4guhoDgvagARYCtzYGJOQP10xGpE40x + 4JFUFQBW1EQ0D2ST3W6+Ap5YzChiqQ0Wd9O3YIutDzoGYhNNVD4vECH9Qm1/3ZgzvlBREedyh3VV + LpoZFeMCt5xxJBkbO5ql08Lgu0g41fPS47woYwXowLXp/Xh8+XPSw1bJmtojnciihpdpp8A+8VS8 + t0s28DFNLKiGgk2A/NwNrDiKDF5UXcKHZ/4ESzGYBgx225gGxuXtcymHK/jpNgHeB9Nl4MeX6cKm + 03zq3W51PqVG7cKxUZ5EKUw5GlvproCWnC54D8qpedpQq2CumyMODmMTMflMDSjebIoNmGLAkXso + gEcsi0bb2I3YQwx6SGTjiXW+2ud8pO8UFH58JavEMHy2Yb0G1VLjeC/tAi6A2slgnuOEMF028q8+ + Zkp6ZjWqv/N3roWhB9uwm7B/3Ujggy9vAYSyzdCczEnEzHnvwmCnxIQvu0dErhsOYYXyBbF0feEs + nk8VRCs3o442jA2/7AoXBBL38d7zIB9CyTpBB2+f2HuIjcnOSLDBfnPOsB8p1jD3Bq9grF5TtJ3f + hs/bx72FG8pW1E0zbDLIxxEkdiHR/ePx4cuhWi+/90Hr7S5tPryuOrW4HLfY63HDGchdD37rCz6s + ho3/dK69BQ0FD2hzfqomLQ/RAimrdaIuL9df6vemht7RcWnA8g7Mct+FIOu3NUZ5G0dUiK8V7PZd + 
i111xOZXP05gXgSKjT045PM7lVuQ8KXDZqav+eQY4QKyimypkXPLZGD76OAUbDTsbdrKpGonV8AX + kiO2hFDj8+v0yKCiuAa136UcsewNRcCzfoWtx3pvSh/5c4Ju4i5YqzoHLLc5CoD2VlR8GC96TlTW + E2g1zxU1zuFnYPo16uFNb1/YPfmdP/RCm8AsogBtQ53k86m5eqDYVRY9njsL8Cm4bqEjPRfy+FBj + WC5t2UOopZheSeRH8zArsUp8ImAcCdXAb+1kAQbbGttz35pUH/UV7EvW0sCp0oYvz6yH/bwd8KHe + +v4871YB+F6fGuK+49OubxCUBGWNDao8wDKJnwq2JLzgwBTGfL4XKw9cDzeC5Pup5kzcow5szweO + jds5adh8kws4JqAjauQRn43WlsAtzDLs+G+Hz7I+d/CQxD49ZPZnWPZgYXDgc40eswNMNvA8BuMp + rhGorJKPWNkG4KeXwvngNCzbJSlUpv2O5FsfNvMpEBVYetKGtCs/4ptGASKMKONk4/cfwF5zs4XA + 3j3oXu7eAxdUzYPJmGL6qxfjtx5DF9oedY57JSff+QeLjbAl8rifB65Xggg/oCvpIZLqgYnVBcKb + 9fxgqwPtwEXv0kOzjwlS/Z1uzvNODIASiEeiNJEbsebpV6Au/AzJxg6DpcnlBMof4qNJKEhEb2CV + gqm8mmQTFkMz8Q9VlN7JAupImybinwZr8FAfe4xKwRwmEr69P/NXw1b7490Ksly7Y+N+Z9HkSP7r + xyN4HyfO8DSvOwV2z/REfYVIPk3DxwlSE4XYWSgyGftsethPzZmEoeJxvtJLD7DZ/2BbNFv+uqvg + BTDTT1i/abdo2Spaqh4/eYu4dSvAfK3MHtzelBL5BMtouZZ999M7JH/rAXPMmYEfr+0df2eyUNdK + 2Kz8GE3lvY+4vCYd0M1uoNdKOOdLvbYFqKeGRg2s18N8vl5O8Muj1NkiKxfWgqtA28096triAL79 + YygT7k40+/IcvQExA9trGJOxuqYmCT2jg63vHgkQ7ixfulu1wK8fItBSdCCqDrSh4ThPon75SXhB + WYMd5RM+fPFgZB+1A+o4t2iwnt0wfvkV4HyA1N4kLF/WhmarEagfX15so/lguQFUg5IheNq1/myy + OwJfvsTXve7n8+HxIUC7ugFFWV4DLqiuB61p7qh5F/bNsDxPHZzdOKSJZdwHtrqUpXJPxhcSZm1u + WLgBIQgOpxHrRr8d+EhYDFftUcOWdj3n7IMfHZDJ8Ui16PbMudsJJfjxm2/npKHrEyggF7ORHqz6 + 1Tye5fSC72gKECuXxzAfb+4KmikbkDyKL392F7uHws6/EQl5RsOFGRgwtNI39eXns+FW4iuwSp83 + rF1E219W12OmLpfAwt9617Bj+YHQPmcOWrp+4VxN3wXc3/oD2tytyJ9/fk0/I4KY+/D8Ga39ACKR + Gdi567O/6NutDXVfN6ejX2QmP+cnpCaZENCyrvCX54QQzm4S4oN+uuTTXXBK6DTGSDVfOEdsc00J + XJ2IinFmOoMwkiQF8MLe9GAWs8l3R3sF7+tBxwY9HCIqe1yE7Ik8aqnlOmeiqCDIa/2Cd5qumNMH + rUe4Hpac7vZVaL4XpMcq0sSerI4fPVpgIGugUA8hNsbkbs5nKtcgNhQFbWNSg2XOSf3zY2hR6k/D + T3N6AmcYBUiZ30XOF7RPYF3CkdpB2prLfe+yP/0P8pPq83lOBOV40gN6sNNg4LdTOgJXDrc0pCGO + Xn7UxVC03Jhq/MNMeu83ITTb+op4dfAa1u2vBeQXaSTSUw0axiPpBX0yjXjf3Xa5oJVhCyq6WFTL + rhXo7XYXwq+e0EOX3Yaff4B+vSDqqiM1WSvdt3/88VWNOZ/pe58CPJQ+NtdhM/CXfLDB7J+jb76A + AbspUwYG+zaTuWi1fKnfagVUUcVIud3qiNZVEMPc7sLv96WA66grwW2lJNjqZ28gfJuuoF8zRO0W + rMA0WmiBUbF+/OGj/hRm4h/+0qBTmkuVnz34Xk7B16988i+P23Cy8JXqTSOB+QVCQz1SzaVaqPSg + j/zbFvZrJ0XCaaUNwpf3gCZ+CJm7szwwmJgICtrrQvdriURdAwsBipYX0y/vDmPvtSMcxuseGyyq + +bKpMwVc9vJM9aKtoiWmTgvqt+xiFHZXczEVM1GrMtC/eYneSJqbnsCFRDZ60rvVCEWtiao4bCrq + vk5PwBaQQfDVD4zfq6whVX5zIcPhTLYFx4CZGVEU8X3KsO28jIitpi2B3/wIB5/7lC9b3SGgV+oT + 9S+vELBLHhfQkd4LtXXT4VKyNrdwoEFOVnxYTMaCwlVKiByK5bs+LDfYxZA8ugORpeWR8+EWaZCE + lkezk/syB6/RSgh8WcT7tYTyZffxArhv+AWxt1/nnGy0Wt3lzvXr597R0uunGiT364n6lVUCHqus + U1GuxfiP3iZnK4Rp20RE0K6bnI5kG4MbmqPf+PYZtc0VKM96hY23W0X8+PJdyOyPh9/PcOt3Ryft + oCHkCdaUp9/001CkUC/eNjbAAHx2CXEMss4Jfzxk9vezBSFBlU0PvZr6X1724HMF1gQ+ue5v9MvS + q/i5eRPhcRIGZtwHEci+71O8sTXAdyulhsnnpeLD8nLN6XHxvF8+Ri14nvjMPODKH9CXFJnWGfDe + 34xw8Q4OeX/5o/fuY60cT2aAcbVu/MXcpRU0pKCj5vVIzWllPuw//narM3EYG5Z2UJEShu29G4NJ + VUNRBX0h/td8ZRtvgdtO35HXV8/p85ynIIg/O+yb/j3vtDJ9weBjZIShS+gTxe4EWGa9hQ+9ujWX + 8Wn98fM4kC6tz97ncwd5FL6wk8DWHwWHZ8pH9zRqf/NI1h4UBI/UcIngn8NmdhfUb8eiDqhRLjkY + zyaBcPGwg11rXEwWu4MCpSaBZGuNiz/2k+lBsb251JA3Z1+6i16qfPUcm9KbD2RtuDYsSHGnv3xy + esjdC+oH8Uwet+nI57dehdBwdk9svuWk4RjdOuUsKAIONmruc3tnLNBRUYLN+tJGRCiMEbRusibS + l4f4DAcIM4cVCGwCC/B1by2gFpQdxe2A80ncHBGQ3bP85//8lQ8t7D7rlOq/++Owa8Gmfh3JM96M + +ZI51xhazXtFVp/Cz2fn2ttQKtUX1ZNZjEbeXwXw5Snq3HzfXCwtf/38O6p//TWtOQHf58GGumtz + qhRH78eLNIBy0oyut4dQ219UIrIwN+fG3muw3TUTaQel+JOXArG9u9Sf4Xng890rIZAPb3pwNkc+ + OHUb/vwA1aNdb3LvwmPIb0VIr1rnDSK5pwgww7LQQ3+8fVbdOhtOcGXiPb9Ww3CeXi/4zT+pDWQP + yJdoCGGGXQHvKs8dlpOsl+DLN2SbT0rEW88OwddPE0EYb2DWmpXwh3+8wpRzvhgwAF0kYrIxWmGY + 
fvwn7PY3rPuFYrJEqslPL7ArmmufHTZ3C+4EfYODY0qHEfJ2hKH+PiN569cRC3viwTGRO+p01Ttf + DtJJA9+8E1uuscvJVnfGnz6gzUsjA4kLjkCprgH6fq+I9dswVIPCvH155OgvK2PtKbtnZJHFxg9z + SdOsgKu3dUL8ABdOXk3BwHd8o3C+J/noejqEwUfLyDqXp2apvUKDtUIyukdO1fBQCkKg37OUgBlu + hvH+aU8AP06PP3kjja01VPSVv6fu/K59elHs1S9vo1+eGwiJTQsasVVj13wVDbkvpATP87rFB7M4 + +uwuyBkodrWFUelgfzm+Rw8+BvFF9Q81GvHSJj00yCnAB7k2h4lf8gpyZb2gp6Yr/nJWYgLeRcyR + tFZY9DkFKwX8/MTxiC1/8/X3kL3UFdpUihyNt53SAiWd2R9+mk7Baguiz6qkOFaXnDXON//YWSmO + DngV9Ze27IArn7YY7UOtEc8krKH7HjfUk9WK80IDKUTj64KN6vnkvDzkDNqPy4uI/gkN37xdABtp + fyG5unpFGyhpveoD8qEaOGmmmF2nEbgptPCuaz75BofVS5Xdi4xRlhtcKI6rBZo88rAVCjafgy3Q + 4HvailRDl9AU8oSPcLwShe7Oz5s/Y2LYkLBshe1vPsl2S5wCu/W2ZDlpSs5Ni7RQftjtV19yn8n7 + WgQrEwbkp3+z5Ey9ssnzD7W2n0ezxBS3gPvKARu9szU/03m0wS1fQryvV9hnv3zoW2/ovjwqA9fc + 8KQKJzfDcVWbQHTSvQUnISm+9dgyByOXFFgMwwF11b3wx69/+HM9HAnasNzPwQrqB+FMb4fY9b95 + X6mmcx0iMEzku36lnaBlWwXWn4nOpbEZEADTyaXWV+++eaIBETIv1EseNp8B2xaA2YOH5NTgYLRQ + T2DOV/rX71L++uXpp4acsPs5qsOCc38BbF8U1NLtPf/yPoJxgwm1WhSaf9ZbhM2pou7bSAa2ixfl + xyuIt68hmjNXSX71Esl2GQ7TZQ4YQKdsog4vjGa5q7xVE62vv/Ui84fFgAgOn7eElPCk+dx+fwJQ + KPnq124u1+hWg7u32pKOW04uasWhg6zUa3T9vEQwP+TqBehR1al92HzyBVXuCNdKExC4HAkg4WpT + AsetD9jyz+HA8x1LFMzME5G2qM37/TWOVaVMY+q1m4zT5Zl1YK08Aqwd+nzoDc5dePQVA+PiVeeL + wYEL3RY0dOdy0CxueT3B5WBcqWeFOJqPN22lGmsRk666Q396OnEIyrDIiPo++nz55b1f/kVCsTdN + CV/rRa2riv3h6UlrRBFa0jpCK2F2Gp7d9B4erMUl9WkinNNUOcFPJwWol8Mx+q4vjIqWJMq3fjr5 + /JFIAvjaiHAQrAmgh3vJ4JJzj3rPGke/8QBthO/Yz8TK/61vyG334fjwzYN+/Qn3rvlB4I41IEE/ + NaB3HSnWFaXlX782Alq4A/atZ9fM06hmUAeejTFREvPrH1J4ogPDv/H3OCTx65dv/upBw77rlWDZ + dfovH83JOo4LGL7riShn4ZTP2rEv4FlnBFstYn778zMoN2Lsre+RT0dbH+Fq252p9x4/JjcxGGE6 + VyHdlZdVMybSd3ynKKJaupbAH777MPwkPgk/0fjzV5KwXePv+nLOfnx2sJiLtesY5BOQpBG0Rt3T + /c39+H94eBpSDV/Qa91M4efgwmv13mBDqT8D8/xzoD6I9EFhuTuYlDjFCljep8ZaAyKfEWXFgLwt + IEbetAPCz6/+yU/ald3w2JIgtOLNAbvBuTYJuaeBKpXrF943QzMsxeBrYBscL2jZFELENTcN4ff5 + kFQftZxVzkGE4la8YbfbaIP00+dAWMvU+fI1z2yaysL22GAL65CPYXpd4GmwewKqOYrmXJI9qHjG + +uc/IpLSYfmzHursZ51L33xLiQfpTDZLWeR8VIQaLtPl8B2/jcmmOkPQreIbtrttPSwpbRgIxWNJ + zVuP+bLzK/LL87AGhVPzJ985yzeFvHlRN9y0Xi+4SPWWmujzjn75MPz7tyvgP//111//57fD4NXd + yud3Y8BUztO//3urwL+zW/ZvQRD/TcU/OxHImFXl3//81yaEvz9D9/pM/3fq2vI9/v3PX1BQ/2vD + wd9TN2XP/9nyr+8d//Nf/w8AAP//AwCe/y/15yAAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c09bf89ed1e1c7e-AMS + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Mon, 01 May 2023 17:29:43 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-kd1j0bcill5flig1m29wdaof + openai-processing-ms: + - '471' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - a9c89e6436668562329d35d627f16245 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integration/test_llm_utils.py b/tests/integration/test_llm_utils.py index 74b86016..553d3699 100644 --- a/tests/integration/test_llm_utils.py +++ b/tests/integration/test_llm_utils.py @@ -1,9 +1,14 @@ import string +from unittest.mock import MagicMock import pytest from numpy.random import RandomState +from pytest_mock import MockerFixture -from autogpt.llm.llm_utils import get_ada_embedding +from autogpt.config import Config +from autogpt.llm import llm_utils +from autogpt.llm.api_manager import ApiManager +from 
autogpt.llm.modelsinfo import COSTS from tests.utils import requires_api_key @@ -16,10 +21,42 @@ def random_large_string(): return "".join(random.choice(list(string.ascii_lowercase), size=n_characters)) -@pytest.mark.xfail(reason="We have no mechanism for embedding large strings.") +@pytest.fixture() +def api_manager(mocker: MockerFixture): + api_manager = ApiManager() + mocker.patch.multiple( + api_manager, + total_prompt_tokens=0, + total_completion_tokens=0, + total_cost=0, + ) + yield api_manager + + +@pytest.fixture() +def spy_create_embedding(mocker: MockerFixture): + return mocker.spy(llm_utils, "create_embedding") + + +@pytest.mark.vcr +@requires_api_key("OPENAI_API_KEY") +def test_get_ada_embedding( + config: Config, api_manager: ApiManager, spy_create_embedding: MagicMock +): + token_cost = COSTS[config.embedding_model]["prompt"] + llm_utils.get_ada_embedding("test") + + spy_create_embedding.assert_called_once_with("test", model=config.embedding_model) + + assert (prompt_tokens := api_manager.get_total_prompt_tokens()) == 1 + assert api_manager.get_total_completion_tokens() == 0 + assert api_manager.get_total_cost() == (prompt_tokens * token_cost) / 1000 + + +@pytest.mark.vcr @requires_api_key("OPENAI_API_KEY") def test_get_ada_embedding_large_context(random_large_string): # This test should be able to mock the openai call after we have a fix. We don't need # to hit the API to test the logic of the function (so not using vcr). This is a quick # regression test to document the issue. - get_ada_embedding(random_large_string) + llm_utils.get_ada_embedding(random_large_string) diff --git a/tests/test_llm_utils.py b/tests/unit/test_llm_utils.py similarity index 73% rename from tests/test_llm_utils.py rename to tests/unit/test_llm_utils.py index 27c9ff88..be36dc09 100644 --- a/tests/test_llm_utils.py +++ b/tests/unit/test_llm_utils.py @@ -1,8 +1,7 @@ import pytest from openai.error import APIError, RateLimitError -from autogpt.llm import COSTS, get_ada_embedding -from autogpt.llm.llm_utils import retry_openai_api +from autogpt.llm import llm_utils @pytest.fixture(params=[RateLimitError, APIError]) @@ -13,22 +12,12 @@ def error(request): return request.param("Error") -@pytest.fixture -def mock_create_embedding(mocker): - mock_response = mocker.MagicMock() - mock_response.usage.prompt_tokens = 5 - mock_response.__getitem__.side_effect = lambda key: [{"embedding": [0.1, 0.2, 0.3]}] - return mocker.patch( - "autogpt.llm.llm_utils.create_embedding", return_value=mock_response - ) - - def error_factory(error_instance, error_count, retry_count, warn_user=True): class RaisesError: def __init__(self): self.count = 0 - @retry_openai_api( + @llm_utils.retry_openai_api( num_retries=retry_count, backoff_base=0.001, warn_user=warn_user ) def __call__(self): @@ -41,7 +30,7 @@ def error_factory(error_instance, error_count, retry_count, warn_user=True): def test_retry_open_api_no_error(capsys): - @retry_openai_api() + @llm_utils.retry_openai_api() def f(): return 1 @@ -114,16 +103,31 @@ def test_retry_openapi_other_api_error(capsys): assert output.out == "" -def test_get_ada_embedding(mock_create_embedding, api_manager): - model = "text-embedding-ada-002" - embedding = get_ada_embedding("test") - mock_create_embedding.assert_called_once_with( - "test", model="text-embedding-ada-002" - ) - - assert embedding == [0.1, 0.2, 0.3] - - cost = COSTS[model]["prompt"] - assert api_manager.get_total_prompt_tokens() == 5 - assert api_manager.get_total_completion_tokens() == 0 - assert api_manager.get_total_cost() 
== (5 * cost) / 1000
+def test_chunked_tokens():
+    text = "Auto-GPT is an experimental open-source application showcasing the capabilities of the GPT-4 language model"
+    expected_output = [
+        (
+            13556,
+            12279,
+            2898,
+            374,
+            459,
+            22772,
+            1825,
+            31874,
+            3851,
+            67908,
+            279,
+            17357,
+            315,
+            279,
+            480,
+            2898,
+            12,
+            19,
+            4221,
+            1646,
+        )
+    ]
+    output = list(llm_utils.chunked_tokens(text, "cl100k_base", 8191))
+    assert output == expected_output
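The `test_chunked_tokens` case above fixes the helper's contract: `llm_utils.chunked_tokens` takes a string, a `tiktoken` encoding name (`"cl100k_base"`), and a chunk length, and yields the encoded token ids in tuples of at most that length. A minimal sketch consistent with the test — assuming a plain `tiktoken` implementation; the helper actually shipped in `autogpt/llm/llm_utils.py` may differ in detail — would be:

```python
import tiktoken


def chunked_tokens(text: str, encoding_name: str, chunk_length: int):
    """Encode `text` and yield chunks of at most `chunk_length` token ids."""
    encoding = tiktoken.get_encoding(encoding_name)  # e.g. "cl100k_base"
    tokens = encoding.encode(text)
    for i in range(0, len(tokens), chunk_length):
        # Yield tuples rather than lists, matching expected_output above.
        yield tuple(tokens[i : i + chunk_length])
```

With `chunk_length=8191` (the ada-002 context size), the 20-token sample sentence fits in one chunk, which is why `expected_output` holds a single tuple.

From 1d26f6b69720a561237f4d94d08bd9fae232aebb Mon Sep 17 00:00:00 2001
From: Reinier van der Leer
Date: Tue, 2 May 2023 02:48:27 +0200
Subject: [PATCH 216/222] Add warning for LLM to avoid context overflow (#3646)

---
 autogpt/agent/agent.py | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py
index bd105932..dbae1198 100644
--- a/autogpt/agent/agent.py
+++ b/autogpt/agent/agent.py
@@ -5,6 +5,7 @@ from autogpt.config import Config
 from autogpt.json_utils.json_fix_llm import fix_json_using_multiple_techniques
 from autogpt.json_utils.utilities import LLM_DEFAULT_RESPONSE_FORMAT, validate_json
 from autogpt.llm import chat_with_ai, create_chat_completion, create_chat_message
+from autogpt.llm.token_counter import count_string_tokens
 from autogpt.logs import logger, print_assistant_thoughts
 from autogpt.speech import say_text
 from autogpt.spinner import Spinner
@@ -233,6 +234,16 @@ class Agent:
                 )
                 result = f"Command {command_name} returned: " f"{command_result}"
 
+                result_tlength = count_string_tokens(
+                    str(command_result), cfg.fast_llm_model
+                )
+                memory_tlength = count_string_tokens(
+                    str(self.summary_memory), cfg.fast_llm_model
+                )
+                if result_tlength + memory_tlength + 600 > cfg.fast_token_limit:
+                    result = f"Failure: command {command_name} returned too much output. \
+                        Do not execute this command again with the same arguments."
+
                 for plugin in cfg.plugins:
                     if not plugin.can_handle_post_command():
                         continue

The guard above relies on `count_string_tokens` from `autogpt/llm/token_counter.py`, whose definition is not part of this patch. Judging from the call sites (a string plus a model name, returning an int), the usual `tiktoken`-based shape would be roughly:

```python
import tiktoken


def count_string_tokens(string: str, model_name: str) -> int:
    """Return the number of tokens `string` occupies for `model_name`."""
    # encoding_for_model resolves a model name such as "gpt-3.5-turbo"
    # to its tokenizer.
    encoding = tiktoken.encoding_for_model(model_name)
    return len(encoding.encode(string))
```

The fixed 600-token allowance presumably reserves headroom for the surrounding prompt, so an oversized command result is replaced by the short failure message before it can crowd out the `fast_token_limit` context window.

From 2e9c80a486c89ed7dc3df23e9193031903c04c91 Mon Sep 17 00:00:00 2001
From: gravelBridge <107640947+gravelBridge@users.noreply.github.com>
Date: Mon, 1 May 2023 20:49:44 -0700
Subject: [PATCH 217/222] Fix MACOS Zip Import Error when compressing plugin
 (#3629)

Co-authored-by: Nicholas Tindle

---
 autogpt/plugins.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/autogpt/plugins.py b/autogpt/plugins.py
index a5fe43d8..dbf37017 100644
--- a/autogpt/plugins.py
+++ b/autogpt/plugins.py
@@ -33,7 +33,7 @@ def inspect_zip_for_modules(zip_path: str, debug: bool = False) -> list[str]:
     result = []
     with zipfile.ZipFile(zip_path, "r") as zfile:
         for name in zfile.namelist():
-            if name.endswith("__init__.py"):
+            if name.endswith("__init__.py") and not name.startswith("__MACOSX"):
                 logger.debug(f"Found module '{name}' in the zipfile at: {name}")
                 result.append(name)
     if len(result) == 0:

Background for the one-line fix above: archives created with macOS's Archive Utility gain AppleDouble metadata entries under a top-level `__MACOSX/` directory, and the shadow entry for `__init__.py` (`._` prepended, giving `.___init__.py`) still ends with `__init__.py`, so the old `endswith` check mistook it for a plugin module. A small illustration, using a hypothetical `plugin.zip`:

```python
import zipfile

# Hypothetical archive zipped on macOS: alongside "my_plugin/__init__.py"
# it may contain a metadata entry "__MACOSX/my_plugin/.___init__.py".
with zipfile.ZipFile("plugin.zip", "r") as zfile:
    modules = [
        name
        for name in zfile.namelist()
        # endswith alone also matches the __MACOSX shadow entry;
        # the startswith test filters it out.
        if name.endswith("__init__.py") and not name.startswith("__MACOSX")
    ]
```

From 0e1c0c55f87b77214a933727fa410f0cb173e8bb Mon Sep 17 00:00:00 2001
From: Reinier van der Leer
Date: Tue, 2 May 2023 13:17:09 +0200
Subject: [PATCH 218/222] Synchronize `stable` -> `master` (#3677)

* Revert "Merge branch 'master' into stable"

This reverts commit c4008971f7a85c2a82744aa6fd940de44b137591, reversing
changes made to fe855fef13156aaa22153baaa953f55e0fdb6294.

* Fix `validate_json` file error when cwd != project root (#2665)

Co-authored-by: qianchengliang

* Revert "Revert "Merge branch 'master' into stable""

This reverts commit 999990b614f9c5e32dbfb560ab9516755d212884.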
--------- Co-authored-by: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com> Co-authored-by: Mick <30898949+mickjagger19@users.noreply.github.com> Co-authored-by: qianchengliang --- autogpt/json_utils/utilities.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/json_utils/utilities.py b/autogpt/json_utils/utilities.py index 7fcd2c0a..933de8e9 100644 --- a/autogpt/json_utils/utilities.py +++ b/autogpt/json_utils/utilities.py @@ -1,6 +1,6 @@ """Utilities for the json_fixes package.""" import json -import os +import os.path import re from jsonschema import Draft7Validator From 3a80e2f399b3d802f0c962487f8071d97ee72bb1 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Tue, 2 May 2023 13:26:30 +0200 Subject: [PATCH 219/222] Revert "Revert "Merge branch 'master' into stable"" This reverts commit 999990b614f9c5e32dbfb560ab9516755d212884. --- .env.template | 7 + .github/workflows/pr-label.yml | 20 ++ .gitignore | 2 + .isort.cfg | 10 + README.md | 31 +- autogpt/agent/agent.py | 95 ++++--- autogpt/agent/agent_manager.py | 56 +++- autogpt/app.py | 148 +++------- autogpt/chat.py | 24 +- autogpt/cli.py | 30 +- autogpt/commands/analyze_code.py | 6 + autogpt/commands/audio_text.py | 41 ++- autogpt/commands/command.py | 153 ++++++++++ autogpt/commands/execute_code.py | 38 ++- autogpt/commands/file_operations.py | 17 ++ autogpt/commands/git_operations.py | 20 +- autogpt/commands/google_search.py | 34 ++- autogpt/commands/image_gen.py | 3 +- autogpt/commands/improve_code.py | 6 + autogpt/commands/twitter.py | 24 +- autogpt/commands/web_selenium.py | 6 + autogpt/commands/write_tests.py | 6 + autogpt/config/ai_config.py | 40 ++- autogpt/config/config.py | 18 ++ autogpt/llm_utils.py | 63 +++-- autogpt/models/base_open_ai_plugin.py | 199 +++++++++++++ autogpt/plugins.py | 265 ++++++++++++++++++ autogpt/prompts/__init__.py | 0 .../generator.py} | 39 ++- autogpt/{ => prompts}/prompt.py | 107 +------ autogpt/token_counter.py | 5 +- autogpt/types/openai.py | 9 + autogpt/utils.py | 2 +- plugin.png | Bin 0 -> 33356 bytes plugins/__PUT_PLUGIN_ZIPS_HERE__ | 0 requirements.txt | 5 + scripts/__init__.py | 0 tests/mocks/__init__.py | 0 tests/mocks/mock_commands.py | 6 + tests/test_commands.py | 177 ++++++++++++ tests/test_prompt_generator.py | 3 +- .../Auto-GPT-Plugin-Test-master.zip | Bin 0 -> 14927 bytes .../unit/models/test_base_open_api_plugin.py | 79 ++++++ tests/unit/test_browse_scrape_text.py | 41 ++- tests/unit/test_plugins.py | 112 ++++++++ 45 files changed, 1601 insertions(+), 346 deletions(-) create mode 100644 .isort.cfg create mode 100644 autogpt/commands/command.py create mode 100644 autogpt/models/base_open_ai_plugin.py create mode 100644 autogpt/plugins.py create mode 100644 autogpt/prompts/__init__.py rename autogpt/{promptgenerator.py => prompts/generator.py} (78%) rename autogpt/{ => prompts}/prompt.py (50%) create mode 100644 autogpt/types/openai.py create mode 100644 plugin.png create mode 100644 plugins/__PUT_PLUGIN_ZIPS_HERE__ create mode 100644 scripts/__init__.py create mode 100644 tests/mocks/__init__.py create mode 100644 tests/mocks/mock_commands.py create mode 100644 tests/test_commands.py create mode 100644 tests/unit/data/test_plugins/Auto-GPT-Plugin-Test-master.zip create mode 100644 tests/unit/models/test_base_open_api_plugin.py create mode 100644 tests/unit/test_plugins.py diff --git a/.env.template b/.env.template index f1b511c2..60edecd6 100644 --- a/.env.template +++ b/.env.template @@ -188,3 +188,10 @@ OPENAI_API_KEY=your-openai-api-key 
# TW_CONSUMER_SECRET= # TW_ACCESS_TOKEN= # TW_ACCESS_TOKEN_SECRET= + +################################################################################ +### ALLOWLISTED PLUGINS +################################################################################ + +#ALLOWLISTED_PLUGINS - Sets the listed plugins that are allowed (Example: plugin1,plugin2,plugin3) +ALLOWLISTED_PLUGINS= diff --git a/.github/workflows/pr-label.yml b/.github/workflows/pr-label.yml index 92c5a66b..f1b200b0 100644 --- a/.github/workflows/pr-label.yml +++ b/.github/workflows/pr-label.yml @@ -26,3 +26,23 @@ jobs: repoToken: "${{ secrets.GITHUB_TOKEN }}" commentOnDirty: "This pull request has conflicts with the base branch, please resolve those so we can evaluate the pull request." commentOnClean: "Conflicts have been resolved! 🎉 A maintainer will review the pull request shortly." + + size: + if: ${{ github.event_name == 'pull_request_target' }} + permissions: + issues: write + pull-requests: write + runs-on: ubuntu-latest + steps: + - uses: codelytv/pr-size-labeler@v1.7.0 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + s_label: "size/s" + s_max_size: "10" + m_label: "size/m" + m_max_size: "50" + l_label: "size/l" + l_max_size: "200" + xl_label: "size/xl" + fail_if_xl: "false" + github_api_url: "api.github.com" diff --git a/.gitignore b/.gitignore index a6b3b80f..0c277d89 100644 --- a/.gitignore +++ b/.gitignore @@ -157,5 +157,7 @@ vicuna-* # mac .DS_Store +openai/ + # news CURRENT_BULLETIN.md \ No newline at end of file diff --git a/.isort.cfg b/.isort.cfg new file mode 100644 index 00000000..17eab482 --- /dev/null +++ b/.isort.cfg @@ -0,0 +1,10 @@ +[settings] +profile = black +multi_line_output = 3 +include_trailing_comma = true +force_grid_wrap = 0 +use_parentheses = true +ensure_newline_before_comments = true +line_length = 88 +sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER +skip = .tox,__pycache__,*.pyc,venv*/*,reports,venv,env,node_modules,.env,.venv,dist diff --git a/README.md b/README.md index 6d636c56..3e811a1c 100644 --- a/README.md +++ b/README.md @@ -31,14 +31,25 @@ Your support is greatly appreciated Development of this free, open-source project is made possible by all the contributors and sponsors. If you'd like to sponsor this project and have your avatar or company logo appear below click here.
+
-InfluxData  Roost.AI  NucleiAI  Algohash  TypingMind
+
+InfluxData
+Roost.AI
+NucleiAI
+Algohash
+TypingMind
+
+
robinicus  prompthero  crizzler  tob-le-rone  FSTatSBS  toverly1  ddtarazona  Nalhos  Kazamario  pingbotan  indoor47  AuroraHolding  kreativai  hunteraraujo  Explorergt92  judegomila   thepok   SpacingLily  merwanehamadi  m  zkonduit  maxxflyer  tekelsey  digisomni  nocodeclarity  tjarmain -Josecodesalot  saten-private  kenndanielso  johnculkin  Daniel1357  0xmatchmaker  belharethsami  nicoguyon  josephcmiller2  KiaArmani  Mobivs  rocks6  Odin519Tomas  ChrisDMT  thisisjeffchen  RealChrisSean  AIdevelopersAI  scryptedinc  jun784  goldenrecursion  allenstecat  LeeRobidas  cfarquhar  avy-ai  omphos  sunchongren  CrazySwami  fruition  Web3Capital  jazgarewal  rejunity  dexterityx  shawnharmsen  tommygeee  abhinav-pandey29  ColinConwell  kMag410  lucas-chu  Heitechsoft  bentoml  MediConCenHK  nnkostov  founderblocks-sils  CarmenCocoa  angiaou  fabrietech  Partender  RThaweewat  GalaxyVideoAgency  Brodie0  sultanmeghji  CatsMeow492  caitlynmeeks  garythebat  concreit  Pythagora-io  ASmithOWL  Cameron-Fulton  joaomdmoura  Dradstone  st617  wenfengwang  morcos  CrypteorCapital  jd3655  mathewhawkins  ZERO-A-ONE  MayurVirkar  SwftCoins  marv-technology  cxs  iddelacruz  AryaXAI  lmaugustin  Mr-Bishop42  vixul-accelerator  TheStoneMX  ciscodebs  ntwrite  DataMetis  ikarosai  refinery1  MetaPath01  ternary5  arjunb023  yx3110  vkozacek  eelbaz  rapidstartup  txtr99  tob-le-rone  neverinstall  projectonegames  DailyBotHQ  comet-ml  rickscode  webbcolton  MBassi91  
+CrypteorCapital  eelbaz  lucas-chu  tob-le-rone  jazgarewal  yx3110  MetaPath01  tommi-joentakanen  GalaxyVideoAgency  ciscodebs  josephjacks  mathewhawkins  cxs  txtr99  ChrisDMT  MayurVirkar  comet-ml  RThaweewat  DailyBotHQ  jacobyoby  jd3655  MBassi91  chatgpt-prompts  marv-technology  CrazySwami  tullytim  sultanmeghji  cfarquhar  goldenrecursion  ikarosai  avy-ai  MediConCenHK  RealChrisSean  DataMetis  CarmenCocoa  Cameron-Fulton  rejunity  belharethsami  AcountoOU  CatsMeow492  fabrietech  kenndanielso  arthur-x88  Heitechsoft  knifour  rickscode  Brodie0  ternary5  dexterityx  turintech  projectonegames  ZERO-A-ONE  KiaArmani  caitlynmeeks  tommygeee  st617  webbcolton  jondwillis  Mr-Bishop42  ASmithOWL  omphos  neverinstall  lmaugustin  vkozacek  abhinav-pandey29  ColinConwell  Partender  shawnharmsen  TheStoneMX  wenfengwang  allenstecat  johnculkin  Web3Capital  sunchongren  bentoml  angiaou  Dradstone  jun784  ZoneSixGames  Pythagora-io  nnkostov  AIdevelopersAI  josephcmiller2  VoiceBeer  AryaXAI  saten-private  refinery1  Mobivs  ntwrite  Josecodesalot  horazius  nicoguyon  0xmatchmaker  rocks6  SwftCoins  iddelacruz  scryptedinc  thisisjeffchen  kMag410  rapidstartup  founderblocks-sils  concreit  Odin519Tomas  Daniel1357  fruition  SparkplanAI  anvarazizov  joaomdmoura  morcos  LeeRobidas  arjunb023  garythebat  humungasaurus  
+ ## 🚀 Features @@ -254,6 +265,22 @@ export GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY" export CUSTOM_SEARCH_ENGINE_ID="YOUR_CUSTOM_SEARCH_ENGINE_ID" ``` +## Plugins + +See https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template for the template of the plugins. + +⚠️💀 WARNING 💀⚠️: Review the code of any plugin you use, this allows for any Python to be executed and do malicious things. Like stealing your API keys. + +Drop the repo's zipfile in the plugins folder. + +![Download Zip](https://raw.githubusercontent.com/BillSchumacher/Auto-GPT/master/plugin.png) + +If you add the plugins class name to the `ALLOWLISTED_PLUGINS` in the `.env` you will not be prompted otherwise you'll be warned before loading the plugin: + +``` +ALLOWLISTED_PLUGINS=example-plugin1,example-plugin2,example-plugin3 +``` + ## Setting Your Cache Type By default, Auto-GPT is going to use LocalCache instead of redis or Pinecone. diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index ee7885f8..189338f5 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -19,18 +19,25 @@ class Agent: memory: The memory object to use. full_message_history: The full message history. next_action_count: The number of actions to execute. - system_prompt: The system prompt is the initial prompt that defines everything the AI needs to know to achieve its task successfully. - Currently, the dynamic and customizable information in the system prompt are ai_name, description and goals. + system_prompt: The system prompt is the initial prompt that defines everything + the AI needs to know to achieve its task successfully. + Currently, the dynamic and customizable information in the system prompt are + ai_name, description and goals. - triggering_prompt: The last sentence the AI will see before answering. For Auto-GPT, this prompt is: - Determine which next command to use, and respond using the format specified above: - The triggering prompt is not part of the system prompt because between the system prompt and the triggering - prompt we have contextual information that can distract the AI and make it forget that its goal is to find the next task to achieve. + triggering_prompt: The last sentence the AI will see before answering. + For Auto-GPT, this prompt is: + Determine which next command to use, and respond using the format specified + above: + The triggering prompt is not part of the system prompt because between the + system prompt and the triggering + prompt we have contextual information that can distract the AI and make it + forget that its goal is to find the next task to achieve. SYSTEM PROMPT CONTEXTUAL INFORMATION (memory, previous conversations, anything relevant) TRIGGERING PROMPT - The triggering prompt reminds the AI about its short term meta task (defining the next task) + The triggering prompt reminds the AI about its short term meta task + (defining the next task) """ def __init__( @@ -39,6 +46,8 @@ class Agent: memory, full_message_history, next_action_count, + command_registry, + config, system_prompt, triggering_prompt, ): @@ -46,6 +55,8 @@ class Agent: self.memory = memory self.full_message_history = full_message_history self.next_action_count = next_action_count + self.command_registry = command_registry + self.config = config self.system_prompt = system_prompt self.triggering_prompt = triggering_prompt @@ -73,6 +84,7 @@ class Agent: # Send message to AI, get response with Spinner("Thinking... 
"): assistant_reply = chat_with_ai( + self, self.system_prompt, self.triggering_prompt, self.full_message_history, @@ -81,6 +93,10 @@ class Agent: ) # TODO: This hardcodes the model to use GPT3.5. Make this an argument assistant_reply_json = fix_json_using_multiple_techniques(assistant_reply) + for plugin in cfg.plugins: + if not plugin.can_handle_post_planning(): + continue + assistant_reply_json = plugin.post_planning(self, assistant_reply_json) # Print Assistant thoughts if assistant_reply_json != {}: @@ -89,14 +105,13 @@ class Agent: try: print_assistant_thoughts(self.ai_name, assistant_reply_json) command_name, arguments = get_command(assistant_reply_json) - # command_name, arguments = assistant_reply_json_valid["command"]["name"], assistant_reply_json_valid["command"]["args"] if cfg.speak_mode: say_text(f"I want to execute {command_name}") except Exception as e: logger.error("Error: \n", str(e)) if not cfg.continuous_mode and self.next_action_count == 0: - ### GET USER AUTHORIZATION TO EXECUTE COMMAND ### + # ### GET USER AUTHORIZATION TO EXECUTE COMMAND ### # Get key press: Prompt the user to press enter to continue or escape # to exit logger.typewriter_log( @@ -168,30 +183,46 @@ class Agent: elif command_name == "human_feedback": result = f"Human feedback: {user_input}" else: - result = ( - f"Command {command_name} returned: " - f"{execute_command(command_name, arguments)}" + for plugin in cfg.plugins: + if not plugin.can_handle_pre_command(): + continue + command_name, arguments = plugin.pre_command( + command_name, arguments + ) + command_result = execute_command( + self.command_registry, + command_name, + arguments, + self.config.prompt_generator, ) + result = f"Command {command_name} returned: " f"{command_result}" + + for plugin in cfg.plugins: + if not plugin.can_handle_post_command(): + continue + result = plugin.post_command(command_name, result) if self.next_action_count > 0: self.next_action_count -= 1 - - memory_to_add = ( - f"Assistant Reply: {assistant_reply} " - f"\nResult: {result} " - f"\nHuman Feedback: {user_input} " - ) - - self.memory.add(memory_to_add) - - # Check if there's a result from the command append it to the message - # history - if result is not None: - self.full_message_history.append(create_chat_message("system", result)) - logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result) - else: - self.full_message_history.append( - create_chat_message("system", "Unable to execute command") - ) - logger.typewriter_log( - "SYSTEM: ", Fore.YELLOW, "Unable to execute command" + if command_name != "do_nothing": + memory_to_add = ( + f"Assistant Reply: {assistant_reply} " + f"\nResult: {result} " + f"\nHuman Feedback: {user_input} " ) + + self.memory.add(memory_to_add) + + # Check if there's a result from the command append it to the message + # history + if result is not None: + self.full_message_history.append( + create_chat_message("system", result) + ) + logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result) + else: + self.full_message_history.append( + create_chat_message("system", "Unable to execute command") + ) + logger.typewriter_log( + "SYSTEM: ", Fore.YELLOW, "Unable to execute command" + ) diff --git a/autogpt/agent/agent_manager.py b/autogpt/agent/agent_manager.py index 898767a4..9a62ef61 100644 --- a/autogpt/agent/agent_manager.py +++ b/autogpt/agent/agent_manager.py @@ -1,10 +1,11 @@ """Agent manager for managing GPT agents""" from __future__ import annotations -from typing import Union +from typing import List, Union -from autogpt.config.config 
import Singleton +from autogpt.config.config import Config, Singleton from autogpt.llm_utils import create_chat_completion +from autogpt.types.openai import Message class AgentManager(metaclass=Singleton): @@ -13,6 +14,7 @@ class AgentManager(metaclass=Singleton): def __init__(self): self.next_key = 0 self.agents = {} # key, (task, full_message_history, model) + self.cfg = Config() # Create new GPT agent # TODO: Centralise use of create_chat_completion() to globally enforce token limit @@ -28,19 +30,32 @@ class AgentManager(metaclass=Singleton): Returns: The key of the new agent """ - messages = [ + messages: List[Message] = [ {"role": "user", "content": prompt}, ] - + for plugin in self.cfg.plugins: + if not plugin.can_handle_pre_instruction(): + continue + if plugin_messages := plugin.pre_instruction(messages): + messages.extend(iter(plugin_messages)) # Start GPT instance agent_reply = create_chat_completion( model=model, messages=messages, ) - # Update full message history messages.append({"role": "assistant", "content": agent_reply}) + plugins_reply = "" + for i, plugin in enumerate(self.cfg.plugins): + if not plugin.can_handle_on_instruction(): + continue + if plugin_result := plugin.on_instruction(messages): + sep = "\n" if i else "" + plugins_reply = f"{plugins_reply}{sep}{plugin_result}" + + if plugins_reply and plugins_reply != "": + messages.append({"role": "assistant", "content": plugins_reply}) key = self.next_key # This is done instead of len(agents) to make keys unique even if agents # are deleted @@ -48,6 +63,11 @@ class AgentManager(metaclass=Singleton): self.agents[key] = (task, messages, model) + for plugin in self.cfg.plugins: + if not plugin.can_handle_post_instruction(): + continue + agent_reply = plugin.post_instruction(agent_reply) + return key, agent_reply def message_agent(self, key: str | int, message: str) -> str: @@ -65,15 +85,37 @@ class AgentManager(metaclass=Singleton): # Add user message to message history before sending to agent messages.append({"role": "user", "content": message}) + for plugin in self.cfg.plugins: + if not plugin.can_handle_pre_instruction(): + continue + if plugin_messages := plugin.pre_instruction(messages): + for plugin_message in plugin_messages: + messages.append(plugin_message) + # Start GPT instance agent_reply = create_chat_completion( model=model, messages=messages, ) - # Update full message history messages.append({"role": "assistant", "content": agent_reply}) + plugins_reply = agent_reply + for i, plugin in enumerate(self.cfg.plugins): + if not plugin.can_handle_on_instruction(): + continue + if plugin_result := plugin.on_instruction(messages): + sep = "\n" if i else "" + plugins_reply = f"{plugins_reply}{sep}{plugin_result}" + # Update full message history + if plugins_reply and plugins_reply != "": + messages.append({"role": "assistant", "content": plugins_reply}) + + for plugin in self.cfg.plugins: + if not plugin.can_handle_post_instruction(): + continue + agent_reply = plugin.post_instruction(agent_reply) + return agent_reply def list_agents(self) -> list[tuple[str | int, str]]: @@ -86,7 +128,7 @@ class AgentManager(metaclass=Singleton): # Return a list of agent keys and their tasks return [(key, task) for key, (task, _, _) in self.agents.items()] - def delete_agent(self, key: Union[str, int]) -> bool: + def delete_agent(self, key: str | int) -> bool: """Delete an agent from the agent manager Args: diff --git a/autogpt/app.py b/autogpt/app.py index 58d9f716..cf8e29a3 100644 --- a/autogpt/app.py +++ b/autogpt/app.py @@ -3,33 
+3,12 @@ import json from typing import Dict, List, NoReturn, Union from autogpt.agent.agent_manager import AgentManager -from autogpt.commands.analyze_code import analyze_code -from autogpt.commands.audio_text import read_audio_from_file -from autogpt.commands.execute_code import ( - execute_python_file, - execute_shell, - execute_shell_popen, -) -from autogpt.commands.file_operations import ( - append_to_file, - delete_file, - download_file, - read_file, - search_files, - write_to_file, -) -from autogpt.commands.git_operations import clone_repository -from autogpt.commands.google_search import google_official_search, google_search -from autogpt.commands.image_gen import generate_image -from autogpt.commands.improve_code import improve_code -from autogpt.commands.twitter import send_tweet +from autogpt.commands.command import CommandRegistry, command from autogpt.commands.web_requests import scrape_links, scrape_text -from autogpt.commands.web_selenium import browse_website -from autogpt.commands.write_tests import write_tests from autogpt.config import Config -from autogpt.json_utils.json_fix_llm import fix_and_parse_json from autogpt.memory import get_memory from autogpt.processing.text import summarize_text +from autogpt.prompts.generator import PromptGenerator from autogpt.speech import say_text CFG = Config() @@ -108,7 +87,12 @@ def map_command_synonyms(command_name: str): return command_name -def execute_command(command_name: str, arguments): +def execute_command( + command_registry: CommandRegistry, + command_name: str, + arguments, + prompt: PromptGenerator, +): """Execute the command and return the result Args: @@ -119,105 +103,29 @@ def execute_command(command_name: str, arguments): str: The result of the command """ try: + cmd = command_registry.commands.get(command_name) + + # If the command is found, call it with the provided arguments + if cmd: + return cmd(**arguments) + + # TODO: Remove commands below after they are moved to the command registry. 
command_name = map_command_synonyms(command_name.lower()) - if command_name == "google": - # Check if the Google API key is set and use the official search method - # If the API key is not set or has only whitespaces, use the unofficial - # search method - key = CFG.google_api_key - if key and key.strip() and key != "your-google-api-key": - google_result = google_official_search(arguments["input"]) - return google_result - else: - google_result = google_search(arguments["input"]) - # google_result can be a list or a string depending on the search results - if isinstance(google_result, list): - safe_message = [ - google_result_single.encode("utf-8", "ignore") - for google_result_single in google_result - ] - else: - safe_message = google_result.encode("utf-8", "ignore") + if command_name == "memory_add": + return get_memory(CFG).add(arguments["string"]) - return safe_message.decode("utf-8") - elif command_name == "memory_add": - memory = get_memory(CFG) - return memory.add(arguments["string"]) - elif command_name == "start_agent": - return start_agent( - arguments["name"], arguments["task"], arguments["prompt"] - ) - elif command_name == "message_agent": - return message_agent(arguments["key"], arguments["message"]) - elif command_name == "list_agents": - return list_agents() - elif command_name == "delete_agent": - return delete_agent(arguments["key"]) - elif command_name == "get_text_summary": - return get_text_summary(arguments["url"], arguments["question"]) - elif command_name == "get_hyperlinks": - return get_hyperlinks(arguments["url"]) - elif command_name == "clone_repository": - return clone_repository( - arguments["repository_url"], arguments["clone_path"] - ) - elif command_name == "read_file": - return read_file(arguments["file"]) - elif command_name == "write_to_file": - return write_to_file(arguments["file"], arguments["text"]) - elif command_name == "append_to_file": - return append_to_file(arguments["file"], arguments["text"]) - elif command_name == "delete_file": - return delete_file(arguments["file"]) - elif command_name == "search_files": - return search_files(arguments["directory"]) - elif command_name == "download_file": - if not CFG.allow_downloads: - return "Error: You do not have user authorization to download files locally." - return download_file(arguments["url"], arguments["file"]) - elif command_name == "browse_website": - return browse_website(arguments["url"], arguments["question"]) # TODO: Change these to take in a file rather than pasted code, if # non-file is given, return instructions "Input should be a python - # filepath, write your code to file and try again" - elif command_name == "analyze_code": - return analyze_code(arguments["code"]) - elif command_name == "improve_code": - return improve_code(arguments["suggestions"], arguments["code"]) - elif command_name == "write_tests": - return write_tests(arguments["code"], arguments.get("focus")) - elif command_name == "execute_python_file": # Add this command - return execute_python_file(arguments["file"]) - elif command_name == "execute_shell": - if CFG.execute_local_commands: - return execute_shell(arguments["command_line"]) - else: - return ( - "You are not allowed to run local shell commands. To execute" - " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' " - "in your config. Do not attempt to bypass the restriction." 
- ) - elif command_name == "execute_shell_popen": - if CFG.execute_local_commands: - return execute_shell_popen(arguments["command_line"]) - else: - return ( - "You are not allowed to run local shell commands. To execute" - " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' " - "in your config. Do not attempt to bypass the restriction." - ) - elif command_name == "read_audio_from_file": - return read_audio_from_file(arguments["file"]) - elif command_name == "generate_image": - return generate_image(arguments["prompt"]) - elif command_name == "send_tweet": - return send_tweet(arguments["text"]) + # filepath, write your code to file and try again elif command_name == "do_nothing": return "No action performed." elif command_name == "task_complete": shutdown() else: + for command in prompt.commands: + if command_name == command["label"] or command_name == command["name"]: + return command["function"](*arguments.values()) return ( f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'" " list for available commands and only respond in the specified JSON" @@ -227,6 +135,9 @@ def execute_command(command_name: str, arguments): return f"Error: {str(e)}" +@command( + "get_text_summary", "Get text summary", '"url": "", "question": ""' +) def get_text_summary(url: str, question: str) -> str: """Return the results of a Google search @@ -242,6 +153,7 @@ def get_text_summary(url: str, question: str) -> str: return f""" "Result" : {summary}""" +@command("get_hyperlinks", "Get text summary", '"url": ""') def get_hyperlinks(url: str) -> Union[str, List[str]]: """Return the results of a Google search @@ -260,6 +172,11 @@ def shutdown() -> NoReturn: quit() +@command( + "start_agent", + "Start GPT Agent", + '"name": "", "task": "", "prompt": ""', +) def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) -> str: """Start an agent with a given name, task, and prompt @@ -292,6 +209,7 @@ def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) -> return f"Agent {name} created with key {key}. First response: {agent_response}" +@command("message_agent", "Message GPT Agent", '"key": "", "message": ""') def message_agent(key: str, message: str) -> str: """Message an agent with a given key and message""" # Check if the key is a valid integer @@ -306,7 +224,8 @@ def message_agent(key: str, message: str) -> str: return agent_response -def list_agents(): +@command("list_agents", "List GPT Agents", "") +def list_agents() -> str: """List all agents Returns: @@ -317,6 +236,7 @@ def list_agents(): ) +@command("delete_agent", "Delete GPT Agent", '"key": ""') def delete_agent(key: str) -> str: """Delete an agent with a given key diff --git a/autogpt/chat.py b/autogpt/chat.py index 1f6bca96..f9fc9471 100644 --- a/autogpt/chat.py +++ b/autogpt/chat.py @@ -6,11 +6,12 @@ from autogpt import token_counter from autogpt.config import Config from autogpt.llm_utils import create_chat_completion from autogpt.logs import logger +from autogpt.types.openai import Message cfg = Config() -def create_chat_message(role, content): +def create_chat_message(role, content) -> Message: """ Create a chat message with the given role and content. 
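For readers following the type change above: `Message` is the small `TypedDict` this patch adds in `autogpt/types/openai.py` (the same definition appears in `base_open_ai_plugin.py` further down). A minimal sketch of how `create_chat_message` presumably fills it; the function body is assumed, since the hunk only shows the signature change:

```python
from typing import TypedDict


class Message(TypedDict):
    """Shape of a single chat message, as defined in autogpt/types/openai.py."""

    role: str
    content: str


def create_chat_message(role: str, content: str) -> Message:
    """Presumed body: package role and content into a Message dict."""
    return {"role": role, "content": content}


# How chat_with_ai assembles its context from these messages:
context = [
    create_chat_message("system", "You are Auto-GPT."),
    create_chat_message("user", "Determine which next command to use."),
]
print(context[1]["content"])
```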
@@ -51,7 +52,7 @@ def generate_context(prompt, relevant_memory, full_message_history, model): # TODO: Change debug from hardcode to argument def chat_with_ai( - prompt, user_input, full_message_history, permanent_memory, token_limit + agent, prompt, user_input, full_message_history, permanent_memory, token_limit ): """Interact with the OpenAI API, sending the prompt, user input, message history, and permanent memory.""" @@ -135,6 +136,25 @@ def chat_with_ai( # Append user input, the length of this is accounted for above current_context.extend([create_chat_message("user", user_input)]) + plugin_count = len(cfg.plugins) + for i, plugin in enumerate(cfg.plugins): + if not plugin.can_handle_on_planning(): + continue + plugin_response = plugin.on_planning( + agent.prompt_generator, current_context + ) + if not plugin_response or plugin_response == "": + continue + tokens_to_add = token_counter.count_message_tokens( + [create_chat_message("system", plugin_response)], model + ) + if current_tokens_used + tokens_to_add > send_token_limit: + if cfg.debug_mode: + print("Plugin response too long, skipping:", plugin_response) + print("Plugins remaining at stop:", plugin_count - i) + break + current_context.append(create_chat_message("system", plugin_response)) + # Calculate remaining tokens tokens_remaining = token_limit - current_tokens_used # assert tokens_remaining >= 0, "Tokens remaining is negative. diff --git a/autogpt/cli.py b/autogpt/cli.py index 6fe9ecbb..51a946a7 100644 --- a/autogpt/cli.py +++ b/autogpt/cli.py @@ -75,11 +75,13 @@ def main( from colorama import Fore from autogpt.agent.agent import Agent + from autogpt.commands.command import CommandRegistry from autogpt.config import Config, check_openai_api_key from autogpt.configurator import create_config from autogpt.logs import logger from autogpt.memory import get_memory - from autogpt.prompt import construct_prompt + from autogpt.plugins import scan_plugins + from autogpt.prompts.prompt import construct_main_ai_config from autogpt.utils import get_current_git_branch, get_latest_bulletin if ctx.invoked_subcommand is None: @@ -123,7 +125,26 @@ def main( "parts of Auto-GPT with this version. 
" "Please consider upgrading to Python 3.10 or higher.", ) - system_prompt = construct_prompt() + + cfg = Config() + cfg.set_plugins(scan_plugins(cfg, cfg.debug_mode)) + # Create a CommandRegistry instance and scan default folder + command_registry = CommandRegistry() + command_registry.import_commands("autogpt.commands.analyze_code") + command_registry.import_commands("autogpt.commands.audio_text") + command_registry.import_commands("autogpt.commands.execute_code") + command_registry.import_commands("autogpt.commands.file_operations") + command_registry.import_commands("autogpt.commands.git_operations") + command_registry.import_commands("autogpt.commands.google_search") + command_registry.import_commands("autogpt.commands.image_gen") + command_registry.import_commands("autogpt.commands.improve_code") + command_registry.import_commands("autogpt.commands.twitter") + command_registry.import_commands("autogpt.commands.web_selenium") + command_registry.import_commands("autogpt.commands.write_tests") + command_registry.import_commands("autogpt.app") + ai_name = "" + ai_config = construct_main_ai_config() + ai_config.command_registry = command_registry # print(prompt) # Initialize variables full_message_history = [] @@ -140,11 +161,16 @@ def main( "Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}" ) logger.typewriter_log("Using Browser:", Fore.GREEN, cfg.selenium_web_browser) + system_prompt = ai_config.construct_full_prompt() + if cfg.debug_mode: + logger.typewriter_log("Prompt:", Fore.GREEN, system_prompt) agent = Agent( ai_name=ai_name, memory=memory, full_message_history=full_message_history, next_action_count=next_action_count, + command_registry=command_registry, + config=ai_config, system_prompt=system_prompt, triggering_prompt=triggering_prompt, ) diff --git a/autogpt/commands/analyze_code.py b/autogpt/commands/analyze_code.py index e02ea4c5..b87d73e1 100644 --- a/autogpt/commands/analyze_code.py +++ b/autogpt/commands/analyze_code.py @@ -1,9 +1,15 @@ """Code evaluation module.""" from __future__ import annotations +from autogpt.commands.command import command from autogpt.llm_utils import call_ai_function +@command( + "analyze_code", + "Analyze Code", + '"code": ""', +) def analyze_code(code: str) -> list[str]: """ A function that takes in a string and returns a response from create chat diff --git a/autogpt/commands/audio_text.py b/autogpt/commands/audio_text.py index cae32d4e..b409fefd 100644 --- a/autogpt/commands/audio_text.py +++ b/autogpt/commands/audio_text.py @@ -1,24 +1,51 @@ +"""Commands for converting audio to text.""" import json import requests +from autogpt.commands.command import command from autogpt.config import Config from autogpt.workspace import path_in_workspace -cfg = Config() +CFG = Config() -def read_audio_from_file(audio_path): - audio_path = path_in_workspace(audio_path) +@command( + "read_audio_from_file", + "Convert Audio to text", + '"filename": ""', + CFG.huggingface_audio_to_text_model, + "Configure huggingface_audio_to_text_model.", +) +def read_audio_from_file(filename: str) -> str: + """ + Convert audio to text. + + Args: + audio_path (str): The path to the audio file + + Returns: + str: The text from the audio + """ + audio_path = path_in_workspace(filename) with open(audio_path, "rb") as audio_file: audio = audio_file.read() return read_audio(audio) -def read_audio(audio): - model = cfg.huggingface_audio_to_text_model +def read_audio(audio: bytes) -> str: + """ + Convert audio to text. 
+ + Args: + audio (bytes): The audio to convert + + Returns: + str: The text from the audio + """ + model = CFG.huggingface_audio_to_text_model api_url = f"https://api-inference.huggingface.co/models/{model}" - api_token = cfg.huggingface_api_token + api_token = CFG.huggingface_api_token headers = {"Authorization": f"Bearer {api_token}"} if api_token is None: @@ -33,4 +60,4 @@ def read_audio(audio): ) text = json.loads(response.content.decode("utf-8"))["text"] - return "The audio says: " + text + return f"The audio says: {text}" diff --git a/autogpt/commands/command.py b/autogpt/commands/command.py new file mode 100644 index 00000000..e97af008 --- /dev/null +++ b/autogpt/commands/command.py @@ -0,0 +1,153 @@ +import importlib +import inspect +from typing import Any, Callable, Optional + +# Unique identifier for auto-gpt commands +AUTO_GPT_COMMAND_IDENTIFIER = "auto_gpt_command" + + +class Command: + """A class representing a command. + + Attributes: + name (str): The name of the command. + description (str): A brief description of what the command does. + signature (str): The signature of the function that the command executes. Defaults to None. + """ + + def __init__( + self, + name: str, + description: str, + method: Callable[..., Any], + signature: str = "", + enabled: bool = True, + disabled_reason: Optional[str] = None, + ): + self.name = name + self.description = description + self.method = method + self.signature = signature if signature else str(inspect.signature(self.method)) + self.enabled = enabled + self.disabled_reason = disabled_reason + + def __call__(self, *args, **kwargs) -> Any: + if not self.enabled: + return f"Command '{self.name}' is disabled: {self.disabled_reason}" + return self.method(*args, **kwargs) + + def __str__(self) -> str: + return f"{self.name}: {self.description}, args: {self.signature}" + + +class CommandRegistry: + """ + The CommandRegistry class is a manager for a collection of Command objects. + It allows the registration, modification, and retrieval of Command objects, + as well as the scanning and loading of command plugins from a specified + directory. + """ + + def __init__(self): + self.commands = {} + + def _import_module(self, module_name: str) -> Any: + return importlib.import_module(module_name) + + def _reload_module(self, module: Any) -> Any: + return importlib.reload(module) + + def register(self, cmd: Command) -> None: + self.commands[cmd.name] = cmd + + def unregister(self, command_name: str): + if command_name in self.commands: + del self.commands[command_name] + else: + raise KeyError(f"Command '{command_name}' not found in registry.") + + def reload_commands(self) -> None: + """Reloads all loaded command plugins.""" + for cmd_name in self.commands: + cmd = self.commands[cmd_name] + module = self._import_module(cmd.__module__) + reloaded_module = self._reload_module(module) + if hasattr(reloaded_module, "register"): + reloaded_module.register(self) + + def get_command(self, name: str) -> Callable[..., Any]: + return self.commands[name] + + def call(self, command_name: str, **kwargs) -> Any: + if command_name not in self.commands: + raise KeyError(f"Command '{command_name}' not found in registry.") + command = self.commands[command_name] + return command(**kwargs) + + def command_prompt(self) -> str: + """ + Returns a string representation of all registered `Command` objects for use in a prompt + """ + commands_list = [ + f"{idx + 1}. 
{str(cmd)}" for idx, cmd in enumerate(self.commands.values()) + ] + return "\n".join(commands_list) + + def import_commands(self, module_name: str) -> None: + """ + Imports the specified Python module containing command plugins. + + This method imports the associated module and registers any functions or + classes that are decorated with the `AUTO_GPT_COMMAND_IDENTIFIER` attribute + as `Command` objects. The registered `Command` objects are then added to the + `commands` dictionary of the `CommandRegistry` object. + + Args: + module_name (str): The name of the module to import for command plugins. + """ + + module = importlib.import_module(module_name) + + for attr_name in dir(module): + attr = getattr(module, attr_name) + # Register decorated functions + if hasattr(attr, AUTO_GPT_COMMAND_IDENTIFIER) and getattr( + attr, AUTO_GPT_COMMAND_IDENTIFIER + ): + self.register(attr.command) + # Register command classes + elif ( + inspect.isclass(attr) and issubclass(attr, Command) and attr != Command + ): + cmd_instance = attr() + self.register(cmd_instance) + + +def command( + name: str, + description: str, + signature: str = "", + enabled: bool = True, + disabled_reason: Optional[str] = None, +) -> Callable[..., Any]: + """The command decorator is used to create Command objects from ordinary functions.""" + + def decorator(func: Callable[..., Any]) -> Command: + cmd = Command( + name=name, + description=description, + method=func, + signature=signature, + enabled=enabled, + disabled_reason=disabled_reason, + ) + + def wrapper(*args, **kwargs) -> Any: + return func(*args, **kwargs) + + wrapper.command = cmd + + setattr(wrapper, AUTO_GPT_COMMAND_IDENTIFIER, True) + return wrapper + + return decorator diff --git a/autogpt/commands/execute_code.py b/autogpt/commands/execute_code.py index 11266f85..ff35d428 100644 --- a/autogpt/commands/execute_code.py +++ b/autogpt/commands/execute_code.py @@ -5,19 +5,24 @@ import subprocess import docker from docker.errors import ImageNotFound +from autogpt.commands.command import command +from autogpt.config import Config from autogpt.workspace import WORKSPACE_PATH, path_in_workspace +CFG = Config() -def execute_python_file(file: str) -> str: + +@command("execute_python_file", "Execute Python File", '"filename": ""') +def execute_python_file(filename: str) -> str: """Execute a Python file in a Docker container and return the output Args: - file (str): The name of the file to execute + filename (str): The name of the file to execute Returns: str: The output of the file """ - + file = filename print(f"Executing file '{file}' in workspace '{WORKSPACE_PATH}'") if not file.endswith(".py"): @@ -94,6 +99,15 @@ def execute_python_file(file: str) -> str: return f"Error: {str(e)}" +@command( + "execute_shell", + "Execute Shell Command, non-interactive commands only", + '"command_line": ""', + CFG.execute_local_commands, + "You are not allowed to run local shell commands. To execute" + " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' " + "in your config. Do not attempt to bypass the restriction.", +) def execute_shell(command_line: str) -> str: """Execute a shell command and return the output @@ -103,6 +117,13 @@ def execute_shell(command_line: str) -> str: Returns: str: The output of the command """ + + if not CFG.execute_local_commands: + return ( + "You are not allowed to run local shell commands. To execute" + " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' " + "in your config. Do not attempt to bypass the restriction." 
+ ) current_dir = os.getcwd() # Change dir into workspace if necessary if str(WORKSPACE_PATH) not in current_dir: @@ -117,9 +138,16 @@ def execute_shell(command_line: str) -> str: os.chdir(current_dir) - return output - +@command( + "execute_shell_popen", + "Execute Shell Command, non-interactive commands only", + '"command_line": ""', + CFG.execute_local_commands, + "You are not allowed to run local shell commands. To execute" + " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' " + "in your config. Do not attempt to bypass the restriction.", +) def execute_shell_popen(command_line) -> str: """Execute a shell command with Popen and returns an english description of the event and the process id diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py index ad145ec9..b73fb987 100644 --- a/autogpt/commands/file_operations.py +++ b/autogpt/commands/file_operations.py @@ -9,10 +9,13 @@ import requests from colorama import Back, Fore from requests.adapters import HTTPAdapter, Retry +from autogpt.commands.command import command +from autogpt.config import Config from autogpt.spinner import Spinner from autogpt.utils import readable_file_size from autogpt.workspace import WORKSPACE_PATH, path_in_workspace +CFG = Config() LOG_FILE = "file_logger.txt" LOG_FILE_PATH = WORKSPACE_PATH / LOG_FILE @@ -81,6 +84,7 @@ def split_file( start += max_length - overlap +@command("read_file", "Read file", '"filename": ""') def read_file(filename: str) -> str: """Read a file and return the contents @@ -133,6 +137,7 @@ def ingest_file( print(f"Error while ingesting file '{filename}': {str(e)}") +@command("write_to_file", "Write to file", '"filename": "", "text": ""') def write_to_file(filename: str, text: str) -> str: """Write text to a file @@ -158,6 +163,9 @@ def write_to_file(filename: str, text: str) -> str: return f"Error: {str(e)}" +@command( + "append_to_file", "Append to file", '"filename": "", "text": ""' +) def append_to_file(filename: str, text: str, shouldLog: bool = True) -> str: """Append text to a file @@ -181,6 +189,7 @@ def append_to_file(filename: str, text: str, shouldLog: bool = True) -> str: return f"Error: {str(e)}" +@command("delete_file", "Delete file", '"filename": ""') def delete_file(filename: str) -> str: """Delete a file @@ -201,6 +210,7 @@ def delete_file(filename: str) -> str: return f"Error: {str(e)}" +@command("search_files", "Search Files", '"directory": ""') def search_files(directory: str) -> list[str]: """Search for files in a directory @@ -227,6 +237,13 @@ def search_files(directory: str) -> list[str]: return found_files +@command( + "download_file", + "Search Files", + '"url": "", "filename": ""', + CFG.allow_downloads, + "Error: You do not have user authorization to download files locally.", +) def download_file(url, filename): """Downloads a file Args: diff --git a/autogpt/commands/git_operations.py b/autogpt/commands/git_operations.py index 028f3b8d..1fb99e5b 100644 --- a/autogpt/commands/git_operations.py +++ b/autogpt/commands/git_operations.py @@ -1,26 +1,34 @@ """Git operations for autogpt""" -import git +from git.repo import Repo +from autogpt.commands.command import command from autogpt.config import Config from autogpt.workspace import path_in_workspace CFG = Config() -def clone_repository(repo_url: str, clone_path: str) -> str: +@command( + "clone_repository", + "Clone Repositoryy", + '"repository_url": "", "clone_path": ""', + CFG.github_username and CFG.github_api_key, + "Configure github_username and github_api_key.", +) 
+def clone_repository(repository_url: str, clone_path: str) -> str: """Clone a GitHub repository locally Args: - repo_url (str): The URL of the repository to clone + repository_url (str): The URL of the repository to clone clone_path (str): The path to clone the repository to Returns: str: The result of the clone operation""" - split_url = repo_url.split("//") + split_url = repository_url.split("//") auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url) safe_clone_path = path_in_workspace(clone_path) try: - git.Repo.clone_from(auth_repo_url, safe_clone_path) - return f"""Cloned {repo_url} to {safe_clone_path}""" + Repo.clone_from(auth_repo_url, safe_clone_path) + return f"""Cloned {repository_url} to {safe_clone_path}""" except Exception as e: return f"Error: {str(e)}" diff --git a/autogpt/commands/google_search.py b/autogpt/commands/google_search.py index 7d38ce75..fcc1a9f4 100644 --- a/autogpt/commands/google_search.py +++ b/autogpt/commands/google_search.py @@ -5,11 +5,13 @@ import json from duckduckgo_search import ddg +from autogpt.commands.command import command from autogpt.config import Config CFG = Config() +@command("google", "Google Search", '"query": ""', not CFG.google_api_key) def google_search(query: str, num_results: int = 8) -> str: """Return the results of a Google search @@ -31,9 +33,17 @@ def google_search(query: str, num_results: int = 8) -> str: for j in results: search_results.append(j) - return json.dumps(search_results, ensure_ascii=False, indent=4) + results = json.dumps(search_results, ensure_ascii=False, indent=4) + return safe_google_results(results) +@command( + "google", + "Google Search", + '"query": ""', + bool(CFG.google_api_key), + "Configure google_api_key.", +) def google_official_search(query: str, num_results: int = 8) -> str | list[str]: """Return the results of a Google search using the official Google API @@ -82,6 +92,26 @@ def google_official_search(query: str, num_results: int = 8) -> str | list[str]: return "Error: The provided Google API key is invalid or missing." else: return f"Error: {e}" + # google_result can be a list or a string depending on the search results # Return the list of search result URLs - return search_results_links + return safe_google_results(search_results_links) + + +def safe_google_results(results: str | list) -> str: + """ + Return the results of a google search in a safe format. + + Args: + results (str | list): The search results. + + Returns: + str: The results of the search. + """ + if isinstance(results, list): + safe_message = json.dumps( + [result.enocde("utf-8", "ignore") for result in results] + ) + else: + safe_message = results.encode("utf-8", "ignore").decode("utf-8") + return safe_message diff --git a/autogpt/commands/image_gen.py b/autogpt/commands/image_gen.py index 0809fcdd..60cdaec0 100644 --- a/autogpt/commands/image_gen.py +++ b/autogpt/commands/image_gen.py @@ -1,6 +1,5 @@ """ Image Generation Module for AutoGPT.""" import io -import os.path import uuid from base64 import b64decode @@ -8,12 +7,14 @@ import openai import requests from PIL import Image +from autogpt.commands.command import command from autogpt.config import Config from autogpt.workspace import path_in_workspace CFG = Config() +@command("generate_image", "Generate Image", '"prompt": ""', CFG.image_provider) def generate_image(prompt: str, size: int = 256) -> str: """Generate an image from a prompt. 
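The paired `@command("google", ...)` registrations above lean on the `enabled`/`disabled_reason` machinery from `autogpt/commands/command.py` earlier in this patch: a disabled command returns its reason instead of running. A self-contained sketch of that gating (a stripped-down re-implementation for illustration; `google_api_key` is a stand-in for the real config value):

```python
from typing import Any, Callable, Optional


class Command:
    """Stripped-down sketch of the Command class added in this patch."""

    def __init__(
        self,
        name: str,
        description: str,
        method: Callable[..., Any],
        enabled: bool = True,
        disabled_reason: Optional[str] = None,
    ) -> None:
        self.name = name
        self.description = description
        self.method = method
        self.enabled = enabled
        self.disabled_reason = disabled_reason

    def __call__(self, *args: Any, **kwargs: Any) -> Any:
        # A disabled command never runs; it returns its disabled_reason instead.
        if not self.enabled:
            return f"Command '{self.name}' is disabled: {self.disabled_reason}"
        return self.method(*args, **kwargs)


# Hypothetical config value; with no API key, only the DuckDuckGo search is enabled.
google_api_key = ""

unofficial = Command(
    "google", "Google Search", lambda query: f"ddg results for {query!r}",
    enabled=not bool(google_api_key),
)
official = Command(
    "google", "Google Search", lambda query: f"api results for {query!r}",
    enabled=bool(google_api_key), disabled_reason="Configure google_api_key.",
)

print(unofficial(query="auto-gpt"))  # ddg results for 'auto-gpt'
print(official(query="auto-gpt"))    # Command 'google' is disabled: Configure google_api_key.
```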
diff --git a/autogpt/commands/improve_code.py b/autogpt/commands/improve_code.py index e3440d8b..41a369b4 100644 --- a/autogpt/commands/improve_code.py +++ b/autogpt/commands/improve_code.py @@ -2,9 +2,15 @@ from __future__ import annotations import json +from autogpt.commands.command import command from autogpt.llm_utils import call_ai_function +@command( + "improve_code", + "Get Improved Code", + '"suggestions": "", "code": ""', +) def improve_code(suggestions: list[str], code: str) -> str: """ A function that takes in code and suggestions and returns a response from create diff --git a/autogpt/commands/twitter.py b/autogpt/commands/twitter.py index 3eaed36e..f0502271 100644 --- a/autogpt/commands/twitter.py +++ b/autogpt/commands/twitter.py @@ -1,12 +1,30 @@ +"""A module that contains a command to send a tweet.""" import os import tweepy from dotenv import load_dotenv +from autogpt.commands.command import command + load_dotenv() -def send_tweet(tweet_text): +@command( + "send_tweet", + "Send Tweet", + '"tweet_text": ""', +) +def send_tweet(tweet_text: str) -> str: + """ + A function that takes in a string and returns a response from create chat + completion api call. + + Args: + tweet_text (str): Text to be tweeted. + + Returns: + A result from sending the tweet. + """ consumer_key = os.environ.get("TW_CONSUMER_KEY") consumer_secret = os.environ.get("TW_CONSUMER_SECRET") access_token = os.environ.get("TW_ACCESS_TOKEN") @@ -21,6 +39,6 @@ def send_tweet(tweet_text): # Send tweet try: api.update_status(tweet_text) - print("Tweet sent successfully!") + return "Tweet sent successfully!" except tweepy.TweepyException as e: - print("Error sending tweet: {}".format(e.reason)) + return f"Error sending tweet: {e.reason}" diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py index 11bdfeb1..e0e0d70a 100644 --- a/autogpt/commands/web_selenium.py +++ b/autogpt/commands/web_selenium.py @@ -18,6 +18,7 @@ from webdriver_manager.chrome import ChromeDriverManager from webdriver_manager.firefox import GeckoDriverManager import autogpt.processing.text as summary +from autogpt.commands.command import command from autogpt.config import Config from autogpt.processing.html import extract_hyperlinks, format_hyperlinks @@ -25,6 +26,11 @@ FILE_DIR = Path(__file__).parent.parent CFG = Config() +@command( + "browse_website", + "Browse Website", + '"url": "", "question": ""', +) def browse_website(url: str, question: str) -> tuple[str, WebDriver]: """Browse a website and return the answer and links to the user diff --git a/autogpt/commands/write_tests.py b/autogpt/commands/write_tests.py index 35a08653..91cd9304 100644 --- a/autogpt/commands/write_tests.py +++ b/autogpt/commands/write_tests.py @@ -3,9 +3,15 @@ from __future__ import annotations import json +from autogpt.commands.command import command from autogpt.llm_utils import call_ai_function +@command( + "write_tests", + "Write Tests", + '"code": "", "focus": ""', +) def write_tests(code: str, focus: list[str]) -> str: """ A function that takes in code and focus topics and returns a response from create diff --git a/autogpt/config/ai_config.py b/autogpt/config/ai_config.py index d50c30be..1e48ab4d 100644 --- a/autogpt/config/ai_config.py +++ b/autogpt/config/ai_config.py @@ -5,10 +5,16 @@ A module that contains the AIConfig class object that contains the configuration from __future__ import annotations import os -from typing import Type +from pathlib import Path +from typing import Optional, Type import yaml +from 
autogpt.prompts.generator import PromptGenerator + +# Soon this will go in a folder where it remembers more stuff about the run(s) +SAVE_FILE = str(Path(os.getcwd()) / "ai_settings.yaml") + class AIConfig: """ @@ -38,9 +44,8 @@ class AIConfig: self.ai_name = ai_name self.ai_role = ai_role self.ai_goals = ai_goals - - # Soon this will go in a folder where it remembers more stuff about the run(s) - SAVE_FILE = os.path.join(os.path.dirname(__file__), "..", "ai_settings.yaml") + self.prompt_generator = None + self.command_registry = None @staticmethod def load(config_file: str = SAVE_FILE) -> "AIConfig": @@ -89,7 +94,9 @@ class AIConfig: with open(config_file, "w", encoding="utf-8") as file: yaml.dump(config, file, allow_unicode=True) - def construct_full_prompt(self) -> str: + def construct_full_prompt( + self, prompt_generator: Optional[PromptGenerator] = None + ) -> str: """ Returns a prompt to the user with the class information in an organized fashion. @@ -108,14 +115,25 @@ class AIConfig: "" ) - from autogpt.prompt import get_prompt + from autogpt.config import Config + from autogpt.prompts.prompt import build_default_prompt_generator + + cfg = Config() + if prompt_generator is None: + prompt_generator = build_default_prompt_generator() + prompt_generator.goals = self.ai_goals + prompt_generator.name = self.ai_name + prompt_generator.role = self.ai_role + prompt_generator.command_registry = self.command_registry + for plugin in cfg.plugins: + if not plugin.can_handle_post_prompt(): + continue + prompt_generator = plugin.post_prompt(prompt_generator) # Construct full prompt - full_prompt = ( - f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n" - ) + full_prompt = f"You are {prompt_generator.name}, {prompt_generator.role}\n{prompt_start}\n\nGOALS:\n\n" for i, goal in enumerate(self.ai_goals): full_prompt += f"{i+1}. {goal}\n" - - full_prompt += f"\n\n{get_prompt()}" + self.prompt_generator = prompt_generator + full_prompt += f"\n\n{prompt_generator.generate_prompt_string()}" return full_prompt diff --git a/autogpt/config/config.py b/autogpt/config/config.py index c284a4ac..801df2bb 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -1,8 +1,10 @@ """Configuration class to store the state of bools for different scripts access.""" import os +from typing import List import openai import yaml +from auto_gpt_plugin_template import AutoGPTPluginTemplate from colorama import Fore from dotenv import load_dotenv @@ -123,6 +125,18 @@ class Config(metaclass=Singleton): # Initialize the OpenAI API client openai.api_key = self.openai_api_key + self.plugins_dir = os.getenv("PLUGINS_DIR", "plugins") + self.plugins: List[AutoGPTPluginTemplate] = [] + self.plugins_openai = [] + + plugins_allowlist = os.getenv("ALLOWLISTED_PLUGINS") + if plugins_allowlist: + plugins_allowlist = plugins_allowlist.split(",") + self.plugins_whitelist = plugins_allowlist + else: + self.plugins_whitelist = [] + self.plugins_blacklist = [] + def get_azure_deployment_id_for_model(self, model: str) -> str: """ Returns the relevant deployment id for the model specified. 
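The allowlist handling added to `Config` above is a plain comma-split of an environment variable. A small sketch of the resulting behavior, mirroring that hunk (the plugin names are placeholders):

```python
import os

# Mirrors the Config hunk above: ALLOWLISTED_PLUGINS is a comma-separated list,
# and an unset or empty variable yields an empty whitelist.
os.environ["ALLOWLISTED_PLUGINS"] = "example-plugin1,example-plugin2"

plugins_allowlist = os.getenv("ALLOWLISTED_PLUGINS")
plugins_whitelist = plugins_allowlist.split(",") if plugins_allowlist else []

print(plugins_whitelist)  # ['example-plugin1', 'example-plugin2']
```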
@@ -241,6 +255,10 @@ class Config(metaclass=Singleton): """Set the debug mode value.""" self.debug_mode = value + def set_plugins(self, value: list) -> None: + """Set the plugins value.""" + self.plugins = value + def check_openai_api_key() -> None: """Check if the OpenAI API key is set in config.py or as an environment variable.""" diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py index 821820ff..8b85959c 100644 --- a/autogpt/llm_utils.py +++ b/autogpt/llm_utils.py @@ -1,7 +1,7 @@ from __future__ import annotations import time -from ast import List +from typing import List, Optional import openai from colorama import Fore, Style @@ -9,6 +9,7 @@ from openai.error import APIError, RateLimitError from autogpt.config import Config from autogpt.logs import logger +from autogpt.types.openai import Message CFG = Config() @@ -37,8 +38,8 @@ def call_ai_function( # For each arg, if any are None, convert to "None": args = [str(arg) if arg is not None else "None" for arg in args] # parse args to comma separated string - args = ", ".join(args) - messages = [ + args: str = ", ".join(args) + messages: List[Message] = [ { "role": "system", "content": f"You are now the following python function: ```# {description}" @@ -53,15 +54,15 @@ def call_ai_function( # Overly simple abstraction until we create something better # simple retry mechanism when getting a rate error or a bad gateway def create_chat_completion( - messages: list, # type: ignore - model: str | None = None, + messages: List[Message], # type: ignore + model: Optional[str] = None, temperature: float = CFG.temperature, - max_tokens: int | None = None, + max_tokens: Optional[int] = None, ) -> str: """Create a chat completion using the OpenAI API Args: - messages (list[dict[str, str]]): The messages to send to the chat completion + messages (List[Message]): The messages to send to the chat completion model (str, optional): The model to use. Defaults to None. temperature (float, optional): The temperature to use. Defaults to 0.9. max_tokens (int, optional): The max tokens to use. Defaults to None. @@ -69,15 +70,28 @@ def create_chat_completion( Returns: str: The response from the chat completion """ - response = None num_retries = 10 warned_user = False if CFG.debug_mode: print( - Fore.GREEN - + f"Creating chat completion with model {model}, temperature {temperature}," - f" max_tokens {max_tokens}" + Fore.RESET + f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}" ) + for plugin in CFG.plugins: + if plugin.can_handle_chat_completion( + messages=messages, + model=model, + temperature=temperature, + max_tokens=max_tokens, + ): + message = plugin.handle_chat_completion( + messages=messages, + model=model, + temperature=temperature, + max_tokens=max_tokens, + ) + if message is not None: + return message + response = None for attempt in range(num_retries): backoff = 2 ** (attempt + 2) try: @@ -100,8 +114,7 @@ def create_chat_completion( except RateLimitError: if CFG.debug_mode: print( - Fore.RED + "Error: ", - f"Reached rate limit, passing..." + Fore.RESET, + f"{Fore.RED}Error: ", f"Reached rate limit, passing...{Fore.RESET}" ) if not warned_user: logger.double_check( @@ -110,16 +123,14 @@ def create_chat_completion( ) warned_user = True except APIError as e: - if e.http_status == 502: - pass - else: + if e.http_status != 502: raise if attempt == num_retries - 1: raise if CFG.debug_mode: print( - Fore.RED + "Error: ", - f"API Bad gateway. Waiting {backoff} seconds..." 
+ Fore.RESET, + f"{Fore.RED}Error: ", + f"API Bad gateway. Waiting {backoff} seconds...{Fore.RESET}", ) time.sleep(backoff) if response is None: @@ -134,8 +145,12 @@ def create_chat_completion( raise RuntimeError(f"Failed to get response after {num_retries} retries") else: quit(1) - - return response.choices[0].message["content"] + resp = response.choices[0].message["content"] + for plugin in CFG.plugins: + if not plugin.can_handle_on_response(): + continue + resp = plugin.on_response(resp) + return resp def create_embedding_with_ada(text) -> list: @@ -158,15 +173,13 @@ def create_embedding_with_ada(text) -> list: except RateLimitError: pass except APIError as e: - if e.http_status == 502: - pass - else: + if e.http_status != 502: raise if attempt == num_retries - 1: raise if CFG.debug_mode: print( - Fore.RED + "Error: ", - f"API Bad gateway. Waiting {backoff} seconds..." + Fore.RESET, + f"{Fore.RED}Error: ", + f"API Bad gateway. Waiting {backoff} seconds...{Fore.RESET}", ) time.sleep(backoff) diff --git a/autogpt/models/base_open_ai_plugin.py b/autogpt/models/base_open_ai_plugin.py new file mode 100644 index 00000000..046295c0 --- /dev/null +++ b/autogpt/models/base_open_ai_plugin.py @@ -0,0 +1,199 @@ +"""Handles loading of plugins.""" +from typing import Any, Dict, List, Optional, Tuple, TypedDict, TypeVar + +from auto_gpt_plugin_template import AutoGPTPluginTemplate + +PromptGenerator = TypeVar("PromptGenerator") + + +class Message(TypedDict): + role: str + content: str + + +class BaseOpenAIPlugin(AutoGPTPluginTemplate): + """ + This is a BaseOpenAIPlugin class for generating Auto-GPT plugins. + """ + + def __init__(self, manifests_specs_clients: dict): + # super().__init__() + self._name = manifests_specs_clients["manifest"]["name_for_model"] + self._version = manifests_specs_clients["manifest"]["schema_version"] + self._description = manifests_specs_clients["manifest"]["description_for_model"] + self._client = manifests_specs_clients["client"] + self._manifest = manifests_specs_clients["manifest"] + self._openapi_spec = manifests_specs_clients["openapi_spec"] + + def can_handle_on_response(self) -> bool: + """This method is called to check that the plugin can + handle the on_response method. + Returns: + bool: True if the plugin can handle the on_response method.""" + return False + + def on_response(self, response: str, *args, **kwargs) -> str: + """This method is called when a response is received from the model.""" + return response + + def can_handle_post_prompt(self) -> bool: + """This method is called to check that the plugin can + handle the post_prompt method. + Returns: + bool: True if the plugin can handle the post_prompt method.""" + return False + + def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator: + """This method is called just after the generate_prompt is called, + but actually before the prompt is generated. + Args: + prompt (PromptGenerator): The prompt generator. + Returns: + PromptGenerator: The prompt generator. + """ + return prompt + + def can_handle_on_planning(self) -> bool: + """This method is called to check that the plugin can + handle the on_planning method. + Returns: + bool: True if the plugin can handle the on_planning method.""" + return False + + def on_planning( + self, prompt: PromptGenerator, messages: List[Message] + ) -> Optional[str]: + """This method is called before the planning chat completion is done. + Args: + prompt (PromptGenerator): The prompt generator. + messages (List[str]): The list of messages. 
+ """ + pass + + def can_handle_post_planning(self) -> bool: + """This method is called to check that the plugin can + handle the post_planning method. + Returns: + bool: True if the plugin can handle the post_planning method.""" + return False + + def post_planning(self, response: str) -> str: + """This method is called after the planning chat completion is done. + Args: + response (str): The response. + Returns: + str: The resulting response. + """ + return response + + def can_handle_pre_instruction(self) -> bool: + """This method is called to check that the plugin can + handle the pre_instruction method. + Returns: + bool: True if the plugin can handle the pre_instruction method.""" + return False + + def pre_instruction(self, messages: List[Message]) -> List[Message]: + """This method is called before the instruction chat is done. + Args: + messages (List[Message]): The list of context messages. + Returns: + List[Message]: The resulting list of messages. + """ + return messages + + def can_handle_on_instruction(self) -> bool: + """This method is called to check that the plugin can + handle the on_instruction method. + Returns: + bool: True if the plugin can handle the on_instruction method.""" + return False + + def on_instruction(self, messages: List[Message]) -> Optional[str]: + """This method is called when the instruction chat is done. + Args: + messages (List[Message]): The list of context messages. + Returns: + Optional[str]: The resulting message. + """ + pass + + def can_handle_post_instruction(self) -> bool: + """This method is called to check that the plugin can + handle the post_instruction method. + Returns: + bool: True if the plugin can handle the post_instruction method.""" + return False + + def post_instruction(self, response: str) -> str: + """This method is called after the instruction chat is done. + Args: + response (str): The response. + Returns: + str: The resulting response. + """ + return response + + def can_handle_pre_command(self) -> bool: + """This method is called to check that the plugin can + handle the pre_command method. + Returns: + bool: True if the plugin can handle the pre_command method.""" + return False + + def pre_command( + self, command_name: str, arguments: Dict[str, Any] + ) -> Tuple[str, Dict[str, Any]]: + """This method is called before the command is executed. + Args: + command_name (str): The command name. + arguments (Dict[str, Any]): The arguments. + Returns: + Tuple[str, Dict[str, Any]]: The command name and the arguments. + """ + return command_name, arguments + + def can_handle_post_command(self) -> bool: + """This method is called to check that the plugin can + handle the post_command method. + Returns: + bool: True if the plugin can handle the post_command method.""" + return False + + def post_command(self, command_name: str, response: str) -> str: + """This method is called after the command is executed. + Args: + command_name (str): The command name. + response (str): The response. + Returns: + str: The resulting response. + """ + return response + + def can_handle_chat_completion( + self, messages: Dict[Any, Any], model: str, temperature: float, max_tokens: int + ) -> bool: + """This method is called to check that the plugin can + handle the chat_completion method. + Args: + messages (List[Message]): The messages. + model (str): The model name. + temperature (float): The temperature. + max_tokens (int): The max tokens. 
+        Returns:
+            bool: True if the plugin can handle the chat_completion method."""
+        return False
+
+    def handle_chat_completion(
+        self, messages: List[Message], model: str, temperature: float, max_tokens: int
+    ) -> str:
+        """This method is called when the chat completion is done.
+        Args:
+            messages (List[Message]): The messages.
+            model (str): The model name.
+            temperature (float): The temperature.
+            max_tokens (int): The max tokens.
+        Returns:
+            str: The resulting response.
+        """
+        pass
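The `can_handle_*` guard plus handler pairs stubbed out above are the whole dispatch contract: `create_chat_completion` asks each plugin whether it wants the call and returns the first non-`None` answer. A self-contained sketch of that round-trip, with `EchoPlugin` and `dispatch` as invented stand-ins rather than anything shipped in this patch:

```python
from typing import List, Optional, TypedDict


class Message(TypedDict):  # mirrors autogpt/types/openai.py introduced below
    role: str
    content: str


class EchoPlugin:
    """Stand-in plugin implementing only the two chat-completion hooks."""

    def can_handle_chat_completion(self, messages, model, temperature, max_tokens) -> bool:
        return model == "echo"  # only volunteer for a hypothetical "echo" model

    def handle_chat_completion(self, messages, model, temperature, max_tokens) -> str:
        return messages[-1]["content"]  # short-circuit the OpenAI call entirely


def dispatch(plugins, messages: List[Message], model: str) -> Optional[str]:
    # Same shape as the loop added to create_chat_completion above.
    for plugin in plugins:
        if plugin.can_handle_chat_completion(messages, model, 0.0, None):
            message = plugin.handle_chat_completion(messages, model, 0.0, None)
            if message is not None:
                return message
    return None


print(dispatch([EchoPlugin()], [{"role": "user", "content": "hi"}], "echo"))  # -> hi
```

Because the loop takes the first willing plugin, plugin ordering decides precedence when several plugins claim the same call.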
diff --git a/autogpt/plugins.py b/autogpt/plugins.py
new file mode 100644
index 00000000..b536acbd
--- /dev/null
+++ b/autogpt/plugins.py
@@ -0,0 +1,265 @@
+"""Handles loading of plugins."""
+
+import importlib
+import json
+import os
+import zipfile
+from pathlib import Path
+from typing import List, Optional, Tuple
+from urllib.parse import urlparse
+from zipimport import zipimporter
+
+import openapi_python_client
+import requests
+from auto_gpt_plugin_template import AutoGPTPluginTemplate
+from openapi_python_client.cli import Config as OpenAPIConfig
+
+from autogpt.config import Config
+from autogpt.models.base_open_ai_plugin import BaseOpenAIPlugin
+
+
+def inspect_zip_for_module(zip_path: str, debug: bool = False) -> Optional[str]:
+    """
+    Inspect a zipfile for a module.
+
+    Args:
+        zip_path (str): Path to the zipfile.
+        debug (bool, optional): Enable debug logging. Defaults to False.
+
+    Returns:
+        Optional[str]: The name of the module if found, else None.
+    """
+    with zipfile.ZipFile(zip_path, "r") as zfile:
+        for name in zfile.namelist():
+            if name.endswith("__init__.py"):
+                if debug:
+                    print(f"Found module '{name}' in the zipfile at: {zip_path}")
+                return name
+    if debug:
+        print(f"Module '__init__.py' not found in the zipfile @ {zip_path}.")
+    return None
+
+
+def write_dict_to_json_file(data: dict, file_path: str) -> None:
+    """
+    Write a dictionary to a JSON file.
+    Args:
+        data (dict): Dictionary to write.
+        file_path (str): Path to the file.
+    """
+    with open(file_path, "w") as file:
+        json.dump(data, file, indent=4)
+
+
+def fetch_openai_plugins_manifest_and_spec(cfg: Config) -> dict:
+    """
+    Fetch the manifest for a list of OpenAI plugins.
+    Args:
+        cfg (Config): Config instance including plugins config.
+    Returns:
+        dict: per url dictionary of manifest and spec.
+    """
+    # TODO add directory scan
+    manifests = {}
+    for url in cfg.plugins_openai:
+        openai_plugin_client_dir = f"{cfg.plugins_dir}/openai/{urlparse(url).netloc}"
+        create_directory_if_not_exists(openai_plugin_client_dir)
+        if not os.path.exists(f"{openai_plugin_client_dir}/ai-plugin.json"):
+            try:
+                response = requests.get(f"{url}/.well-known/ai-plugin.json")
+                if response.status_code == 200:
+                    manifest = response.json()
+                    if manifest["schema_version"] != "v1":
+                        print(
+                            f"Unsupported manifest version: {manifest['schema_version']} for {url}"
+                        )
+                        continue
+                    if manifest["api"]["type"] != "openapi":
+                        print(
+                            f"Unsupported API type: {manifest['api']['type']} for {url}"
+                        )
+                        continue
+                    write_dict_to_json_file(
+                        manifest, f"{openai_plugin_client_dir}/ai-plugin.json"
+                    )
+                else:
+                    print(f"Failed to fetch manifest for {url}: {response.status_code}")
+            except requests.exceptions.RequestException as e:
+                print(f"Error while requesting manifest from {url}: {e}")
+        else:
+            print(f"Manifest for {url} already exists")
+            manifest = json.load(open(f"{openai_plugin_client_dir}/ai-plugin.json"))
+        if not os.path.exists(f"{openai_plugin_client_dir}/openapi.json"):
+            openapi_spec = openapi_python_client._get_document(
+                url=manifest["api"]["url"], path=None, timeout=5
+            )
+            write_dict_to_json_file(
+                openapi_spec, f"{openai_plugin_client_dir}/openapi.json"
+            )
+        else:
+            print(f"OpenAPI spec for {url} already exists")
+            openapi_spec = json.load(open(f"{openai_plugin_client_dir}/openapi.json"))
+        manifests[url] = {"manifest": manifest, "openapi_spec": openapi_spec}
+    return manifests
+
+
+def create_directory_if_not_exists(directory_path: str) -> bool:
+    """
+    Create a directory if it does not exist.
+    Args:
+        directory_path (str): Path to the directory.
+    Returns:
+        bool: True if the directory was created, else False.
+    """
+    if not os.path.exists(directory_path):
+        try:
+            os.makedirs(directory_path)
+            print(f"Created directory: {directory_path}")
+            return True
+        except OSError as e:
+            print(f"Error creating directory {directory_path}: {e}")
+            return False
+    else:
+        print(f"Directory {directory_path} already exists")
+        return True
+
+
+def initialize_openai_plugins(
+    manifests_specs: dict, cfg: Config, debug: bool = False
+) -> dict:
+    """
+    Initialize OpenAI plugins.
+    Args:
+        manifests_specs (dict): per url dictionary of manifest and spec.
+        cfg (Config): Config instance including plugins config
+        debug (bool, optional): Enable debug logging. Defaults to False.
+    Returns:
+        dict: per url dictionary of manifest, spec and client.
+    """
+    openai_plugins_dir = f"{cfg.plugins_dir}/openai"
+    if create_directory_if_not_exists(openai_plugins_dir):
+        for url, manifest_spec in manifests_specs.items():
+            openai_plugin_client_dir = f"{openai_plugins_dir}/{urlparse(url).hostname}"
+            _meta_option = openapi_python_client.MetaType.SETUP
+            _config = OpenAPIConfig(
+                **{
+                    "project_name_override": "client",
+                    "package_name_override": "client",
+                }
+            )
+            prev_cwd = Path.cwd()
+            os.chdir(openai_plugin_client_dir)
+            if not os.path.exists("client"):
+                client_results = openapi_python_client.create_new_client(
+                    url=manifest_spec["manifest"]["api"]["url"],
+                    path=None,
+                    meta=_meta_option,
+                    config=_config,
+                )
+                if client_results:
+                    print(
+                        f"Error creating OpenAPI client: {client_results[0].header} \n"
+                        f" details: {client_results[0].detail}"
+                    )
+                    continue
+            spec = importlib.util.spec_from_file_location(
+                "client", "client/client/client.py"
+            )
+            module = importlib.util.module_from_spec(spec)
+            spec.loader.exec_module(module)
+            client = module.Client(base_url=url)
+            os.chdir(prev_cwd)
+            manifest_spec["client"] = client
+    return manifests_specs
+
+
+def instantiate_openai_plugin_clients(
+    manifests_specs_clients: dict, cfg: Config, debug: bool = False
+) -> dict:
+    """
+    Instantiates BaseOpenAIPlugin instances for each OpenAI plugin.
+    Args:
+        manifests_specs_clients (dict): per url dictionary of manifest, spec and client.
+        cfg (Config): Config instance including plugins config
+        debug (bool, optional): Enable debug logging. Defaults to False.
+    Returns:
+        plugins (dict): per url dictionary of BaseOpenAIPlugin instances.
+    """
+    plugins = {}
+    for url, manifest_spec_client in manifests_specs_clients.items():
+        plugins[url] = BaseOpenAIPlugin(manifest_spec_client)
+    return plugins
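`scan_plugins` below only trusts a zip that `inspect_zip_for_module` can vouch for, i.e. one whose package directory ships an `__init__.py`. A runnable sketch of that check against a zip built on the fly (`demo_plugin` is an invented name, not part of this patch):

```python
import zipfile

# Build a plugin-shaped zip like the ones dropped into the plugins/ directory.
with zipfile.ZipFile("demo_plugin.zip", "w") as archive:
    archive.writestr("demo_plugin/__init__.py", "class DemoPlugin:\n    pass\n")

# The same membership test inspect_zip_for_module performs before zipimporter runs.
with zipfile.ZipFile("demo_plugin.zip", "r") as archive:
    module = next(
        (name for name in archive.namelist() if name.endswith("__init__.py")), None
    )

print(module)  # -> demo_plugin/__init__.py
```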
+
+
+def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate]:
+    """Scan the plugins directory for plugins and loads them.
+
+    Args:
+        cfg (Config): Config instance including plugins config
+        debug (bool, optional): Enable debug logging. Defaults to False.
+
+    Returns:
+        List[AutoGPTPluginTemplate]: List of loaded plugin instances.
+    """
+    loaded_plugins = []
+    # Generic plugins
+    plugins_path_path = Path(cfg.plugins_dir)
+    for plugin in plugins_path_path.glob("*.zip"):
+        if module := inspect_zip_for_module(str(plugin), debug):
+            plugin = Path(plugin)
+            module = Path(module)
+            if debug:
+                print(f"Plugin: {plugin} Module: {module}")
+            zipped_package = zipimporter(str(plugin))
+            zipped_module = zipped_package.load_module(str(module.parent))
+            for key in dir(zipped_module):
+                if key.startswith("__"):
+                    continue
+                a_module = getattr(zipped_module, key)
+                a_keys = dir(a_module)
+                if (
+                    "_abc_impl" in a_keys
+                    and a_module.__name__ != "AutoGPTPluginTemplate"
+                    and blacklist_whitelist_check(a_module.__name__, cfg)
+                ):
+                    loaded_plugins.append(a_module())
+    # OpenAI plugins
+    if cfg.plugins_openai:
+        manifests_specs = fetch_openai_plugins_manifest_and_spec(cfg)
+        if manifests_specs.keys():
+            manifests_specs_clients = initialize_openai_plugins(
+                manifests_specs, cfg, debug
+            )
+            for url, openai_plugin_meta in manifests_specs_clients.items():
+                if blacklist_whitelist_check(url, cfg):
+                    plugin = BaseOpenAIPlugin(openai_plugin_meta)
+                    loaded_plugins.append(plugin)
+
+    if loaded_plugins:
+        print(f"\nPlugins found: {len(loaded_plugins)}\n" "--------------------")
+    for plugin in loaded_plugins:
+        print(f"{plugin._name}: {plugin._version} - {plugin._description}")
+    return loaded_plugins
+
+
+def blacklist_whitelist_check(plugin_name: str, cfg: Config) -> bool:
+    """Check if the plugin is in the whitelist or blacklist.
+
+    Args:
+        plugin_name (str): Name of the plugin.
+        cfg (Config): Config object.
+
+    Returns:
+        bool: True if the plugin should be loaded, else False.
+    """
+    if plugin_name in cfg.plugins_blacklist:
+        return False
+    if plugin_name in cfg.plugins_whitelist:
+        return True
+    ack = input(
+        f"WARNING: Plugin {plugin_name} found. But not in the"
+        " whitelist... Load? (y/n): "
+    )
+    return ack.lower() == "y"
diff --git a/autogpt/prompts/__init__.py b/autogpt/prompts/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/autogpt/promptgenerator.py b/autogpt/prompts/generator.py
similarity index 78%
rename from autogpt/promptgenerator.py
rename to autogpt/prompts/generator.py
index 0ad7046a..c9a441d8 100644
--- a/autogpt/promptgenerator.py
+++ b/autogpt/prompts/generator.py
@@ -1,8 +1,6 @@
 """ A module for generating custom prompt strings."""
-from __future__ import annotations
-
 import json
-from typing import Any
+from typing import Any, Callable, Dict, List, Optional
 
 
 class PromptGenerator:
@@ -20,6 +18,10 @@ class PromptGenerator:
         self.commands = []
         self.resources = []
         self.performance_evaluation = []
+        self.goals = []
+        self.command_registry = None
+        self.name = "Bob"
+        self.role = "AI"
         self.response_format = {
             "thoughts": {
                 "text": "thought",
@@ -40,7 +42,13 @@ class PromptGenerator:
         """
         self.constraints.append(constraint)
 
-    def add_command(self, command_label: str, command_name: str, args=None) -> None:
+    def add_command(
+        self,
+        command_label: str,
+        command_name: str,
+        args=None,
+        function: Optional[Callable] = None,
+    ) -> None:
         """
         Add a command to the commands list with a label, name, and optional arguments.
 
@@ -49,6 +57,8 @@ class PromptGenerator:
             command_name (str): The name of the command.
             args (dict, optional): A dictionary containing argument names and their
               values. Defaults to None.
+            function (callable, optional): A callable function to be called when
+                the command is executed. Defaults to None.
         """
         if args is None:
             args = {}
 
@@ -59,11 +69,12 @@ class PromptGenerator:
             "label": command_label,
             "name": command_name,
             "args": command_args,
+            "function": function,
         }
 
         self.commands.append(command)
 
-    def _generate_command_string(self, command: dict[str, Any]) -> str:
+    def _generate_command_string(self, command: Dict[str, Any]) -> str:
         """
         Generate a formatted string representation of a command.
 
@@ -96,7 +107,7 @@ class PromptGenerator:
         """
         self.performance_evaluation.append(evaluation)
 
-    def _generate_numbered_list(self, items: list[Any], item_type="list") -> str:
+    def _generate_numbered_list(self, items: List[Any], item_type="list") -> str:
         """
         Generate a numbered list from given items based on the item_type.
 
@@ -109,10 +120,16 @@ class PromptGenerator:
             str: The formatted numbered list.
         """
         if item_type == "command":
-            return "\n".join(
-                f"{i+1}. {self._generate_command_string(item)}"
-                for i, item in enumerate(items)
-            )
+            command_strings = []
+            if self.command_registry:
+                command_strings += [
+                    str(item)
+                    for item in self.command_registry.commands.values()
+                    if item.enabled
+                ]
+            # These are the commands that are added manually, do_nothing and terminate
+            command_strings += [self._generate_command_string(item) for item in items]
+            return "\n".join(f"{i+1}. {item}" for i, item in enumerate(command_strings))
         else:
             return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items))
 
diff --git a/autogpt/prompt.py b/autogpt/prompts/prompt.py
similarity index 50%
rename from autogpt/prompt.py
rename to autogpt/prompts/prompt.py
index 08754605..79de04ea 100644
--- a/autogpt/prompt.py
+++ b/autogpt/prompts/prompt.py
@@ -1,17 +1,16 @@
 from colorama import Fore
 
-from autogpt.config import Config
 from autogpt.config.ai_config import AIConfig
 from autogpt.config.config import Config
 from autogpt.logs import logger
-from autogpt.promptgenerator import PromptGenerator
+from autogpt.prompts.generator import PromptGenerator
 from autogpt.setup import prompt_user
 from autogpt.utils import clean_input
 
 CFG = Config()
 
 
-def get_prompt() -> str:
+def build_default_prompt_generator() -> PromptGenerator:
     """
     This function generates a prompt string that includes various constraints,
         commands, resources, and performance evaluations.
 
     Returns:
         str: The generated prompt string.
     """
-    # Initialize the Config object
-    cfg = Config()
-
     # Initialize the PromptGenerator object
     prompt_generator = PromptGenerator()
 
@@ -39,96 +35,12 @@ def get_prompt() -> str:
     prompt_generator.add_constraint(
         'Exclusively use the commands listed in double quotes e.g. "command name"'
     )
-    prompt_generator.add_constraint(
-        "Use subprocesses for commands that will not terminate within a few minutes"
-    )
 
     # Define the command list
     commands = [
-        ("Google Search", "google", {"input": "<search>"}),
-        (
-            "Browse Website",
-            "browse_website",
-            {"url": "<url>", "question": "<what_you_want_to_find_on_website>"},
-        ),
-        (
-            "Start GPT Agent",
-            "start_agent",
-            {"name": "<name>", "task": "<short_task_desc>", "prompt": "<prompt>"},
-        ),
-        (
-            "Message GPT Agent",
-            "message_agent",
-            {"key": "<key>", "message": "<message>"},
-        ),
-        ("List GPT Agents", "list_agents", {}),
-        ("Delete GPT Agent", "delete_agent", {"key": "<key>"}),
-        (
-            "Clone Repository",
-            "clone_repository",
-            {"repository_url": "<url>", "clone_path": "<directory>"},
-        ),
-        ("Write to file", "write_to_file", {"file": "<file>", "text": "<text>"}),
-        ("Read file", "read_file", {"file": "<file>"}),
-        ("Append to file", "append_to_file", {"file": "<file>", "text": "<text>"}),
-        ("Delete file", "delete_file", {"file": "<file>"}),
-        ("Search Files", "search_files", {"directory": "<directory>"}),
-        ("Analyze Code", "analyze_code", {"code": "<full_code_string>"}),
-        (
-            "Get Improved Code",
-            "improve_code",
-            {"suggestions": "<list_of_suggestions>", "code": "<full_code_string>"},
-        ),
-        (
-            "Write Tests",
-            "write_tests",
-            {"code": "<full_code_string>", "focus": "<list_of_focus_areas>"},
-        ),
-        ("Execute Python File", "execute_python_file", {"file": "<file>"}),
-        ("Generate Image", "generate_image", {"prompt": "<prompt>"}),
-        ("Send Tweet", "send_tweet", {"text": "<text>"}),
-    ]
-
-    # Only add the audio to text command if the model is specified
-    if cfg.huggingface_audio_to_text_model:
-        commands.append(
-            ("Convert Audio to text", "read_audio_from_file", {"file": "<file>"}),
-        )
-
-    # Only add shell command to the prompt if the AI is allowed to execute it
-    if cfg.execute_local_commands:
-        commands.append(
-            (
-                "Execute Shell Command, non-interactive commands only",
-                "execute_shell",
-                {"command_line": "<command_line>"},
-            ),
-        )
-        commands.append(
-            (
-                "Execute Shell Command Popen, non-interactive commands only",
-                "execute_shell_popen",
-                {"command_line": "<command_line>"},
-            ),
-        )
-
-    # Only add the download file command if the AI is allowed to execute it
-    if cfg.allow_downloads:
-        commands.append(
-            (
-                "Downloads a file from the internet, and stores it locally",
-                "download_file",
-                {"url": "<url>", "file": "<file>"},
-            ),
-        )
-
-    # Add these command last.
-    commands.append(
+        ("Do Nothing", "do_nothing", {}),
-    )
-    commands.append(
         ("Task Complete (Shutdown)", "task_complete", {"reason": "<reason>"}),
-    )
+    ]
 
     # Add commands to the PromptGenerator object
     for command_label, command_name, args in commands:
@@ -159,12 +71,11 @@ def get_prompt() -> str:
         "Every command has a cost, so be smart and efficient. Aim to complete tasks in"
         " the least number of steps."
     )
-
-    # Generate the prompt string
-    return prompt_generator.generate_prompt_string()
+    prompt_generator.add_performance_evaluation("Write all code to a file.")
+    return prompt_generator
 
 
-def construct_prompt() -> str:
+def construct_main_ai_config() -> AIConfig:
     """Construct the prompt for the AI to respond to
 
     Returns:
@@ -196,8 +107,4 @@ Continue (y/n): """
     config = prompt_user()
     config.save(CFG.ai_settings_file)
 
-    # Get rid of this global:
-    global ai_name
-    ai_name = config.ai_name
-
-    return config.construct_full_prompt()
+    return config
diff --git a/autogpt/token_counter.py b/autogpt/token_counter.py
index 338fe6be..2d50547b 100644
--- a/autogpt/token_counter.py
+++ b/autogpt/token_counter.py
@@ -1,13 +1,16 @@
 """Functions for counting the number of tokens in a message or string."""
 from __future__ import annotations
 
+from typing import List
+
 import tiktoken
 
 from autogpt.logs import logger
+from autogpt.types.openai import Message
 
 
 def count_message_tokens(
-    messages: list[dict[str, str]], model: str = "gpt-3.5-turbo-0301"
+    messages: List[Message], model: str = "gpt-3.5-turbo-0301"
 ) -> int:
     """
     Returns the number of tokens used by a list of messages.
diff --git a/autogpt/types/openai.py b/autogpt/types/openai.py
new file mode 100644
index 00000000..2af85785
--- /dev/null
+++ b/autogpt/types/openai.py
@@ -0,0 +1,9 @@
+"""Type helpers for working with the OpenAI library"""
+from typing import TypedDict
+
+
+class Message(TypedDict):
+    """OpenAI Message object containing a role and the message content"""
+
+    role: str
+    content: str
diff --git a/autogpt/utils.py b/autogpt/utils.py
index e93d5ac7..dffd0662 100644
--- a/autogpt/utils.py
+++ b/autogpt/utils.py
@@ -3,7 +3,7 @@ import os
 import requests
 import yaml
 from colorama import Fore
-from git import Repo
+from git.repo import Repo
 
 
 def clean_input(prompt: str = ""):
diff --git a/plugin.png b/plugin.png
new file mode 100644
index 0000000000000000000000000000000000000000..865ce3c922d7783efde0ebaa273519eca202d654
GIT binary patch
literal 33356
[33356 bytes of base85-encoded PNG image data omitted]
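For reference, a short usage sketch of the reworked `PromptGenerator` API from `autogpt/prompts/generator.py` above; the constraint, resource, and command strings here are illustrative examples, not Auto-GPT's shipped defaults:

```python
from autogpt.prompts.generator import PromptGenerator

generator = PromptGenerator()
generator.add_constraint("No user assistance")
generator.add_resource("Internet access for searches and information gathering.")
generator.add_performance_evaluation("Review and analyze your actions.")
# Commands can now carry a callable via the new `function` parameter.
generator.add_command(
    "Write to file",
    "write_to_file",
    {"file": "<file>", "text": "<text>"},
    function=lambda file, text: f"wrote {len(text)} chars to {file}",
)
print(generator.generate_prompt_string())
```

With `command_registry` left at `None`, only the manually added commands are numbered into the prompt; once a registry is attached, its enabled commands are listed first.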
zB3ua?UC*}Sy#vpX&A5Nq-6e14_#|3sYf}3Gd^N&pewAvL%%jiZluHvZAfY?z#&uw( z@or{ewZ2)78fy*AE@eRNbplFVs=MkMhV6DH?C7P${-;N4}m4+$!new4C@ypT^<3jr&$=RgI zaP0DwEvnNZ;XvCp1MelNvGq|1WCZ$d?BC&CpFdXE$t|%FY}i~sm&;*YT_1z&qCCBp zHTJy$j*Q?^1-0_>Amnf*h+d7c8DQXI_V!49ktC(ggbOsMQ&A6QQF0iJ@q12f*EwAc zsU+0VWLJCw_-<2&$DTE6=*u@p8pWfY=$gJXYIeG%Q*$<{E4C?EXz^s`>OZG!-(!R${Hr;zepy28lTxioS;}JM zjL-fHFDEDWJ@1l7yU4zGY6@FeQZoKQ?tgTSmSmkkGq14_t8mc~4HdXjjymDDRWa+= zm)E3Qx`9_RdwX+AYFFhw9{@7X+K7x~<=p4r>Qs?sJ=&kfXVB74G!^8JCBR zAEUuV^!tc1<%JG02VcotL*D8UO4q6S%kz!RXQx`a4BqKYHSUtdTihj3kApl3d~V*N z-&~nae#vlPc_z5{*B_qsqdhr|HGNVcL8CgXxkIQF&+k%SD>uNTA zbMw=iCpe3py�Qu_k*VOPd=Te@4YaGyOisrS>;Q>73+o{g2Q!_A}O_Cc-jEj|d#X zZ$b-Q*;o-G)wj-xe_hageEWf2-n6Ije&XhtS zoOhcGqqhjeb3?yeK~`61vxJ3-87#Y}6xz&-f3G_QF%g!FHRagu7^bSDwLzZ8=Vf&rY;qCMq#8R%-t}mMqK5u4EJR4#Zo_Iw za`8|b>|B_(I7cWHeb>1Z*JuW&Cs8-49zJ2U3ho z9%!GLm8GAmUn389nX)41ui_ogizb~&u92?!*XxZU_BhA2>OmagG~S6&<)onVho6e! zI6}d_c{=i#A+h>36@}ka9WhkGXI@Uo7SZh$L!F(_E>*djP~2Q!(Y z2mfITO=C2d?`LJLO9e~_70={MwAU7^RYKd`Vi0HE4fNthktT$4SmA|*KtV@VcewIr zaap6~9k%FKey(nc1(!mvx-q2du(__O18LU+r$>9J2HK`}mZZd-z2p04R&O7D?Fi*& zBY(CMCzTvc411hP z|E0L$qS;5uiVF6i44sFx7Fi7ESH(HfATIV?neM-ahTy76MP#vwoFp9Vf4z!jI*mVb zl8~%ddRIi>=O{3LM(OF{{7lD2efJVMi>piiC`HsW=rC>nqEjUtV@AGmaDr zsk6qbow>dJWnRce7(U~2GWsT_C{>M!mdYkorBr(F6TwcCFtF0KdFw16t!7&^!@s6L z$Sv|^qi-(K2=S-ri=@@quZLggc8@sORU>CYA@N0LrWC+=hjQL0BesN8L|PPcHe86v z1JN7xj5$891U??r%PZu4C;V!Sfg-};BbSZNx3(*v`VRSWsgQ7;vT>vp>DTe8_%DGr zro}|(7vPQGO3P}>>>AqUk6AOA2qt<*d|8p$FUGi7Co`C#pJDnJ3LYQhue<-v27tnMubLw!cPc@3_BsA02oHe{k@-*BgnZ9OH zXsM=TmOp1uK!02lscH13af`qUf3wBYpV6Ap{^qP;RWm4Rp^7Vnq{k-I}rs@;XEzPF&aIN!~jtLEdJxtEGlmEU4kYM-Gi45rf>=c z+|1*hH6=^L5Ew`#)T-`^yxcq&9-scKK$VM)MvFecm5y>RUdz{dhWwi~*T6@Xcr_)X zsh24pSR2o>MYJLoS6i9$pdUKx*Wd7XzZ=fy@seeyN!Wa0iE_zIJLige z(v?gRhN(fo9K|LTkTw=uW63^p(FV$ap8X#m`PzvSHn5@(ohE)-;Na}c{(HgY>|+M? 
zUgW{d)_*OXM3iIM4j80X?{8hfYa*zRp+PKD&h+5cWy*0dSFDwkY#auf>`z5yNB0OSbe75NeZQf6fQ(8|7s)R94UfWz!*DhG%xkB zUHRuvVHjF^29vJekhGTrahDVI4`@s;Gmz%34@FYO1*tZcB_45{LjEk*4T%$L#-Ox!;9>CF`1caTUE++ zHkt?zs1CD)g$lxQBLeuG1BK-xmyXJq-)RC zW;IDRC7yJ2yV8WtoKC4cW<;Zl5yLjkXFPrMrb;840s~gV?et|V(vF8SD#NdF1shH~ zmsBpEPf-Sz)i!{ceBg2BI%3rM1b8lQEojk|>&c>ykBuFZ-L>%Vg}{^@7c>Mo-BnGO zw*S1Q7xU4y(N2BZjnx4Rm~&gv{JGE~tLb zMYOSDz|;}MIU>Yx>NgrCO*!00ZV!@Vadd1FY`FHji>(1Z$E2O1gu*R7U#N+ql9Gqv z?swn|Rzhw}@TioEjcu5po&p~8J>Ktn)uD*9`l2pi1UW!7IuDk}uN~X!W8ro?RV}TO zuD3N#=PAqp^EvT^PEN82bq=4|=;?tV;x5%yzVeAmnyP~d45`+;)h%Tm@}0M9{|cs_ z@1`Q%t!nq3c|P&F=C~YjaF__h`2v>RqM7)wr}`*pieRf}D4R8+_$ple^v@iDTCN8Q z8k#ULuG>qK)D7+h?$?R!4kzCs-Z{}ybR9e*BO`xL=1}p`RaVw6-@#=_?a*B& zgt^BiyxEaleOn$EmE9&r=^;ZMiW|3bUng#j`ioGuI$59AG#laSEA>&LRJK74%p&i!s9h$v@1 z=NAisR6|s(Orq_Iwrl(6FLcUQ#y7PEz^O71nz}U7jt;mCVG*nO>c?yZ+0gb~<#*tn z86G$Mv0iA2ek9@~&a3`{Kin~2?pL!2Pqxd>NS zx?I^wt2jsfR+$}iHrpDs45~t3xaD~3guQ=M&OpWOp%dRKdy*TL&A#FU>F%c{n+fXE8mw#rCy={dNWPEA80Y6LPwB-t^-~X)?GYk9BEn*?zm_Fup+%M31y^1r zi>O6ibon}5i)+>`pNMh)<+w!yIUMoP_SR`%HX3}EX7Sca4rDj5u!x8Fe)iH$u;FH# z`CvQjagy83+dHq89R5X&X3GZ>LJFAbJhnbLq-hH%=q)HixW#MX!`D#@fq+RpRyR7w zRje!9S?PeMj6`lp4+2F3aYZmi`PX4>cs^I<1Y}H21-y%dOG8b?2V|8@t4{S}mPe%L zOv`eHNOt^9l7CH9cq__mz>@rx5LLU?wP(cy01Fe=>}9FL^V7sS>F|(CSD$yPK$y+h zn6U#Th1ffjhTL#=RX)Epdo7&s*z~!{nC~m1$ux9Xl#F;e4}jWrO-h-1rbF`4&!0=Q zge_f1U(;3!SlJ70W{ejFe=~X`JTC<3t~5^-KO`Evcoa;cbKMkSMJTEO2`w99wdUXW zpCJ0Ct#%%D!4GYNKN;aXN>i?iH)wCM@qxkdZO3{|+KvxsqacwRCQN-Gc5mTeLf;<^ zQ(Ie?V*&}`i(2@5VE{dvUue%D5q-N>UMQhsmjXsBvvOSNgWNx6ykx1S1pj{VYL>NJ z4SNa(n+$woh0{Y>D@JasbRE^hAUo=+al5S*SVR3>& z1K>xtZ@Qo-_g?QME>WXr1*0P}o;tpSt4JXEM-#JAIO0EiClzlUx8<97Lk~Cz!5rkf+rmb;-naqn z#BqdgafaM~t@=E~#-{sdF}3*2$;uLgWEb0N2uJWaxgs+7y;v-}`w79O>gohz;B&rQ zvG7kHzhZ`q2}=&hv^{Q01JwsQ)$kn-HbwJf(lxhC-jRzaF&|{O^N71_94Aj%ue2Zs zOz5-f=Fc#DF2i53<;zuXfCpSIoBq3-QO(EA*|HSIX$i;8VTM2neT0II?6QRf`ITQv z4O>jmi#?8hE~TizJw8b;CHC^#fpv1B85K`i)5Zz`Dz#`?~mt+N@80B z_I^C4(TfAKjBKsj)8nQ3$hEenB;dS!AuKX7^EfXCnOvj`2_4jWGlo(p=F!nJC;fDyuy+i8|rxh|y2yH$70 z9~Q2`_lbwB?d6B?*QU$6;pCE+F~51ga~MR{$%Q%}BJlG_IjmFC>2hmo?V4}bpvqb0 z%R21jP5#BKmfgnQ-n%5LVmt5^N1;XG;o}(J!}rz_fD9=Ab5h2Ksk2@Ei+%)57rd_y ztV-{%W9v->jBKYw#>2bPRL<V5ekFR9v>kL*7SaXB*r>p|NfhktDpX z%2pXcq`tu(a${X{bBXMFUtw_Qoan@KF;@TF0RY$eS|@y-bc}Rf*WJ-(v8jALknGbr zTqqvn_LIt4wZ|3S&QpiASeOa2yRUnQ9Jndn`xdh79l}5d5 zxbT?@j10*Qe5u^nmpdw+^wCEVr{ZaG*2oI8TUS&n1nN~EGV*K#_&Qq~Ry$u^(qh#w zh=h_{H^(!f@n@y*;lH`7Ge6Pr1~D7;jaQq=xun>EpAyoJ(=-|UmS)cYYyDCQXP7MD z!=s^*PfJHv=A1#F$TY=G7gDy;RMgekDRgmjQ!*=gz-l&XKc)ZiK^c$C@4B*!3-FHS zYGvC+_zz1k9vBs^FP*}3;E3V}x2+G&JgG!bNo>zrUHnp@zSrEY6fY!~N3E%@AT zWX@`Tx)B@uETh~18yvUeHj+uNqUX(N&nmwccbtJ%>TDvbxA4 z+y!k&C*YonyjydVlePOt^R(*A!iYc-XW%s{K+`>#D+k#z>FFLp0Oz>a6qP=b)ge4? z;euYu}LY6|qhm|vSI05H42KzN9UrY7MOnNP{hTRfp)9%sPZfq3xM zdprI5cxGu=v(Lq9j_dncwVEB^u?^-TY=9YX8Y>)$>gwv&=DTQd;o;!D2SR~a!GepQ zL%>!;lv1-35KSj0Nd)3#-9;#K`((46_R6eRHaBk^jA+IiL!o$13@4{t>%I1mbK+#I zPZuB9+iPv78FOb$7kboo4US^d5RR-Zq8O^)mMl2 zAq;rhd+(&)%3JSM>U04xxHIFn@f3H_J2w%L@oF`)_T^j`!1bnaIh1Iy{hL({<=F$2 zpUni|geKK0>|t3ardPa741hpD((kJ-B~txu}0zs&KGr z?nh8$e?Pj2m}^C&4{?`RK|PJndGrC}UTxHC+VF&Ri7qr^Y6A`dZ5JTwuy@bR0530p z6e<4i<7ey7KHF;3sMWq)8avnj$wk%-JPgeFb^R(P{U1v@4#qRMPPzyyyOZfF>X%am z5{H1D3btnIiIHKh>cN$*Ab-#anBR4qg7={oH2JCG?4p@J8hRzbV-ziQ0 zm4^=4cO)&$yyWK?@$cOrln^o49v0o#Y-*7$$oy6{5;F4Ja0E1Y`KZ2(72f?NCPM#V ziXh-S1(7VdY&14Av9AbsXl8G+1UL`K(cpI1C&Q*$L2U0ud)eKO?t%fjhi}i&;hnFx zLiD+Zm~F0H>>4!JFMSc_*?tA0swkJ2(|u6Y$des4mh}I=Vw3)J1st4h!1DWmk!#Cx zI^WTvT&By>yk>XjEFhtxQnFOR0X#p}V;eI%ngkGv7af{rOaB~?M+1W!o^shYXVu9? 
z>jT7&3dqCRpU6yWRz3k=mRCvZRRHnpcmu@QXkXf{4JHTCPgXH8X%S0}UYr*fvFW3k z4WA}CN=D8@|L*e#6B z7(A^Oetx)(aal-jmFs939liO1ne&YLf#ZGdgy8*;r@v(LkrU?n`(OJOx5f=}0WLzk zoY~v)WI=#dH^shaUlIWb99W#R5%c>5vj7F;$%(gi42NgOUk(2KOj?cLof zSDE*f6U<=*9ASVa1cQTzSLr@sXqaoH-eS(|^8ir;2hLGaVG$DxSv!X{rNyLDp;g&d zXQqs>jtKaVJt8`{_Z*OQQRQDUm7-?@bod)oYYd*ImoYGR~S64Ps zmowcde<`WIr~I`w8>&MXKZ1PGs|&;G`d#V0z>;;@Zeb3*=LO((`)l3+qrSYlA`*oL zV%9ae9{u>)8D&60z+w%Lg0j-&=9W~Z<#y#P929B1e;$oH1(ENQwo{=W7&rG?hpych;ZM1k-@_Ro(Vx}Fzm zk9S*<=vzdgfV+HzdxU1<>sBnd0xeF%#q~;+&s{w-Wzt;r8|;pxo3Wk_&}6_As4yra zDfrF$T(JW_!L+@dYuubG9Ri1Vdj_DlrCPn@d?Gdi{}Lq{o4i>28f;Q5^rV1_ima&p zB!c?5+-~#sF5#|mV^gCqR>K?gxBfPPqzIOn_* zItJS7kEU=wnhlRHR=tcgd78&l>w~4`!$*EUI-DORL)ZqK!ksDrPY@l+yk=N_awGI zb9!1**3PH{M={9)M!%=1C&vK@_D>9mi8K4`0A^t^AE zPGE%92LxJnBqb%KWG>$~fURlV9e#pH*OI5e$(g0^F63vL-lTE!ecgc-iy)R{@c$8@es^g%ZL+<$SM7CcDxAjOp4v=n;X^03)amq}1^as6E!qGcz90xLM}0}uhv-;u=Mw3oBQ z)`sZ=3mO3smU@-|$59^%*VnwMZZ*9zkD2zif`15F38>F}FGG8PYBT+Dk2EwS1lqT@ zW;in#|2P&3HXMvYbl;v3*GR3w^Vey1eT4+SF~~aTLv;=MqdHz8Ix)W^j(<$d`?lvx z`qV05FxD0J!y~PMz@eRaf9bi@|LDNy=Uo__65Mn~76NY6faX}Us##C^9f{m_tOM== z3DvnJ4(=mp4TAQD(52eaC^jQwBY|lsC&y4xQL!PzEdW3w%=Y7gsTa+zCzSmBB&Dk2 zLH!YEnstMJ^YR>ThCkG&*O#J8_tBe8-kfx)TT(LG%EKi}At|UukaCx-GLAlvIcjm6 zX?l-L-~xQ<=U#@<0_df1w#CrMi13(rTFb6g^nrIVHa=dou`q45^#OC5F}T(11`}^K z!{z0f(@Zv#8vGH%4-A8F1Od3S{oQ$X5&o*bYMJ`LtK7wGf*p$As#(r%D=ov$F-zPF zSVHnwi}#|U^_IU^H5s=0BGq`;hm&~`zjBr@O3TQY%tfRF`fB`nuzf1^$}Q;5zr5|S zXmwT;X+!sY1S2r3ODRE^KlEw^U5-vpM^EooH$8Pp;WiN_TE#Kg2+@Eu$^}3b_j>?? z-+#dR=Hc<7>o;1DR`I`>pl%Y6YRz417h&dHOx)EXD_Gvgs~S9=R=t{i%l(S8{$Ei`%&~r> zlMRe-P!zM5F1u^LVl@2uCU(HLzgaimHMqReeo3JCFL-4N#}s?zmQ{L;=mgT8Lbql*Bs{NcokhR)AuRa-HlJ26(bq| zCp*j9sK4K*LaAQwjFNK8=dgGaZtpeH;^xJ zH0wJ8xJcOd5w;V|?LpfX!sew{u5Jq2_nRW<;OFOp=1>5pvO7}UzE6-?zR^L^&;W5c zw=LZ3plE@m$tzr}fKb+XC@CrOBK%mTW@Ev^aM0?Z*#l-})A#GNC zuM2o3%!H`k5GdGMNO^0HTI8Do3|dHzGHMwCd=@S3|6R7x(K`at9dbUP0LP2!y9R;* zV!5#4KgA~ed$p%nUyM9TVB@s5;zAq}TovhhwRS5~@ISJ##a+an=DMWLdOtz)w|8}` zK1%~m8059he0g=LemgRUUMpSrABoHruDY#&cW#>V3@5_o}8=AMj8aDVj-dL28M>*+!U5OOE(H;EG<^@&`eBBaBy~=ste}R zd$5Fp5hfa)%oz3Bj!z96wt*XM`T6+1&#-s*_fAkpMxgE~qiTTTk_=2sYpcan9YbCf zOPO9XbqBDo!UG44b84m~R>O(n;uEpnZ@OdoIPKNhUtitk*VM!n&y_V6BLSSENM2hx zuvb|4X!ge`qf~&S7T2M(rqr=7Nct6}D**`^<>lb!(21;Nyoihp-N`rripvNB{)Z=h zcBRK$-uA5Y6`Z>QRyhhV;|WV0#KZ}6WS6WJ`WKC8}3=Eu*}=ZlK-l z`U}v+8q%o09HS$kk$P%v%)gfeE@6sb@03DzC;z=8mICcNrtGUDS^0^>B@}s>tAh8E zqN3VISDFE|tzBN8J9o5@RP_f4W{`WHoDBd|m;&$wxRKJ(Oq+wZUH$n`sv1RHZI?-1 z@bVr4;N~EQKlJYUJot-Bpqa5b_}sVT{I!kNHRLuNd37`4AH69gG{BirRK>3{pZFWwwL473!6QbZ8V{U^ zQb%OA18CZ|xJ5<{#@P}@@40Mb2VFD<b z?9Sm1^7E2@vd{nf7ZcOGZG7gwVD>(EZf4>X z!1e%Q6f!rb1+bQbCias~Ps^-{c5K{V-}ST^ynK+r%0_TA^-_OR*lexw5Y`J2k>&wY zC|&a#1Iz<@eKkfL5e1NqnSun9*6@`29sdFe_u31r{C&RGPkDkp=fax?-ukmx!P=Wp17u`2W@jOekhIGCj~&#0UM@bT2mNp3+w)Ob`BU7`lb!NCXf*4n_PJ2d{#K>_^P zhXGH&hI{K?z~*EW-wzzk&Fxn!e>j?3OsYDlOHk2q2ncd`T2ukx;kteA4+M{qgc8YL zTWO6>v|nhIBhBr!!5u}ns)gdAzyjqJ6v(HqvhV>GF`7B1Y88C7J4(T5i)l8NM$Tv( zb^iky4f$LC%*Eps(^t=70*Ltqu+ZqlTFHZGa3B$2mZI)P`(YW&fN} zzt5jrn*Q{^s9~3Qggh9P*Y&u)jD**wE`6t8z~xLAFiv25{BHQ>08qOU)g6Q+PPdFE z@Ci3P+`R1>KtLzC=?1Co0rLcMIR56F&3`F3JJSXJh)Sp#0v&2)tdl*xWL zK7e@gh>JP^N8Hrv=_G={K?cBDq%{|x2RU?VdwP5APZnz}>rdxjlT!FSuZ=HqdMt-} z^3aGRWOdanWY|i4ydYO@>+P_;kz{&DE=fQ&2hY|KvqExpx3=hvd`P+EFC!h4NTj{{ z+N16Ri1%f=-^G;NUPD}}mR-5qg(4%pS_%C6VEo~VeFLy`HX+sC+$yf!BxIlu0J}`x zf7WjPg^G%*$6(b$DPOf1iu#d_vU_)EtZjMr zs0LuWn`f=BDHW+se867m7(1I?;bPPsGvW zI%_3BLr5Kuh|K&hFOLa&y2A$ODPU$LE>o5&CKY8S+`M25Zh!4&;3iW3_f#psVVaI* z^x*4xE0`oLWNOuvHA11#;?mErE@r*?Czv`y75 
zr;#l;djh}Mu<`*LVezre_#noUA)wgF?$_I2@pb4WIgnL$)0HLK(aJKxWyK3U*HJ-$B!9>8hUo_y(BLv3t-GjRk7vT}le2dxk(Qx$*g8|2#wG!Z>!>v9vmQ;>Ovwd0$Z7NaPx8)@on7VmjHu%B8Xq8EKZ*Rd4BOTo?WG*2lCFSsPh!Pl%4zFv`!>li_^6rw|Y~hjCf0Yr9 zHtXajvOy{7fnh!=K|vV%;>0|)p2!jEGVScsd;m2#A1uE0XVhuN1aW7m6sxM;-xsfH z?NqcfAld+=k%obNX1ARVSSnyCDdrCA9|3;5wfb;MVSBzVwT3ae+6VaiV&JCUtLz16 z7LZmHF8(>qOh8g>di>J3hz?LTfQZI4l_faJrSi<|Tpk{J7yf$kB9`}R=ByDuK%{Sl z6_lQBwgXYGl8nUGkH{}sW z+40H08Vh?3%*7*-BxCNIpMmIDz=q1}eB2GdOpOP~UcHtGA~`vE8o$>~T>=&F1J!(vosgaWh5P+6$LYi4eP;lUA&V*43?(we-b(WW zSzhv1asG>%<8ePL1!`^P(EVN#AXADrLPA1~R>NG3dZQ*&zqR%r9eKhK5}y| z|MT-Rr4OX%1n>W9>@B0}h?;gm5+GQD1-D?q-JRe8f^%>Q5Zv7%fk1E)NN^|E!QI^w z-0k4*65Q|Redn7sv+i1V<_8Nnr+fGA+O?~zo_eZ!R7|aazh_DG7dok6xmC9y4|h-A zrm+0GkHl^BNUI>nRIuhE@Fp#ZH{E&5@6*Cu=@*)(LXeQSUF91;uk!+D<>GmX~Y1O{wZv;Yg5QPj{554Z8ZkwyFXI9g!) zHfSKxztLZBbI%eK8hc3f@L*{(S4IY;kS&8Qc=mpB$0xz#()a7v;PR8!zdZ3N^~@xZ zJ=i`jB2W2`(LdmEqpCIIVn+>71!TC_M^Ut83dGg$QPqFYN$Z^lJj59TMn(7*@S>!G zq^c}oaT#t0bzTQajbR{i%))MI@k{uC>gjn6f}2P{`VklN6Nx>nk4Adq7ct*opHfms zYYT{DD6d{UrEW?^9^)k(0*E`yL5S2u%xeK+DqC1}c9$|`^?MLLA_xfy9iN?b4G)t< z#IsH>`R477gJAyT1gY@R-+PPHQ0mvk$^dj(*pYOOBCnw#XLMvZ%8!xTTI+amg}diNJ!QRk0K!{$mw*`3~nQxg-) zJ~u8hni7}@VwhYkDG&7l0fONC^XJc>DMOSYlO&Ww|7;&7vUNKCPEJqn zi4UNFBRjf7VLRheRa`Yta2mPZK!`5nX{7!CIbV_l5e(CgjwJ(riIquosr;Y5&hC63 z9Ze`m_F1^m-4b?#5t8MeE@E@-q$UH|3uyazynaJJK2ysYHE>oHRWiiiK6@n5%IbBj z42O!Qc(Rc@%$5k<79|fud0qN!`Ge`(bpM7QV`JEPt&D(G*`rvOL<*}B_@K>cI_IJq z8!Id1cBYq_Mvk1Cf}Z|MVtc;oJSe9*DEIL*-BU<=d(;y7S6%AUCqwa$4mbig)0cbG z_px9&ud^_6YU-b3V=-?0CFIEQc$85ch*H3(;-A${s*aB_PMd86W5~&cakDpRF3d%c6TLmA=m z_|@FpHVX|w17}>s(}{~Yxiaa}>6%K>23UHYrcB-4c{`#X$>Ej`=EYXavw3=7@WnWM z2XZrF^Wd<(VMajwX0fA`o6Bbd&t=`8wDo5N0Xv#BN2V0{#mkV@R^3`I9VP1pxVyW% ztGn??m%T*^`Q+qS=P7IVg>$v5^L5!05-e+H^eJC3zq4K3_NiiBxC9|jRJ+4No@p_| z*4xAMPVgDuulOy87L7uOzN($GeZoxAVOH|bX~I<7vz!6jVu9CA)vMs1i~Si zno;C$ulWEmQ~o>IhI%Fp${eHn@Edj7+<&UE0sc&5Z+h2nKl)00>VgBLYU+udc36`bVkm&erT+ zNJ~9M)S=m>rEi|}atM;$!_MaQUE)EJF)Ao1=tF`>WMmK~$iW_e@BicAgFLJ25rmk> zMj%AWG!PK*eAPb+egI=3CdRnI^+?RpGE^ZidM=5`=l-6)lrGQO1JDDzW^2?u%}#}= znIb|Auu|U3QF95s3~YbRn$6qGL`ir#j=^L1ZttX@v5%gPZuyfHD%_&9y{aO$CB}Sg z0sYyYeT|#^zs0J;+rSuTEZ6M3ZDO!6H3A!tlvH*n} zFVQ>_@qMqLX!lP57~n(xI?=QzMP~5AoSdZbW*+fVG=b-7p%*hoZ!f`ZiKX>@Ef#(;8yT2j` z1s8h+9BT$|+FYD3mmNANoxR}H=>Ap~EBnnn$W{ShSH`@z&!0UYO>3UM(S>WV$2$$2{ z9g*t5O+k?fHzyxSj!fF?zI52uxh$)GMm|-nFJUR$xnyf83t;h<{-WEhfBio?eXOU+ zP)PZP%A2xMmY0>iv?QN4{mhSSlEsEvpL$tm8IYL6p4|$gdGqih@R>UXMj!J51gn|S z!QK49Mo6Y_8^(|~2R_Tool+uXm@B%hAHja$W z91?zsts68-vLesjh}Sh-Vi=-cl;$Wl?Mx&G_ zezu&siPS1(ks6upT=R;E?8>u#bBJ?RJMm>BhxF^zE)yBsC!rV$vJZVwqPm}s)tQ@5 zAwfk&O=@cS^$S-LgQ#fopfOdjs-~iH0m)j&y4eKCQDEKX)(eeyNuYCOaet-;Vto|& z0!@^gKM_2hM+J)c8n?XNj#hi$Tyl8+!=d@mB)#{2En-c!*G53z^waNIm$-_fqDefE zeCbXoU#mCYe3K;P4pu4RjO%HY4qJgrDarMr(0sUi<-Tx>boxTd@q_c~*YT#XQBg@L z$TLI;ggKcx8BQG+q9Rs(s7Z*)9kO3kbRiOb8aBCba~{V^I!DRsQj{uHM&QxCmuG%QwYBp~t*|e4W8%rq}F!z62d{55L zsTjQ2;AEny0ju%7tccy3-Q>8X;5u?gP(b^#>{&)T34hASCgD>q*|;(X=#=vug!jDR4v?L+V~&4b|O+`n0&;P|5dI8uDAE?;#g`xZFOv9PTU zsT0%Fx7fCq$P#}&`D=CgJEN8OV1xP8It#*D7C(RtjIh^*6-RU(1bAIv!-=eWCGRoI z)vm7TZdS-ef*wiC1Y~MhWJaCrJ6pu{#lb7~2Jk2nQWXh|U2kAfm|Nr*d z{y&_$PG)j)OgIQwj=^zN?UuoGFZF&Ljq~&K2Fv0nA9H23>)>pMQb!;?`@cS9mB4Y` zsD;h27w{;N#q%;JSG81BM$^upJVzEMK*1DjKpm6@M4u;Dm6YS@Gy(Ns0-?I`rEj6q ztZ)~p1*`X~9IHd>&*XYFlrV>+XI$sKFWH_y!|A>YS9lCFCB~@ECwOv6;c!`{j8xuZ zRzCN!y*P8Z+%z+^PwQtjc2z;4qx>#zPMbRDZ>WR{Cy{V7^J*aFXYZW7Q?h;lknc)N zkI*OUg!Z$SaX3BR6}($WZY`j$ucQzRM)Mt-W`sMcX3rw>dRm0iNiC$B9xCis)u_?@#->rUq>(0HC}guxw~ zp9!l_Mg=%nX0DGYAgVO_{m%AD)o?#+2&VfpYe(Eku;fzs@#lUHRR#p$zc-S-b}9CbXZm 
zFrlJ!ZI5|zd)MY`q4Cns(Yg3Jh2H*yyFY44oH5kEicgkTVW)mP!{?#pN>u|Hybh^w zPwX!xB_%f7ghTR1Vz$^lc@@sH8aigWE#xk4PPG-+a1m)diSNky=_5s^7o^gqIYG;# zc}$?nE5=fg75}>?PkJU~(fFxWB+ZHGQ!Qyc;&6FKM@L+1mAYlRn@#5n>4&sLPABb} z6N#x8!$zBHN<7qbv%@;Y5{m5JXxYjU4-{7N<{Je6H6y3)W&z|}#1IIIi|fj-7tWRYwVNXww!%}HB#hloIA6n&PUR1(7Y&&9h{Q9j_lmorUcYi? z`I~+ie$Jw!3k$E`N)Na;GWC2B%7lu~__W^a8BmXUx+Wa5E>b@cTZY|t8-!#+i9IN4 z)ie=dlRD~p%1n=NDl#isQctVlpbjyu(!`H*eU+Q%z4!Rr8uCli`JV?jZ@Pd3jx>Jw zZU4V=zQ(y7CWK5*hmgV2q(HN!7054&G_s~q=ASKOhqesXQV;Y7fBM>g?-of7T20jQ z>IkT0gs`CE3qRchg%Jwg!}~JP)4=CCBo7KoUas8E{(~QtJQ1fB18eY%J9$yvI_P%k z^yXdU9QDc^o-K-q2;H7>lIr7%Z42cy_zKN?X6w>P2qnK^nkRe%xICYvd2-IrdBj7q zmb2~aLGoq6Bkg{v#Bvm)=qMDv-d9ki0|X(bqs zRz^mX$v%ir8Ay2*my){>K>3q)Qr;Qj1tS<+1S(aQ)x_0X6524*ciJH8NRD(b>Zo&R zLJ#UV#p<}W*&Lg07ro!heV~5W^00*spPZoY#KgpCRXhEvA>*VBZaiwy?NX`ZQigcd zHI^4yvt>6oR2Aea5$x?yNNEue6^W1fzMM>b?=pwqWGXvkAIUslFO=C^Lt6G{9nx|B66OIb}{K~9c= zwdI)d*|P^GpMS&a8yhEoBrsG|RCbhm0Nc#tUxC#6#-`IAb1mP*Us!K)9=@=Yp5DCn z-uRfaR+9%i@6w#Z2MC0ok$Iyx-OqSo&i}`Ehq#aqg^>x*RKo%Io1^1XOXm+0F%1G~ z1AokXM=foNq!chy-5Z{jp~!5x;fc+b`%B0>oVrsJ7c}tPU z{vpQDk3YCDnNd0$zn!8Tkla@G)+9h%7C1(Y;*YmOq#sfRiuH$zg#k(JsmKR%|4c37Jz=(@-0JzQvn6+avdrp!9qOa%r?AiqRzTPQxSwSAyE-5IZC z747Yw)x-A|dE9qSK0huiaXC4&I(HmKEci>$WY9zampvNZ+Im4mWo%M^e*qhw^|g)m z9^o^yc7#U%(Ci3=kvtsC7TIWQ;cqjE_g+Yq;~}>5r}jBD*-!bBD9MdYMT4m>72WFS zDXOb;1%7*(*;EvvB=XQD4aZ|Q^XCgZ(#h#5t=lIiwLC?WVGPmZ+1z36ht1@!qj2-3 zFZVvjM;M`@q0a^|7IXZy`xf8$uY=pac(|ehg(ys);~r00c}|9s+-KcCik{rAYPb0C zOJ^4+NJ>f`Z}{_^4%P}1`ky1f85lkYAfc0fIyoJNKVIHP3Js4o0e_!f?AC@Z;SbMy zo<8XuhR_up04utpND6FgIUaUT_U}(*&`AXT_}ADtS_wyz3dZG&jRLj(HJ448zr8I_JBrDXzdDL~bXpz}b31rQ>rMA) zT6qTzD_!kK)|b#)5Ux__ATU(N$K?VyH4T}`^5-u? z$II|=cb=n^CCNU*y|FPK8?3Cc@$vl=mb@H$E!wZHz0ZEPw4}K&dB(4fVM|95{f!W` zVP34N;!;&pJq8%007Ftp2(p)qbQT@z{Y^6S=AXqM6O)s%2?-Xl?rS~Kv&W+^Kt#>w zu}nTJbjoDte~$|qVCWO@*wF1w`vV@R41gGjIqlL{92S8`+XZOB14TtnY<5<<{lP*H1j7o{Mj5AtZ=XFn_Ank{5Y|#Vsmrn+%i{zX!z#M zVX7xg0Vo7O3Ob=YXHUFSdnTE=a9sAAhUwWNHN__n=NDvE~3LQYBf91dSo zvnP^t__DEOt5Qi6QOamef!Fbrxi$mJpy};>qHsiZ7oF}NUYhz#2eOf~uV?pOt|l~m z*Z96JLG%?1+tiY?x8Qs0xo0 zV@j6Bh^57AT|jK)|CQdm^mxIGjg8Ib!p>$sL?RtQ(C*J;|7=;^kIbIMKM+t#I~s$X z$H8>8OZ!QpVjbwdsAPlg(x83CH*`=r;v_^L3IazkSiMjMZ(g{fGv5kZERDj%zbBca z;8|BT%ynt%)>z!`T3KK3j}L2ug0j1dA;F#TJPeSCacW}Ge_1zEZP?s=V+#T_gp8Wp zU~cP0Dq&`Qn4yZw--`oSBG^PwXw0v~!4mx?Yg>>y-8dar5}p0YKc;FRpI=zGk$wKS zx!}HpnBjd%_L79}vr*?a?n1R_Vc*hi*t)O0yggrU%09`%*&iwo?^`zGui6p%diqxf zsqW8@hju5ElBXWX=Gjb9;j^><4h$^Uo2S2JW{&iK^cFrU@r(}XrzGI^s&Y#KU7$|J zV&1}exSp&gE$MY;i1>z}pc!kt>kQYi=F@<5!xvbR6mVFYye58ew%5Znhp~->B_-w{ zp7&bDqM1(TZ(m<-Qxkw0L`z861N4{t%qvCN_!0nCg^G$+Wotg9ZaJk6*&wvCwpPIj z8u`xF)a=Vf)U4HbXvnisw~L^a-r7=AGTJ~Y;*+qticNWYk(qzR#XUK>ztoyeoTp%N zmcWjHi0H^i@dcc}A>`oxnnm~DKB|j= z80l<(G<&wU3UIZD#>3NUX3Y+Y=di7;cK`JEPfsc2GT=zHPgY-xj+W?mKB0^8@$!?2 zP?{rHaI^?zmL}_y(h%D>%Yt&Begkg>`SD_1P4F1M@|$J>=6ziVO8m=t8Fe-vTG}iIUxEfE6?ddo& z3^8@&zJ;1-d-AhW87C;52AgVy5@Jb!J}0r3QDnm6|3Y6!kcvh%)~ki2{SO!5s3F`} z^wDOV!hEJI^XJbvr{gv;&|L@4;nHCmfr^fg0JI3}>+kzJ(D%EkDI#Xoc+qo<6sj;e z=LkvwkYByBR=HnWUH$WSXa!UlQqj-^&(8~89KnWv(yN7#;Y2nz3d+j%>Z+SL&sLiA z5^5)&9Zjc;ia3Z43!QTvm3qEm)BRqI)my2esTmH1^03Hv|1COWVF{zj2hRtPW15(p zR`JZu((Xk;LjyqS4uR0C{e{YJv8`z~|5|c#e{+~$kO0%ZnfZ;(?C$HyK+3hC$o;w+ zTt%gGenE-Zd|kOv?G=BT;a$`CT5g`XGDKg&LM!3owtT9!PhCO7?1e<|OKH$Z>3F20 zHB$5h%_zM{rojGAYS(7wtAAx!`uMT^|HyR>=~1V5;c_wCRmXzGbYr7PXY_35{pVN2 zJR|n+{K205bT9Mf$5lW3^T0p?A&>8#k1}t{0sDMVc&6H#H_dC)^5SrzGg8g23kFMW zZoE9+p-p!e?eDC5gC4l+?Bs-ujJ*6qW>Za5xBqa# z+hcIm?6&psRy56H6{RGiK4wJ|J+G!FYW+1WVD?y)Zy-CYA7c%vCoElHkP1mkNxh|~ zjR}p2SnmRU2fD=7k^f_VLb(Ycmjx#8k`rg)l3yNVdZU0ApWXYABr7|+uF2ir7Snfr 
z*s<&W?DfiuKIy-MnU>T>z(2C4HIz{`R;}!Igw@zaH0?6jr3eJU%;u#H>{#gO3(gM4 zdujAdHwZz~j707|jJk&UA3uJy@78Ksm&lpFBoj(#4@6Sa)s+E) zPXchlE}=S9L`ZcFM4RUee@S*$fYm}N2e}pIthV-n4%VADZ)$w*aRuEEC%TS<-m-Ey zu1eh%XA_eCL#AE0&V7Q~kG6b0vnt<>Qvu)&iqzqd^4L3qt}IOw+1+XSR*W z!o<|xVUBp^Fo<-trlB^zn%k=)tE3daEOeI~9+^YeL5FPH%S_%jpR5QOWM_b! z2GkTm&-3x*D_U9%%=K$nHKRb5OEHD(M`#1Z%T3!@I4qeJaj_f_@stK5agM^!=Ex&p z@^YTk%&|7|M?0Ik;glf(BX~bEjtw^5^-^IIOy9L6M;l%#@ZIW-mM$zn0~}UN9t7)W zr<3D51CO1@7;Ed*0hZ#UfnKPDo24LGbpGBI`$3^dYUTO6d2saKOzh>=4#t)C59jPP z1;4MtPWKYL(GZ=(*oEnME9~_P6#-OM3ch1M|fai5)y$bQGM>Ena ziY#y_>eHd@Ahh{^02~oTD@z6gedSu=V3wEy@BnQXp;Q-A)*>KkOL^DI^XsR9{$SAr z*99c4eN?CFo<}#FyuAZ)<&J!n4tcP{hFO|WNNZO3zaYlJ!QJz;VW*dmxslpGpWO$H zFn~TyE=N(tkGfa@EUU~~MHRx{AHK_go1e>2c+$U^xW7@V`d7S-D}+NGt)bwC2D8hS z)f<0Of#D;KYw&Kp>MDzLkvoN7*)kl&W{m@lm9q5p9f!X$4GAW9;REws#;Ifz!QAf6 z`VRNfwrmd7+WHA5U{%D|6`1D2UXjj?!~Uum7?MpslL=)IURcnY;!YSf(4SMEnp#yQ zQ>gvw@9gMUg-810EU-=mF>?(}72#E7wa2$~jNxyHHT-ul&cxX!cW4>bWx>K{KM(1zzl8*={1FR+}~ap94am;(on12O5XCKPuUh)5ft zV9<n)}9Ve6M)7npIMPI6LT&Q;~okj!8jITpd@gkKY|0TYF#CBB_qcawQUB z;IY8pAg%<60Z0UaT{nC!LLc10S3BF6cn>;uBL0Jat-0=!5zz3zqG3FZA`UrmQqjU8 z`cnkA#DnYF)Ymm{I6kmsYXi01iqVPu4*WcJB=Wr>SXrK1%^yz=<; z@@SiZi`cOc6`y|dx9GzkMOoWPYURCoh7sXD(~5^_fBuwN`C zNr1S5Ex{zS(q!Zs>*nfgYNtHT{-iwRTudQ`h1Q--)ENp{T3WjO^Hncve*HAU6V(5d zOj9IPrpWmgxg#eSs9TzHbt&RtgsYbfF%0(^Pb$mi=oypq6|e7i2lH|jQ!9EgOeU~g zTZn~|d?THyOErPpsgeILj82NzW64b8l`kyoA{|g+=l~U_zIO~o1_Cj7i6OG}?Y5>W z!n4)vyT0z;yw1;$D*@ywh+x~MnEa3~Ip^V+n*pz*$jfR;+p2WZN;MpLU*-Tq39((s zrwjw&`tnpx__a|Ebd%S6vMGbP&`KC;I-u?A+dPG9h?x`wHBQw%nJvq3c1r*cmd%GG zyC|pen2#%~;D_}XM2!xXP3f{_iNVbJ^xoy%}O)Iq6kfv872|2H>Yb+xxx zRsivK`ll6_ti;SGUFT@or-OhcEryvgx?(rEGj6|nf{1o}a^l$F4=N>r54kHeEoXsI z#H!W8f*Ku!z>`{sOHv43{W8H7rJm>#mkA+xl3E@0^}_94l?9GShA@0Sw@Tatvo+bH zp$3_q0Xr#6R5YvJdsSn~6VYCHHp?+d&>Zv$a=y7d8s6u3-kvKK92P$PfHbXcfeY;m zY=8E(fm>v`HD{uL!F~h3j7tzHc6VM!5;QqvM(xOv*`Qag=mXt;Bs{2vy_C!z8Xr4w zyjiW2el&M>kM_cnuhQ4FtDW9{tf78pdk=gVE0`oXD@hT{Rf%26fMEaU$2g}B^}%p$ z{s~H8V#F-IHXS7hoYS8=b%~4IuSQEEO0z#;lx;RwnjbkhR&53E*~5M8?0NMRes8=u zy*b66)0~{Ut8t|w-CI@wNzAqWGdDVH@*GGyN4hv#8iv0V z9XZ_49lkzQ5;Zwl?N+?x&n{|8$&oi$jnlO=yE zM3x~g4dHRgzJ`C$757(y_eaH8n`~06QL+2<4rGIe-ZTFGJ>_SDbJ=v`zNr-2)eE1` z{Fc)Xo^}3|V`&tn+i~@G=+({DaeIU7T#b3U-wVSxhGxTw3pS2bG2g$x^YS7xFpvgb zZ~eubH6ae^puD_+@#W#df&RT>{}=dM$J4Urh5K6!at@9dP%wHt8OJ>D)IX?rs#9t- zOx7F{7G-ugU;pWM9`8+n5orU~#>$Yb-w-SY))!EcX~WF8m0vI?w3HS5?4DeL3qcI7 zgiub&@UtpvH2m4esRsY@V+O;%DF6OVL%9<|5AN3`H&p(&=Fiw$z4J~(*8&jHKEFU4 zlQTA^)KqF}=-$Yo$+OV-3BsWzL+?ne^747V+k^O@e0FnZh{-&@nZ?5m_IOW7H7N19 zGl7hgrkzb6r&~{t`M(nl?tN)5NkUxIv(*tWhx59cejX>tU-E33_pjxIg`GM+VHO2) zKw=q+&Eg$ySk|()HZ?`U`)d#O=!0iHLVXLG*Bz{Tb=SGwpPnDny?Hxex?T2nAn$dU ziHY6hMMhRu3;=|`K7)VZ;Q7g(k`nZ# zQC*-xTtI+Y)A^>Z&%^8%`uPs&Z1T0g*UyB6|KvSDTrqYcY2bZ+)YaR23KKl~1R5TG z4f?jSyDMjDNe>btU#38qh^WMNkehsbex3tJpFh`sn(2R=nbA6|TSJFhTU~E2J<@Q$ zt)87OYshT~ZH}XO2knzI!SC_*=5Cyy8d(}|i(R2;`ff;Pf{!Rx*3J0!52HA9GeqfR z!O(uIebS8ZlN^pP+>6D|sCDmsjvMR_#8nMjrLgeu0?_=8;N{n#pp#yt!>wTA-0yK0 zjBko`WgZ{k33%<8FhuV#hlTE67SPj25OM}@=Ua>^KFA&Yd5|CwUR|yQCzo zt4k6{C9E)+n4O*7=k9Os+1_-tO189Puf6{sc@%&r(C}H-`x3B7_yqOw>)+&F>+}*o8CFHVfzNxI9J@bqm>!o@J|sM%v*s8Ks|mu%Z(Uae z9+i14u(}b-%jW!7ot(v%7;mtNiREQw*-P&aj*^mH0YuK>b!O2MMJ(Tui69+G)@_P~ z^^#No)W7~Y`Ar=V5C9ssZo z$lAGSKiu&do5R9+Y_ian_T@q^XvDymnAG8Ioiv_FXxDO%S8SsAq-eHR`Jw0v4DMH+ z<4en))o$|9MDMZA&`uf$q zwf@5+cQrjd@lQ)4IJoJ#xeZD8x0lXqRhA&_7Yq zx(U#INJv=d&u~8#^PrD8NMrzdQ2WAQWLw+2r`s_bsEY)xl1977Ujx<^`1%<#I!fl8 zy6u`=aZ!=Yzxm!MQji`P2L#;NG9!t~g#u7Om?Ct|7}|Q*0Xob{7#O6*s&8fzgUGGp z`>6@H&d$bR4_c;}P@3lF_kh50;d8lu>cj8B&3O}PeApmbaggSIefULX-mRMs&#&g! 
ziFcH$#lIYx0SBRrms@dpZ8_PDwo2f|WR+V3H0<=D{zX>W!xNRD{Y6jJ&N#aV(M+Q` z#*QV(5~u@SSno`^Nwv$D_V$UXiSPcqI3Vr6{%H#no8$u+p}yX8f2sCObxG8=*l1m2 zW9(0trfyeRrQr-{L()hY8?a$;m3w1=iN>JD4YCv-+nL|C=cudodCgO9eVw5+diOW4L1(}_Q@Co4#41~-WGlgrMNdU5mV?P|_xpY-qO`|(mnk>D0B&kkN>gK z8N3F>5G{Jw2K~iYke^SzqeW@b8@;9!Ei9+*g7d@o#$ggt4idYSx3@>n_*}Nh0frvJ zMg;O%(NDl=9x5sUA6powIfE>pOMdtH-1$Q7Jm)Fd&E?+kDQi>PWuqIGAV*w_=Gj|z z`XB2*KMB2j0ac*+FiAtB&l!3s+yMG!AhrGIlpzqXA*fw+V@ykX0SJ_)Gsi}eWcDBh z4-mhUiZKqMS1+Dwa}F4p={K_d{U=tP%Zw+pFnRIGP|+=1`G@F=ir-GED_B8rX%!|@ zib`mzp-BTtME~}7RY0OO*h#BL2&w}E3YChT=bv#v&GvXFO!c_)={8O317Y3!T8dbF z|64>>9@P(dPK7-Y1kc(=+9miB96}PXs6wKn?bQ+kkuc1Tmw~2C{fJNQ7k#`IIoj!< zYC0Q}7xLOv=}|~%1Lc|3WZCx2{_@tEf=*Y+V7t6i;d430#KryfJLNqM@DD)$xmv6D z?*ZWUzr$2rk9VB*&DVJJ^nX7zcwuMvMr;B^FE%CRgTW&GbNE4axh3s;9wVd74-ID6 zmqRJC($X((*VYmp&L`1(iTpMkN|J>L-FbOIpP72Uu;M}i&;Y>R-4C|v%w}@UUdRwG z9-za$ELz-<6bqqIRiN?XhQyjdX(m4zl#bNr5NFMEN%mkNi1FdmNk3vjl7GR=N54>4 zpUhf?S|#eXHyE0}n;@^KteDGrzI3!8;WIycXZhd*{G`r({z~pA1QQo94?Y233a}=G zU5=&>np`HZbUtrfvV$0*IR4T>prjmXu<`q^WYS+rXhBje&1vOE z>U9b&FLl#!M7=G6x&qHFHSGW5+?|dvhUd&+4$6tCFbzLPv~9)Pk&}X%=eCcj+dNw1 z3(PzDod42H;PGu3teJH-g8@gp49?!rP@~vliRXQ0G1mv#^Souas3fRtF~<)BTfDa4 z46g|xG#rJbz0y`4dG|lGYIgl^r`{f5NY&c#qJ66`a`u3Nx&>92I=)@1+VxM##iO&% zn=?h>)ibi?Ug|3Ktep7OEo9X#X<6>pehDl*i)#lH_PmeGb z)PNB(5R)BzUhoM;&6ulsCeWaAFrUstnLk54N!nh*ZqWKhMOv)xH(zbb6it`P3sp3Q zY`RT$(DeBgY}42+8w$-<+P@LM9!6+s)A?Q;ysD zqS__KK)1@!LDlPY3n(Nh=dZ?Tv&r3=F63kLFm01Zt zr>UG%RF%LYUFxjO`owai%nBb3-`xc-;q6I8hDK@R#D3ZuTXi|qsOHbwkaXc>!2z!U z-rWt3H;;GL;(j{AxUIhut)#?GJ+)x0UOlynw?_J4=!ba2=T-7`uRu-5T^-2ZzjD3H z8$qS%Zw~Da$6nBbiCg4D0~(VBTm1ZhEBxoLTT=&1>hzHm#T}W=#`W*s~ zE80ucnl3en3glD_g4YN%3aZXVJx>Amj8`yp3E9$f5&bg4NDh zyA4hr3j`N-Z8&Ujzpql0_2*)OB894ZJ7W?Nh*eH zeQL`xASl+!q-*#csTElh#HBy&JQK5JhAXIWnu1z+zFt%qhhhg=khOjuYVsv6-0EN? 
zTRR4wHg~&=pb?D*j8`S9$b?Ot-R<_z31(jT+IsZgfIFsi*+KkRLIXGd*K9%^wkyR= z!@b@`48VtkmGL`_kx1hgeRA%Y4TrlWhlAP?Lbl9nY^psal!?4bcbBO5mndz9MpolB zidwdU{a<-b1V>k6se$dnC!>{C$un#K2Q#lS_H@&I$R6GDq3W>kyGVWwJ6ljM+Rdya zO1)6EZAsKpN6iK#(z028636kV*^u)Ywr3SmY`FTj*^2?c#mFIrZ?x^D0#dKC1T`V< zb79+=U`=IK2D06!T)f;14!@MXmS$QCYq1=p5sn%gNs$ng#nHNhPp(UfJmN9ZVt+d3o)aCVOt^PGc6ALbA;fm^GqvWN4|1!=HY)jD5%{*()0khB^wxEojT=TA(JgXAB`jvpn#D>~W=^&MI`MI*JxuVp2 ze*1Xx6$ze8@0iga-8i^#>#FJ`?GuC&T4v>B@U)S+N^=H0dlmJT+|F$J%=hZaZ@+JH z;3OYAOc}cbS3vKP=xXrHv$v$ zq#3O`1Bzel&Jh-S+p>6chFOWSbP+;3fu;qF&|5?$k52_|e;3=5J|J`?B($U@#_uuH zjoexk7^~(Na4+St6Vth*$3uf9zLk~mObXszBsnWIMnwgN#>r*a-F7nccA~-prk=YS zfIQ-4Wb^4jZ^cYEC=QvrAw?nd_uEvnAeeT@MR1GIqY0uRi*+ lMi+*@qeB(3HS2fd&&hUnqbl#H0d5&iR!UK_MEqmG{{rW%L>K@7 literal 0 HcmV?d00001 diff --git a/plugins/__PUT_PLUGIN_ZIPS_HERE__ b/plugins/__PUT_PLUGIN_ZIPS_HERE__ new file mode 100644 index 00000000..e69de29b diff --git a/requirements.txt b/requirements.txt index 66c90c79..2052a9ac 100644 --- a/requirements.txt +++ b/requirements.txt @@ -31,6 +31,7 @@ pre-commit black isort gitpython==3.1.31 +auto-gpt-plugin-template # Items below this point will not be included in the Docker Image @@ -42,3 +43,7 @@ pytest-benchmark pytest-cov pytest-integration pytest-mock + + +# OpenAI and Generic plugins import +openapi-python-client==0.13.4 diff --git a/scripts/__init__.py b/scripts/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/mocks/__init__.py b/tests/mocks/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/mocks/mock_commands.py b/tests/mocks/mock_commands.py new file mode 100644 index 00000000..d64284bc --- /dev/null +++ b/tests/mocks/mock_commands.py @@ -0,0 +1,6 @@ +from autogpt.commands.command import command + + +@command("function_based", "Function-based test command") +def function_based(arg1: int, arg2: str) -> str: + return f"{arg1} - {arg2}" diff --git a/tests/test_commands.py b/tests/test_commands.py new file mode 100644 index 00000000..8a7771f6 --- /dev/null +++ b/tests/test_commands.py @@ -0,0 +1,177 @@ +import os +import shutil +import sys +from pathlib import Path + +import pytest + +from autogpt.commands.command import Command, CommandRegistry + + +class TestCommand: + @staticmethod + def example_function(arg1: int, arg2: str) -> str: + return f"{arg1} - {arg2}" + + def test_command_creation(self): + cmd = Command( + name="example", description="Example command", method=self.example_function + ) + + assert cmd.name == "example" + assert cmd.description == "Example command" + assert cmd.method == self.example_function + assert cmd.signature == "(arg1: int, arg2: str) -> str" + + def test_command_call(self): + cmd = Command( + name="example", description="Example command", method=self.example_function + ) + + result = cmd(arg1=1, arg2="test") + assert result == "1 - test" + + def test_command_call_with_invalid_arguments(self): + cmd = Command( + name="example", description="Example command", method=self.example_function + ) + + with pytest.raises(TypeError): + cmd(arg1="invalid", does_not_exist="test") + + def test_command_default_signature(self): + cmd = Command( + name="example", description="Example command", method=self.example_function + ) + + assert cmd.signature == "(arg1: int, arg2: str) -> str" + + def test_command_custom_signature(self): + custom_signature = "custom_arg1: int, custom_arg2: str" + cmd = Command( + name="example", + description="Example command", + method=self.example_function, + signature=custom_signature, + ) + + assert 
cmd.signature == custom_signature + + +class TestCommandRegistry: + @staticmethod + def example_function(arg1: int, arg2: str) -> str: + return f"{arg1} - {arg2}" + + def test_register_command(self): + """Test that a command can be registered to the registry.""" + registry = CommandRegistry() + cmd = Command( + name="example", description="Example command", method=self.example_function + ) + + registry.register(cmd) + + assert cmd.name in registry.commands + assert registry.commands[cmd.name] == cmd + + def test_unregister_command(self): + """Test that a command can be unregistered from the registry.""" + registry = CommandRegistry() + cmd = Command( + name="example", description="Example command", method=self.example_function + ) + + registry.register(cmd) + registry.unregister(cmd.name) + + assert cmd.name not in registry.commands + + def test_get_command(self): + """Test that a command can be retrieved from the registry.""" + registry = CommandRegistry() + cmd = Command( + name="example", description="Example command", method=self.example_function + ) + + registry.register(cmd) + retrieved_cmd = registry.get_command(cmd.name) + + assert retrieved_cmd == cmd + + def test_get_nonexistent_command(self): + """Test that attempting to get a nonexistent command raises a KeyError.""" + registry = CommandRegistry() + + with pytest.raises(KeyError): + registry.get_command("nonexistent_command") + + def test_call_command(self): + """Test that a command can be called through the registry.""" + registry = CommandRegistry() + cmd = Command( + name="example", description="Example command", method=self.example_function + ) + + registry.register(cmd) + result = registry.call("example", arg1=1, arg2="test") + + assert result == "1 - test" + + def test_call_nonexistent_command(self): + """Test that attempting to call a nonexistent command raises a KeyError.""" + registry = CommandRegistry() + + with pytest.raises(KeyError): + registry.call("nonexistent_command", arg1=1, arg2="test") + + def test_get_command_prompt(self): + """Test that the command prompt is correctly formatted.""" + registry = CommandRegistry() + cmd = Command( + name="example", description="Example command", method=self.example_function + ) + + registry.register(cmd) + command_prompt = registry.command_prompt() + + assert f"(arg1: int, arg2: str)" in command_prompt + + def test_import_mock_commands_module(self): + """Test that the registry can import a module with mock command plugins.""" + registry = CommandRegistry() + mock_commands_module = "tests.mocks.mock_commands" + + registry.import_commands(mock_commands_module) + + assert "function_based" in registry.commands + assert registry.commands["function_based"].name == "function_based" + assert ( + registry.commands["function_based"].description + == "Function-based test command" + ) + + def test_import_temp_command_file_module(self, tmp_path): + """Test that the registry can import a command plugins module from a temp file.""" + registry = CommandRegistry() + + # Create a temp command file + src = Path(os.getcwd()) / "tests/mocks/mock_commands.py" + temp_commands_file = tmp_path / "mock_commands.py" + shutil.copyfile(src, temp_commands_file) + + # Add the temp directory to sys.path to make the module importable + sys.path.append(str(tmp_path)) + + temp_commands_module = "mock_commands" + registry.import_commands(temp_commands_module) + + # Remove the temp directory from sys.path + sys.path.remove(str(tmp_path)) + + assert "function_based" in registry.commands + assert 
registry.commands["function_based"].name == "function_based" + assert ( + registry.commands["function_based"].description + == "Function-based test command" + ) diff --git a/tests/test_prompt_generator.py b/tests/test_prompt_generator.py index 6a0bfd6c..1fa1754d 100644 --- a/tests/test_prompt_generator.py +++ b/tests/test_prompt_generator.py @@ -1,6 +1,6 @@ from unittest import TestCase -from autogpt.promptgenerator import PromptGenerator +from autogpt.prompts.generator import PromptGenerator class TestPromptGenerator(TestCase): @@ -38,6 +38,7 @@ class TestPromptGenerator(TestCase): "label": command_label, "name": command_name, "args": args, + "function": None, } self.assertIn(command, self.generator.commands) diff --git a/tests/unit/data/test_plugins/Auto-GPT-Plugin-Test-master.zip b/tests/unit/data/test_plugins/Auto-GPT-Plugin-Test-master.zip new file mode 100644 index 0000000000000000000000000000000000000000..00bc1f4f58dc1c8e07c7ca3adce6be605fe6a3ce GIT binary patch literal 14927 zcmbW8WmuibvbJ$|4esvl?iSnv1eb-oJHdi`un^qcg1ZOT1b27$k2%?UvWMAca<1>? zY8L#ttKRNj)m7b3E69L?!2td7Z0N_T{m&o&`h^Pw4J72`XhSEVphBl$>0}D9rc*I? zaHO-+cW^YeXHZoE2Lk3j;5SnE7Xs>k5FrF3$Ns=TK&GHTKoq}0&>Pw~8{6xf8rvKG zNo!FOvRGn-Yq?j4a@SD5%jJROuM>uyHx91Sy4T0ml7fpwGs@q;KBk};-qCA`*hJBE z_m+1*%icCcifI>$n`;6i zg1PIoFWbx@fhkijv#z@vPGE|9mN9iU{zeh!h8$Hi^@%!?MTRRG*0mj2mr-OCgeq1% z5Lnmq9f&Xkub=54QvnwlbF$CekK&JuMlOTpM_3k!{*a`zWPIUJ4V*A(H#iUmc2bt%nCfe@Yf?5G-F^yX3EZ2?CocC0y zxw>`S4MY7&Ca7Xd!bvz(VR!;qdxUbjf`fU@K>X_>%{4Kt4a{?ZL@v6F^4!foS`P(9U6 z@JN!(>}~$m`Ziu$M?@%S<(D3Ai6z||hJTOe&nYq2nWl^RG9~_KRKFWfVF#%W_6Y>!nSn3;E!2H|H&uOEXG0b59Vj}IOk^Ih#iKV`U zG3TEKqNO(9e=DGrlVoDPG3K5(=>2=Qlu>_ zLPJ&oNi$0fU=1*zzmG>Cpq9=tMI;-7_bqOC ztHt3c_>8vMeG3wYfj979eK#pO6a4lv-4P*yfXIL6yD7jCU}|k+Z~UkGR;u&X3+zZ< zv+78;ejq_)z*fpRhsr2C1!1IYDLD*cm0t>CL>YSYrOapdX0PFDwQN4})AC)Xj)a#_ z?XxLcxgyLcmcQ#En*aW!vor5D*)i4a=NZ= zX1kPlK(X6ZKibvNj9xF-s*W*<#Ir{QL|Qqeh2R=>1T~Ib%4%M7Tg=IEE8$4*-C4TQ z*;(vEio@(H&meS}ePgsP+IuWUytX;!_l^z1pSh0{1Iy>n2frvf6Ai5cP~|A}nXv?A zHmU+@HH<$%hFE{y8`ILv*%={EKxne|Rh9?h-d@eh+ZMMJ-IB>qp zT12zQp#$l~o-8B?mQGz!nP)KeqVAYr=+YZ6^P3A)NxNYM5fF*DB^YKeBHXetenKkS zYa)N#w9K<%=)a3e+Nq6WwDCKBR##6M^%YW)GKgN^c7GYwsm~UP8vjy=-}oFJ zzTQgO5ohA9AWMN1K@ONEnq|K>Y}%n-#2%I8dmCDe$aPH5P@l7l64{OTJqM_SM8f_S zF1BKmmZHfCA1pR`Y>VEf{gO!BYmV4n(03@02ZRFhq@Z6f#KMw9D0Q!ogj!s0hvm(s z7<)Cpm{?Ako!2-DJ}xX}t+YN}!h4J&Q<&R&(gwRUCC6`E4JP*#^`iDuD}Rb~EJGz6 zXNaUXlB@9YBUo78=UsgeA85sM!iD_a1drnKq(|Z*WZIdI?uxt7gJ@dY>_Xse6KKQT zvTTH-deQbF+0s3QJ*jE-5C}Sn@-o<8F|kh zeO-1Qt}I5pgZyjvaAmI+%e{0>z)S38`CZ(!wKt|Sw6U@RIKF)RvA)o|>04R;8AN+x zM$JDkBK017ee}bx%O!0L#HYSk2&T;KHc|_?B^jWkIq2eO44dwuYhi=LGif~@8fDVk zH?)InC(NnxO(}>FOH@tSE8Ccur@f<>gabu&FN*_>&1U$re9kwzioX=&YYs*NWk@Xs z9=vj>i&||IqHKBS#ljR3?576)ZI)hAd5OCgzJFrg{9U}IwARYjLxR9^qr?m)4tczA zPdF5MRRH@N25jG{kKmr|kxVp^a<^Bp+dMZ5{gjZxTLGQX@tGYJnK8tXpN{luav~af zVpcGPa7)I^LW^TMtBN&pE^uhMxe+lvSl+`pSO9b137&6r>z_^-3e=tEtT1X1McArR z6d3^^i&|14S!*~C4;L~p6YV1>9I3f(l&`)x=Aa&rU*_*BI{zr>3l|tZQ|rEAdy0*r zB~#k5?a{?1N~L)!tRHSmd`5ic`0Fr%9-&c)z6?=31P~DQ?}o|2#>w8$*xv20qoboT z{BeO3sr6bJ?VCP(w6nZB6j3W3v|kH&tc(Umc4D!W)l{s6Y6?E8{XECq{CoT5{2`JY zX?yu=xHma>ijSU`3FRx>QP0nQquC_UHj!K%@oURww<#-eAsCZz>_II!_RI$czAfi- zC2Hy?WF{5lg5+R*?Z^-t=f#Gyx;k&#!B>!1%}nG7O;?fxzenmxW_=~<&hci=bn-AI zT3ynhmqr;aw@#T_YWg4+k;B9g=0#;^;BRPO@|hjA4>ac>Mmkr(=i#2f6A6)|1(!T3 zJOvyTBJ3R}4akpA8zQuqSL0it5M^>WiHs!YqK76VM{3S`T}o4Usa@zR!CJH4qrKYRqNR3%DkUzHMd!k#JT!v@rX2uU)!vx(Fz|wD|_FnMAF8;6xH8xe*Chf?}Ohe#A|8n>gUT2n%Q8 zI(}D+^iadl$hG|yic)0HS6o~IHJYBbZ14}x;A@7?_?P6PM*ZI%8W2(8?PDZ+5PY7% 
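The tests above pin down the `Command`/`CommandRegistry` surface that the new `PromptGenerator` consumes via `command_registry`; the module itself (`autogpt/commands/command.py`) lands elsewhere in this patch series. A usage sketch with an invented `greet` command:

```python
from autogpt.commands.command import Command, CommandRegistry


def greet(name: str) -> str:
    return f"Hello, {name}!"


registry = CommandRegistry()
registry.register(Command(name="greet", description="Greet someone", method=greet))

print(registry.call("greet", name="plugin author"))  # -> Hello, plugin author!
print(registry.command_prompt())  # the numbered command list fed into the prompt
```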
zGj7~`vdd^Px$C#8{MxgMLdlBzyO4*hb%s?n=T9KlVR+Eirc77xZG?8sAqX^T)h^Cg zcY_6xP<+C3KD5?dH0nRqdJs6(_ z_A^3v(k!3i9ce|R?Gz4KkVQdR&V5*TjD2BqRcd$+^BOrZL3B;GI(;|SwL_FMgwAbY z6=Nzti5gP0pWDdwB$TqMvG+PKgb6RCw~&0H3k zM{g2U62Xzc*;4K(BFH5vN0h;hirDx@jhGR2^@^7Ew3xicx_{`{rlGe|B@ zl6C1ywj!;0cY;F7M?+OQ%QD3IRn!yM(9Nt-_;@b2&4O`Otd!PxkWuMsg;`^;!qI~1 z^$R%rP1A5Awd(5fp3=|tVQ6*Z4?iFY%^gYZ-l=yi^`b4mQC@G;H05xHqem@NE@)t9 zax7XeP$a^e?0=u0is#^FfAN@}ir+OOPuLFx7eLl)u%6MYn#jm3ldNoJ-%PooWac>| zWao`4W9s7&cqyP#E^lWp-BqtX>{WO}zI@5}Pc6+!o7>vMf7EK+jX@ ztg`YxHMI511$09JZd=t_)llBwJj2*+a-GEXE7xLo_4qwTYCMIdqwgeWIC(NX;v<=t zOfO|9(!AFWiw+4a_|R6@*g@|_p-%j!)+VXJ{QGMp;FW}jznhMZg+mPB5{~W8UjS+M zx$JpR%!Nb^BS1pgzHwoGT0Y$hH{KTbcR>9avY;5ml0;sXCh?c{J9&YFqnoAiUlNHS z+7Sk7#W7_CWywJ)*%7s|X}MC#5h^*UTGffkkK?lwe;n2qQppU{6pw?(krnokHHsDX z|1+cb>CUGXIJWmcjsd@9@c;X@j~5SL9;BZi|JO?(|3?06y?AvE^xH~r@#_xe)8AKahxKT4B-r2p3|3cvV2y?=ch;TQi8&DB5hU)@c3O@4h> z;1`+wrJ(vp^51U`yoSHl8~=jKzg$iDNBG~>$FJe9)w;jn)=5>1L z{a?jLuko*?L%;AHzrp`aNc0;0de7h&nuPqHJ^x>J5MCR2T`>MK5KQ^c240nqzra5O z&FeDo7Z`@>x8VPX-@mv&1I+8f?iV-uH{AcV%zJI(b!GO;1T*zNyZ+znv|s$c7aD)m zD?gt<77b6Df9AicUj7=||Hb*Ak3Z^{pU)rIuKxp8|9`J${%z)8>(i@x<_|MJR}gI4 z-+TYs%zvwHe)ZQ+i?7p>UlxZMe`E1)$;oT>>)hfOTbt?s!v4!l1%J``0|8;Y{NTO> z_RALlB@iiG2dl9$8!M-=zVZJ9CNEMY literal 0 HcmV?d00001 diff --git a/tests/unit/models/test_base_open_api_plugin.py b/tests/unit/models/test_base_open_api_plugin.py new file mode 100644 index 00000000..950a3266 --- /dev/null +++ b/tests/unit/models/test_base_open_api_plugin.py @@ -0,0 +1,79 @@ +from typing import Any, Dict, List, Optional, Tuple + +import pytest + +from autogpt.models.base_open_ai_plugin import ( + BaseOpenAIPlugin, + Message, + PromptGenerator, +) + + +class DummyPlugin(BaseOpenAIPlugin): + pass + + +@pytest.fixture +def dummy_plugin(): + manifests_specs_clients = { + "manifest": { + "name_for_model": "Dummy", + "schema_version": "1.0", + "description_for_model": "A dummy plugin for testing purposes", + }, + "client": None, + "openapi_spec": None, + } + return DummyPlugin(manifests_specs_clients) + + +def test_dummy_plugin_inheritance(dummy_plugin): + assert isinstance(dummy_plugin, BaseOpenAIPlugin) + + +def test_dummy_plugin_name(dummy_plugin): + assert dummy_plugin._name == "Dummy" + + +def test_dummy_plugin_version(dummy_plugin): + assert dummy_plugin._version == "1.0" + + +def test_dummy_plugin_description(dummy_plugin): + assert dummy_plugin._description == "A dummy plugin for testing purposes" + + +def test_dummy_plugin_default_methods(dummy_plugin): + assert not dummy_plugin.can_handle_on_response() + assert not dummy_plugin.can_handle_post_prompt() + assert not dummy_plugin.can_handle_on_planning() + assert not dummy_plugin.can_handle_post_planning() + assert not dummy_plugin.can_handle_pre_instruction() + assert not dummy_plugin.can_handle_on_instruction() + assert not dummy_plugin.can_handle_post_instruction() + assert not dummy_plugin.can_handle_pre_command() + assert not dummy_plugin.can_handle_post_command() + assert not dummy_plugin.can_handle_chat_completion(None, None, None, None) + + assert dummy_plugin.on_response("hello") == "hello" + assert dummy_plugin.post_prompt(None) is None + assert dummy_plugin.on_planning(None, None) is None + assert dummy_plugin.post_planning("world") == "world" + pre_instruction = dummy_plugin.pre_instruction( + [{"role": "system", "content": "Beep, bop, boop"}] + ) + assert isinstance(pre_instruction, list) + assert len(pre_instruction) == 1 + assert pre_instruction[0]["role"] == "system" + assert pre_instruction[0]["content"] == "Beep, 
bop, boop" + assert dummy_plugin.on_instruction(None) is None + assert dummy_plugin.post_instruction("I'm a robot") == "I'm a robot" + pre_command = dummy_plugin.pre_command("evolve", {"continuously": True}) + assert isinstance(pre_command, tuple) + assert len(pre_command) == 2 + assert pre_command[0] == "evolve" + assert pre_command[1]["continuously"] == True + post_command = dummy_plugin.post_command("evolve", "upgraded successfully!") + assert isinstance(post_command, str) + assert post_command == "upgraded successfully!" + assert dummy_plugin.handle_chat_completion(None, None, None, None) is None diff --git a/tests/unit/test_browse_scrape_text.py b/tests/unit/test_browse_scrape_text.py index fea5ebfc..1a36e19b 100644 --- a/tests/unit/test_browse_scrape_text.py +++ b/tests/unit/test_browse_scrape_text.py @@ -9,16 +9,20 @@ Code Analysis Objective: The objective of the "scrape_text" function is to scrape the text content from -a given URL and return it as a string, after removing any unwanted HTML tags and scripts. +a given URL and return it as a string, after removing any unwanted HTML tags and + scripts. Inputs: - url: a string representing the URL of the webpage to be scraped. Flow: -1. Send a GET request to the given URL using the requests library and the user agent header from the config file. +1. Send a GET request to the given URL using the requests library and the user agent + header from the config file. 2. Check if the response contains an HTTP error. If it does, return an error message. -3. Use BeautifulSoup to parse the HTML content of the response and extract all script and style tags. -4. Get the text content of the remaining HTML using the get_text() method of BeautifulSoup. +3. Use BeautifulSoup to parse the HTML content of the response and extract all script + and style tags. +4. Get the text content of the remaining HTML using the get_text() method of + BeautifulSoup. 5. Split the text into lines and then into chunks, removing any extra whitespace. 6. Join the chunks into a single string with newline characters between them. 7. Return the cleaned text. @@ -27,9 +31,12 @@ Outputs: - A string representing the cleaned text content of the webpage. Additional aspects: -- The function uses the requests library and BeautifulSoup to handle the HTTP request and HTML parsing, respectively. -- The function removes script and style tags from the HTML to avoid including unwanted content in the text output. -- The function uses a generator expression to split the text into lines and chunks, which can improve performance for large amounts of text. +- The function uses the requests library and BeautifulSoup to handle the HTTP request + and HTML parsing, respectively. +- The function removes script and style tags from the HTML to avoid including unwanted + content in the text output. +- The function uses a generator expression to split the text into lines and chunks, + which can improve performance for large amounts of text. """ @@ -40,26 +47,33 @@ class TestScrapeText: expected_text = "This is some sample text" mock_response = mocker.Mock() mock_response.status_code = 200 - mock_response.text = f"
<html><body><div><p>{expected_text}</p></div></body></html>"
+        mock_response.text = (
+            "<html><body><div><p>"
+            f"{expected_text}</p></div></body></html>
" + ) mocker.patch("requests.Session.get", return_value=mock_response) - # Call the function with a valid URL and assert that it returns the expected text + # Call the function with a valid URL and assert that it returns the + # expected text url = "http://www.example.com" assert scrape_text(url) == expected_text - # Tests that the function returns an error message when an invalid or unreachable url is provided. + # Tests that the function returns an error message when an invalid or unreachable + # url is provided. def test_invalid_url(self, mocker): # Mock the requests.get() method to raise an exception mocker.patch( "requests.Session.get", side_effect=requests.exceptions.RequestException ) - # Call the function with an invalid URL and assert that it returns an error message + # Call the function with an invalid URL and assert that it returns an error + # message url = "http://www.invalidurl.com" error_message = scrape_text(url) assert "Error:" in error_message - # Tests that the function returns an empty string when the html page contains no text to be scraped. + # Tests that the function returns an empty string when the html page contains no + # text to be scraped. def test_no_text(self, mocker): # Mock the requests.get() method to return a response with no text mock_response = mocker.Mock() @@ -71,7 +85,8 @@ class TestScrapeText: url = "http://www.example.com" assert scrape_text(url) == "" - # Tests that the function returns an error message when the response status code is an http error (>=400). + # Tests that the function returns an error message when the response status code is + # an http error (>=400). def test_http_error(self, mocker): # Mock the requests.get() method to return a response with a 404 status code mocker.patch("requests.Session.get", return_value=mocker.Mock(status_code=404)) diff --git a/tests/unit/test_plugins.py b/tests/unit/test_plugins.py new file mode 100644 index 00000000..739e69bb --- /dev/null +++ b/tests/unit/test_plugins.py @@ -0,0 +1,112 @@ +import pytest + +from autogpt.config import Config +from autogpt.plugins import ( + blacklist_whitelist_check, + inspect_zip_for_module, + scan_plugins, +) + +PLUGINS_TEST_DIR = "tests/unit/data/test_plugins" +PLUGIN_TEST_ZIP_FILE = "Auto-GPT-Plugin-Test-master.zip" +PLUGIN_TEST_INIT_PY = "Auto-GPT-Plugin-Test-master/src/auto_gpt_vicuna/__init__.py" +PLUGIN_TEST_OPENAI = "https://weathergpt.vercel.app/" + + +def test_inspect_zip_for_module(): + result = inspect_zip_for_module(str(f"{PLUGINS_TEST_DIR}/{PLUGIN_TEST_ZIP_FILE}")) + assert result == PLUGIN_TEST_INIT_PY + + +@pytest.fixture +def mock_config_blacklist_whitelist_check(): + class MockConfig: + plugins_blacklist = ["BadPlugin"] + plugins_whitelist = ["GoodPlugin"] + + return MockConfig() + + +def test_blacklist_whitelist_check_blacklist( + mock_config_blacklist_whitelist_check, monkeypatch +): + monkeypatch.setattr("builtins.input", lambda _: "y") + assert not blacklist_whitelist_check( + "BadPlugin", mock_config_blacklist_whitelist_check + ) + + +def test_blacklist_whitelist_check_whitelist( + mock_config_blacklist_whitelist_check, monkeypatch +): + monkeypatch.setattr("builtins.input", lambda _: "y") + assert blacklist_whitelist_check( + "GoodPlugin", mock_config_blacklist_whitelist_check + ) + + +def test_blacklist_whitelist_check_user_input_yes( + mock_config_blacklist_whitelist_check, monkeypatch +): + monkeypatch.setattr("builtins.input", lambda _: "y") + assert blacklist_whitelist_check( + "UnknownPlugin", mock_config_blacklist_whitelist_check + ) + + 
+def test_blacklist_whitelist_check_user_input_no( + mock_config_blacklist_whitelist_check, monkeypatch +): + monkeypatch.setattr("builtins.input", lambda _: "n") + assert not blacklist_whitelist_check( + "UnknownPlugin", mock_config_blacklist_whitelist_check + ) + + +def test_blacklist_whitelist_check_user_input_invalid( + mock_config_blacklist_whitelist_check, monkeypatch +): + monkeypatch.setattr("builtins.input", lambda _: "invalid") + assert not blacklist_whitelist_check( + "UnknownPlugin", mock_config_blacklist_whitelist_check + ) + + +@pytest.fixture +def config_with_plugins(): + cfg = Config() + cfg.plugins_dir = PLUGINS_TEST_DIR + cfg.plugins_openai = ["https://weathergpt.vercel.app/"] + return cfg + + +@pytest.fixture +def mock_config_openai_plugin(): + class MockConfig: + plugins_dir = PLUGINS_TEST_DIR + plugins_openai = [PLUGIN_TEST_OPENAI] + plugins_blacklist = ["AutoGPTPVicuna"] + plugins_whitelist = [PLUGIN_TEST_OPENAI] + + return MockConfig() + + +def test_scan_plugins_openai(mock_config_openai_plugin): + result = scan_plugins(mock_config_openai_plugin, debug=True) + assert len(result) == 1 + + +@pytest.fixture +def mock_config_generic_plugin(): + class MockConfig: + plugins_dir = PLUGINS_TEST_DIR + plugins_openai = [] + plugins_blacklist = [] + plugins_whitelist = ["AutoGPTPVicuna"] + + return MockConfig() + + +def test_scan_plugins_generic(mock_config_generic_plugin): + result = scan_plugins(mock_config_generic_plugin, debug=True) + assert len(result) == 1 From e4129e1a3ad6cbb11c3e2f538ad1f63b11dc561e Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Tue, 2 May 2023 13:35:23 +0200 Subject: [PATCH 220/222] Fix CI for `stable` --- .github/workflows/ci.yml | 2 +- .github/workflows/docker-ci.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ede81c2c..95a04168 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -4,7 +4,7 @@ on: push: branches: [ master ] pull_request: - branches: [ master ] + branches: [ master, stable ] concurrency: group: ${{ format('ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }} diff --git a/.github/workflows/docker-ci.yml b/.github/workflows/docker-ci.yml index c933202f..28576d02 100644 --- a/.github/workflows/docker-ci.yml +++ b/.github/workflows/docker-ci.yml @@ -4,7 +4,7 @@ on: push: branches: [ master ] pull_request: - branches: [ master ] + branches: [ master, stable ] concurrency: group: ${{ format('docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }} From 725abbb662cd62aa2602e813fd0e2f8c3fe4fd75 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Tue, 2 May 2023 15:44:25 +0200 Subject: [PATCH 221/222] Fix bulletin --- BULLETIN.md | 27 ++++++-- autogpt/main.py | 22 ++++-- autogpt/utils.py | 42 ++++++++++-- tests/test_utils.py | 160 +++++++++++++++++--------------------------- 4 files changed, 136 insertions(+), 115 deletions(-) diff --git a/BULLETIN.md b/BULLETIN.md index 6c2b5c0a..5190b448 100644 --- a/BULLETIN.md +++ b/BULLETIN.md @@ -1,9 +1,24 @@ -Welcome to Auto-GPT! We'll keep you informed of the latest news and features by printing messages here. -If you don't wish to see this message, you can run Auto-GPT with the --skip-news flag +# Website and Documentation Site 📰📖 +Check out *https://agpt.co*, the official news & updates site for Auto-GPT! 
+The documentation also has a place here, at *https://docs.agpt.co* -# INCLUDED COMMAND 'send_tweet' IS DEPRICATED, AND WILL BE REMOVED IN THE NEXT STABLE RELEASE -Base Twitter functionality (and more) is now covered by plugins: https://github.com/Significant-Gravitas/Auto-GPT-Plugins +# 🚀 v0.3.0 Release 🚀 +Over a week and 275 pull requests have passed since v0.2.2, and we are happy to announce +the release of v0.3.0! *From now on, we will be focusing on major improvements* rather +than bugfixes, as we feel stability has reached a reasonable level. Most remaining +issues relate to limitations in prompt generation and the memory system, which will be +the focus of our efforts for the next release. -## Changes to Docker configuration -The workdir has been changed from /home/appuser to /app. Be sure to update any volume mounts accordingly. +Highlights and notable changes in this release: +## Plugin support 🔌 +Auto-GPT now has support for plugins! With plugins, you can extend Auto-GPT's abilities, +adding support for third-party services and more. +See https://github.com/Significant-Gravitas/Auto-GPT-Plugins for instructions and available plugins. + +## Changes to Docker configuration 🐋 +The workdir has been changed from */home/appuser* to */app*. +Be sure to update any volume mounts accordingly! + +# ⚠️ Command `send_tweet` is DEPRECATED, and will be removed in v0.4.0 ⚠️ +Twitter functionality (and more) is now covered by plugins, see [Plugin support 🔌] diff --git a/autogpt/main.py b/autogpt/main.py index fca8b47d..355e1085 100644 --- a/autogpt/main.py +++ b/autogpt/main.py @@ -3,7 +3,7 @@ import logging import sys from pathlib import Path -from colorama import Fore +from colorama import Fore, Style from autogpt.agent.agent import Agent from autogpt.commands.command import CommandRegistry @@ -13,7 +13,11 @@ from autogpt.logs import logger from autogpt.memory import get_memory from autogpt.plugins import scan_plugins from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT, construct_main_ai_config -from autogpt.utils import get_current_git_branch, get_latest_bulletin +from autogpt.utils import ( + get_current_git_branch, + get_latest_bulletin, + markdown_to_ansi_style, +) from autogpt.workspace import Workspace from scripts.install_plugin_deps import install_plugin_dependencies @@ -57,9 +61,19 @@ def run_auto_gpt( ) if not cfg.skip_news: - motd = get_latest_bulletin() + motd, is_new_motd = get_latest_bulletin() if motd: - logger.typewriter_log("NEWS: ", Fore.GREEN, motd) + motd = markdown_to_ansi_style(motd) + for motd_line in motd.split("\n"): + logger.info(motd_line, "NEWS:", Fore.GREEN) + if is_new_motd and not cfg.chat_messages_enabled: + input( + Fore.MAGENTA + + Style.BRIGHT + + "NEWS: Bulletin was updated! Press Enter to continue..." 
+                + Style.RESET_ALL
+            )
+
     git_branch = get_current_git_branch()
     if git_branch and git_branch != "stable":
         logger.typewriter_log(
diff --git a/autogpt/utils.py b/autogpt/utils.py
index 601254d7..112a1508 100644
--- a/autogpt/utils.py
+++ b/autogpt/utils.py
@@ -1,8 +1,9 @@
 import os
+import re
 
 import requests
 import yaml
-from colorama import Fore
+from colorama import Fore, Style
 from git.repo import Repo
 
 from autogpt.logs import logger
@@ -107,15 +108,46 @@ def get_current_git_branch() -> str:
         return ""
 
 
-def get_latest_bulletin() -> str:
+def get_latest_bulletin() -> tuple[str, bool]:
     exists = os.path.exists("CURRENT_BULLETIN.md")
     current_bulletin = ""
     if exists:
         current_bulletin = open("CURRENT_BULLETIN.md", "r", encoding="utf-8").read()
     new_bulletin = get_bulletin_from_web()
-    is_new_news = new_bulletin != current_bulletin
+    is_new_news = new_bulletin != "" and new_bulletin != current_bulletin
+
+    news_header = Fore.YELLOW + "Welcome to Auto-GPT!\n"
+    if new_bulletin or current_bulletin:
+        news_header += (
+            "Below you'll find the latest Auto-GPT News and updates regarding features!\n"
+            "If you don't wish to see this message, you "
+            "can run Auto-GPT with the *--skip-news* flag.\n"
+        )
 
     if new_bulletin and is_new_news:
         open("CURRENT_BULLETIN.md", "w", encoding="utf-8").write(new_bulletin)
-        return f" {Fore.RED}::UPDATED:: {Fore.CYAN}{new_bulletin}{Fore.RESET}"
-    return current_bulletin
+        current_bulletin = f"{Fore.RED}::NEW BULLETIN::{Fore.RESET}\n\n{new_bulletin}"
+
+    return f"{news_header}\n{current_bulletin}", is_new_news
+
+
+def markdown_to_ansi_style(markdown: str):
+    ansi_lines: list[str] = []
+    for line in markdown.split("\n"):
+        line_style = ""
+
+        if line.startswith("# "):
+            line_style += Style.BRIGHT
+        else:
+            line = re.sub(
+                r"(?<!\*)\*(\*?[^*]+\*?)\*(?!\*)",
+                rf"{Style.BRIGHT}\1{Style.NORMAL}",
+                line,
+            )
+
+        if re.match(r"^#+ ", line) is not None:
+            line_style += Fore.CYAN
+            line = re.sub(r"^#+ ", "", line)
+
+        ansi_lines.append(line_style + line + Style.RESET_ALL)
+    return "\n".join(ansi_lines)

From: Reinier van der Leer
Date: Tue, 2 May 2023 16:32:19 +0200
Subject: [PATCH 222/222] v0.3.0

---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index 64926046..bf71c70c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "agpt"
-version = "0.2.2"
+version = "0.3.0"
 authors = [
     { name="Torantulino", email="support@agpt.co" },
 ]
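The plugin tests added in `tests/unit/test_plugins.py` above exercise `blacklist_whitelist_check` without its implementation appearing in this series. The sketch below is inferred purely from the behavior those tests pin down and is not the actual code in `autogpt/plugins.py`: a blacklisted plugin is always rejected, a whitelisted plugin is always accepted, and anything else falls back to an interactive confirmation where only an explicit "y" counts as consent. The prompt wording is invented for illustration; only the attribute names `plugins_blacklist` and `plugins_whitelist` come from the mock configs in the tests.

```python
from typing import Any


def blacklist_whitelist_check(plugin_name: str, cfg: Any) -> bool:
    # The blacklist wins over everything else.
    if plugin_name in cfg.plugins_blacklist:
        return False
    # Whitelisted plugins load without asking.
    if plugin_name in cfg.plugins_whitelist:
        return True
    # Unknown plugin: ask the user. Anything other than "y" (including
    # invalid input) is treated as "no", matching the tests above.
    ack = input(f"Load plugin {plugin_name}? [y/n]: ")  # prompt text is illustrative
    return ack.lower() == "y"
```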
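To make the bulletin rendering change in patch 221 concrete, here is a small usage sketch of the new `markdown_to_ansi_style` helper from `autogpt/utils.py`. The sample string is invented; the import path follows the patch, and `colorama.init()` is only needed so Windows terminals interpret the ANSI codes.

```python
from colorama import init

from autogpt.utils import markdown_to_ansi_style

init()  # enable ANSI escape handling on Windows terminals

# An invented sample in the style of the new BULLETIN.md:
sample = "# Website and Documentation Site\nCheck out *https://agpt.co* for updates!"
print(markdown_to_ansi_style(sample))
# Per the function above: the heading line comes back as Style.BRIGHT +
# Fore.CYAN with the leading "# " stripped; the *starred* span on the second
# line is wrapped in Style.BRIGHT ... Style.NORMAL; and every line ends with
# Style.RESET_ALL.
```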