diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 00000000..05986577 --- /dev/null +++ b/.coveragerc @@ -0,0 +1,2 @@ +[run] +relative_files = true \ No newline at end of file diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 02f580a0..f31cdeba 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,28 +1,13 @@ -# [Choice] Python version (use -bullseye variants on local arm64/Apple Silicon): 3, 3.10, 3-bullseye, 3.10-bullseye, 3-buster, 3.10-buster -ARG VARIANT=3-bullseye -FROM --platform=linux/amd64 python:3.10 +# Use an official Python base image from the Docker Hub +FROM python:3.10 -RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ - # Remove imagemagick due to https://security-tracker.debian.org/tracker/CVE-2019-10131 - && apt-get purge -y imagemagick imagemagick-6-common +# Install browsers +RUN apt-get update && apt-get install -y \ + chromium-driver firefox-esr \ + ca-certificates -# Temporary: Upgrade python packages due to https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-40897 -# They are installed by the base image (python) which does not have the patch. -RUN python3 -m pip install --upgrade setuptools +# Install utilities +RUN apt-get install -y curl jq wget git -# Install Chrome for web browsing -RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ - && curl -sSL https://dl.google.com/linux/direct/google-chrome-stable_current_$(dpkg --print-architecture).deb -o /tmp/chrome.deb \ - && apt-get -y install /tmp/chrome.deb - -# [Optional] If your pip requirements rarely change, uncomment this section to add them to the image. -# COPY requirements.txt /tmp/pip-tmp/ -# RUN pip3 --disable-pip-version-check --no-cache-dir install -r /tmp/pip-tmp/requirements.txt \ -# && rm -rf /tmp/pip-tmp - -# [Optional] Uncomment this section to install additional OS packages. -# RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ -# && apt-get -y install --no-install-recommends - -# [Optional] Uncomment this line to install global node packages. -# RUN su vscode -c "source /usr/local/share/nvm/nvm.sh && npm install -g " 2>&1 +# Declare working directory +WORKDIR /workspace/Auto-GPT diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index f26810fb..5d50e280 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,14 +1,14 @@ { - "build": { - "dockerfile": "./Dockerfile", - "context": "." 
- }, + "dockerComposeFile": "./docker-compose.yml", + "service": "auto-gpt", + "workspaceFolder": "/workspace/Auto-GPT", + "shutdownAction": "stopCompose", "features": { "ghcr.io/devcontainers/features/common-utils:2": { "installZsh": "true", "username": "vscode", - "userUid": "1000", - "userGid": "1000", + "userUid": "6942", + "userGid": "6942", "upgradePackages": "true" }, "ghcr.io/devcontainers/features/desktop-lite:1": {}, diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml new file mode 100644 index 00000000..90d8c116 --- /dev/null +++ b/.devcontainer/docker-compose.yml @@ -0,0 +1,19 @@ +# To boot the app run the following: +# docker-compose run auto-gpt +version: '3.9' + +services: + auto-gpt: + depends_on: + - redis + build: + dockerfile: .devcontainer/Dockerfile + context: ../ + tty: true + environment: + MEMORY_BACKEND: ${MEMORY_BACKEND:-redis} + REDIS_HOST: ${REDIS_HOST:-redis} + volumes: + - ../:/workspace/Auto-GPT + redis: + image: 'redis/redis-stack-server:latest' diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..9ac55109 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,8 @@ +.* +*.template +*.yaml +*.yml + +*.md +*.png +!BULLETIN.md diff --git a/.env.template b/.env.template index f1b511c2..c0093507 100644 --- a/.env.template +++ b/.env.template @@ -13,6 +13,11 @@ ## AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml) # AI_SETTINGS_FILE=ai_settings.yaml +## AUTHORISE COMMAND KEY - Key to authorise commands +# AUTHORISE_COMMAND_KEY=y +## EXIT_KEY - Key to exit AUTO-GPT +# EXIT_KEY=n + ################################################################################ ### LLM PROVIDER ################################################################################ @@ -44,6 +49,14 @@ OPENAI_API_KEY=your-openai-api-key # FAST_TOKEN_LIMIT=4000 # SMART_TOKEN_LIMIT=8000 +### EMBEDDINGS +## EMBEDDING_MODEL - Model to use for creating embeddings +## EMBEDDING_TOKENIZER - Tokenizer to use for chunking large inputs +## EMBEDDING_TOKEN_LIMIT - Chunk size limit for large inputs +# EMBEDDING_MODEL=text-embedding-ada-002 +# EMBEDDING_TOKENIZER=cl100k_base +# EMBEDDING_TOKEN_LIMIT=8191 + ################################################################################ ### MEMORY ################################################################################ @@ -52,7 +65,7 @@ OPENAI_API_KEY=your-openai-api-key ## local - Default ## pinecone - Pinecone (if configured) ## redis - Redis (if configured) -## milvus - Milvus (if configured) +## milvus - Milvus (if configured - also works with Zilliz) ## MEMORY_INDEX - Name of index created in Memory backend (Default: auto-gpt) # MEMORY_BACKEND=local # MEMORY_INDEX=auto-gpt @@ -93,10 +106,16 @@ OPENAI_API_KEY=your-openai-api-key # WEAVIATE_API_KEY= ### MILVUS -## MILVUS_ADDR - Milvus remote address (e.g. localhost:19530) -## MILVUS_COLLECTION - Milvus collection, -## change it if you want to start a new memory and retain the old memory. -# MILVUS_ADDR=your-milvus-cluster-host-port +## MILVUS_ADDR - Milvus remote address (e.g. localhost:19530, https://xxx-xxxx.xxxx.xxxx.zillizcloud.com:443) +## MILVUS_USERNAME - username for your Milvus database +## MILVUS_PASSWORD - password for your Milvus database +## MILVUS_SECURE - True to enable TLS. (Default: False) +## Setting MILVUS_ADDR to a `https://` URL will override this setting. +## MILVUS_COLLECTION - Milvus collection, change it if you want to start a new memory and retain the old memory. 
+# MILVUS_ADDR=localhost:19530 +# MILVUS_USERNAME= +# MILVUS_PASSWORD= +# MILVUS_SECURE= # MILVUS_COLLECTION=autogpt ################################################################################ @@ -188,3 +207,16 @@ OPENAI_API_KEY=your-openai-api-key # TW_CONSUMER_SECRET= # TW_ACCESS_TOKEN= # TW_ACCESS_TOKEN_SECRET= + +################################################################################ +### ALLOWLISTED PLUGINS +################################################################################ + +#ALLOWLISTED_PLUGINS - Sets the listed plugins that are allowed (Example: plugin1,plugin2,plugin3) +ALLOWLISTED_PLUGINS= + +################################################################################ +### CHAT PLUGIN SETTINGS +################################################################################ +# CHAT_MESSAGES_ENABLED - Enable chat messages (Default: False) +# CHAT_MESSAGES_ENABLED=False diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000..b28dfc4d --- /dev/null +++ b/.gitattributes @@ -0,0 +1,5 @@ +# Exclude VCR cassettes from stats +tests/**/cassettes/**.y*ml linguist-generated + +# Mark documentation as such +docs/**.md linguist-documentation diff --git a/.github/ISSUE_TEMPLATE/1.bug.yml b/.github/ISSUE_TEMPLATE/1.bug.yml index 6645142e..9c82c63f 100644 --- a/.github/ISSUE_TEMPLATE/1.bug.yml +++ b/.github/ISSUE_TEMPLATE/1.bug.yml @@ -57,6 +57,20 @@ body: - Other (Please specify in your problem) validations: required: true + - type: dropdown + attributes: + label: Which version of Auto-GPT are you using? + description: | + Please select which version of Auto-GPT you were using when this issue occurred. + If you downloaded the code from the [releases page](https://github.com/Significant-Gravitas/Auto-GPT/releases/) make sure you were using the latest code. + **If you weren't please try with the [latest code](https://github.com/Significant-Gravitas/Auto-GPT/releases/)**. + If installed with git you can run `git branch` to see which version of Auto-GPT you are running. + options: + - Latest Release + - Stable (branch) + - Master (branch) + validations: + required: true - type: dropdown attributes: label: GPT-3 or GPT-4? diff --git a/.github/workflows/auto_format.yml b/.github/workflows/auto_format.yml deleted file mode 100644 index c33cd397..00000000 --- a/.github/workflows/auto_format.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: auto-format -on: pull_request -jobs: - format: - runs-on: ubuntu-latest - steps: - - name: Checkout PR branch - uses: actions/checkout@v2 - with: - ref: ${{ github.event.pull_request.head.sha }} - - name: autopep8 - uses: peter-evans/autopep8@v1 - with: - args: --exit-code --recursive --in-place --aggressive --aggressive . 
- - name: Check for modified files - id: git-check - run: echo "modified=$(if git diff-index --quiet HEAD --; then echo "false"; else echo "true"; fi)" >> $GITHUB_ENV - - name: Push changes - if: steps.git-check.outputs.modified == 'true' - run: | - git config --global user.name 'Torantulino' - git config --global user.email 'toran.richards@gmail.com' - git remote set diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml deleted file mode 100644 index 53b6e7a0..00000000 --- a/.github/workflows/benchmark.yml +++ /dev/null @@ -1,31 +0,0 @@ -name: benchmark - -on: - workflow_dispatch: - -jobs: - build: - runs-on: ubuntu-latest - environment: benchmark - strategy: - matrix: - python-version: ['3.10', '3.11'] - - steps: - - name: Check out repository - uses: actions/checkout@v3 - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 - with: - python-version: ${{ matrix.python-version }} - - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install -r requirements.txt - - name: benchmark - run: | - python benchmark/benchmark_entrepeneur_gpt_with_undecisive_user.py - env: - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml new file mode 100644 index 00000000..272fca17 --- /dev/null +++ b/.github/workflows/benchmarks.yml @@ -0,0 +1,31 @@ +name: Run Benchmarks + +on: + workflow_dispatch: + +jobs: + build: + runs-on: ubuntu-latest + + env: + python-version: '3.10' + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Set up Python ${{ env.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ env.python-version }} + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + + - name: benchmark + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + run: | + python benchmark/benchmark_entrepreneur_gpt_with_undecisive_user.py diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b37e7f00..95a04168 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,71 +2,76 @@ name: Python CI on: push: - branches: [master] + branches: [ master ] pull_request: - branches: [master] + branches: [ master, stable ] concurrency: - group: ${{ format('ci-{0}', format('pr-{0}', github.event.pull_request.number) || github.sha) }} + group: ${{ format('ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }} cancel-in-progress: ${{ github.event_name == 'pull_request' }} jobs: lint: runs-on: ubuntu-latest env: - min-python-version: '3.10' + min-python-version: "3.10" steps: - - name: Check out repository - uses: actions/checkout@v3 + - name: Checkout repository + uses: actions/checkout@v3 - - name: Set up Python ${{ env.min-python-version }} - uses: actions/setup-python@v2 - with: - python-version: ${{ env.min-python-version }} + - name: Set up Python ${{ env.min-python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ env.min-python-version }} - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install -r requirements.txt + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt - - name: Lint with flake8 - run: flake8 + - name: Lint with flake8 + run: flake8 - - name: Check black formatting - run: black . 
--check - if: success() || failure() + - name: Check black formatting + run: black . --check + if: success() || failure() - - name: Check isort formatting - run: isort . --check - if: success() || failure() + - name: Check isort formatting + run: isort . --check + if: success() || failure() test: + permissions: + # Gives the action the necessary permissions for publishing new + # comments in pull requests. + pull-requests: write + # Gives the action the necessary permissions for pushing data to the + # python-coverage-comment-action branch, and for editing existing + # comments (to avoid publishing multiple comments in the same PR) + contents: write runs-on: ubuntu-latest strategy: matrix: - python-version: ['3.10', '3.11'] + python-version: ["3.10", "3.11"] steps: - - name: Check out repository - uses: actions/checkout@v3 + - name: Check out repository + uses: actions/checkout@v3 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 - with: - python-version: ${{ matrix.python-version }} + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install -r requirements.txt + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt - - name: Run unittest tests with coverage - run: | - pytest --cov=autogpt --without-integration --without-slow-integration + - name: Run unittest tests with coverage + run: | + pytest --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term - - name: Generate coverage report - run: | - coverage report - coverage xml - if: success() || failure() + - name: Upload coverage reports to Codecov + uses: codecov/codecov-action@v3 diff --git a/.github/workflows/docker-cache-clean.yml b/.github/workflows/docker-cache-clean.yml new file mode 100644 index 00000000..f3fc8c10 --- /dev/null +++ b/.github/workflows/docker-cache-clean.yml @@ -0,0 +1,58 @@ +name: Purge Docker CI cache + +on: + schedule: + - cron: 20 4 * * 1,4 + +env: + BASE_BRANCH: master + IMAGE_NAME: auto-gpt + +jobs: + build: + runs-on: ubuntu-latest + strategy: + matrix: + build-type: [release, dev] + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - id: build + name: Build image + uses: docker/build-push-action@v3 + with: + build-args: BUILD_TYPE=${{ matrix.build-type }} + load: true # save to docker images + # use GHA cache as read-only + cache-to: type=gha,scope=docker-${{ matrix.build-type }},mode=max + + - name: Generate build report + env: + event_name: ${{ github.event_name }} + event_ref: ${{ github.event.schedule }} + + build_type: ${{ matrix.build-type }} + + prod_branch: stable + dev_branch: master + repository: ${{ github.repository }} + base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'stable' && 'master' || 'stable' }} + + current_ref: ${{ github.ref_name }} + commit_hash: ${{ github.sha }} + source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.sha) }} + push_forced_label: + + new_commits_json: ${{ null }} + compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }} + + github_context_json: ${{ toJSON(github) }} + job_env_json: ${{ toJSON(env) }} + vars_json: ${{ toJSON(vars) }} + + run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY + 
continue-on-error: true diff --git a/.github/workflows/docker-ci.yml b/.github/workflows/docker-ci.yml new file mode 100644 index 00000000..28576d02 --- /dev/null +++ b/.github/workflows/docker-ci.yml @@ -0,0 +1,115 @@ +name: Docker CI + +on: + push: + branches: [ master ] + pull_request: + branches: [ master, stable ] + +concurrency: + group: ${{ format('docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }} + cancel-in-progress: ${{ github.event_name == 'pull_request' }} + +env: + IMAGE_NAME: auto-gpt + +jobs: + build: + runs-on: ubuntu-latest + strategy: + matrix: + build-type: [release, dev] + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - if: runner.debug + run: | + ls -al + du -hs * + + - id: build + name: Build image + uses: docker/build-push-action@v3 + with: + build-args: BUILD_TYPE=${{ matrix.build-type }} + tags: ${{ env.IMAGE_NAME }} + load: true # save to docker images + # cache layers in GitHub Actions cache to speed up builds + cache-from: type=gha,scope=docker-${{ matrix.build-type }} + cache-to: type=gha,scope=docker-${{ matrix.build-type }},mode=max + + - name: Generate build report + env: + event_name: ${{ github.event_name }} + event_ref: ${{ github.event.ref }} + event_ref_type: ${{ github.event.ref}} + + build_type: ${{ matrix.build-type }} + + prod_branch: stable + dev_branch: master + repository: ${{ github.repository }} + base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'stable' && 'master' || 'stable' }} + + current_ref: ${{ github.ref_name }} + commit_hash: ${{ github.event.after }} + source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }} + push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }} + + new_commits_json: ${{ toJSON(github.event.commits) }} + compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }} + + github_context_json: ${{ toJSON(github) }} + job_env_json: ${{ toJSON(env) }} + vars_json: ${{ toJSON(vars) }} + + run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY + continue-on-error: true + + # Docker setup needs fixing before this is going to work: #1843 + test: + runs-on: ubuntu-latest + needs: build + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - id: build + name: Build image + uses: docker/build-push-action@v3 + with: + build-args: BUILD_TYPE=dev # include pytest + tags: ${{ env.IMAGE_NAME }} + load: true # save to docker images + # cache layers in GitHub Actions cache to speed up builds + cache-from: type=gha,scope=docker-dev + cache-to: type=gha,scope=docker-dev,mode=max + + - id: test + name: Run tests + env: + CI: true + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + run: | + set +e + test_output=$( + docker run --env CI --env OPENAI_API_KEY --entrypoint python ${{ env.IMAGE_NAME }} -m \ + pytest --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term 2>&1 + ) + test_failure=$? 
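+          # `set +e` above keeps the step alive if pytest fails, so the captured
+          # exit code (rather than an early abort) decides the ✅/❌ in the summary below.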
+ + echo "$test_output" + + cat << $EOF >> $GITHUB_STEP_SUMMARY + # Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌') + \`\`\` + $test_output + \`\`\` + $EOF diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml deleted file mode 100644 index 9c77098e..00000000 --- a/.github/workflows/docker-image.yml +++ /dev/null @@ -1,18 +0,0 @@ -name: Docker Image CI - -on: - push: - branches: [ "master" ] - pull_request: - branches: [ "master" ] - -jobs: - - build: - - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v3 - - name: Build the Docker image - run: docker build . --file Dockerfile --tag autogpt:$(date +%s) diff --git a/.github/workflows/docker-release.yml b/.github/workflows/docker-release.yml new file mode 100644 index 00000000..e066641f --- /dev/null +++ b/.github/workflows/docker-release.yml @@ -0,0 +1,81 @@ +name: Docker Release + +on: + release: + types: [ published, edited ] + + workflow_dispatch: + inputs: + no_cache: + type: boolean + description: 'Build from scratch, without using cached layers' + +env: + IMAGE_NAME: auto-gpt + DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER }}/auto-gpt + +jobs: + build: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Log in to Docker hub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USER }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + # slashes are not allowed in image tags, but can appear in git branch or tag names + - id: sanitize_tag + name: Sanitize image tag + run: echo tag=${raw_tag//\//-} >> $GITHUB_OUTPUT + env: + raw_tag: ${{ github.ref_name }} + + - id: build + name: Build image + uses: docker/build-push-action@v3 + with: + build-args: BUILD_TYPE=release + load: true # save to docker images + # push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555 + tags: > + ${{ env.IMAGE_NAME }}, + ${{ env.DEPLOY_IMAGE_NAME }}:latest, + ${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }} + + # cache layers in GitHub Actions cache to speed up builds + cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=docker-release + cache-to: type=gha,scope=docker-release,mode=max + + - name: Push image to Docker Hub + run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }} + + - name: Generate build report + env: + event_name: ${{ github.event_name }} + event_ref: ${{ github.event.ref }} + event_ref_type: ${{ github.event.ref}} + inputs_no_cache: ${{ inputs.no_cache }} + + prod_branch: stable + dev_branch: master + repository: ${{ github.repository }} + base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'stable' && 'master' || 'stable' }} + + ref_type: ${{ github.ref_type }} + current_ref: ${{ github.ref_name }} + commit_hash: ${{ github.sha }} + source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }} + + github_context_json: ${{ toJSON(github) }} + job_env_json: ${{ toJSON(env) }} + vars_json: ${{ toJSON(vars) }} + + run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY + continue-on-error: true diff --git a/.github/workflows/dockerhub-imagepush.yml b/.github/workflows/dockerhub-imagepush.yml deleted file mode 100644 index 165f6265..00000000 --- a/.github/workflows/dockerhub-imagepush.yml +++ /dev/null @@ -1,31 +0,0 @@ -name: Push Docker Image on Release - -on: - release: - 
types: [published,edited] - -jobs: - - build: - - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v3 - - name: Log in to Docker hub - env: - DOCKER_USER: ${{secrets.DOCKER_USER}} - DOCKER_PASSWORD: ${{secrets.DOCKER_PASSWORD}} - run: | - docker login -u $DOCKER_USER -p $DOCKER_PASSWORD - - name: Build the Docker image - run: | - tag_v=$(git describe --tags $(git rev-list --tags --max-count=1)) - tag=$(echo $tag_v | sed 's/v//') - docker build . --file Dockerfile --tag ${{secrets.DOCKER_USER}}/auto-gpt:${tag} - - name: Docker Push - run: | - tag_v2=$(git describe --tags $(git rev-list --tags --max-count=1)) - tag2=$(echo $tag_v | sed 's/v//') - docker push ${{secrets.DOCKER_USER}}/auto-gpt:${tag2} - diff --git a/.github/workflows/documentation-release.yml b/.github/workflows/documentation-release.yml new file mode 100644 index 00000000..ef97e1e4 --- /dev/null +++ b/.github/workflows/documentation-release.yml @@ -0,0 +1,37 @@ +name: Docs + +on: + push: + branches: [ stable ] + paths: + - 'docs/**' + - 'mkdocs.yml' + - '.github/workflows/documentation.yml' + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +permissions: + contents: write + +jobs: + deploy: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Set up Python 3 + uses: actions/setup-python@v4 + with: + python-version: 3.x + + - name: Set up workflow cache + uses: actions/cache@v3 + with: + key: ${{ github.ref }} + path: .cache + + - run: pip install mkdocs-material + + - run: mkdocs gh-deploy --force diff --git a/.github/workflows/pr-label.yml b/.github/workflows/pr-label.yml index 92c5a66b..ff4174ad 100644 --- a/.github/workflows/pr-label.yml +++ b/.github/workflows/pr-label.yml @@ -1,12 +1,15 @@ name: "Pull Request auto-label" + on: # So that PRs touching the same files as the push are updated push: + branches: [ master ] # So that the `dirtyLabel` is removed if conflicts are resolve # We recommend `pull_request_target` so that github secrets are available. # In `pull_request` we wouldn't be able to change labels of fork PRs pull_request_target: - types: [opened, synchronize] + types: [ opened, synchronize ] + concurrency: group: ${{ format('pr-label-{0}', github.event.pull_request.number || github.sha) }} cancel-in-progress: true @@ -26,3 +29,27 @@ jobs: repoToken: "${{ secrets.GITHUB_TOKEN }}" commentOnDirty: "This pull request has conflicts with the base branch, please resolve those so we can evaluate the pull request." commentOnClean: "Conflicts have been resolved! 🎉 A maintainer will review the pull request shortly." + + size: + if: ${{ github.event_name == 'pull_request_target' }} + permissions: + issues: write + pull-requests: write + runs-on: ubuntu-latest + steps: + - uses: codelytv/pr-size-labeler@v1 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + xs_label: 'size/xs' + xs_max_size: 2 + s_label: 'size/s' + s_max_size: 10 + m_label: 'size/m' + m_max_size: 50 + l_label: 'size/l' + l_max_size: 200 + xl_label: 'size/xl' + message_if_xl: > + This PR exceeds the recommended size of 200 lines. + Please make sure you are NOT addressing multiple issues with one PR. 
+ Note this PR might be rejected due to its size diff --git a/.github/workflows/scripts/docker-ci-summary.sh b/.github/workflows/scripts/docker-ci-summary.sh new file mode 100755 index 00000000..0a55293f --- /dev/null +++ b/.github/workflows/scripts/docker-ci-summary.sh @@ -0,0 +1,98 @@ +#!/bin/bash +meta=$(docker image inspect "$IMAGE_NAME" | jq '.[0]') +head_compare_url=$(sed "s/{base}/$base_branch/; s/{head}/$current_ref/" <<< $compare_url_template) +ref_compare_url=$(sed "s/{base}/$base_branch/; s/{head}/$commit_hash/" <<< $compare_url_template) + +EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64) + +cat << $EOF +# Docker Build summary 🔨 + +**Source:** branch \`$current_ref\` -> [$repository@\`${commit_hash:0:7}\`]($source_url) + +**Build type:** \`$build_type\` + +**Image size:** $((`jq -r .Size <<< $meta` / 10**6))MB + +## Image details + +**Tags:** +$(jq -r '.RepoTags | map("* `\(.)`") | join("\n")' <<< $meta) + +
+<details>
+<summary>Layers</summary>
+
+| Age | Size | Created by instruction |
+| --------- | ------ | ---------------------- |
+$(docker history --no-trunc --format "{{.CreatedSince}}\t{{.Size}}\t\`{{.CreatedBy}}\`\t{{.Comment}}" $IMAGE_NAME \
+  | grep 'buildkit.dockerfile' `# filter for layers created in this build process`\
+  | cut -f-3 `# yeet Comment column`\
+  | sed 's/ ago//' `# fix Layer age`\
+  | sed 's/ # buildkit//' `# remove buildkit comment from instructions`\
+  | sed 's/\$/\\$/g' `# escape variable and shell expansions`\
+  | sed 's/|/\\|/g' `# escape pipes so they don't interfere with column separators`\
+  | column -t -s$'\t' -o' | ' `# align columns and add separator`\
+  | sed 's/^/| /; s/$/ |/' `# add table row start and end pipes`)
+</details>
+
+<details>
+<summary>ENV</summary>
+
+| Variable | Value |
+| -------- | -------- |
+$(jq -r \
+  '.Config.Env
+  | map(
+      split("=")
+      | "\(.[0]) | `\(.[1] | gsub("\\s+"; " "))`"
+    )
+  | map("| \(.) |")
+  | .[]' <<< $meta
+)
+</details>
+
+<details>
+<summary>Raw metadata</summary>
+
+\`\`\`JSON
+$meta
+\`\`\`
+</details>
+
+## Build details
+**Build trigger:** $push_forced_label $event_name \`$event_ref\`
+
+<details>
+<summary>github context</summary>
+
+\`\`\`JSON
+$github_context_json
+\`\`\`
+</details>
+ +### Source +**HEAD:** [$repository@\`${commit_hash:0:7}\`]($source_url) on branch [$current_ref]($ref_compare_url) + +**Diff with previous HEAD:** $head_compare_url + +#### New commits +$(jq -r 'map([ + "**Commit [`\(.id[0:7])`](\(.url)) by \(if .author.username then "@"+.author.username else .author.name end):**", + .message, + (if .committer.name != .author.name then "\n> **Committer:** \(.committer.name) <\(.committer.email)>" else "" end), + "**Timestamp:** \(.timestamp)" +] | map("> \(.)\n") | join("")) | join("\n")' <<< $new_commits_json) + +### Job environment + +#### \`vars\` context: +\`\`\`JSON +$vars_json +\`\`\` + +#### \`env\` context: +\`\`\`JSON +$job_env_json +\`\`\` + +$EOF diff --git a/.github/workflows/scripts/docker-release-summary.sh b/.github/workflows/scripts/docker-release-summary.sh new file mode 100755 index 00000000..1e00f41d --- /dev/null +++ b/.github/workflows/scripts/docker-release-summary.sh @@ -0,0 +1,85 @@ +#!/bin/bash +meta=$(docker image inspect "$IMAGE_NAME" | jq '.[0]') + +EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64) + +cat << $EOF +# Docker Release Build summary 🚀🔨 + +**Source:** $ref_type \`$current_ref\` -> [$repository@\`${commit_hash:0:7}\`]($source_url) + +**Image size:** $((`jq -r .Size <<< $meta` / 10**6))MB + +## Image details + +**Tags:** +$(jq -r '.RepoTags | map("* `\(.)`") | join("\n")' <<< $meta) + +
+<details>
+<summary>Layers</summary>
+
+| Age | Size | Created by instruction |
+| --------- | ------ | ---------------------- |
+$(docker history --no-trunc --format "{{.CreatedSince}}\t{{.Size}}\t\`{{.CreatedBy}}\`\t{{.Comment}}" $IMAGE_NAME \
+  | grep 'buildkit.dockerfile' `# filter for layers created in this build process`\
+  | cut -f-3 `# yeet Comment column`\
+  | sed 's/ ago//' `# fix Layer age`\
+  | sed 's/ # buildkit//' `# remove buildkit comment from instructions`\
+  | sed 's/\$/\\$/g' `# escape variable and shell expansions`\
+  | sed 's/|/\\|/g' `# escape pipes so they don't interfere with column separators`\
+  | column -t -s$'\t' -o' | ' `# align columns and add separator`\
+  | sed 's/^/| /; s/$/ |/' `# add table row start and end pipes`)
+</details>
+
+<details>
+<summary>ENV</summary>
+
+| Variable | Value |
+| -------- | -------- |
+$(jq -r \
+  '.Config.Env
+  | map(
+      split("=")
+      | "\(.[0]) | `\(.[1] | gsub("\\s+"; " "))`"
+    )
+  | map("| \(.) |")
+  | .[]' <<< $meta
+)
+</details>
+
+<details>
+<summary>Raw metadata</summary>
+
+\`\`\`JSON
+$meta
+\`\`\`
+</details>
+ +## Build details +**Build trigger:** $event_name \`$current_ref\` + +| Parameter | Value | +| -------------- | ------------ | +| \`no_cache\` | \`$inputs_no_cache\` | + +
+<details>
+<summary>github context</summary>
+
+\`\`\`JSON
+$github_context_json
+\`\`\`
+</details>
+ +### Job environment + +#### \`vars\` context: +\`\`\`JSON +$vars_json +\`\`\` + +#### \`env\` context: +\`\`\`JSON +$job_env_json +\`\`\` + +$EOF diff --git a/.github/workflows/sponsors_readme.yml b/.github/workflows/sponsors_readme.yml new file mode 100644 index 00000000..7e51a4f8 --- /dev/null +++ b/.github/workflows/sponsors_readme.yml @@ -0,0 +1,28 @@ +name: Generate Sponsors README + +on: + workflow_dispatch: + schedule: + - cron: '0 */12 * * *' + +jobs: + deploy: + runs-on: ubuntu-latest + steps: + - name: Checkout 🛎️ + uses: actions/checkout@v3 + + - name: Generate Sponsors 💖 + uses: JamesIves/github-sponsors-readme-action@v1 + with: + token: ${{ secrets.README_UPDATER_PAT }} + file: 'README.md' + minimum: 2500 + maximum: 99999 + + - name: Deploy to GitHub Pages 🚀 + uses: JamesIves/github-pages-deploy-action@v4 + with: + branch: master + folder: '.' + token: ${{ secrets.README_UPDATER_PAT }} diff --git a/.gitignore b/.gitignore index a6b3b80f..816cdb0c 100644 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,7 @@ log-ingestion.txt logs *.log *.mp3 +mem.sqlite3 # Byte-compiled / optimized / DLL files __pycache__/ @@ -94,6 +95,7 @@ instance/ # Sphinx documentation docs/_build/ +site/ # PyBuilder target/ @@ -157,5 +159,7 @@ vicuna-* # mac .DS_Store +openai/ + # news CURRENT_BULLETIN.md \ No newline at end of file diff --git a/.isort.cfg b/.isort.cfg new file mode 100644 index 00000000..17eab482 --- /dev/null +++ b/.isort.cfg @@ -0,0 +1,10 @@ +[settings] +profile = black +multi_line_output = 3 +include_trailing_comma = true +force_grid_wrap = 0 +use_parentheses = true +ensure_newline_before_comments = true +line_length = 88 +sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER +skip = .tox,__pycache__,*.pyc,venv*/*,reports,venv,env,node_modules,.env,.venv,dist diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3722b25e..a2ea6114 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v0.9.2 + rev: v4.4.0 hooks: - id: check-added-large-files args: ['--maxkb=500'] diff --git a/BULLETIN.md b/BULLETIN.md index 735048dd..5190b448 100644 --- a/BULLETIN.md +++ b/BULLETIN.md @@ -1,2 +1,24 @@ -Welcome to Auto-GPT! We'll keep you informed of the latest news and features by printing messages here. -If you don't wish to see this message, you can run Auto-GPT with the --skip-news flag \ No newline at end of file +# Website and Documentation Site 📰📖 +Check out *https://agpt.co*, the official news & updates site for Auto-GPT! +The documentation also has a place here, at *https://docs.agpt.co* + +# 🚀 v0.3.0 Release 🚀 +Over a week and 275 pull requests have passed since v0.2.2, and we are happy to announce +the release of v0.3.0! *From now on, we will be focusing on major improvements* rather +than bugfixes, as we feel stability has reached a reasonable level. Most remaining +issues relate to limitations in prompt generation and the memory system, which will be +the focus of our efforts for the next release. + +Highlights and notable changes in this release: + +## Plugin support 🔌 +Auto-GPT now has support for plugins! With plugins, you can extend Auto-GPT's abilities, +adding support for third-party services and more. +See https://github.com/Significant-Gravitas/Auto-GPT-Plugins for instructions and available plugins. + +## Changes to Docker configuration 🐋 +The workdir has been changed from */home/appuser* to */app*. +Be sure to update any volume mounts accordingly! 
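+For example, a bind mount that previously targeted the old workdir, such as
+*-v $PWD/auto_gpt_workspace:/home/appuser/auto_gpt_workspace*, should now be
+*-v $PWD/auto_gpt_workspace:/app/auto_gpt_workspace*.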
+ +# ⚠️ Command `send_tweet` is DEPRECATED, and will be removed in v0.4.0 ⚠️ +Twitter functionality (and more) is now covered by plugins, see [Plugin support 🔌] diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index d2331b4c..b1f7caf3 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,4 +1,4 @@ -# Code of Conduct for auto-gpt +# Code of Conduct for Auto-GPT ## 1. Purpose @@ -37,4 +37,3 @@ This Code of Conduct is adapted from the [Contributor Covenant](https://www.cont ## 6. Contact If you have any questions or concerns, please contact the project maintainers. - diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 79169a0c..c410d076 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,35 +1,23 @@ -# Contributing to ProjectName +# Contributing to Auto-GPT First of all, thank you for considering contributing to our project! We appreciate your time and effort, and we value any contribution, whether it's reporting a bug, suggesting a new feature, or submitting a pull request. This document provides guidelines and best practices to help you contribute effectively. -## Table of Contents - -- [Code of Conduct](#code-of-conduct) -- [Getting Started](#getting-started) -- [How to Contribute](#how-to-contribute) - - [Reporting Bugs](#reporting-bugs) - - [Suggesting Enhancements](#suggesting-enhancements) - - [Submitting Pull Requests](#submitting-pull-requests) -- [Style Guidelines](#style-guidelines) - - [Code Formatting](#code-formatting) - - [Pre-Commit Hooks](#pre-commit-hooks) - ## Code of Conduct -By participating in this project, you agree to abide by our [Code of Conduct](CODE_OF_CONDUCT.md). Please read it to understand the expectations we have for everyone who contributes to this project. +By participating in this project, you agree to abide by our [Code of Conduct]. Please read it to understand the expectations we have for everyone who contributes to this project. + +[Code of Conduct]: https://docs.agpt.co/code-of-conduct/ ## 📢 A Quick Word Right now we will not be accepting any Contributions that add non-essential commands to Auto-GPT. -However, you absolutely can still add these commands to Auto-GPT in the form of plugins. Please check out this [template](https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template). -> ⚠️ Plugin support is expected to ship within the week. You can follow PR #757 for more updates! +However, you absolutely can still add these commands to Auto-GPT in the form of plugins. +Please check out this [template](https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template). ## Getting Started -To start contributing, follow these steps: - 1. Fork the repository and clone your fork. 2. Create a new branch for your changes (use a descriptive name, such as `fix-bug-123` or `add-new-feature`). 3. Make your changes in the new branch. @@ -60,7 +48,7 @@ If you have an idea for a new feature or improvement, please create an issue on When submitting a pull request, please ensure that your changes meet the following criteria: - Your pull request should be atomic and focus on a single change. -- Your pull request should include tests for your change. +- Your pull request should include tests for your change. We automatically enforce this with [CodeCov](https://docs.codecov.com/docs/commit-status) - You should have thoroughly tested your changes with multiple different prompts. - You should have considered potential risks and mitigations for your changes. - You should have documented your changes clearly and comprehensively. 
@@ -70,18 +58,23 @@ When submitting a pull request, please ensure that your changes meet the followi ### Code Formatting -We use the `black` code formatter to maintain a consistent coding style across the project. Please ensure that your code is formatted using `black` before submitting a pull request. You can install `black` using `pip`: +We use the `black` and `isort` code formatters to maintain a consistent coding style across the project. Please ensure that your code is formatted properly before submitting a pull request. + +To format your code, run the following commands in the project's root directory: ```bash -pip install black +python -m black . +python -m isort . ``` -To format your code, run the following command in the project's root directory: - +Or if you have these tools installed globally: ```bash black . +isort . ``` + ### Pre-Commit Hooks + We use pre-commit hooks to ensure that code formatting and other checks are performed automatically before each commit. To set up pre-commit hooks for this project, follow these steps: Install the pre-commit package using pip: @@ -101,5 +94,55 @@ If you encounter any issues or have questions, feel free to reach out to the mai Happy coding, and once again, thank you for your contributions! Maintainers will look at PR that have no merge conflicts when deciding what to add to the project. Make sure your PR shows up here: +https://github.com/Significant-Gravitas/Auto-GPT/pulls?q=is%3Apr+is%3Aopen+-label%3Aconflicts -https://github.com/Torantulino/Auto-GPT/pulls?q=is%3Apr+is%3Aopen+-is%3Aconflict+ \ No newline at end of file +## Testing your changes + +If you add or change code, make sure the updated code is covered by tests. +To increase coverage if necessary, [write tests using pytest]. + +For more info on running tests, please refer to ["Running tests"](https://docs.agpt.co/testing/). + +[write tests using pytest]: https://realpython.com/pytest-python-testing/ + +### API-dependent tests + +To run tests that involve making calls to the OpenAI API, we use VCRpy. It caches known +requests and matching responses in so-called *cassettes*, allowing us to run the tests +in CI without needing actual API access. + +When changes cause a test prompt to be generated differently, it will likely miss the +cache and make a request to the API, updating the cassette with the new request+response. +*Be sure to include the updated cassette in your PR!* + +When you run Pytest locally: + +- If no prompt change: you will not consume API tokens because there are no new OpenAI calls required. +- If the prompt changes in a way that the cassettes are not reusable: + - If no API key, the test fails. It requires a new cassette. So, add an API key to .env. + - If the API key is present, the tests will make a real call to OpenAI. + - If the test ends up being successful, your prompt changes didn't introduce regressions. This is good. Commit your cassettes to your PR. + - If the test is unsuccessful: + - Either: Your change made Auto-GPT less capable, in that case, you have to change your code. + - Or: The test might be poorly written. In that case, you can make suggestions to change the test. + +In our CI pipeline, Pytest will use the cassettes and not call paid API providers, so we need your help to record the replays that you break. + + +### Community Challenges +Challenges are goals we need Auto-GPT to achieve. +To pick the challenge you like, go to the tests/integration/challenges folder and select the areas you would like to work on. 
+- a challenge is new if level_currently_beaten is None
+- a challenge is in progress if level_currently_beaten is greater than or equal to 1
+- a challenge is beaten if level_currently_beaten = max_level
+
+Here is an example of how to run memory challenge A and attempt to beat level 3:
+
+pytest -s tests/integration/challenges/memory/test_memory_challenge_a.py --level=3
+
+To beat a challenge, you're not allowed to change anything in the tests folder; you have to add code in the autogpt folder.
+
+Challenges use cassettes. Cassettes allow us to replay your runs in our CI pipeline.
+Don't hesitate to delete the cassettes associated with the challenge you're working on if you need to. Otherwise, it will keep replaying the last run.
+
+Once you've beaten a new level of a challenge, please create a pull request and we will analyze how you changed Auto-GPT to beat the challenge.
diff --git a/Dockerfile b/Dockerfile
index 83961549..6023cefa 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,38 +1,40 @@
+# 'dev' or 'release' container build
+ARG BUILD_TYPE=dev
+
 # Use an official Python base image from the Docker Hub
-FROM python:3.10-slim
+FROM python:3.10-slim AS autogpt-base
 
-# Install git
-RUN apt-get -y update
-RUN apt-get -y install git chromium-driver
+# Install browsers
+RUN apt-get update && apt-get install -y \
+    chromium-driver firefox-esr \
+    ca-certificates
 
-# Install Xvfb and other dependencies for headless browser testing
-RUN apt-get update \
-    && apt-get install -y wget gnupg2 libgtk-3-0 libdbus-glib-1-2 dbus-x11 xvfb ca-certificates
-
-# Install Firefox / Chromium
-RUN wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - \
-    && echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list \
-    && apt-get update \
-    && apt-get install -y chromium firefox-esr
+# Install utilities
+RUN apt-get install -y curl jq wget git
 
 # Set environment variables
 ENV PIP_NO_CACHE_DIR=yes \
     PYTHONUNBUFFERED=1 \
     PYTHONDONTWRITEBYTECODE=1
 
-# Create a non-root user and set permissions
-RUN useradd --create-home appuser
-WORKDIR /home/appuser
-RUN chown appuser:appuser /home/appuser
-USER appuser
-
-# Copy the requirements.txt file and install the requirements
-COPY --chown=appuser:appuser requirements.txt .
-RUN sed -i '/Items below this point will not be included in the Docker Image/,$d' requirements.txt && \
-    pip install --no-cache-dir --user -r requirements.txt
-
-# Copy the application files
-COPY --chown=appuser:appuser autogpt/ ./autogpt
+# Install the required python packages globally
+ENV PATH="$PATH:/root/.local/bin"
+COPY requirements.txt .
 
 # Set the entrypoint
 ENTRYPOINT ["python", "-m", "autogpt"]
+
+# dev build -> include everything
+FROM autogpt-base as autogpt-dev
+RUN pip install --no-cache-dir -r requirements.txt
+WORKDIR /app
+ONBUILD COPY . ./
+
+# release build -> include bare minimum
+FROM autogpt-base as autogpt-release
+RUN sed -i '/Items below this point will not be included in the Docker Image/,$d' requirements.txt && \
+    pip install --no-cache-dir -r requirements.txt
+WORKDIR /app
+ONBUILD COPY autogpt/ ./autogpt
+
+FROM autogpt-${BUILD_TYPE} AS auto-gpt
diff --git a/README.md b/README.md
index 6d636c56..312e4d9f 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,5 @@
 # Auto-GPT: An Autonomous GPT-4 Experiment
+[![Official Website](https://img.shields.io/badge/Official%20Website-agpt.co-blue?style=flat&logo=world&logoColor=white)](https://agpt.co)
 [![Unit Tests](https://img.shields.io/github/actions/workflow/status/Significant-Gravitas/Auto-GPT/ci.yml?label=unit%20tests)](https://github.com/Significant-Gravitas/Auto-GPT/actions/workflows/ci.yml)
 [![Discord Follow](https://dcbadge.vercel.app/api/server/autogpt?style=flat)](https://discord.gg/autogpt)
 [![GitHub Repo stars](https://img.shields.io/github/stars/Significant-Gravitas/auto-gpt?style=social)](https://github.com/Significant-Gravitas/Auto-GPT/stargazers)
@@ -26,430 +27,94 @@ Demo made by Blake Werlinger

💖 Help Fund Auto-GPT's Development 💖

-If you can spare a coffee, you can help to cover the costs of developing Auto-GPT and help push the boundaries of fully autonomous AI! -Your support is greatly appreciated -Development of this free, open-source project is made possible by all the contributors and sponsors. If you'd like to sponsor this project and have your avatar or company logo appear below click here. +If you can spare a coffee, you can help to cover the costs of developing Auto-GPT and help to push the boundaries of fully autonomous AI! +Your support is greatly appreciated. Development of this free, open-source project is made possible by all the contributors and sponsors. If you'd like to sponsor this project and have your avatar or company logo appear below click here.

-InfluxData    Roost.AI    NucleiAI    Algohash    TypingMind    

-robinicus  prompthero  crizzler  tob-le-rone  FSTatSBS  toverly1  ddtarazona  Nalhos  Kazamario  pingbotan  indoor47  AuroraHolding  kreativai  hunteraraujo  Explorergt92  judegomila   -thepok -  SpacingLily  merwanehamadi  m  zkonduit  maxxflyer  tekelsey  digisomni  nocodeclarity  tjarmain -Josecodesalot  saten-private  kenndanielso  johnculkin  Daniel1357  0xmatchmaker  belharethsami  nicoguyon  josephcmiller2  KiaArmani  Mobivs  rocks6  Odin519Tomas  ChrisDMT  thisisjeffchen  RealChrisSean  AIdevelopersAI  scryptedinc  jun784  goldenrecursion  allenstecat  LeeRobidas  cfarquhar  avy-ai  omphos  sunchongren  CrazySwami  fruition  Web3Capital  jazgarewal  rejunity  dexterityx  shawnharmsen  tommygeee  abhinav-pandey29  ColinConwell  kMag410  lucas-chu  Heitechsoft  bentoml  MediConCenHK  nnkostov  founderblocks-sils  CarmenCocoa  angiaou  fabrietech  Partender  RThaweewat  GalaxyVideoAgency  Brodie0  sultanmeghji  CatsMeow492  caitlynmeeks  garythebat  concreit  Pythagora-io  ASmithOWL  Cameron-Fulton  joaomdmoura  Dradstone  st617  wenfengwang  morcos  CrypteorCapital  jd3655  mathewhawkins  ZERO-A-ONE  MayurVirkar  SwftCoins  marv-technology  cxs  iddelacruz  AryaXAI  lmaugustin  Mr-Bishop42  vixul-accelerator  TheStoneMX  ciscodebs  ntwrite  DataMetis  ikarosai  refinery1  MetaPath01  ternary5  arjunb023  yx3110  vkozacek  eelbaz  rapidstartup  txtr99  tob-le-rone  neverinstall  projectonegames  DailyBotHQ  comet-ml  rickscode  webbcolton  MBassi91  


robinicus  0xmatchmaker  jazgarewal  MayurVirkar  avy-ai  TheStoneMX  goldenrecursion  MatthewAgs  eelbaz  rapidstartup  gklab  VoiceBeer  DailyBotHQ  lucas-chu  knifour  refinery1  st617  neodenit  CrazySwami  Heitechsoft  RealChrisSean  abhinav-pandey29  Explorergt92  SparkplanAI  crizzler  kreativai  omphos  Jahmazon  tjarmain  ddtarazona  saten-private  anvarazizov  lazzacapital  m  Pythagora-io  Web3Capital  toverly1  digisomni  concreit  LeeRobidas  Josecodesalot  dexterityx  rickscode  Brodie0  FSTatSBS  nocodeclarity  jsolejr  amr-elsehemy  RawBanana  horazius  SwftCoins  tob-le-rone  RThaweewat  jun784  joaomdmoura  rejunity  mathewhawkins  caitlynmeeks  jd3655  Odin519Tomas  DataMetis  webbcolton  rocks6  cxs  fruition  nnkostov  morcos  pingbotan  maxxflyer  tommi-joentakanen  hunteraraujo  projectonegames  tullytim  comet-ml  thepok  prompthero  sunchongren  neverinstall  josephcmiller2  yx3110  MBassi91  SpacingLily  arthur-x88  ciscodebs  christian-gheorghe  EngageStrategies  jondwillis  Cameron-Fulton  AryaXAI  AuroraHolding  Mr-Bishop42  doverhq  johnculkin  marv-technology  ikarosai  ColinConwell  humungasaurus  terpsfreak  iddelacruz  thisisjeffchen  nicoguyon  arjunb023  Nalhos  belharethsami  Mobivs  txtr99  ntwrite  founderblocks-sils  kMag410  angiaou  garythebat  lmaugustin  shawnharmsen  clortegah  MetaPath01  sekomike910  MediConCenHK  svpermari0  jacobyoby  turintech  allenstecat  CatsMeow492  tommygeee  judegomila  cfarquhar  ZoneSixGames  kenndanielso  CrypteorCapital  sultanmeghji  jenius-eagle  josephjacks  pingshian0131  AIdevelopersAI  ternary5  ChrisDMT  AcountoOU  chatgpt-prompts  Partender  Daniel1357  KiaArmani  zkonduit  fabrietech  scryptedinc  coreyspagnoli  AntonioCiolino  Dradstone  CarmenCocoa  bentoml  merwanehamadi  vkozacek  ASmithOWL  tekelsey  GalaxyVideoAgency  wenfengwang  rviramontes  indoor47  ZERO-A-ONE  

+ + ## 🚀 Features - 🌐 Internet access for searches and information gathering -- 💾 Long-Term and Short-Term memory management +- 💾 Long-term and short-term memory management - 🧠 GPT-4 instances for text generation - 🔗 Access to popular websites and platforms - 🗃️ File storage and summarization with GPT-3.5 +- 🔌 Extensibility with Plugins -## 📋 Requirements +## Quickstart -- Environment (pick one) - - [VSCode + devcontainer](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers): It has been configured in the .devcontainer folder and can be used directly - - Docker - - Python 3.10 or later (instructions: [for Windows](https://www.tutorialspoint.com/how-to-install-python-in-windows)) -- [OpenAI API key](https://platform.openai.com/account/api-keys) +1. Get an OpenAI [API Key](https://platform.openai.com/account/api-keys) +2. Download the [latest release](https://github.com/Significant-Gravitas/Auto-GPT/releases/latest) +3. Follow the [installation instructions][docs/setup] +4. Configure any additional features you want, or install some [plugins][docs/plugins] +5. [Run][docs/usage] the app -### Optional -- Memory backend (pick one) - - [Pinecone](https://www.pinecone.io/) - - [Milvus](https://milvus.io/) - - [Redis](https://redis.io) - - [Weaviate](https://weaviate.io) -- ElevenLabs Key (If you want the AI to speak) +Please see the [documentation][docs] for full setup instructions and configuration options. -## ⚠️ OpenAI API Keys Configuration ⚠️ +[docs]: https://docs.agpt.co/ -Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys. +## 📖 Documentation +* [⚙️ Setup][docs/setup] +* [💻 Usage][docs/usage] +* [🔌 Plugins][docs/plugins] +* Configuration + * [🔍 Web Search](https://docs.agpt.co/configuration/search/) + * [🧠 Memory](https://docs.agpt.co/configuration/memory/) + * [🗣️ Voice (TTS)](https://docs.agpt.co/configuration/voice/) + * [🖼️ Image Generation](https://docs.agpt.co/configuration/imagegen/) -To use OpenAI API key for Auto-GPT, you **NEED** to have billing set up (AKA paid account). - -You can set up paid account at https://platform.openai.com/account/billing/overview. - -![For OpenAI API key to work, set up paid account at OpenAI API > Billing](./docs/imgs/openai-api-key-billing-paid-account.png) - -#### **PLEASE ENSURE YOU HAVE DONE THIS STEP BEFORE PROCEEDING, OTHERWISE NOTHING WILL WORK!** - -## 💾 Installation - -To install Auto-GPT, follow these steps: - -1. Make sure you have all the **requirements** listed above, if not, install/get them - -_To execute the following commands, open a CMD, Bash, or Powershell window by navigating to a folder on your computer and typing `CMD` in the folder path at the top, then press enter._ - -2. Clone the repository: For this step, you need Git installed. Alternatively, - you can download the [latest stable release](https://github.com/Significant-Gravitas/Auto-GPT/releases/latest) (`Source code (zip)`, bottom of the page). - - ```bash - git clone https://github.com/Significant-Gravitas/Auto-GPT.git - ``` - -3. Navigate to the directory where the repository was downloaded - - ```bash - cd Auto-GPT - ``` - -4. Install the required dependencies - - ```bash - pip install -r requirements.txt - ``` - -5. Configure Auto-GPT - 1. Locate the file named `.env.template` in the main `/Auto-GPT` folder. - 2. Create a copy of this file, called `.env` by removing the `template` extension. The easiest way is to do this in a command prompt/terminal window `cp .env.template .env`. - 3. 
Open the `.env` file in a text editor. _Note: Files starting with a dot might be hidden by your Operating System._ - 4. Find the line that says `OPENAI_API_KEY=`. - 5. After the `"="`, enter your unique OpenAI API Key (without any quotes or spaces). - 6. Enter any other API keys or Tokens for services you would like to utilize. - 7. Save and close the `.env` file. - - By completing these steps, you have properly configured the API Keys for your project. - - - See [OpenAI API Keys Configuration](#openai-api-keys-configuration) to obtain your OpenAI API key. - - Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website. - - If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and then follow these steps: - - Rename `azure.yaml.template` to `azure.yaml` and provide the relevant `azure_api_base`, `azure_api_version` and all the deployment IDs for the relevant models in the `azure_model_map` section: - - `fast_llm_model_deployment_id` - your gpt-3.5-turbo or gpt-4 deployment ID - - `smart_llm_model_deployment_id` - your gpt-4 deployment ID - - `embedding_model_deployment_id` - your text-embedding-ada-002 v2 deployment ID - - Please specify all of these values as double-quoted strings - ```yaml - # Replace string in angled brackets (<>) to your own ID - azure_model_map: - fast_llm_model_deployment_id: "" - ... - ``` - - Details can be found here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section and here: https://learn.microsoft.com/en-us/azure/cognitive-services/openai/tutorials/embeddings?tabs=command-line for the embedding model. - -## 🔧 Usage - -1. Run `autogpt` Python module in your terminal. - On linux or mac: - ```bash - # On Linux or Mac: - ./run.sh start - # On Windows: - .\run.bat - ``` - Running with `--help` after `.\run.bat` lists all the possible command line arguments you can pass. - -2. After each action, choose from options to authorize command(s), -exit the program, or provide feedback to the AI. - 1. Authorize a single command, enter `y` - 2. Authorize a series of _N_ continuous commands, enter `y -N` - 3. Exit the program, enter `n` - - -### Logs - -Activity and error logs are located in the `./output/logs` - -To print out debug logs: - -``` -python -m autogpt --debug -``` - -### Docker - -You can also build this into a docker image and run it: - -```bash -docker build -t autogpt . -docker run -it --env-file=./.env -v $PWD/auto_gpt_workspace:/app/auto_gpt_workspace autogpt -``` - -Or if you have `docker-compose`: -```bash -docker-compose run --build --rm auto-gpt -``` - -You can pass extra arguments, for instance, running with `--gpt3only` and `--continuous` mode: -```bash -docker run -it --env-file=./.env -v $PWD/auto_gpt_workspace:/app/auto_gpt_workspace autogpt --gpt3only --continuous -``` -```bash -docker-compose run --build --rm auto-gpt --gpt3only --continuous -``` - -### Command Line Arguments -Here are some common arguments you can use when running Auto-GPT: -> Replace anything in angled brackets (<>) to a value you want to specify - -* View all available command line arguments - ```bash - python -m autogpt --help - ``` -* Run Auto-GPT with a different AI Settings file - ```bash - python -m autogpt --ai-settings - ``` -* Specify a memory backend - ```bash - python -m autogpt --use-memory - ``` - -> **NOTE**: There are shorthands for some of these flags, for example `-m` for `--use-memory`. 
Use `python -m autogpt --help` for more information - -## 🗣️ Speech Mode - -Use this to use TTS _(Text-to-Speech)_ for Auto-GPT - -```bash -python -m autogpt --speak -``` - -### List of IDs with names from eleven labs, you can use the name or ID: - -- Rachel : 21m00Tcm4TlvDq8ikWAM -- Domi : AZnzlk1XvdvUeBnXmlld -- Bella : EXAVITQu4vr4xnSDxMaL -- Antoni : ErXwobaYiN019PkySvjV -- Elli : MF3mGyEYCl7XYWbV9V6O -- Josh : TxGEqnHWrfWFTfGW9XjX -- Arnold : VR6AewLTigWG4xSOukaG -- Adam : pNInz6obpgDQGcFmaJgB -- Sam : yoZ06aMxZJJ28mfd3POQ - -## 🔍 Google API Keys Configuration - -This section is optional, use the official google api if you are having issues with error 429 when running a google search. -To use the `google_official_search` command, you need to set up your Google API keys in your environment variables. - -1. Go to the [Google Cloud Console](https://console.cloud.google.com/). -2. If you don't already have an account, create one and log in. -3. Create a new project by clicking on the "Select a Project" dropdown at the top of the page and clicking "New Project". Give it a name and click "Create". -4. Go to the [APIs & Services Dashboard](https://console.cloud.google.com/apis/dashboard) and click "Enable APIs and Services". Search for "Custom Search API" and click on it, then click "Enable". -5. Go to the [Credentials](https://console.cloud.google.com/apis/credentials) page and click "Create Credentials". Choose "API Key". -6. Copy the API key and set it as an environment variable named `GOOGLE_API_KEY` on your machine. See setting up environment variables below. -7. [Enable](https://console.developers.google.com/apis/api/customsearch.googleapis.com) the Custom Search API on your project. (Might need to wait few minutes to propagate) -8. Go to the [Custom Search Engine](https://cse.google.com/cse/all) page and click "Add". -9. Set up your search engine by following the prompts. You can choose to search the entire web or specific sites. -10. Once you've created your search engine, click on "Control Panel" and then "Basics". Copy the "Search engine ID" and set it as an environment variable named `CUSTOM_SEARCH_ENGINE_ID` on your machine. See setting up environment variables below. - -_Remember that your free daily custom search quota allows only up to 100 searches. To increase this limit, you need to assign a billing account to the project to profit from up to 10K daily searches._ - -### Setting up environment variables - -For Windows Users: - -```bash -setx GOOGLE_API_KEY "YOUR_GOOGLE_API_KEY" -setx CUSTOM_SEARCH_ENGINE_ID "YOUR_CUSTOM_SEARCH_ENGINE_ID" -``` - -For macOS and Linux users: - -```bash -export GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY" -export CUSTOM_SEARCH_ENGINE_ID="YOUR_CUSTOM_SEARCH_ENGINE_ID" -``` - -## Setting Your Cache Type - -By default, Auto-GPT is going to use LocalCache instead of redis or Pinecone. - -To switch to either, change the `MEMORY_BACKEND` env variable to the value that you want: - -* `local` (default) uses a local JSON cache file -* `pinecone` uses the Pinecone.io account you configured in your ENV settings -* `redis` will use the redis cache that you configured -* `milvus` will use the milvus cache that you configured -* `weaviate` will use the weaviate cache that you configured - -## Memory Backend Setup - -### Redis Setup -> _**CAUTION**_ \ -This is not intended to be publicly accessible and lacks security measures. Therefore, avoid exposing Redis to the internet without a password or at all -1. Install docker (or Docker Desktop on Windows) -2. 
-   ```bash
-   docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest
-   ```
-   > See https://hub.docker.com/r/redis/redis-stack-server for setting a password and additional configuration.
-3. Set the following settings in `.env`
-   > Replace **PASSWORD** in angled brackets (<>)
-   ```bash
-   MEMORY_BACKEND=redis
-   REDIS_HOST=localhost
-   REDIS_PORT=6379
-   REDIS_PASSWORD=<PASSWORD>
-   ```
-
-   You can optionally set `WIPE_REDIS_ON_START=False` to persist memory stored in Redis.
-
-You can specify the memory index for Redis using the following:
-```bash
-MEMORY_INDEX=<WHATEVER_YOU_WANT>
-```
-
-### 🌲 Pinecone API Key Setup
-
-Pinecone enables the storage of vast amounts of vector-based memory, allowing only relevant memories to be loaded for the agent at any given time.
-
-1. Go to [pinecone](https://app.pinecone.io/) and make an account if you don't already have one.
-2. Choose the `Starter` plan to avoid being charged.
-3. Find your API key and region under the default project in the left sidebar.
-
-In the `.env` file set:
-- `PINECONE_API_KEY`
-- `PINECONE_ENV` (example: _"us-east4-gcp"_)
-- `MEMORY_BACKEND=pinecone`
-
-Alternatively, you can set them from the command line (advanced):
-
-For Windows users:
-
-```bash
-setx PINECONE_API_KEY "<YOUR_PINECONE_API_KEY>"
-setx PINECONE_ENV "<YOUR_PINECONE_REGION>" # e.g.: "us-east4-gcp"
-setx MEMORY_BACKEND "pinecone"
-```
-
-For macOS and Linux users:
-
-```bash
-export PINECONE_API_KEY="<YOUR_PINECONE_API_KEY>"
-export PINECONE_ENV="<YOUR_PINECONE_REGION>" # e.g.: "us-east4-gcp"
-export MEMORY_BACKEND="pinecone"
-```
-
-### Milvus Setup
-
-[Milvus](https://milvus.io/) is an open-source, highly scalable vector database that stores huge amounts of vector-based memory and provides fast relevant search.
-
-- Set up a Milvus database. Keep your PyMilvus version and Milvus version the same to avoid compatibility issues.
-  - Set it up with the open-source [Install Milvus](https://milvus.io/docs/install_standalone-operator.md) guide,
-  - or set it up with [Zilliz Cloud](https://zilliz.com/cloud).
-- Set `MILVUS_ADDR` in `.env` to your Milvus address `host:port`.
-- Set `MEMORY_BACKEND` in `.env` to `milvus` to enable Milvus as the backend.
-
-**Optional:**
-- Set `MILVUS_COLLECTION` in `.env` to change the Milvus collection name if you want; `autogpt` is the default name.
-
-
-### Weaviate Setup
-[Weaviate](https://weaviate.io/) is an open-source vector database. It allows you to store data objects and vector embeddings from ML models and scales seamlessly to billions of data objects. [An instance of Weaviate can be created locally (using Docker), on Kubernetes, or using Weaviate Cloud Services](https://weaviate.io/developers/weaviate/quickstart).
-Although still experimental, [Embedded Weaviate](https://weaviate.io/developers/weaviate/installation/embedded) is supported, which allows the Auto-GPT process itself to start a Weaviate instance. To enable it, set `USE_WEAVIATE_EMBEDDED` to `True` and make sure you `pip install "weaviate-client>=3.15.4"`.
-
-#### Install the Weaviate client
-
-Install the Weaviate client before usage.
-
-```bash
-pip install weaviate-client
-```
-
-#### Setting up environment variables
-
-In your `.env` file set the following:
-
-```
-MEMORY_BACKEND=weaviate
-WEAVIATE_HOST="127.0.0.1" # the IP or domain of the running Weaviate instance
-WEAVIATE_PORT="8080"
-WEAVIATE_PROTOCOL="http"
-WEAVIATE_USERNAME="your username"
-WEAVIATE_PASSWORD="your password"
-WEAVIATE_API_KEY="your weaviate API key if you have one"
-WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate" # this is optional and indicates where the data should be persisted when running an embedded instance
-USE_WEAVIATE_EMBEDDED=False # set to True to run Embedded Weaviate
-MEMORY_INDEX="Autogpt" # name of the index to create for the application
-```
-
-## View Memory Usage
-
-View memory usage by using the `--debug` flag :)
-
-
-## 🧠 Memory pre-seeding
-Memory pre-seeding allows you to ingest files into memory and pre-seed it before running Auto-GPT.
-
-```bash
-# python data_ingestion.py -h
-usage: data_ingestion.py [-h] (--file FILE | --dir DIR) [--init] [--overlap OVERLAP] [--max_length MAX_LENGTH]
-
-Ingest a file or a directory with multiple files into memory. Make sure to set your .env before running this script.
-
-options:
-  -h, --help               show this help message and exit
-  --file FILE              The file to ingest.
-  --dir DIR                The directory containing the files to ingest.
-  --init                   Init the memory and wipe its content (default: False)
-  --overlap OVERLAP        The overlap size between chunks when ingesting files (default: 200)
-  --max_length MAX_LENGTH  The max_length of each chunk when ingesting files (default: 4000)
-
-# python data_ingestion.py --dir DataFolder --init --overlap 100 --max_length 2000
-```
-In the example above, the script initializes the memory and ingests all files within the `Auto-Gpt/autogpt/auto_gpt_workspace/DataFolder` directory into memory, with an overlap of 100 between chunks and a maximum chunk length of 2000.
-
-Note that you can also use the `--file` argument to ingest a single file into memory and that data_ingestion.py will only ingest files within the `/auto_gpt_workspace` directory.
-
-The DIR path is relative to the auto_gpt_workspace directory, so `python data_ingestion.py --dir . --init` will ingest everything in the `auto_gpt_workspace` directory.
-
-You can adjust the `max_length` and `overlap` parameters to fine-tune the way the documents are presented to the AI when it "recalls" that memory:
-- Increasing the overlap value allows the AI to access more contextual information from each chunk when recalling information, but will result in more chunks being created and therefore increase memory backend usage and OpenAI API requests.
-- Reducing the `max_length` value will create more chunks, which can save prompt tokens by allowing for more message history in the context.
-- Increasing the `max_length` value will provide the AI with more contextual information from each chunk, reducing the number of chunks created and saving on OpenAI API requests. However, this may also use more prompt tokens and decrease the overall context available to the AI.
-
-Memory pre-seeding is a technique for improving AI accuracy by ingesting relevant data into its memory. Chunks of data are split and added to memory, allowing the AI to access them quickly and generate more accurate responses. It's useful for large datasets or when specific information needs to be accessed quickly. Examples include ingesting API or GitHub documentation before running Auto-GPT.
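For instance, a hedged single-file run under the defaults documented above (the filename `api_docs.md` is illustrative):

```bash
# Ingest one file from auto_gpt_workspace without wiping existing memory
python data_ingestion.py --file api_docs.md --overlap 100 --max_length 2000
```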
-
-⚠️ If you use Redis as your memory, make sure to run Auto-GPT with `WIPE_REDIS_ON_START=False` set in your `.env` file.
-
-⚠️ For other memory backends, we currently forcefully wipe the memory when starting Auto-GPT. To ingest data with those memory backends, you can call the `data_ingestion.py` script anytime during an Auto-GPT run.
-
-Memories will be available to the AI immediately as they are ingested, even if ingested while Auto-GPT is running.
-
-## 💀 Continuous Mode ⚠️
-
-Run the AI **without** user authorization, 100% automated.
-Continuous mode is NOT recommended.
-It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorize.
-Use at your own risk.
-
-1. Run the `autogpt` Python module in your terminal:
-
-   ```bash
-   python -m autogpt --speak --continuous
-   ```
-
-2. To exit the program, press Ctrl + C
-
-## GPT3.5 ONLY Mode
-
-If you don't have access to the GPT-4 API, this mode will allow you to use Auto-GPT!
-
-```bash
-python -m autogpt --speak --gpt3only
-```
-
-It is recommended to use a virtual machine for tasks that require high security measures to prevent any potential harm to the main computer's system and data.
-
-## 🖼 Image Generation
-
-By default, Auto-GPT uses DALL-E for image generation. To use Stable Diffusion, a [Hugging Face API Token](https://huggingface.co/settings/tokens) is required.
-
-Once you have a token, set these variables in your `.env`:
-
-```bash
-IMAGE_PROVIDER=sd
-HUGGINGFACE_API_TOKEN="YOUR_HUGGINGFACE_API_TOKEN"
-```
-
-## Selenium
-```bash
-sudo Xvfb :10 -ac -screen 0 1024x768x24 & DISPLAY=:10 <YOUR_CLIENT>
-```
+[docs/setup]: https://docs.agpt.co/setup/
+[docs/usage]: https://docs.agpt.co/usage/
+[docs/plugins]: https://docs.agpt.co/plugins/

## ⚠️ Limitations
@@ -461,7 +126,6 @@ This experiment aims to showcase the potential of GPT-4 but comes with some limi

## 🛡 Disclaimer

-Disclaimer
This project, Auto-GPT, is an experimental application and is provided "as-is" without any warranty, express or implied. By using this software, you agree to assume all risks associated with its use, including but not limited to data loss, system failure, or any other issues that may arise.

The developers and contributors of this project do not accept any responsibility or liability for any losses, damages, or other consequences that may occur as a result of using this software. You are solely responsible for any decisions and actions taken based on the information provided by Auto-GPT.

@@ -486,43 +150,3 @@ We look forward to connecting with you and hearing your thoughts, ideas, and exp

Star History Chart

-
-## Run tests
-
-To run all tests, run the following command:
-
-```bash
-pytest
-```
-
-To run tests without integration tests:
-
-```bash
-pytest --without-integration
-```
-
-To run tests without slow integration tests:
-
-```bash
-pytest --without-slow-integration
-```
-
-To run tests and see coverage, run the following command:
-
-```bash
-pytest --cov=autogpt --without-integration --without-slow-integration
-```
-
-## Run linter
-
-This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. We currently use the following rules: `E303,W293,W291,W292,E305,E231,E302`. See the [flake8 rules](https://www.flake8rules.com/) for more information.
-
-To run the linter, run the following command:
-
-```bash
-flake8 autogpt/ tests/
-
-# Or, if you want to run flake8 with the same configuration as the CI:
-flake8 autogpt/ tests/ --select E303,W293,W291,W292,E305,E231,E302
-```
diff --git a/autogpt/__init__.py b/autogpt/__init__.py
index e69de29b..909f8bf4 100644
--- a/autogpt/__init__.py
+++ b/autogpt/__init__.py
@@ -0,0 +1,14 @@
+import os
+import random
+import sys
+
+from dotenv import load_dotenv
+
+if "pytest" in sys.argv or "pytest" in sys.modules or os.getenv("CI"):
+    print("Setting random seed to 42")
+    random.seed(42)
+
+# Load the user's .env file into environment variables
+load_dotenv(verbose=True, override=True)
+
+del load_dotenv
diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py
index ee7885f8..dbae1198 100644
--- a/autogpt/agent/agent.py
+++ b/autogpt/agent/agent.py
@@ -1,14 +1,16 @@
 from colorama import Fore, Style

 from autogpt.app import execute_command, get_command
-from autogpt.chat import chat_with_ai, create_chat_message
 from autogpt.config import Config
 from autogpt.json_utils.json_fix_llm import fix_json_using_multiple_techniques
-from autogpt.json_utils.utilities import validate_json
+from autogpt.json_utils.utilities import LLM_DEFAULT_RESPONSE_FORMAT, validate_json
+from autogpt.llm import chat_with_ai, create_chat_completion, create_chat_message
+from autogpt.llm.token_counter import count_string_tokens
 from autogpt.logs import logger, print_assistant_thoughts
 from autogpt.speech import say_text
 from autogpt.spinner import Spinner
 from autogpt.utils import clean_input
+from autogpt.workspace import Workspace


 class Agent:
@@ -19,18 +21,25 @@ class Agent:
         memory: The memory object to use.
         full_message_history: The full message history.
         next_action_count: The number of actions to execute.
-        system_prompt: The system prompt is the initial prompt that defines everything the AI needs to know to achieve its task successfully.
-        Currently, the dynamic and customizable information in the system prompt are ai_name, description and goals.
+        system_prompt: The system prompt is the initial prompt that defines everything
+          the AI needs to know to achieve its task successfully.
+          Currently, the dynamic and customizable information in the system prompt are
+          ai_name, description and goals.

-        triggering_prompt: The last sentence the AI will see before answering. For Auto-GPT, this prompt is:
-        Determine which next command to use, and respond using the format specified above:
-        The triggering prompt is not part of the system prompt because between the system prompt and the triggering
-        prompt we have contextual information that can distract the AI and make it forget that its goal is to find the next task to achieve.
+        triggering_prompt: The last sentence the AI will see before answering.
+          For Auto-GPT, this prompt is:
+          Determine which next command to use, and respond using the format specified
+          above:
+          The triggering prompt is not part of the system prompt because between the
+          system prompt and the triggering
+          prompt we have contextual information that can distract the AI and make it
+          forget that its goal is to find the next task to achieve.

         SYSTEM PROMPT
         CONTEXTUAL INFORMATION (memory, previous conversations, anything relevant)
         TRIGGERING PROMPT

-        The triggering prompt reminds the AI about its short term meta task (defining the next task)
+        The triggering prompt reminds the AI about its short term meta task
+        (defining the next task)
     """

     def __init__(
@@ -39,15 +48,26 @@ class Agent:
         memory,
         full_message_history,
         next_action_count,
+        command_registry,
+        config,
         system_prompt,
         triggering_prompt,
+        workspace_directory,
     ):
+        cfg = Config()
         self.ai_name = ai_name
         self.memory = memory
+        self.summary_memory = (
+            "I was created."  # Initial memory necessary to avoid hallucination
+        )
+        self.last_memory_index = 0
         self.full_message_history = full_message_history
         self.next_action_count = next_action_count
+        self.command_registry = command_registry
+        self.config = config
         self.system_prompt = system_prompt
         self.triggering_prompt = triggering_prompt
+        self.workspace = Workspace(workspace_directory, cfg.restrict_to_workspace)

     def start_interaction_loop(self):
         # Interaction Loop
@@ -69,10 +89,10 @@ class Agent:
                     "Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}"
                 )
                 break
-            # Send message to AI, get response
             with Spinner("Thinking... "):
                 assistant_reply = chat_with_ai(
+                    self,
                     self.system_prompt,
                     self.triggering_prompt,
                     self.full_message_history,
@@ -81,60 +101,92 @@ class Agent:
                 )

            # TODO: This hardcodes the model to use GPT3.5. Make this an argument
            assistant_reply_json = fix_json_using_multiple_techniques(assistant_reply)
+            for plugin in cfg.plugins:
+                if not plugin.can_handle_post_planning():
+                    continue
+                assistant_reply_json = plugin.post_planning(self, assistant_reply_json)

             # Print Assistant thoughts
             if assistant_reply_json != {}:
-                validate_json(assistant_reply_json, "llm_response_format_1")
+                validate_json(assistant_reply_json, LLM_DEFAULT_RESPONSE_FORMAT)
                 # Get command name and arguments
                 try:
-                    print_assistant_thoughts(self.ai_name, assistant_reply_json)
+                    print_assistant_thoughts(
+                        self.ai_name, assistant_reply_json, cfg.speak_mode
+                    )
                     command_name, arguments = get_command(assistant_reply_json)
-                    # command_name, arguments = assistant_reply_json_valid["command"]["name"], assistant_reply_json_valid["command"]["args"]
                     if cfg.speak_mode:
                         say_text(f"I want to execute {command_name}")
+
+                    arguments = self._resolve_pathlike_command_args(arguments)
+
                 except Exception as e:
                     logger.error("Error: \n", str(e))

             if not cfg.continuous_mode and self.next_action_count == 0:
-                ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
+                # ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
                 # Get key press: Prompt the user to press enter to continue or escape
                 # to exit
+                self.user_input = ""
                 logger.typewriter_log(
                     "NEXT ACTION: ",
                     Fore.CYAN,
                     f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} "
                     f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                 )
-                print(
-                    "Enter 'y' to authorise command, 'y -N' to run N continuous "
-                    "commands, 'n' to exit program, or enter feedback for "
-                    f"{self.ai_name}...",
-                    flush=True,
+
+                logger.info(
+                    "Enter 'y' to authorise command, 'y -N' to run N continuous commands, 's' to run self-feedback commands, "
+                    "'n' to exit program, or enter feedback for "
+                    f"{self.ai_name}..."
                 )
                 while True:
-                    console_input = clean_input(
-                        Fore.MAGENTA + "Input:" + Style.RESET_ALL
-                    )
-                    if console_input.lower().strip() == "y":
+                    if cfg.chat_messages_enabled:
+                        console_input = clean_input("Waiting for your response...")
+                    else:
+                        console_input = clean_input(
+                            Fore.MAGENTA + "Input:" + Style.RESET_ALL
+                        )
+                    if console_input.lower().strip() == cfg.authorise_key:
                         user_input = "GENERATE NEXT COMMAND JSON"
                         break
+                    elif console_input.lower().strip() == "s":
+                        logger.typewriter_log(
+                            "-=-=-=-=-=-=-= THOUGHTS, REASONING, PLAN AND CRITICISM WILL NOW BE VERIFIED BY AGENT -=-=-=-=-=-=-=",
+                            Fore.GREEN,
+                            "",
+                        )
+                        thoughts = assistant_reply_json.get("thoughts", {})
+                        self_feedback_resp = self.get_self_feedback(
+                            thoughts, cfg.fast_llm_model
+                        )
+                        logger.typewriter_log(
+                            f"SELF FEEDBACK: {self_feedback_resp}",
+                            Fore.YELLOW,
+                            "",
+                        )
+                        if self_feedback_resp[0].lower().strip() == cfg.authorise_key:
+                            user_input = "GENERATE NEXT COMMAND JSON"
+                        else:
+                            user_input = self_feedback_resp
+                        break
                     elif console_input.lower().strip() == "":
-                        print("Invalid input format.")
+                        logger.warn("Invalid input format.")
                         continue
-                    elif console_input.lower().startswith("y -"):
+                    elif console_input.lower().startswith(f"{cfg.authorise_key} -"):
                        try:
                            self.next_action_count = abs(
                                int(console_input.split(" ")[1])
                            )
                            user_input = "GENERATE NEXT COMMAND JSON"
                        except ValueError:
-                            print(
+                            logger.warn(
                                "Invalid input format. Please enter 'y -n' where n is"
                                " the number of continuous tasks."
) continue break - elif console_input.lower() == "n": + elif console_input.lower() == cfg.exit_key: user_input = "EXIT" break else: @@ -149,7 +201,7 @@ class Agent: "", ) elif user_input == "EXIT": - print("Exiting...", flush=True) + logger.info("Exiting...") break else: # Print command @@ -168,21 +220,37 @@ class Agent: elif command_name == "human_feedback": result = f"Human feedback: {user_input}" else: - result = ( - f"Command {command_name} returned: " - f"{execute_command(command_name, arguments)}" + for plugin in cfg.plugins: + if not plugin.can_handle_pre_command(): + continue + command_name, arguments = plugin.pre_command( + command_name, arguments + ) + command_result = execute_command( + self.command_registry, + command_name, + arguments, + self.config.prompt_generator, ) + result = f"Command {command_name} returned: " f"{command_result}" + + result_tlength = count_string_tokens( + str(command_result), cfg.fast_llm_model + ) + memory_tlength = count_string_tokens( + str(self.summary_memory), cfg.fast_llm_model + ) + if result_tlength + memory_tlength + 600 > cfg.fast_token_limit: + result = f"Failure: command {command_name} returned too much output. \ + Do not execute this command again with the same arguments." + + for plugin in cfg.plugins: + if not plugin.can_handle_post_command(): + continue + result = plugin.post_command(command_name, result) if self.next_action_count > 0: self.next_action_count -= 1 - memory_to_add = ( - f"Assistant Reply: {assistant_reply} " - f"\nResult: {result} " - f"\nHuman Feedback: {user_input} " - ) - - self.memory.add(memory_to_add) - # Check if there's a result from the command append it to the message # history if result is not None: @@ -195,3 +263,39 @@ class Agent: logger.typewriter_log( "SYSTEM: ", Fore.YELLOW, "Unable to execute command" ) + + def _resolve_pathlike_command_args(self, command_args): + if "directory" in command_args and command_args["directory"] in {"", "/"}: + command_args["directory"] = str(self.workspace.root) + else: + for pathlike in ["filename", "directory", "clone_path"]: + if pathlike in command_args: + command_args[pathlike] = str( + self.workspace.get_path(command_args[pathlike]) + ) + return command_args + + def get_self_feedback(self, thoughts: dict, llm_model: str) -> str: + """Generates a feedback response based on the provided thoughts dictionary. + This method takes in a dictionary of thoughts containing keys such as 'reasoning', + 'plan', 'thoughts', and 'criticism'. It combines these elements into a single + feedback message and uses the create_chat_completion() function to generate a + response based on the input message. + Args: + thoughts (dict): A dictionary containing thought elements like reasoning, + plan, thoughts, and criticism. + Returns: + str: A feedback response generated using the provided thoughts dictionary. + """ + ai_role = self.config.ai_role + + feedback_prompt = f"Below is a message from an AI agent with the role of {ai_role}. Please review the provided Thought, Reasoning, Plan, and Criticism. If these elements accurately contribute to the successful execution of the assumed role, respond with the letter 'Y' followed by a space, and then explain why it is effective. If the provided information is not suitable for achieving the role's objectives, please provide one or more sentences addressing the issue and suggesting a resolution." 
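+        # Editorial note (hedged, not part of the original patch): the agent's own
+        # thought, reasoning, plan and criticism strings are concatenated verbatim
+        # below and appended to this prompt, so each self-feedback round costs one
+        # extra chat completion against llm_model.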
+ reasoning = thoughts.get("reasoning", "") + plan = thoughts.get("plan", "") + thought = thoughts.get("thoughts", "") + criticism = thoughts.get("criticism", "") + feedback_thoughts = thought + reasoning + plan + criticism + return create_chat_completion( + [{"role": "user", "content": feedback_prompt + feedback_thoughts}], + llm_model, + ) diff --git a/autogpt/agent/agent_manager.py b/autogpt/agent/agent_manager.py index 898767a4..17fb35d8 100644 --- a/autogpt/agent/agent_manager.py +++ b/autogpt/agent/agent_manager.py @@ -1,10 +1,11 @@ """Agent manager for managing GPT agents""" from __future__ import annotations -from typing import Union +from typing import List -from autogpt.config.config import Singleton -from autogpt.llm_utils import create_chat_completion +from autogpt.config.config import Config +from autogpt.llm import Message, create_chat_completion +from autogpt.singleton import Singleton class AgentManager(metaclass=Singleton): @@ -13,6 +14,7 @@ class AgentManager(metaclass=Singleton): def __init__(self): self.next_key = 0 self.agents = {} # key, (task, full_message_history, model) + self.cfg = Config() # Create new GPT agent # TODO: Centralise use of create_chat_completion() to globally enforce token limit @@ -28,19 +30,32 @@ class AgentManager(metaclass=Singleton): Returns: The key of the new agent """ - messages = [ + messages: List[Message] = [ {"role": "user", "content": prompt}, ] - + for plugin in self.cfg.plugins: + if not plugin.can_handle_pre_instruction(): + continue + if plugin_messages := plugin.pre_instruction(messages): + messages.extend(iter(plugin_messages)) # Start GPT instance agent_reply = create_chat_completion( model=model, messages=messages, ) - # Update full message history messages.append({"role": "assistant", "content": agent_reply}) + plugins_reply = "" + for i, plugin in enumerate(self.cfg.plugins): + if not plugin.can_handle_on_instruction(): + continue + if plugin_result := plugin.on_instruction(messages): + sep = "\n" if i else "" + plugins_reply = f"{plugins_reply}{sep}{plugin_result}" + + if plugins_reply and plugins_reply != "": + messages.append({"role": "assistant", "content": plugins_reply}) key = self.next_key # This is done instead of len(agents) to make keys unique even if agents # are deleted @@ -48,6 +63,11 @@ class AgentManager(metaclass=Singleton): self.agents[key] = (task, messages, model) + for plugin in self.cfg.plugins: + if not plugin.can_handle_post_instruction(): + continue + agent_reply = plugin.post_instruction(agent_reply) + return key, agent_reply def message_agent(self, key: str | int, message: str) -> str: @@ -65,15 +85,37 @@ class AgentManager(metaclass=Singleton): # Add user message to message history before sending to agent messages.append({"role": "user", "content": message}) + for plugin in self.cfg.plugins: + if not plugin.can_handle_pre_instruction(): + continue + if plugin_messages := plugin.pre_instruction(messages): + for plugin_message in plugin_messages: + messages.append(plugin_message) + # Start GPT instance agent_reply = create_chat_completion( model=model, messages=messages, ) - # Update full message history messages.append({"role": "assistant", "content": agent_reply}) + plugins_reply = agent_reply + for i, plugin in enumerate(self.cfg.plugins): + if not plugin.can_handle_on_instruction(): + continue + if plugin_result := plugin.on_instruction(messages): + sep = "\n" if i else "" + plugins_reply = f"{plugins_reply}{sep}{plugin_result}" + # Update full message history + if plugins_reply and plugins_reply 
!= "": + messages.append({"role": "assistant", "content": plugins_reply}) + + for plugin in self.cfg.plugins: + if not plugin.can_handle_post_instruction(): + continue + agent_reply = plugin.post_instruction(agent_reply) + return agent_reply def list_agents(self) -> list[tuple[str | int, str]]: @@ -86,7 +128,7 @@ class AgentManager(metaclass=Singleton): # Return a list of agent keys and their tasks return [(key, task) for key, (task, _, _) in self.agents.items()] - def delete_agent(self, key: Union[str, int]) -> bool: + def delete_agent(self, key: str | int) -> bool: """Delete an agent from the agent manager Args: diff --git a/autogpt/app.py b/autogpt/app.py index 58d9f716..150bfca3 100644 --- a/autogpt/app.py +++ b/autogpt/app.py @@ -3,34 +3,15 @@ import json from typing import Dict, List, NoReturn, Union from autogpt.agent.agent_manager import AgentManager -from autogpt.commands.analyze_code import analyze_code -from autogpt.commands.audio_text import read_audio_from_file -from autogpt.commands.execute_code import ( - execute_python_file, - execute_shell, - execute_shell_popen, -) -from autogpt.commands.file_operations import ( - append_to_file, - delete_file, - download_file, - read_file, - search_files, - write_to_file, -) -from autogpt.commands.git_operations import clone_repository -from autogpt.commands.google_search import google_official_search, google_search -from autogpt.commands.image_gen import generate_image -from autogpt.commands.improve_code import improve_code -from autogpt.commands.twitter import send_tweet +from autogpt.commands.command import CommandRegistry, command from autogpt.commands.web_requests import scrape_links, scrape_text -from autogpt.commands.web_selenium import browse_website -from autogpt.commands.write_tests import write_tests from autogpt.config import Config -from autogpt.json_utils.json_fix_llm import fix_and_parse_json +from autogpt.logs import logger from autogpt.memory import get_memory from autogpt.processing.text import summarize_text +from autogpt.prompts.generator import PromptGenerator from autogpt.speech import say_text +from autogpt.url_utils.validators import validate_url CFG = Config() AGENT_MANAGER = AgentManager() @@ -108,7 +89,12 @@ def map_command_synonyms(command_name: str): return command_name -def execute_command(command_name: str, arguments): +def execute_command( + command_registry: CommandRegistry, + command_name: str, + arguments, + prompt: PromptGenerator, +): """Execute the command and return the result Args: @@ -119,105 +105,30 @@ def execute_command(command_name: str, arguments): str: The result of the command """ try: + cmd = command_registry.commands.get(command_name) + + # If the command is found, call it with the provided arguments + if cmd: + return cmd(**arguments) + + # TODO: Remove commands below after they are moved to the command registry. 
command_name = map_command_synonyms(command_name.lower()) - if command_name == "google": - # Check if the Google API key is set and use the official search method - # If the API key is not set or has only whitespaces, use the unofficial - # search method - key = CFG.google_api_key - if key and key.strip() and key != "your-google-api-key": - google_result = google_official_search(arguments["input"]) - return google_result - else: - google_result = google_search(arguments["input"]) - # google_result can be a list or a string depending on the search results - if isinstance(google_result, list): - safe_message = [ - google_result_single.encode("utf-8", "ignore") - for google_result_single in google_result - ] - else: - safe_message = google_result.encode("utf-8", "ignore") + if command_name == "memory_add": + return get_memory(CFG).add(arguments["string"]) - return safe_message.decode("utf-8") - elif command_name == "memory_add": - memory = get_memory(CFG) - return memory.add(arguments["string"]) - elif command_name == "start_agent": - return start_agent( - arguments["name"], arguments["task"], arguments["prompt"] - ) - elif command_name == "message_agent": - return message_agent(arguments["key"], arguments["message"]) - elif command_name == "list_agents": - return list_agents() - elif command_name == "delete_agent": - return delete_agent(arguments["key"]) - elif command_name == "get_text_summary": - return get_text_summary(arguments["url"], arguments["question"]) - elif command_name == "get_hyperlinks": - return get_hyperlinks(arguments["url"]) - elif command_name == "clone_repository": - return clone_repository( - arguments["repository_url"], arguments["clone_path"] - ) - elif command_name == "read_file": - return read_file(arguments["file"]) - elif command_name == "write_to_file": - return write_to_file(arguments["file"], arguments["text"]) - elif command_name == "append_to_file": - return append_to_file(arguments["file"], arguments["text"]) - elif command_name == "delete_file": - return delete_file(arguments["file"]) - elif command_name == "search_files": - return search_files(arguments["directory"]) - elif command_name == "download_file": - if not CFG.allow_downloads: - return "Error: You do not have user authorization to download files locally." - return download_file(arguments["url"], arguments["file"]) - elif command_name == "browse_website": - return browse_website(arguments["url"], arguments["question"]) # TODO: Change these to take in a file rather than pasted code, if # non-file is given, return instructions "Input should be a python - # filepath, write your code to file and try again" - elif command_name == "analyze_code": - return analyze_code(arguments["code"]) - elif command_name == "improve_code": - return improve_code(arguments["suggestions"], arguments["code"]) - elif command_name == "write_tests": - return write_tests(arguments["code"], arguments.get("focus")) - elif command_name == "execute_python_file": # Add this command - return execute_python_file(arguments["file"]) - elif command_name == "execute_shell": - if CFG.execute_local_commands: - return execute_shell(arguments["command_line"]) - else: - return ( - "You are not allowed to run local shell commands. To execute" - " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' " - "in your config. Do not attempt to bypass the restriction." 
- ) - elif command_name == "execute_shell_popen": - if CFG.execute_local_commands: - return execute_shell_popen(arguments["command_line"]) - else: - return ( - "You are not allowed to run local shell commands. To execute" - " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' " - "in your config. Do not attempt to bypass the restriction." - ) - elif command_name == "read_audio_from_file": - return read_audio_from_file(arguments["file"]) - elif command_name == "generate_image": - return generate_image(arguments["prompt"]) - elif command_name == "send_tweet": - return send_tweet(arguments["text"]) - elif command_name == "do_nothing": - return "No action performed." + # filepath, write your code to file and try again elif command_name == "task_complete": shutdown() else: + for command in prompt.commands: + if ( + command_name == command["label"].lower() + or command_name == command["name"].lower() + ): + return command["function"](**arguments) return ( f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'" " list for available commands and only respond in the specified JSON" @@ -227,6 +138,10 @@ def execute_command(command_name: str, arguments): return f"Error: {str(e)}" +@command( + "get_text_summary", "Get text summary", '"url": "", "question": ""' +) +@validate_url def get_text_summary(url: str, question: str) -> str: """Return the results of a Google search @@ -242,6 +157,8 @@ def get_text_summary(url: str, question: str) -> str: return f""" "Result" : {summary}""" +@command("get_hyperlinks", "Get text summary", '"url": ""') +@validate_url def get_hyperlinks(url: str) -> Union[str, List[str]]: """Return the results of a Google search @@ -256,10 +173,15 @@ def get_hyperlinks(url: str) -> Union[str, List[str]]: def shutdown() -> NoReturn: """Shut down the program""" - print("Shutting down...") + logger.info("Shutting down...") quit() +@command( + "start_agent", + "Start GPT Agent", + '"name": "", "task": "", "prompt": ""', +) def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) -> str: """Start an agent with a given name, task, and prompt @@ -292,6 +214,7 @@ def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) -> return f"Agent {name} created with key {key}. First response: {agent_response}" +@command("message_agent", "Message GPT Agent", '"key": "", "message": ""') def message_agent(key: str, message: str) -> str: """Message an agent with a given key and message""" # Check if the key is a valid integer @@ -306,7 +229,8 @@ def message_agent(key: str, message: str) -> str: return agent_response -def list_agents(): +@command("list_agents", "List GPT Agents", "") +def list_agents() -> str: """List all agents Returns: @@ -317,6 +241,7 @@ def list_agents(): ) +@command("delete_agent", "Delete GPT Agent", '"key": ""') def delete_agent(key: str) -> str: """Delete an agent with a given key diff --git a/autogpt/cli.py b/autogpt/cli.py index 6fe9ecbb..d9294516 100644 --- a/autogpt/cli.py +++ b/autogpt/cli.py @@ -47,6 +47,19 @@ import click is_flag=True, help="Specifies whether to suppress the output of latest news on startup.", ) +@click.option( + # TODO: this is a hidden option for now, necessary for integration testing. + # We should make this public once we're ready to roll out agent specific workspaces. 
+ "--workspace-directory", + "-w", + type=click.Path(), + hidden=True, +) +@click.option( + "--install-plugin-deps", + is_flag=True, + help="Installs external dependencies for 3rd party plugins.", +) @click.pass_context def main( ctx: click.Context, @@ -62,6 +75,8 @@ def main( browser_name: str, allow_downloads: bool, skip_news: bool, + workspace_directory: str, + install_plugin_deps: bool, ) -> None: """ Welcome to AutoGPT an experimental open-source application showcasing the capabilities of the GPT-4 pushing the boundaries of AI. @@ -69,24 +84,10 @@ def main( Start an Auto-GPT assistant. """ # Put imports inside function to avoid importing everything when starting the CLI - import logging - import sys - - from colorama import Fore - - from autogpt.agent.agent import Agent - from autogpt.config import Config, check_openai_api_key - from autogpt.configurator import create_config - from autogpt.logs import logger - from autogpt.memory import get_memory - from autogpt.prompt import construct_prompt - from autogpt.utils import get_current_git_branch, get_latest_bulletin + from autogpt.main import run_auto_gpt if ctx.invoked_subcommand is None: - cfg = Config() - # TODO: fill in llm values here - check_openai_api_key() - create_config( + run_auto_gpt( continuous, continuous_limit, ai_settings, @@ -99,56 +100,9 @@ def main( browser_name, allow_downloads, skip_news, + workspace_directory, + install_plugin_deps, ) - logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO) - ai_name = "" - if not cfg.skip_news: - motd = get_latest_bulletin() - if motd: - logger.typewriter_log("NEWS: ", Fore.GREEN, motd) - git_branch = get_current_git_branch() - if git_branch and git_branch != "stable": - logger.typewriter_log( - "WARNING: ", - Fore.RED, - f"You are running on `{git_branch}` branch " - "- this is not a supported branch.", - ) - if sys.version_info < (3, 10): - logger.typewriter_log( - "WARNING: ", - Fore.RED, - "You are running on an older version of Python. " - "Some people have observed problems with certain " - "parts of Auto-GPT with this version. " - "Please consider upgrading to Python 3.10 or higher.", - ) - system_prompt = construct_prompt() - # print(prompt) - # Initialize variables - full_message_history = [] - next_action_count = 0 - # Make a constant: - triggering_prompt = ( - "Determine which next command to use, and respond using the" - " format specified above:" - ) - # Initialize memory and make sure it is empty. 
- # this is particularly important for indexing and referencing pinecone memory - memory = get_memory(cfg, init=True) - logger.typewriter_log( - "Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}" - ) - logger.typewriter_log("Using Browser:", Fore.GREEN, cfg.selenium_web_browser) - agent = Agent( - ai_name=ai_name, - memory=memory, - full_message_history=full_message_history, - next_action_count=next_action_count, - system_prompt=system_prompt, - triggering_prompt=triggering_prompt, - ) - agent.start_interaction_loop() if __name__ == "__main__": diff --git a/autogpt/commands/analyze_code.py b/autogpt/commands/analyze_code.py index e02ea4c5..cff8a737 100644 --- a/autogpt/commands/analyze_code.py +++ b/autogpt/commands/analyze_code.py @@ -1,9 +1,15 @@ """Code evaluation module.""" from __future__ import annotations -from autogpt.llm_utils import call_ai_function +from autogpt.commands.command import command +from autogpt.llm import call_ai_function +@command( + "analyze_code", + "Analyze Code", + '"code": ""', +) def analyze_code(code: str) -> list[str]: """ A function that takes in a string and returns a response from create chat @@ -16,10 +22,10 @@ def analyze_code(code: str) -> list[str]: improve the code. """ - function_string = "def analyze_code(code: str) -> List[str]:" + function_string = "def analyze_code(code: str) -> list[str]:" args = [code] description_string = ( - "Analyzes the given code and returns a list of suggestions" " for improvements." + "Analyzes the given code and returns a list of suggestions for improvements." ) return call_ai_function(function_string, args, description_string) diff --git a/autogpt/commands/audio_text.py b/autogpt/commands/audio_text.py index cae32d4e..0a8640cf 100644 --- a/autogpt/commands/audio_text.py +++ b/autogpt/commands/audio_text.py @@ -1,24 +1,49 @@ +"""Commands for converting audio to text.""" import json import requests +from autogpt.commands.command import command from autogpt.config import Config -from autogpt.workspace import path_in_workspace -cfg = Config() +CFG = Config() -def read_audio_from_file(audio_path): - audio_path = path_in_workspace(audio_path) - with open(audio_path, "rb") as audio_file: +@command( + "read_audio_from_file", + "Convert Audio to text", + '"filename": ""', + CFG.huggingface_audio_to_text_model, + "Configure huggingface_audio_to_text_model.", +) +def read_audio_from_file(filename: str) -> str: + """ + Convert audio to text. + + Args: + filename (str): The path to the audio file + + Returns: + str: The text from the audio + """ + with open(filename, "rb") as audio_file: audio = audio_file.read() return read_audio(audio) -def read_audio(audio): - model = cfg.huggingface_audio_to_text_model +def read_audio(audio: bytes) -> str: + """ + Convert audio to text. 
+ + Args: + audio (bytes): The audio to convert + + Returns: + str: The text from the audio + """ + model = CFG.huggingface_audio_to_text_model api_url = f"https://api-inference.huggingface.co/models/{model}" - api_token = cfg.huggingface_api_token + api_token = CFG.huggingface_api_token headers = {"Authorization": f"Bearer {api_token}"} if api_token is None: @@ -33,4 +58,4 @@ def read_audio(audio): ) text = json.loads(response.content.decode("utf-8"))["text"] - return "The audio says: " + text + return f"The audio says: {text}" diff --git a/autogpt/commands/command.py b/autogpt/commands/command.py new file mode 100644 index 00000000..22ebace5 --- /dev/null +++ b/autogpt/commands/command.py @@ -0,0 +1,156 @@ +import functools +import importlib +import inspect +from typing import Any, Callable, Optional + +# Unique identifier for auto-gpt commands +AUTO_GPT_COMMAND_IDENTIFIER = "auto_gpt_command" + + +class Command: + """A class representing a command. + + Attributes: + name (str): The name of the command. + description (str): A brief description of what the command does. + signature (str): The signature of the function that the command executes. Defaults to None. + """ + + def __init__( + self, + name: str, + description: str, + method: Callable[..., Any], + signature: str = "", + enabled: bool = True, + disabled_reason: Optional[str] = None, + ): + self.name = name + self.description = description + self.method = method + self.signature = signature if signature else str(inspect.signature(self.method)) + self.enabled = enabled + self.disabled_reason = disabled_reason + + def __call__(self, *args, **kwargs) -> Any: + if not self.enabled: + return f"Command '{self.name}' is disabled: {self.disabled_reason}" + return self.method(*args, **kwargs) + + def __str__(self) -> str: + return f"{self.name}: {self.description}, args: {self.signature}" + + +class CommandRegistry: + """ + The CommandRegistry class is a manager for a collection of Command objects. + It allows the registration, modification, and retrieval of Command objects, + as well as the scanning and loading of command plugins from a specified + directory. + """ + + def __init__(self): + self.commands = {} + + def _import_module(self, module_name: str) -> Any: + return importlib.import_module(module_name) + + def _reload_module(self, module: Any) -> Any: + return importlib.reload(module) + + def register(self, cmd: Command) -> None: + self.commands[cmd.name] = cmd + + def unregister(self, command_name: str): + if command_name in self.commands: + del self.commands[command_name] + else: + raise KeyError(f"Command '{command_name}' not found in registry.") + + def reload_commands(self) -> None: + """Reloads all loaded command plugins.""" + for cmd_name in self.commands: + cmd = self.commands[cmd_name] + module = self._import_module(cmd.__module__) + reloaded_module = self._reload_module(module) + if hasattr(reloaded_module, "register"): + reloaded_module.register(self) + + def get_command(self, name: str) -> Callable[..., Any]: + return self.commands[name] + + def call(self, command_name: str, **kwargs) -> Any: + if command_name not in self.commands: + raise KeyError(f"Command '{command_name}' not found in registry.") + command = self.commands[command_name] + return command(**kwargs) + + def command_prompt(self) -> str: + """ + Returns a string representation of all registered `Command` objects for use in a prompt + """ + commands_list = [ + f"{idx + 1}. 
{str(cmd)}" for idx, cmd in enumerate(self.commands.values()) + ] + return "\n".join(commands_list) + + def import_commands(self, module_name: str) -> None: + """ + Imports the specified Python module containing command plugins. + + This method imports the associated module and registers any functions or + classes that are decorated with the `AUTO_GPT_COMMAND_IDENTIFIER` attribute + as `Command` objects. The registered `Command` objects are then added to the + `commands` dictionary of the `CommandRegistry` object. + + Args: + module_name (str): The name of the module to import for command plugins. + """ + + module = importlib.import_module(module_name) + + for attr_name in dir(module): + attr = getattr(module, attr_name) + # Register decorated functions + if hasattr(attr, AUTO_GPT_COMMAND_IDENTIFIER) and getattr( + attr, AUTO_GPT_COMMAND_IDENTIFIER + ): + self.register(attr.command) + # Register command classes + elif ( + inspect.isclass(attr) and issubclass(attr, Command) and attr != Command + ): + cmd_instance = attr() + self.register(cmd_instance) + + +def command( + name: str, + description: str, + signature: str = "", + enabled: bool = True, + disabled_reason: Optional[str] = None, +) -> Callable[..., Any]: + """The command decorator is used to create Command objects from ordinary functions.""" + + def decorator(func: Callable[..., Any]) -> Command: + cmd = Command( + name=name, + description=description, + method=func, + signature=signature, + enabled=enabled, + disabled_reason=disabled_reason, + ) + + @functools.wraps(func) + def wrapper(*args, **kwargs) -> Any: + return func(*args, **kwargs) + + wrapper.command = cmd + + setattr(wrapper, AUTO_GPT_COMMAND_IDENTIFIER, True) + + return wrapper + + return decorator diff --git a/autogpt/commands/execute_code.py b/autogpt/commands/execute_code.py index 11266f85..6b141dba 100644 --- a/autogpt/commands/execute_code.py +++ b/autogpt/commands/execute_code.py @@ -1,36 +1,39 @@ """Execute code in a Docker container""" import os import subprocess +from pathlib import Path import docker from docker.errors import ImageNotFound -from autogpt.workspace import WORKSPACE_PATH, path_in_workspace +from autogpt.commands.command import command +from autogpt.config import Config +from autogpt.logs import logger + +CFG = Config() -def execute_python_file(file: str) -> str: +@command("execute_python_file", "Execute Python File", '"filename": ""') +def execute_python_file(filename: str) -> str: """Execute a Python file in a Docker container and return the output Args: - file (str): The name of the file to execute + filename (str): The name of the file to execute Returns: str: The output of the file """ + logger.info(f"Executing file '{filename}'") - print(f"Executing file '{file}' in workspace '{WORKSPACE_PATH}'") - - if not file.endswith(".py"): + if not filename.endswith(".py"): return "Error: Invalid file type. Only .py files are allowed." - file_path = path_in_workspace(file) - - if not os.path.isfile(file_path): - return f"Error: File '{file}' does not exist." + if not os.path.isfile(filename): + return f"Error: File '{filename}' does not exist." 
if we_are_running_in_a_docker_container(): result = subprocess.run( - f"python {file_path}", capture_output=True, encoding="utf8", shell=True + f"python {filename}", capture_output=True, encoding="utf8", shell=True ) if result.returncode == 0: return result.stdout @@ -39,16 +42,17 @@ def execute_python_file(file: str) -> str: try: client = docker.from_env() - # You can replace this with the desired Python image/version # You can find available Python images on Docker Hub: # https://hub.docker.com/_/python image_name = "python:3-alpine" try: client.images.get(image_name) - print(f"Image '{image_name}' found locally") + logger.warn(f"Image '{image_name}' found locally") except ImageNotFound: - print(f"Image '{image_name}' not found locally, pulling from Docker Hub") + logger.info( + f"Image '{image_name}' not found locally, pulling from Docker Hub" + ) # Use the low-level API to stream the pull response low_level_client = docker.APIClient() for line in low_level_client.pull(image_name, stream=True, decode=True): @@ -56,15 +60,14 @@ def execute_python_file(file: str) -> str: status = line.get("status") progress = line.get("progress") if status and progress: - print(f"{status}: {progress}") + logger.info(f"{status}: {progress}") elif status: - print(status) - + logger.info(status) container = client.containers.run( image_name, - f"python {file}", + f"python {Path(filename).relative_to(CFG.workspace_path)}", volumes={ - os.path.abspath(WORKSPACE_PATH): { + CFG.workspace_path: { "bind": "/workspace", "mode": "ro", } @@ -85,7 +88,7 @@ def execute_python_file(file: str) -> str: return logs except docker.errors.DockerException as e: - print( + logger.warn( "Could not run the script in a container. If you haven't already, please install Docker https://docs.docker.com/get-docker/" ) return f"Error: {str(e)}" @@ -94,6 +97,15 @@ def execute_python_file(file: str) -> str: return f"Error: {str(e)}" +@command( + "execute_shell", + "Execute Shell Command, non-interactive commands only", + '"command_line": ""', + CFG.execute_local_commands, + "You are not allowed to run local shell commands. To execute" + " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' " + "in your config. Do not attempt to bypass the restriction.", +) def execute_shell(command_line: str) -> str: """Execute a shell command and return the output @@ -103,12 +115,15 @@ def execute_shell(command_line: str) -> str: Returns: str: The output of the command """ - current_dir = os.getcwd() - # Change dir into workspace if necessary - if str(WORKSPACE_PATH) not in current_dir: - os.chdir(WORKSPACE_PATH) - print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'") + current_dir = Path.cwd() + # Change dir into workspace if necessary + if not current_dir.is_relative_to(CFG.workspace_path): + os.chdir(CFG.workspace_path) + + logger.info( + f"Executing command '{command_line}' in working directory '{os.getcwd()}'" + ) result = subprocess.run(command_line, capture_output=True, shell=True) output = f"STDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}" @@ -116,10 +131,18 @@ def execute_shell(command_line: str) -> str: # Change back to whatever the prior working dir was os.chdir(current_dir) - return output +@command( + "execute_shell_popen", + "Execute Shell Command, non-interactive commands only", + '"command_line": ""', + CFG.execute_local_commands, + "You are not allowed to run local shell commands. To execute" + " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' " + "in your config. 
Do not attempt to bypass the restriction.", +) def execute_shell_popen(command_line) -> str: """Execute a shell command with Popen and returns an english description of the event and the process id @@ -130,12 +153,15 @@ def execute_shell_popen(command_line) -> str: Returns: str: Description of the fact that the process started and its id """ + current_dir = os.getcwd() # Change dir into workspace if necessary - if str(WORKSPACE_PATH) not in current_dir: - os.chdir(WORKSPACE_PATH) + if CFG.workspace_path not in current_dir: + os.chdir(CFG.workspace_path) - print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'") + logger.info( + f"Executing command '{command_line}' in working directory '{os.getcwd()}'" + ) do_not_show_output = subprocess.DEVNULL process = subprocess.Popen( diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py index ad145ec9..28198eba 100644 --- a/autogpt/commands/file_operations.py +++ b/autogpt/commands/file_operations.py @@ -1,52 +1,114 @@ """File operations for AutoGPT""" from __future__ import annotations +import hashlib import os import os.path -from typing import Generator +from typing import Dict, Generator, Literal, Tuple +import charset_normalizer import requests from colorama import Back, Fore from requests.adapters import HTTPAdapter, Retry +from autogpt.commands.command import command +from autogpt.config import Config +from autogpt.logs import logger from autogpt.spinner import Spinner from autogpt.utils import readable_file_size -from autogpt.workspace import WORKSPACE_PATH, path_in_workspace -LOG_FILE = "file_logger.txt" -LOG_FILE_PATH = WORKSPACE_PATH / LOG_FILE +CFG = Config() + +Operation = Literal["write", "append", "delete"] -def check_duplicate_operation(operation: str, filename: str) -> bool: - """Check if the operation has already been performed on the given file +def text_checksum(text: str) -> str: + """Get the hex checksum for the given text.""" + return hashlib.md5(text.encode("utf-8")).hexdigest() - Args: - operation (str): The operation to check for - filename (str): The name of the file to check for + +def operations_from_log(log_path: str) -> Generator[Tuple[Operation, str, str | None]]: + """Parse the file operations log and return a tuple containing the log entries""" + try: + log = open(log_path, "r", encoding="utf-8") + except FileNotFoundError: + return + + for line in log: + line = line.replace("File Operation Logger", "").strip() + if not line: + continue + operation, tail = line.split(": ", maxsplit=1) + operation = operation.strip() + if operation in ("write", "append"): + try: + path, checksum = (x.strip() for x in tail.rsplit(" #", maxsplit=1)) + except ValueError: + path, checksum = tail.strip(), None + yield (operation, path, checksum) + elif operation == "delete": + yield (operation, tail.strip(), None) + + log.close() + + +def file_operations_state(log_path: str) -> Dict: + """Iterates over the operations log and returns the expected state. + + Parses a log file at CFG.file_logger_path to construct a dictionary that maps + each file path written or appended to its checksum. Deleted files are removed + from the dictionary. Returns: - bool: True if the operation has already been performed on the file + A dictionary mapping file paths to their checksums. + + Raises: + FileNotFoundError: If CFG.file_logger_path is not found. + ValueError: If the log file content is not in the expected format. 
""" - log_content = read_file(LOG_FILE) - log_entry = f"{operation}: {filename}\n" - return log_entry in log_content + state = {} + for operation, path, checksum in operations_from_log(log_path): + if operation in ("write", "append"): + state[path] = checksum + elif operation == "delete": + del state[path] + return state -def log_operation(operation: str, filename: str) -> None: +def is_duplicate_operation( + operation: Operation, filename: str, checksum: str | None = None +) -> bool: + """Check if the operation has already been performed + + Args: + operation: The operation to check for + filename: The name of the file to check for + checksum: The checksum of the contents to be written + + Returns: + True if the operation has already been performed on the file + """ + state = file_operations_state(CFG.file_logger_path) + if operation == "delete" and filename not in state: + return True + if operation == "write" and state.get(filename) == checksum: + return True + return False + + +def log_operation(operation: str, filename: str, checksum: str | None = None) -> None: """Log the file operation to the file_logger.txt Args: - operation (str): The operation to log - filename (str): The name of the file the operation was performed on + operation: The operation to log + filename: The name of the file the operation was performed on + checksum: The checksum of the contents to be written """ - log_entry = f"{operation}: {filename}\n" - - # Create the log file if it doesn't exist - if not os.path.exists(LOG_FILE_PATH): - with open(LOG_FILE_PATH, "w", encoding="utf-8") as f: - f.write("File Operation Logger ") - - append_to_file(LOG_FILE, log_entry, shouldLog=False) + log_entry = f"{operation}: {filename}" + if checksum is not None: + log_entry += f" #{checksum}" + logger.debug(f"Logging file operation: {log_entry}") + append_to_file(CFG.file_logger_path, f"{log_entry}\n", should_log=False) def split_file( @@ -81,6 +143,7 @@ def split_file( start += max_length - overlap +@command("read_file", "Read file", '"filename": ""') def read_file(filename: str) -> str: """Read a file and return the contents @@ -91,12 +154,12 @@ def read_file(filename: str) -> str: str: The contents of the file """ try: - filepath = path_in_workspace(filename) - with open(filepath, "r", encoding="utf-8") as f: - content = f.read() - return content - except Exception as e: - return f"Error: {str(e)}" + charset_match = charset_normalizer.from_path(filename).best() + encoding = charset_match.encoding + logger.debug(f"Read file '{filename}' with encoding '{encoding}'") + return str(charset_match) + except Exception as err: + return f"Error: {err}" def ingest_file( @@ -112,27 +175,28 @@ def ingest_file( :param overlap: The number of overlapping characters between chunks, default is 200 """ try: - print(f"Working with file {filename}") + logger.info(f"Working with file {filename}") content = read_file(filename) content_length = len(content) - print(f"File length: {content_length} characters") + logger.info(f"File length: {content_length} characters") chunks = list(split_file(content, max_length=max_length, overlap=overlap)) num_chunks = len(chunks) for i, chunk in enumerate(chunks): - print(f"Ingesting chunk {i + 1} / {num_chunks} into memory") + logger.info(f"Ingesting chunk {i + 1} / {num_chunks} into memory") memory_to_add = ( f"Filename: {filename}\n" f"Content part#{i + 1}/{num_chunks}: {chunk}" ) memory.add(memory_to_add) - print(f"Done ingesting {num_chunks} chunks from {filename}.") - except Exception as e: - print(f"Error 
while ingesting file '{filename}': {str(e)}") + logger.info(f"Done ingesting {num_chunks} chunks from {filename}.") + except Exception as err: + logger.info(f"Error while ingesting file '{filename}': {err}") +@command("write_to_file", "Write to file", '"filename": "", "text": ""') def write_to_file(filename: str, text: str) -> str: """Write text to a file @@ -143,44 +207,51 @@ def write_to_file(filename: str, text: str) -> str: Returns: str: A message indicating success or failure """ - if check_duplicate_operation("write", filename): + checksum = text_checksum(text) + if is_duplicate_operation("write", filename, checksum): return "Error: File has already been updated." try: - filepath = path_in_workspace(filename) - directory = os.path.dirname(filepath) - if not os.path.exists(directory): - os.makedirs(directory) - with open(filepath, "w", encoding="utf-8") as f: + directory = os.path.dirname(filename) + os.makedirs(directory, exist_ok=True) + with open(filename, "w", encoding="utf-8") as f: f.write(text) - log_operation("write", filename) + log_operation("write", filename, checksum) return "File written to successfully." - except Exception as e: - return f"Error: {str(e)}" + except Exception as err: + return f"Error: {err}" -def append_to_file(filename: str, text: str, shouldLog: bool = True) -> str: +@command( + "append_to_file", "Append to file", '"filename": "", "text": ""' +) +def append_to_file(filename: str, text: str, should_log: bool = True) -> str: """Append text to a file Args: filename (str): The name of the file to append to text (str): The text to append to the file + should_log (bool): Should log output Returns: str: A message indicating success or failure """ try: - filepath = path_in_workspace(filename) - with open(filepath, "a") as f: + directory = os.path.dirname(filename) + os.makedirs(directory, exist_ok=True) + with open(filename, "a", encoding="utf-8") as f: f.write(text) - if shouldLog: - log_operation("append", filename) + if should_log: + with open(filename, "r", encoding="utf-8") as f: + checksum = text_checksum(f.read()) + log_operation("append", filename, checksum=checksum) return "Text appended successfully." - except Exception as e: - return f"Error: {str(e)}" + except Exception as err: + return f"Error: {err}" +@command("delete_file", "Delete file", '"filename": ""') def delete_file(filename: str) -> str: """Delete a file @@ -190,19 +261,19 @@ def delete_file(filename: str) -> str: Returns: str: A message indicating success or failure """ - if check_duplicate_operation("delete", filename): + if is_duplicate_operation("delete", filename): return "Error: File has already been deleted." try: - filepath = path_in_workspace(filename) - os.remove(filepath) + os.remove(filename) log_operation("delete", filename) return "File deleted successfully." 
- except Exception as e: - return f"Error: {str(e)}" + except Exception as err: + return f"Error: {err}" -def search_files(directory: str) -> list[str]: - """Search for files in a directory +@command("list_files", "List Files in Directory", '"directory": ""') +def list_files(directory: str) -> list[str]: + """lists files in a directory recursively Args: directory (str): The directory to search in @@ -212,29 +283,34 @@ def search_files(directory: str) -> list[str]: """ found_files = [] - if directory in {"", "/"}: - search_directory = WORKSPACE_PATH - else: - search_directory = path_in_workspace(directory) - - for root, _, files in os.walk(search_directory): + for root, _, files in os.walk(directory): for file in files: if file.startswith("."): continue - relative_path = os.path.relpath(os.path.join(root, file), WORKSPACE_PATH) + relative_path = os.path.relpath( + os.path.join(root, file), CFG.workspace_path + ) found_files.append(relative_path) return found_files +@command( + "download_file", + "Download File", + '"url": "", "filename": ""', + CFG.allow_downloads, + "Error: You do not have user authorization to download files locally.", +) def download_file(url, filename): """Downloads a file Args: url (str): URL of the file to download filename (str): Filename to save the file as """ - safe_filename = path_in_workspace(filename) try: + directory = os.path.dirname(filename) + os.makedirs(directory, exist_ok=True) message = f"{Fore.YELLOW}Downloading file from {Back.LIGHTBLUE_EX}{url}{Back.RESET}{Fore.RESET}" with Spinner(message) as spinner: session = requests.Session() @@ -251,7 +327,7 @@ def download_file(url, filename): total_size = int(r.headers.get("Content-Length", 0)) downloaded_size = 0 - with open(safe_filename, "wb") as f: + with open(filename, "wb") as f: for chunk in r.iter_content(chunk_size=8192): f.write(chunk) downloaded_size += len(chunk) @@ -260,8 +336,8 @@ def download_file(url, filename): progress = f"{readable_file_size(downloaded_size)} / {readable_file_size(total_size)}" spinner.update_message(f"{message} {progress}") - return f'Successfully downloaded and locally stored file: "{filename}"! (Size: {readable_file_size(total_size)})' - except requests.HTTPError as e: - return f"Got an HTTP Error whilst trying to download file: {e}" - except Exception as e: - return "Error: " + str(e) + return f'Successfully downloaded and locally stored file: "{filename}"! (Size: {readable_file_size(downloaded_size)})' + except requests.HTTPError as err: + return f"Got an HTTP Error whilst trying to download file: {err}" + except Exception as err: + return f"Error: {err}" diff --git a/autogpt/commands/git_operations.py b/autogpt/commands/git_operations.py index 028f3b8d..22233108 100644 --- a/autogpt/commands/git_operations.py +++ b/autogpt/commands/git_operations.py @@ -1,26 +1,35 @@ """Git operations for autogpt""" -import git +from git.repo import Repo +from autogpt.commands.command import command from autogpt.config import Config -from autogpt.workspace import path_in_workspace +from autogpt.url_utils.validators import validate_url CFG = Config() -def clone_repository(repo_url: str, clone_path: str) -> str: - """Clone a GitHub repository locally +@command( + "clone_repository", + "Clone Repository", + '"url": "", "clone_path": ""', + CFG.github_username and CFG.github_api_key, + "Configure github_username and github_api_key.", +) +@validate_url +def clone_repository(url: str, clone_path: str) -> str: + """Clone a GitHub repository locally. 
Args: - repo_url (str): The URL of the repository to clone - clone_path (str): The path to clone the repository to + url (str): The URL of the repository to clone. + clone_path (str): The path to clone the repository to. Returns: - str: The result of the clone operation""" - split_url = repo_url.split("//") + str: The result of the clone operation. + """ + split_url = url.split("//") auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url) - safe_clone_path = path_in_workspace(clone_path) try: - git.Repo.clone_from(auth_repo_url, safe_clone_path) - return f"""Cloned {repo_url} to {safe_clone_path}""" + Repo.clone_from(url=auth_repo_url, to_path=clone_path) + return f"""Cloned {url} to {clone_path}""" except Exception as e: return f"Error: {str(e)}" diff --git a/autogpt/commands/google_search.py b/autogpt/commands/google_search.py index 7d38ce75..264daaff 100644 --- a/autogpt/commands/google_search.py +++ b/autogpt/commands/google_search.py @@ -5,11 +5,13 @@ import json from duckduckgo_search import ddg +from autogpt.commands.command import command from autogpt.config import Config CFG = Config() +@command("google", "Google Search", '"query": ""', not CFG.google_api_key) def google_search(query: str, num_results: int = 8) -> str: """Return the results of a Google search @@ -31,9 +33,17 @@ def google_search(query: str, num_results: int = 8) -> str: for j in results: search_results.append(j) - return json.dumps(search_results, ensure_ascii=False, indent=4) + results = json.dumps(search_results, ensure_ascii=False, indent=4) + return safe_google_results(results) +@command( + "google", + "Google Search", + '"query": ""', + bool(CFG.google_api_key), + "Configure google_api_key.", +) def google_official_search(query: str, num_results: int = 8) -> str | list[str]: """Return the results of a Google search using the official Google API @@ -82,6 +92,26 @@ def google_official_search(query: str, num_results: int = 8) -> str | list[str]: return "Error: The provided Google API key is invalid or missing." else: return f"Error: {e}" + # google_result can be a list or a string depending on the search results # Return the list of search result URLs - return search_results_links + return safe_google_results(search_results_links) + + +def safe_google_results(results: str | list) -> str: + """ + Return the results of a google search in a safe format. + + Args: + results (str | list): The search results. + + Returns: + str: The results of the search. + """ + if isinstance(results, list): + safe_message = json.dumps( + [result.encode("utf-8", "ignore") for result in results] + ) + else: + safe_message = results.encode("utf-8", "ignore").decode("utf-8") + return safe_message diff --git a/autogpt/commands/image_gen.py b/autogpt/commands/image_gen.py index 0809fcdd..fbed067f 100644 --- a/autogpt/commands/image_gen.py +++ b/autogpt/commands/image_gen.py @@ -1,6 +1,5 @@ """ Image Generation Module for AutoGPT.""" import io -import os.path import uuid from base64 import b64decode @@ -8,12 +7,14 @@ import openai import requests from PIL import Image +from autogpt.commands.command import command from autogpt.config import Config -from autogpt.workspace import path_in_workspace +from autogpt.logs import logger CFG = Config() +@command("generate_image", "Generate Image", '"prompt": ""', CFG.image_provider) def generate_image(prompt: str, size: int = 256) -> str: """Generate an image from a prompt. 
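These registrations all go through the new `@command` decorator from `autogpt/commands/command.py`, which this diff uses but never shows. As a hedged sketch only (the real module may differ), the pattern is a decorator that attaches a metadata object to the function so a registry can discover it; the fourth and fifth arguments seen at the call sites above (for example `CFG.allow_downloads` and its error message, or `bool(CFG.google_api_key)`) act as an enable flag and a message returned when the command is disabled.

```python
from typing import Any, Callable, Optional

class Command:
    # Bundles a callable with the metadata a command registry needs.
    def __init__(self, name: str, description: str, method: Callable,
                 signature: str = "", enabled: bool = True,
                 disabled_reason: Optional[str] = None) -> None:
        self.name = name
        self.description = description
        self.method = method
        self.signature = signature
        self.enabled = enabled
        self.disabled_reason = disabled_reason

    def __call__(self, *args: Any, **kwargs: Any) -> Any:
        if not self.enabled:
            # Surface the configured reason instead of running the command.
            return f"Command '{self.name}' is disabled: {self.disabled_reason}"
        return self.method(*args, **kwargs)

def command(name: str, description: str, signature: str = "",
            enabled: bool = True, disabled_reason: Optional[str] = None) -> Callable:
    # Attach a Command object to the function so a registry can discover it later.
    def decorator(func: Callable) -> Callable:
        func.command = Command(name, description, func, signature,
                               enabled, disabled_reason)
        return func
    return decorator
```

Registering `google_search` with `not CFG.google_api_key` and `google_official_search` with `bool(CFG.google_api_key)` under the same name `google` then makes exactly one of the two callable at runtime, depending on whether an API key is configured.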
@@ -24,7 +25,7 @@ def generate_image(prompt: str, size: int = 256) -> str: Returns: str: The filename of the image """ - filename = f"{str(uuid.uuid4())}.jpg" + filename = f"{CFG.workspace_path}/{str(uuid.uuid4())}.jpg" # DALL-E if CFG.image_provider == "dalle": @@ -69,29 +70,29 @@ def generate_image_with_hf(prompt: str, filename: str) -> str: ) image = Image.open(io.BytesIO(response.content)) - print(f"Image Generated for prompt:{prompt}") + logger.info(f"Image Generated for prompt:{prompt}") - image.save(path_in_workspace(filename)) + image.save(filename) return f"Saved to disk:{filename}" -def generate_image_with_dalle(prompt: str, filename: str) -> str: +def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str: """Generate an image with DALL-E. Args: prompt (str): The prompt to use filename (str): The filename to save the image to + size (int): The size of the image Returns: str: The filename of the image """ - openai.api_key = CFG.openai_api_key # Check for supported image sizes if size not in [256, 512, 1024]: closest = min([256, 512, 1024], key=lambda x: abs(x - size)) - print( + logger.info( f"DALL-E only supports image sizes of 256x256, 512x512, or 1024x1024. Setting to {closest}, was {size}." ) size = closest @@ -101,13 +102,14 @@ def generate_image_with_dalle(prompt: str, filename: str) -> str: n=1, size=f"{size}x{size}", response_format="b64_json", + api_key=CFG.openai_api_key, ) - print(f"Image Generated for prompt:{prompt}") + logger.info(f"Image Generated for prompt:{prompt}") image_data = b64decode(response["data"][0]["b64_json"]) - with open(path_in_workspace(filename), mode="wb") as png: + with open(filename, mode="wb") as png: png.write(image_data) return f"Saved to disk:{filename}" @@ -152,12 +154,12 @@ def generate_image_with_sd_webui( }, ) - print(f"Image Generated for prompt:{prompt}") + logger.info(f"Image Generated for prompt:{prompt}") # Save the image to disk response = response.json() b64 = b64decode(response["images"][0].split(",", 1)[0]) image = Image.open(io.BytesIO(b64)) - image.save(path_in_workspace(filename)) + image.save(filename) return f"Saved to disk:{filename}" diff --git a/autogpt/commands/improve_code.py b/autogpt/commands/improve_code.py index e3440d8b..ff4cf48c 100644 --- a/autogpt/commands/improve_code.py +++ b/autogpt/commands/improve_code.py @@ -2,23 +2,29 @@ from __future__ import annotations import json -from autogpt.llm_utils import call_ai_function +from autogpt.commands.command import command +from autogpt.llm import call_ai_function +@command( + "improve_code", + "Get Improved Code", + '"suggestions": "", "code": ""', +) def improve_code(suggestions: list[str], code: str) -> str: """ A function that takes in code and suggestions and returns a response from create chat completion api call. Parameters: - suggestions (List): A list of suggestions around what needs to be improved. + suggestions (list): A list of suggestions around what needs to be improved. code (str): Code to be improved. Returns: A result string from create chat completion. Improved code in response. 
""" function_string = ( - "def generate_improved_code(suggestions: List[str], code: str) -> str:" + "def generate_improved_code(suggestions: list[str], code: str) -> str:" ) args = [json.dumps(suggestions), code] description_string = ( diff --git a/autogpt/commands/twitter.py b/autogpt/commands/twitter.py index 3eaed36e..baf0876d 100644 --- a/autogpt/commands/twitter.py +++ b/autogpt/commands/twitter.py @@ -1,12 +1,27 @@ +"""A module that contains a command to send a tweet.""" import os import tweepy -from dotenv import load_dotenv -load_dotenv() +from autogpt.commands.command import command -def send_tweet(tweet_text): +@command( + "send_tweet", + "Send Tweet", + '"tweet_text": ""', +) +def send_tweet(tweet_text: str) -> str: + """ + A function that takes in a string and returns a response from create chat + completion api call. + + Args: + tweet_text (str): Text to be tweeted. + + Returns: + A result from sending the tweet. + """ consumer_key = os.environ.get("TW_CONSUMER_KEY") consumer_secret = os.environ.get("TW_CONSUMER_SECRET") access_token = os.environ.get("TW_ACCESS_TOKEN") @@ -21,6 +36,6 @@ def send_tweet(tweet_text): # Send tweet try: api.update_status(tweet_text) - print("Tweet sent successfully!") + return "Tweet sent successfully!" except tweepy.TweepyException as e: - print("Error sending tweet: {}".format(e.reason)) + return f"Error sending tweet: {e.reason}" diff --git a/autogpt/commands/web_playwright.py b/autogpt/commands/web_playwright.py index 4e388ded..70f19dee 100644 --- a/autogpt/commands/web_playwright.py +++ b/autogpt/commands/web_playwright.py @@ -1,10 +1,12 @@ """Web scraping commands using Playwright""" from __future__ import annotations +from autogpt.logs import logger + try: from playwright.sync_api import sync_playwright except ImportError: - print( + logger.info( "Playwright not installed. Please install it with 'pip install playwright' to use." 
) from bs4 import BeautifulSoup diff --git a/autogpt/commands/web_requests.py b/autogpt/commands/web_requests.py index 406338f4..60e54252 100644 --- a/autogpt/commands/web_requests.py +++ b/autogpt/commands/web_requests.py @@ -1,89 +1,21 @@ """Browse a webpage and summarize it using the LLM model""" from __future__ import annotations -from urllib.parse import urljoin, urlparse - import requests from bs4 import BeautifulSoup from requests import Response -from requests.compat import urljoin from autogpt.config import Config -from autogpt.memory import get_memory from autogpt.processing.html import extract_hyperlinks, format_hyperlinks +from autogpt.url_utils.validators import validate_url CFG = Config() -memory = get_memory(CFG) session = requests.Session() session.headers.update({"User-Agent": CFG.user_agent}) -def is_valid_url(url: str) -> bool: - """Check if the URL is valid - - Args: - url (str): The URL to check - - Returns: - bool: True if the URL is valid, False otherwise - """ - try: - result = urlparse(url) - return all([result.scheme, result.netloc]) - except ValueError: - return False - - -def sanitize_url(url: str) -> str: - """Sanitize the URL - - Args: - url (str): The URL to sanitize - - Returns: - str: The sanitized URL - """ - return urljoin(url, urlparse(url).path) - - -def check_local_file_access(url: str) -> bool: - """Check if the URL is a local file - - Args: - url (str): The URL to check - - Returns: - bool: True if the URL is a local file, False otherwise - """ - local_prefixes = [ - "file:///", - "file://localhost/", - "file://localhost", - "http://localhost", - "http://localhost/", - "https://localhost", - "https://localhost/", - "http://2130706433", - "http://2130706433/", - "https://2130706433", - "https://2130706433/", - "http://127.0.0.1/", - "http://127.0.0.1", - "https://127.0.0.1/", - "https://127.0.0.1", - "https://0.0.0.0/", - "https://0.0.0.0", - "http://0.0.0.0/", - "http://0.0.0.0", - "http://0000", - "http://0000/", - "https://0000", - "https://0000/", - ] - return any(url.startswith(prefix) for prefix in local_prefixes) - - +@validate_url def get_response( url: str, timeout: int = 10 ) -> tuple[None, str] | tuple[Response, None]: @@ -101,17 +33,7 @@ def get_response( requests.exceptions.RequestException: If the HTTP request fails """ try: - # Restrict access to local files - if check_local_file_access(url): - raise ValueError("Access to local files is restricted") - - # Most basic check if the URL is valid: - if not url.startswith("http://") and not url.startswith("https://"): - raise ValueError("Invalid URL format") - - sanitized_url = sanitize_url(url) - - response = session.get(sanitized_url, timeout=timeout) + response = session.get(url, timeout=timeout) # Check if the response contains an HTTP error if response.status_code >= 400: diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py index 11bdfeb1..0f5764b5 100644 --- a/autogpt/commands/web_selenium.py +++ b/autogpt/commands/web_selenium.py @@ -7,6 +7,7 @@ from sys import platform from bs4 import BeautifulSoup from selenium import webdriver +from selenium.common.exceptions import WebDriverException from selenium.webdriver.chrome.options import Options as ChromeOptions from selenium.webdriver.common.by import By from selenium.webdriver.firefox.options import Options as FirefoxOptions @@ -18,13 +19,21 @@ from webdriver_manager.chrome import ChromeDriverManager from webdriver_manager.firefox import GeckoDriverManager import autogpt.processing.text as summary +from 
autogpt.commands.command import command from autogpt.config import Config from autogpt.processing.html import extract_hyperlinks, format_hyperlinks +from autogpt.url_utils.validators import validate_url FILE_DIR = Path(__file__).parent.parent CFG = Config() +@command( + "browse_website", + "Browse Website", + '"url": "", "question": ""', +) +@validate_url def browse_website(url: str, question: str) -> tuple[str, WebDriver]: """Browse a website and return the answer and links to the user @@ -35,7 +44,14 @@ def browse_website(url: str, question: str) -> tuple[str, WebDriver]: Returns: Tuple[str, WebDriver]: The answer and links to the user and the webdriver """ - driver, text = scrape_text_with_selenium(url) + try: + driver, text = scrape_text_with_selenium(url) + except WebDriverException as e: + # These errors are often quite long and include lots of context. + # Just grab the first line. + msg = e.msg.split("\n")[0] + return f"Error: {msg}", None + add_header(driver) summary_text = summary.summarize_text(url, text, question, driver) links = scrape_links_with_selenium(driver, url) @@ -70,6 +86,9 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]: ) if CFG.selenium_web_browser == "firefox": + if CFG.selenium_headless: + options.headless = True + options.add_argument("--disable-gpu") driver = webdriver.Firefox( executable_path=GeckoDriverManager().install(), options=options ) @@ -84,11 +103,16 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]: options.add_argument("--no-sandbox") if CFG.selenium_headless: - options.add_argument("--headless") + options.add_argument("--headless=new") options.add_argument("--disable-gpu") + chromium_driver_path = Path("/usr/bin/chromedriver") + driver = webdriver.Chrome( - executable_path=ChromeDriverManager().install(), options=options + executable_path=chromium_driver_path + if chromium_driver_path.exists() + else ChromeDriverManager().install(), + options=options, ) driver.get(url) @@ -151,4 +175,9 @@ def add_header(driver: WebDriver) -> None: Returns: None """ - driver.execute_script(open(f"{FILE_DIR}/js/overlay.js", "r").read()) + try: + with open(f"{FILE_DIR}/js/overlay.js", "r") as overlay_file: + overlay_script = overlay_file.read() + driver.execute_script(overlay_script) + except Exception as e: + print(f"Error executing overlay.js: {e}") diff --git a/autogpt/commands/write_tests.py b/autogpt/commands/write_tests.py index 35a08653..c6a03324 100644 --- a/autogpt/commands/write_tests.py +++ b/autogpt/commands/write_tests.py @@ -3,9 +3,15 @@ from __future__ import annotations import json -from autogpt.llm_utils import call_ai_function +from autogpt.commands.command import command +from autogpt.llm import call_ai_function +@command( + "write_tests", + "Write Tests", + '"code": "", "focus": ""', +) def write_tests(code: str, focus: list[str]) -> str: """ A function that takes in code and focus topics and returns a response from create diff --git a/autogpt/config/__init__.py b/autogpt/config/__init__.py index 726b6dcf..9bdd98e2 100644 --- a/autogpt/config/__init__.py +++ b/autogpt/config/__init__.py @@ -3,12 +3,9 @@ This module contains the configuration classes for AutoGPT. 
""" from autogpt.config.ai_config import AIConfig from autogpt.config.config import Config, check_openai_api_key -from autogpt.config.singleton import AbstractSingleton, Singleton __all__ = [ "check_openai_api_key", - "AbstractSingleton", "AIConfig", "Config", - "Singleton", ] diff --git a/autogpt/config/ai_config.py b/autogpt/config/ai_config.py index d50c30be..88acbfe6 100644 --- a/autogpt/config/ai_config.py +++ b/autogpt/config/ai_config.py @@ -5,10 +5,18 @@ A module that contains the AIConfig class object that contains the configuration from __future__ import annotations import os -from typing import Type +import platform +from pathlib import Path +from typing import Any, Optional, Type +import distro import yaml +from autogpt.prompts.generator import PromptGenerator + +# Soon this will go in a folder where it remembers more stuff about the run(s) +SAVE_FILE = str(Path(os.getcwd()) / "ai_settings.yaml") + class AIConfig: """ @@ -18,10 +26,15 @@ class AIConfig: ai_name (str): The name of the AI. ai_role (str): The description of the AI's role. ai_goals (list): The list of objectives the AI is supposed to complete. + api_budget (float): The maximum dollar value for API calls (0.0 means infinite) """ def __init__( - self, ai_name: str = "", ai_role: str = "", ai_goals: list | None = None + self, + ai_name: str = "", + ai_role: str = "", + ai_goals: list | None = None, + api_budget: float = 0.0, ) -> None: """ Initialize a class instance @@ -30,6 +43,7 @@ class AIConfig: ai_name (str): The name of the AI. ai_role (str): The description of the AI's role. ai_goals (list): The list of objectives the AI is supposed to complete. + api_budget (float): The maximum dollar value for API calls (0.0 means infinite) Returns: None """ @@ -38,14 +52,14 @@ class AIConfig: self.ai_name = ai_name self.ai_role = ai_role self.ai_goals = ai_goals - - # Soon this will go in a folder where it remembers more stuff about the run(s) - SAVE_FILE = os.path.join(os.path.dirname(__file__), "..", "ai_settings.yaml") + self.api_budget = api_budget + self.prompt_generator = None + self.command_registry = None @staticmethod def load(config_file: str = SAVE_FILE) -> "AIConfig": """ - Returns class object with parameters (ai_name, ai_role, ai_goals) loaded from + Returns class object with parameters (ai_name, ai_role, ai_goals, api_budget) loaded from yaml file if yaml file exists, else returns class with no parameters. @@ -65,9 +79,15 @@ class AIConfig: ai_name = config_params.get("ai_name", "") ai_role = config_params.get("ai_role", "") - ai_goals = config_params.get("ai_goals", []) + ai_goals = [ + str(goal).strip("{}").replace("'", "").replace('"', "") + if isinstance(goal, dict) + else str(goal) + for goal in config_params.get("ai_goals", []) + ] + api_budget = config_params.get("api_budget", 0.0) # type: Type[AIConfig] - return AIConfig(ai_name, ai_role, ai_goals) + return AIConfig(ai_name, ai_role, ai_goals, api_budget) def save(self, config_file: str = SAVE_FILE) -> None: """ @@ -85,11 +105,14 @@ class AIConfig: "ai_name": self.ai_name, "ai_role": self.ai_role, "ai_goals": self.ai_goals, + "api_budget": self.api_budget, } with open(config_file, "w", encoding="utf-8") as file: yaml.dump(config, file, allow_unicode=True) - def construct_full_prompt(self) -> str: + def construct_full_prompt( + self, prompt_generator: Optional[PromptGenerator] = None + ) -> str: """ Returns a prompt to the user with the class information in an organized fashion. 
@@ -98,7 +121,7 @@ class AIConfig: Returns: full_prompt (str): A string containing the initial prompt for the user - including the ai_name, ai_role and ai_goals. + including the ai_name, ai_role, ai_goals, and api_budget. """ prompt_start = ( @@ -108,14 +131,38 @@ class AIConfig: "" ) - from autogpt.prompt import get_prompt + from autogpt.config import Config + from autogpt.prompts.prompt import build_default_prompt_generator + + cfg = Config() + if prompt_generator is None: + prompt_generator = build_default_prompt_generator() + prompt_generator.goals = self.ai_goals + prompt_generator.name = self.ai_name + prompt_generator.role = self.ai_role + prompt_generator.command_registry = self.command_registry + for plugin in cfg.plugins: + if not plugin.can_handle_post_prompt(): + continue + prompt_generator = plugin.post_prompt(prompt_generator) + + if cfg.execute_local_commands: + # add OS info to prompt + os_name = platform.system() + os_info = ( + platform.platform(terse=True) + if os_name != "Linux" + else distro.name(pretty=True) + ) + + prompt_start += f"\nThe OS you are running on is: {os_info}" # Construct full prompt - full_prompt = ( - f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n" - ) + full_prompt = f"You are {prompt_generator.name}, {prompt_generator.role}\n{prompt_start}\n\nGOALS:\n\n" for i, goal in enumerate(self.ai_goals): full_prompt += f"{i+1}. {goal}\n" - - full_prompt += f"\n\n{get_prompt()}" + if self.api_budget > 0.0: + full_prompt += f"\nIt takes money to let you run. Your API budget is ${self.api_budget:.3f}" + self.prompt_generator = prompt_generator + full_prompt += f"\n\n{prompt_generator.generate_prompt_string()}" return full_prompt diff --git a/autogpt/config/config.py b/autogpt/config/config.py index c284a4ac..324d284c 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -1,14 +1,13 @@ """Configuration class to store the state of bools for different scripts access.""" import os +from typing import List import openai import yaml +from auto_gpt_plugin_template import AutoGPTPluginTemplate from colorama import Fore -from dotenv import load_dotenv -from autogpt.config.singleton import Singleton - -load_dotenv(verbose=True) +from autogpt.singleton import Singleton class Config(metaclass=Singleton): @@ -18,6 +17,9 @@ class Config(metaclass=Singleton): def __init__(self) -> None: """Initialize the Config class""" + self.workspace_path = None + self.file_logger_path = None + self.debug_mode = False self.continuous_mode = False self.continuous_limit = 0 @@ -26,11 +28,16 @@ class Config(metaclass=Singleton): self.allow_downloads = False self.skip_news = False + self.authorise_key = os.getenv("AUTHORISE_COMMAND_KEY", "y") + self.exit_key = os.getenv("EXIT_KEY", "n") self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml") self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo") self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4") self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000)) self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000)) + self.embedding_model = os.getenv("EMBEDDING_MODEL", "text-embedding-ada-002") + self.embedding_tokenizer = os.getenv("EMBEDDING_TOKENIZER", "cl100k_base") + self.embedding_token_limit = int(os.getenv("EMBEDDING_TOKEN_LIMIT", 8191)) self.browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", 3000)) self.browse_spacy_language_model = os.getenv( "BROWSE_SPACY_LANGUAGE_MODEL", "en_core_web_sm" @@ -59,6 +66,8 @@ class 
Config(metaclass=Singleton): self.use_mac_os_tts = False self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS") + self.chat_messages_enabled = os.getenv("CHAT_MESSAGES_ENABLED") == "True" + self.use_brian_tts = False self.use_brian_tts = os.getenv("USE_BRIAN_TTS") @@ -83,9 +92,12 @@ class Config(metaclass=Singleton): os.getenv("USE_WEAVIATE_EMBEDDED", "False") == "True" ) - # milvus configuration, e.g., localhost:19530. + # milvus or zilliz cloud configuration. self.milvus_addr = os.getenv("MILVUS_ADDR", "localhost:19530") + self.milvus_username = os.getenv("MILVUS_USERNAME") + self.milvus_password = os.getenv("MILVUS_PASSWORD") self.milvus_collection = os.getenv("MILVUS_COLLECTION", "autogpt") + self.milvus_secure = os.getenv("MILVUS_SECURE") == "True" self.image_provider = os.getenv("IMAGE_PROVIDER") self.image_size = int(os.getenv("IMAGE_SIZE", 256)) @@ -120,8 +132,17 @@ class Config(metaclass=Singleton): # Note that indexes must be created on db 0 in redis, this is not configurable. self.memory_backend = os.getenv("MEMORY_BACKEND", "local") - # Initialize the OpenAI API client - openai.api_key = self.openai_api_key + + self.plugins_dir = os.getenv("PLUGINS_DIR", "plugins") + self.plugins: List[AutoGPTPluginTemplate] = [] + self.plugins_openai = [] + + plugins_allowlist = os.getenv("ALLOWLISTED_PLUGINS") + if plugins_allowlist: + self.plugins_allowlist = plugins_allowlist.split(",") + else: + self.plugins_allowlist = [] + self.plugins_denylist = [] def get_azure_deployment_id_for_model(self, model: str) -> str: """ @@ -161,11 +182,8 @@ class Config(metaclass=Singleton): Returns: None """ - try: - with open(config_file) as file: - config_params = yaml.load(file, Loader=yaml.FullLoader) - except FileNotFoundError: - config_params = {} + with open(config_file) as file: + config_params = yaml.load(file, Loader=yaml.FullLoader) self.openai_api_type = config_params.get("azure_api_type") or "azure" self.openai_api_base = config_params.get("azure_api_base") or "" self.openai_api_version = ( @@ -201,6 +219,18 @@ class Config(metaclass=Singleton): """Set the smart token limit value.""" self.smart_token_limit = value + def set_embedding_model(self, value: str) -> None: + """Set the model to use for creating embeddings.""" + self.embedding_model = value + + def set_embedding_tokenizer(self, value: str) -> None: + """Set the tokenizer to use when creating embeddings.""" + self.embedding_tokenizer = value + + def set_embedding_token_limit(self, value: int) -> None: + """Set the token limit for creating embeddings.""" + self.embedding_token_limit = value + def set_browse_chunk_max_length(self, value: int) -> None: """Set the browse_website command chunk max length value.""" self.browse_chunk_max_length = value @@ -241,6 +271,18 @@ class Config(metaclass=Singleton): """Set the debug mode value.""" self.debug_mode = value + def set_plugins(self, value: list) -> None: + """Set the plugins value.""" + self.plugins = value + + def set_temperature(self, value: int) -> None: + """Set the temperature value.""" + self.temperature = value + + def set_memory_backend(self, name: str) -> None: + """Set the memory backend name.""" + self.memory_backend = name + def check_openai_api_key() -> None: """Check if the OpenAI API key is set in config.py or as an environment variable.""" @@ -249,6 +291,7 @@ def check_openai_api_key() -> None: print( Fore.RED + "Please set your OpenAI API key in .env or as an environment variable." 
+ + Fore.RESET ) print("You can get your key from https://platform.openai.com/account/api-keys") exit(1) diff --git a/autogpt/configurator.py b/autogpt/configurator.py index 1dc3be12..84000e57 100644 --- a/autogpt/configurator.py +++ b/autogpt/configurator.py @@ -112,6 +112,9 @@ def create_config( CFG.ai_settings_file = file CFG.skip_reprompt = True + if browser_name: + CFG.selenium_web_browser = browser_name + if allow_downloads: logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED") logger.typewriter_log( @@ -129,6 +132,3 @@ def create_config( if skip_news: CFG.skip_news = True - - if browser_name: - CFG.selenium_web_browser = browser_name diff --git a/autogpt/json_utils/json_fix_general.py b/autogpt/json_utils/json_fix_general.py index 7010fa3b..e485aca1 100644 --- a/autogpt/json_utils/json_fix_general.py +++ b/autogpt/json_utils/json_fix_general.py @@ -9,6 +9,7 @@ from typing import Optional from autogpt.config import Config from autogpt.json_utils.utilities import extract_char_position +from autogpt.logs import logger CFG = Config() @@ -33,8 +34,7 @@ def fix_invalid_escape(json_to_load: str, error_message: str) -> str: json.loads(json_to_load) return json_to_load except json.JSONDecodeError as e: - if CFG.debug_mode: - print("json loads error - fix invalid escape", e) + logger.debug("json loads error - fix invalid escape", e) error_message = str(e) return json_to_load @@ -98,13 +98,11 @@ def correct_json(json_to_load: str) -> str: """ try: - if CFG.debug_mode: - print("json", json_to_load) + logger.debug("json", json_to_load) json.loads(json_to_load) return json_to_load except json.JSONDecodeError as e: - if CFG.debug_mode: - print("json loads error", e) + logger.debug("json loads error", e) error_message = str(e) if error_message.startswith("Invalid \\escape"): json_to_load = fix_invalid_escape(json_to_load, error_message) @@ -116,8 +114,7 @@ def correct_json(json_to_load: str) -> str: json.loads(json_to_load) return json_to_load except json.JSONDecodeError as e: - if CFG.debug_mode: - print("json loads error - add quotes", e) + logger.debug("json loads error - add quotes", e) error_message = str(e) if balanced_str := balance_braces(json_to_load): return balanced_str diff --git a/autogpt/json_utils/json_fix_llm.py b/autogpt/json_utils/json_fix_llm.py index 869aed12..73f76648 100644 --- a/autogpt/json_utils/json_fix_llm.py +++ b/autogpt/json_utils/json_fix_llm.py @@ -11,7 +11,7 @@ from regex import regex from autogpt.config import Config from autogpt.json_utils.json_fix_general import correct_json -from autogpt.llm_utils import call_ai_function +from autogpt.llm import call_ai_function from autogpt.logs import logger from autogpt.speech import say_text @@ -91,14 +91,33 @@ def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]: Returns: str: The fixed JSON string. 
""" + assistant_reply = assistant_reply.strip() + if assistant_reply.startswith("```json"): + assistant_reply = assistant_reply[7:] + if assistant_reply.endswith("```"): + assistant_reply = assistant_reply[:-3] + try: + return json.loads(assistant_reply) # just check the validity + except json.JSONDecodeError: # noqa: E722 + pass + + if assistant_reply.startswith("json "): + assistant_reply = assistant_reply[5:] + assistant_reply = assistant_reply.strip() + try: + return json.loads(assistant_reply) # just check the validity + except json.JSONDecodeError: # noqa: E722 + pass # Parse and print Assistant response assistant_reply_json = fix_and_parse_json(assistant_reply) + logger.debug("Assistant reply JSON: %s", str(assistant_reply_json)) if assistant_reply_json == {}: assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets( assistant_reply ) + logger.debug("Assistant reply JSON 2: %s", str(assistant_reply_json)) if assistant_reply_json != {}: return assistant_reply_json diff --git a/autogpt/json_utils/utilities.py b/autogpt/json_utils/utilities.py index 388cad3c..933de8e9 100644 --- a/autogpt/json_utils/utilities.py +++ b/autogpt/json_utils/utilities.py @@ -9,6 +9,7 @@ from autogpt.config import Config from autogpt.logs import logger CFG = Config() +LLM_DEFAULT_RESPONSE_FORMAT = "llm_response_format_1" def extract_char_position(error_message: str) -> int: @@ -29,15 +30,14 @@ def extract_char_position(error_message: str) -> int: raise ValueError("Character position not found in the error message.") -def validate_json(json_object: object, schema_name: object) -> object: +def validate_json(json_object: object, schema_name: str) -> dict | None: """ :type schema_name: object - :param schema_name: + :param schema_name: str :type json_object: object """ - script_dir = os.path.dirname(__file__) - - with open(os.path.join(script_dir, f"{schema_name}.json"), "r") as f: + scheme_file = os.path.join(os.path.dirname(__file__), f"{schema_name}.json") + with open(scheme_file, "r") as f: schema = json.load(f) validator = Draft7Validator(schema) @@ -51,7 +51,31 @@ def validate_json(json_object: object, schema_name: object) -> object: for error in errors: logger.error(f"Error: {error.message}") - elif CFG.debug_mode: - print("The JSON object is valid.") + else: + logger.debug("The JSON object is valid.") return json_object + + +def validate_json_string(json_string: str, schema_name: str) -> dict | None: + """ + :type schema_name: object + :param schema_name: str + :type json_object: object + """ + + try: + json_loaded = json.loads(json_string) + return validate_json(json_loaded, schema_name) + except: + return None + + +def is_string_valid_json(json_string: str, schema_name: str) -> bool: + """ + :type schema_name: object + :param schema_name: str + :type json_object: object + """ + + return validate_json_string(json_string, schema_name) is not None diff --git a/autogpt/llm/__init__.py b/autogpt/llm/__init__.py new file mode 100644 index 00000000..76eaf3b7 --- /dev/null +++ b/autogpt/llm/__init__.py @@ -0,0 +1,40 @@ +from autogpt.llm.api_manager import ApiManager +from autogpt.llm.base import ( + ChatModelInfo, + ChatModelResponse, + EmbeddingModelInfo, + EmbeddingModelResponse, + LLMResponse, + Message, + ModelInfo, +) +from autogpt.llm.chat import chat_with_ai, create_chat_message, generate_context +from autogpt.llm.llm_utils import ( + call_ai_function, + chunked_tokens, + create_chat_completion, + get_ada_embedding, +) +from autogpt.llm.modelsinfo import COSTS +from 
autogpt.llm.token_counter import count_message_tokens, count_string_tokens + +__all__ = [ + "ApiManager", + "Message", + "ModelInfo", + "ChatModelInfo", + "EmbeddingModelInfo", + "LLMResponse", + "ChatModelResponse", + "EmbeddingModelResponse", + "create_chat_message", + "generate_context", + "chat_with_ai", + "call_ai_function", + "create_chat_completion", + "get_ada_embedding", + "chunked_tokens", + "COSTS", + "count_message_tokens", + "count_string_tokens", +] diff --git a/autogpt/llm/api_manager.py b/autogpt/llm/api_manager.py new file mode 100644 index 00000000..9143389e --- /dev/null +++ b/autogpt/llm/api_manager.py @@ -0,0 +1,128 @@ +from __future__ import annotations + +import openai + +from autogpt.config import Config +from autogpt.llm.modelsinfo import COSTS +from autogpt.logs import logger +from autogpt.singleton import Singleton + + +class ApiManager(metaclass=Singleton): + def __init__(self): + self.total_prompt_tokens = 0 + self.total_completion_tokens = 0 + self.total_cost = 0 + self.total_budget = 0 + + def reset(self): + self.total_prompt_tokens = 0 + self.total_completion_tokens = 0 + self.total_cost = 0 + self.total_budget = 0.0 + + def create_chat_completion( + self, + messages: list, # type: ignore + model: str | None = None, + temperature: float = None, + max_tokens: int | None = None, + deployment_id=None, + ) -> str: + """ + Create a chat completion and update the cost. + Args: + messages (list): The list of messages to send to the API. + model (str): The model to use for the API call. + temperature (float): The temperature to use for the API call. + max_tokens (int): The maximum number of tokens for the API call. + Returns: + str: The AI's response. + """ + cfg = Config() + if temperature is None: + temperature = cfg.temperature + if deployment_id is not None: + response = openai.ChatCompletion.create( + deployment_id=deployment_id, + model=model, + messages=messages, + temperature=temperature, + max_tokens=max_tokens, + api_key=cfg.openai_api_key, + ) + else: + response = openai.ChatCompletion.create( + model=model, + messages=messages, + temperature=temperature, + max_tokens=max_tokens, + api_key=cfg.openai_api_key, + ) + logger.debug(f"Response: {response}") + prompt_tokens = response.usage.prompt_tokens + completion_tokens = response.usage.completion_tokens + self.update_cost(prompt_tokens, completion_tokens, model) + return response + + def update_cost(self, prompt_tokens, completion_tokens, model): + """ + Update the total cost, prompt tokens, and completion tokens. + + Args: + prompt_tokens (int): The number of tokens used in the prompt. + completion_tokens (int): The number of tokens used in the completion. + model (str): The model used for the API call. + """ + self.total_prompt_tokens += prompt_tokens + self.total_completion_tokens += completion_tokens + self.total_cost += ( + prompt_tokens * COSTS[model]["prompt"] + + completion_tokens * COSTS[model]["completion"] + ) / 1000 + logger.debug(f"Total running cost: ${self.total_cost:.3f}") + + def set_total_budget(self, total_budget): + """ + Sets the total user-defined budget for API calls. + + Args: + total_budget (float): The total budget for API calls. + """ + self.total_budget = total_budget + + def get_total_prompt_tokens(self): + """ + Get the total number of prompt tokens. + + Returns: + int: The total number of prompt tokens. + """ + return self.total_prompt_tokens + + def get_total_completion_tokens(self): + """ + Get the total number of completion tokens. 
+ + Returns: + int: The total number of completion tokens. + """ + return self.total_completion_tokens + + def get_total_cost(self): + """ + Get the total cost of API calls. + + Returns: + float: The total cost of API calls. + """ + return self.total_cost + + def get_total_budget(self): + """ + Get the total user-defined budget for API calls. + + Returns: + float: The total budget for API calls. + """ + return self.total_budget diff --git a/autogpt/llm/base.py b/autogpt/llm/base.py new file mode 100644 index 00000000..722e0f0f --- /dev/null +++ b/autogpt/llm/base.py @@ -0,0 +1,65 @@ +from dataclasses import dataclass, field +from typing import List, TypedDict + + +class Message(TypedDict): + """OpenAI Message object containing a role and the message content""" + + role: str + content: str + + +@dataclass +class ModelInfo: + """Struct for model information. + + Would be lovely to eventually get this directly from APIs, but needs to be scraped from + websites for now. + + """ + + name: str + prompt_token_cost: float + completion_token_cost: float + max_tokens: int + + +@dataclass +class ChatModelInfo(ModelInfo): + """Struct for chat model information.""" + + pass + + +@dataclass +class EmbeddingModelInfo(ModelInfo): + """Struct for embedding model information.""" + + embedding_dimensions: int + + +@dataclass +class LLMResponse: + """Standard response struct for a response from an LLM model.""" + + model_info: ModelInfo + prompt_tokens_used: int = 0 + completion_tokens_used: int = 0 + + +@dataclass +class EmbeddingModelResponse(LLMResponse): + """Standard response struct for a response from an embedding model.""" + + embedding: List[float] = field(default_factory=list) + + def __post_init__(self): + if self.completion_tokens_used: + raise ValueError("Embeddings should not have completion tokens used.") + + +@dataclass +class ChatModelResponse(LLMResponse): + """Standard response struct for a response from an LLM model.""" + + content: str = None diff --git a/autogpt/chat.py b/autogpt/llm/chat.py similarity index 51% rename from autogpt/chat.py rename to autogpt/llm/chat.py index 1f6bca96..b4e6b1a4 100644 --- a/autogpt/chat.py +++ b/autogpt/llm/chat.py @@ -1,16 +1,26 @@ import time +from random import shuffle from openai.error import RateLimitError -from autogpt import token_counter from autogpt.config import Config -from autogpt.llm_utils import create_chat_completion +from autogpt.llm.api_manager import ApiManager +from autogpt.llm.base import Message +from autogpt.llm.llm_utils import create_chat_completion +from autogpt.llm.token_counter import count_message_tokens from autogpt.logs import logger +from autogpt.memory_management.store_memory import ( + save_memory_trimmed_from_context_window, +) +from autogpt.memory_management.summary_memory import ( + get_newly_trimmed_messages, + update_running_summary, +) cfg = Config() -def create_chat_message(role, content): +def create_chat_message(role, content) -> Message: """ Create a chat message with the given role and content. 
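A worked example of the `ApiManager` bookkeeping defined above, with made-up token counts (the per-1k-token prices come from the `COSTS` table in `autogpt/llm/modelsinfo.py`, added later in this diff):

```python
from autogpt.llm.api_manager import ApiManager
from autogpt.llm.modelsinfo import COSTS

api_manager = ApiManager()
api_manager.set_total_budget(1.00)

# Simulate the bookkeeping create_chat_completion() performs after each call.
api_manager.update_cost(prompt_tokens=1200, completion_tokens=300, model="gpt-4")

# gpt-4 costs $0.03 per 1k prompt tokens and $0.06 per 1k completion tokens:
# (1200 * 0.03 + 300 * 0.06) / 1000 = $0.054
expected = (1200 * COSTS["gpt-4"]["prompt"]
            + 300 * COSTS["gpt-4"]["completion"]) / 1000
assert abs(api_manager.get_total_cost() - expected) < 1e-9

remaining = api_manager.get_total_budget() - api_manager.get_total_cost()
print(f"Remaining budget: ${remaining:.3f}")  # Remaining budget: $0.946
```

Because `ApiManager` uses the `Singleton` metaclass, `chat_with_ai` can construct `ApiManager()` anywhere and read the same running totals that `create_chat_completion` updated.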
@@ -30,17 +40,17 @@ def generate_context(prompt, relevant_memory, full_message_history, model): create_chat_message( "system", f"The current time and date is {time.strftime('%c')}" ), - create_chat_message( - "system", - f"This reminds you of these events from your past:\n{relevant_memory}\n\n", - ), + # create_chat_message( + # "system", + # f"This reminds you of these events from your past:\n{relevant_memory}\n\n", + # ), ] # Add messages from the full message history until we reach the token limit next_message_to_add_index = len(full_message_history) - 1 insertion_index = len(current_context) # Count the currently used tokens - current_tokens_used = token_counter.count_message_tokens(current_context, model) + current_tokens_used = count_message_tokens(current_context, model) return ( next_message_to_add_index, current_tokens_used, @@ -51,7 +61,7 @@ def generate_context(prompt, relevant_memory, full_message_history, model): # TODO: Change debug from hardcode to argument def chat_with_ai( - prompt, user_input, full_message_history, permanent_memory, token_limit + agent, prompt, user_input, full_message_history, permanent_memory, token_limit ): """Interact with the OpenAI API, sending the prompt, user input, message history, and permanent memory.""" @@ -75,16 +85,21 @@ def chat_with_ai( """ model = cfg.fast_llm_model # TODO: Change model from hardcode to argument # Reserve 1000 tokens for the response - logger.debug(f"Token limit: {token_limit}") send_token_limit = token_limit - 1000 - relevant_memory = ( - "" - if len(full_message_history) == 0 - else permanent_memory.get_relevant(str(full_message_history[-9:]), 10) - ) - + # if len(full_message_history) == 0: + # relevant_memory = "" + # else: + # recent_history = full_message_history[-5:] + # shuffle(recent_history) + # relevant_memories = permanent_memory.get_relevant( + # str(recent_history), 5 + # ) + # if relevant_memories: + # shuffle(relevant_memories) + # relevant_memory = str(relevant_memories) + relevant_memory = "" logger.debug(f"Memory Stats: {permanent_memory.get_stats()}") ( @@ -94,30 +109,36 @@ def chat_with_ai( current_context, ) = generate_context(prompt, relevant_memory, full_message_history, model) - while current_tokens_used > 2500: - # remove memories until we are under 2500 tokens - relevant_memory = relevant_memory[:-1] - ( - next_message_to_add_index, - current_tokens_used, - insertion_index, - current_context, - ) = generate_context( - prompt, relevant_memory, full_message_history, model - ) + # while current_tokens_used > 2500: + # # remove memories until we are under 2500 tokens + # relevant_memory = relevant_memory[:-1] + # ( + # next_message_to_add_index, + # current_tokens_used, + # insertion_index, + # current_context, + # ) = generate_context( + # prompt, relevant_memory, full_message_history, model + # ) - current_tokens_used += token_counter.count_message_tokens( + current_tokens_used += count_message_tokens( [create_chat_message("user", user_input)], model ) # Account for user input (appended later) + current_tokens_used += 500 # Account for memory (appended later) TODO: The final memory may be less than 500 tokens + + # Add Messages until the token limit is reached or there are no more messages to add. 
while next_message_to_add_index >= 0: # print (f"CURRENT TOKENS USED: {current_tokens_used}") message_to_add = full_message_history[next_message_to_add_index] - tokens_to_add = token_counter.count_message_tokens( - [message_to_add], model - ) + tokens_to_add = count_message_tokens([message_to_add], model) if current_tokens_used + tokens_to_add > send_token_limit: + # save_memory_trimmed_from_context_window( + # full_message_history, + # next_message_to_add_index, + # permanent_memory, + # ) break # Add the most recent message to the start of the current context, @@ -132,9 +153,66 @@ def chat_with_ai( # Move to the next most recent message in the full message history next_message_to_add_index -= 1 + # Insert Memories + if len(full_message_history) > 0: + ( + newly_trimmed_messages, + agent.last_memory_index, + ) = get_newly_trimmed_messages( + full_message_history=full_message_history, + current_context=current_context, + last_memory_index=agent.last_memory_index, + ) + agent.summary_memory = update_running_summary( + current_memory=agent.summary_memory, + new_events=newly_trimmed_messages, + ) + current_context.insert(insertion_index, agent.summary_memory) + + api_manager = ApiManager() + # inform the AI about its remaining budget (if it has one) + if api_manager.get_total_budget() > 0.0: + remaining_budget = ( + api_manager.get_total_budget() - api_manager.get_total_cost() + ) + if remaining_budget < 0: + remaining_budget = 0 + system_message = ( + f"Your remaining API budget is ${remaining_budget:.3f}" + + ( + " BUDGET EXCEEDED! SHUT DOWN!\n\n" + if remaining_budget == 0 + else " Budget very nearly exceeded! Shut down gracefully!\n\n" + if remaining_budget < 0.005 + else " Budget nearly exceeded. Finish up.\n\n" + if remaining_budget < 0.01 + else "\n\n" + ) + ) + logger.debug(system_message) + current_context.append(create_chat_message("system", system_message)) + # Append user input, the length of this is accounted for above current_context.extend([create_chat_message("user", user_input)]) + plugin_count = len(cfg.plugins) + for i, plugin in enumerate(cfg.plugins): + if not plugin.can_handle_on_planning(): + continue + plugin_response = plugin.on_planning( + agent.prompt_generator, current_context + ) + if not plugin_response or plugin_response == "": + continue + tokens_to_add = count_message_tokens( + [create_chat_message("system", plugin_response)], model + ) + if current_tokens_used + tokens_to_add > send_token_limit: + logger.debug("Plugin response too long, skipping:", plugin_response) + logger.debug("Plugins remaining at stop:", plugin_count - i) + break + current_context.append(create_chat_message("system", plugin_response)) + # Calculate remaining tokens tokens_remaining = token_limit - current_tokens_used # assert tokens_remaining >= 0, "Tokens remaining is negative. @@ -171,5 +249,5 @@ def chat_with_ai( return assistant_reply except RateLimitError: # TODO: When we switch to langchain, this is built in - print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...") + logger.warn("Error: ", "API Rate Limit Reached. 
Waiting 10 seconds...") time.sleep(10) diff --git a/autogpt/llm/llm_utils.py b/autogpt/llm/llm_utils.py new file mode 100644 index 00000000..a77bccbc --- /dev/null +++ b/autogpt/llm/llm_utils.py @@ -0,0 +1,295 @@ +from __future__ import annotations + +import functools +import time +from itertools import islice +from typing import List, Optional + +import numpy as np +import openai +import tiktoken +from colorama import Fore, Style +from openai.error import APIError, RateLimitError, Timeout + +from autogpt.config import Config +from autogpt.llm.api_manager import ApiManager +from autogpt.llm.base import Message +from autogpt.logs import logger + + +def retry_openai_api( + num_retries: int = 10, + backoff_base: float = 2.0, + warn_user: bool = True, +): + """Retry an OpenAI API call. + + Args: + num_retries int: Number of retries. Defaults to 10. + backoff_base float: Base for exponential backoff. Defaults to 2. + warn_user bool: Whether to warn the user. Defaults to True. + """ + retry_limit_msg = f"{Fore.RED}Error: " f"Reached rate limit, passing...{Fore.RESET}" + api_key_error_msg = ( + f"Please double check that you have setup a " + f"{Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. You can " + f"read more here: {Fore.CYAN}https://docs.agpt.co/setup/#getting-an-api-key{Fore.RESET}" + ) + backoff_msg = ( + f"{Fore.RED}Error: API Bad gateway. Waiting {{backoff}} seconds...{Fore.RESET}" + ) + + def _wrapper(func): + @functools.wraps(func) + def _wrapped(*args, **kwargs): + user_warned = not warn_user + num_attempts = num_retries + 1 # +1 for the first attempt + for attempt in range(1, num_attempts + 1): + try: + return func(*args, **kwargs) + + except RateLimitError: + if attempt == num_attempts: + raise + + logger.debug(retry_limit_msg) + if not user_warned: + logger.double_check(api_key_error_msg) + user_warned = True + + except APIError as e: + if (e.http_status != 502) or (attempt == num_attempts): + raise + + backoff = backoff_base ** (attempt + 2) + logger.debug(backoff_msg.format(backoff=backoff)) + time.sleep(backoff) + + return _wrapped + + return _wrapper + + +def call_ai_function( + function: str, args: list, description: str, model: str | None = None +) -> str: + """Call an AI function + + This is a magic function that can do anything with no-code. See + https://github.com/Torantulino/AI-Functions for more info. + + Args: + function (str): The function to call + args (list): The arguments to pass to the function + description (str): The description of the function + model (str, optional): The model to use. Defaults to None. 
+ + Returns: + str: The response from the function + """ + cfg = Config() + if model is None: + model = cfg.smart_llm_model + # For each arg, if any are None, convert to "None": + args = [str(arg) if arg is not None else "None" for arg in args] + # parse args to comma separated string + args: str = ", ".join(args) + messages: List[Message] = [ + { + "role": "system", + "content": f"You are now the following python function: ```# {description}" + f"\n{function}```\n\nOnly respond with your `return` value.", + }, + {"role": "user", "content": args}, + ] + + return create_chat_completion(model=model, messages=messages, temperature=0) + + +# Overly simple abstraction until we create something better +# simple retry mechanism when getting a rate error or a bad gateway +def create_chat_completion( + messages: List[Message], # type: ignore + model: Optional[str] = None, + temperature: float = None, + max_tokens: Optional[int] = None, +) -> str: + """Create a chat completion using the OpenAI API + + Args: + messages (List[Message]): The messages to send to the chat completion + model (str, optional): The model to use. Defaults to None. + temperature (float, optional): The temperature to use. Defaults to 0.9. + max_tokens (int, optional): The max tokens to use. Defaults to None. + + Returns: + str: The response from the chat completion + """ + cfg = Config() + if temperature is None: + temperature = cfg.temperature + + num_retries = 10 + warned_user = False + logger.debug( + f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}" + ) + for plugin in cfg.plugins: + if plugin.can_handle_chat_completion( + messages=messages, + model=model, + temperature=temperature, + max_tokens=max_tokens, + ): + message = plugin.handle_chat_completion( + messages=messages, + model=model, + temperature=temperature, + max_tokens=max_tokens, + ) + if message is not None: + return message + api_manager = ApiManager() + response = None + for attempt in range(num_retries): + backoff = 2 ** (attempt + 2) + try: + if cfg.use_azure: + response = api_manager.create_chat_completion( + deployment_id=cfg.get_azure_deployment_id_for_model(model), + model=model, + messages=messages, + temperature=temperature, + max_tokens=max_tokens, + ) + else: + response = api_manager.create_chat_completion( + model=model, + messages=messages, + temperature=temperature, + max_tokens=max_tokens, + ) + break + except RateLimitError: + logger.debug( + f"{Fore.RED}Error: ", f"Reached rate limit, passing...{Fore.RESET}" + ) + if not warned_user: + logger.double_check( + f"Please double check that you have setup a {Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. " + + f"You can read more here: {Fore.CYAN}https://docs.agpt.co/setup/#getting-an-api-key{Fore.RESET}" + ) + warned_user = True + except (APIError, Timeout) as e: + if e.http_status != 502: + raise + if attempt == num_retries - 1: + raise + logger.debug( + f"{Fore.RED}Error: ", + f"API Bad gateway. Waiting {backoff} seconds...{Fore.RESET}", + ) + time.sleep(backoff) + if response is None: + logger.typewriter_log( + "FAILED TO GET RESPONSE FROM OPENAI", + Fore.RED, + "Auto-GPT has failed to get a response from OpenAI's services. 
" + + f"Try running Auto-GPT again, and if the problem the persists try running it with `{Fore.CYAN}--debug{Fore.RESET}`.", + ) + logger.double_check() + if cfg.debug_mode: + raise RuntimeError(f"Failed to get response after {num_retries} retries") + else: + quit(1) + resp = response.choices[0].message["content"] + for plugin in cfg.plugins: + if not plugin.can_handle_on_response(): + continue + resp = plugin.on_response(resp) + return resp + + +def batched(iterable, n): + """Batch data into tuples of length n. The last batch may be shorter.""" + # batched('ABCDEFG', 3) --> ABC DEF G + if n < 1: + raise ValueError("n must be at least one") + it = iter(iterable) + while batch := tuple(islice(it, n)): + yield batch + + +def chunked_tokens(text, tokenizer_name, chunk_length): + tokenizer = tiktoken.get_encoding(tokenizer_name) + tokens = tokenizer.encode(text) + chunks_iterator = batched(tokens, chunk_length) + yield from chunks_iterator + + +def get_ada_embedding(text: str) -> List[float]: + """Get an embedding from the ada model. + + Args: + text (str): The text to embed. + + Returns: + List[float]: The embedding. + """ + cfg = Config() + model = cfg.embedding_model + text = text.replace("\n", " ") + + if cfg.use_azure: + kwargs = {"engine": cfg.get_azure_deployment_id_for_model(model)} + else: + kwargs = {"model": model} + + embedding = create_embedding(text, **kwargs) + return embedding + + +@retry_openai_api() +def create_embedding( + text: str, + *_, + **kwargs, +) -> openai.Embedding: + """Create an embedding using the OpenAI API + + Args: + text (str): The text to embed. + kwargs: Other arguments to pass to the OpenAI API embedding creation call. + + Returns: + openai.Embedding: The embedding object. + """ + cfg = Config() + chunk_embeddings = [] + chunk_lengths = [] + for chunk in chunked_tokens( + text, + tokenizer_name=cfg.embedding_tokenizer, + chunk_length=cfg.embedding_token_limit, + ): + embedding = openai.Embedding.create( + input=[chunk], + api_key=cfg.openai_api_key, + **kwargs, + ) + api_manager = ApiManager() + api_manager.update_cost( + prompt_tokens=embedding.usage.prompt_tokens, + completion_tokens=0, + model=cfg.embedding_model, + ) + chunk_embeddings.append(embedding["data"][0]["embedding"]) + chunk_lengths.append(len(chunk)) + + # do weighted avg + chunk_embeddings = np.average(chunk_embeddings, axis=0, weights=chunk_lengths) + chunk_embeddings = chunk_embeddings / np.linalg.norm( + chunk_embeddings + ) # normalize the length to one + chunk_embeddings = chunk_embeddings.tolist() + return chunk_embeddings diff --git a/autogpt/llm/modelsinfo.py b/autogpt/llm/modelsinfo.py new file mode 100644 index 00000000..1bebcbf9 --- /dev/null +++ b/autogpt/llm/modelsinfo.py @@ -0,0 +1,10 @@ +COSTS = { + "gpt-3.5-turbo": {"prompt": 0.002, "completion": 0.002}, + "gpt-3.5-turbo-0301": {"prompt": 0.002, "completion": 0.002}, + "gpt-4-0314": {"prompt": 0.03, "completion": 0.06}, + "gpt-4": {"prompt": 0.03, "completion": 0.06}, + "gpt-4-0314": {"prompt": 0.03, "completion": 0.06}, + "gpt-4-32k": {"prompt": 0.06, "completion": 0.12}, + "gpt-4-32k-0314": {"prompt": 0.06, "completion": 0.12}, + "text-embedding-ada-002": {"prompt": 0.0004, "completion": 0.0}, +} diff --git a/autogpt/permanent_memory/__init__.py b/autogpt/llm/providers/__init__.py similarity index 100% rename from autogpt/permanent_memory/__init__.py rename to autogpt/llm/providers/__init__.py diff --git a/autogpt/llm/providers/openai.py b/autogpt/llm/providers/openai.py new file mode 100644 index 00000000..188d5cf7 --- 
/dev/null +++ b/autogpt/llm/providers/openai.py @@ -0,0 +1,37 @@ +from autogpt.llm.base import ChatModelInfo, EmbeddingModelInfo + +OPEN_AI_CHAT_MODELS = { + "gpt-3.5-turbo": ChatModelInfo( + name="gpt-3.5-turbo", + prompt_token_cost=0.002, + completion_token_cost=0.002, + max_tokens=4096, + ), + "gpt-4": ChatModelInfo( + name="gpt-4", + prompt_token_cost=0.03, + completion_token_cost=0.06, + max_tokens=8192, + ), + "gpt-4-32k": ChatModelInfo( + name="gpt-4-32k", + prompt_token_cost=0.06, + completion_token_cost=0.12, + max_tokens=32768, + ), +} + +OPEN_AI_EMBEDDING_MODELS = { + "text-embedding-ada-002": EmbeddingModelInfo( + name="text-embedding-ada-002", + prompt_token_cost=0.0004, + completion_token_cost=0.0, + max_tokens=8191, + embedding_dimensions=1536, + ), +} + +OPEN_AI_MODELS = { + **OPEN_AI_CHAT_MODELS, + **OPEN_AI_EMBEDDING_MODELS, +} diff --git a/autogpt/token_counter.py b/autogpt/llm/token_counter.py similarity index 95% rename from autogpt/token_counter.py rename to autogpt/llm/token_counter.py index 338fe6be..5e13920e 100644 --- a/autogpt/token_counter.py +++ b/autogpt/llm/token_counter.py @@ -1,13 +1,16 @@ """Functions for counting the number of tokens in a message or string.""" from __future__ import annotations +from typing import List + import tiktoken +from autogpt.llm.base import Message from autogpt.logs import logger def count_message_tokens( - messages: list[dict[str, str]], model: str = "gpt-3.5-turbo-0301" + messages: List[Message], model: str = "gpt-3.5-turbo-0301" ) -> int: """ Returns the number of tokens used by a list of messages. diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py deleted file mode 100644 index 821820ff..00000000 --- a/autogpt/llm_utils.py +++ /dev/null @@ -1,172 +0,0 @@ -from __future__ import annotations - -import time -from ast import List - -import openai -from colorama import Fore, Style -from openai.error import APIError, RateLimitError - -from autogpt.config import Config -from autogpt.logs import logger - -CFG = Config() - -openai.api_key = CFG.openai_api_key - - -def call_ai_function( - function: str, args: list, description: str, model: str | None = None -) -> str: - """Call an AI function - - This is a magic function that can do anything with no-code. See - https://github.com/Torantulino/AI-Functions for more info. - - Args: - function (str): The function to call - args (list): The arguments to pass to the function - description (str): The description of the function - model (str, optional): The model to use. Defaults to None. 
- - Returns: - str: The response from the function - """ - if model is None: - model = CFG.smart_llm_model - # For each arg, if any are None, convert to "None": - args = [str(arg) if arg is not None else "None" for arg in args] - # parse args to comma separated string - args = ", ".join(args) - messages = [ - { - "role": "system", - "content": f"You are now the following python function: ```# {description}" - f"\n{function}```\n\nOnly respond with your `return` value.", - }, - {"role": "user", "content": args}, - ] - - return create_chat_completion(model=model, messages=messages, temperature=0) - - -# Overly simple abstraction until we create something better -# simple retry mechanism when getting a rate error or a bad gateway -def create_chat_completion( - messages: list, # type: ignore - model: str | None = None, - temperature: float = CFG.temperature, - max_tokens: int | None = None, -) -> str: - """Create a chat completion using the OpenAI API - - Args: - messages (list[dict[str, str]]): The messages to send to the chat completion - model (str, optional): The model to use. Defaults to None. - temperature (float, optional): The temperature to use. Defaults to 0.9. - max_tokens (int, optional): The max tokens to use. Defaults to None. - - Returns: - str: The response from the chat completion - """ - response = None - num_retries = 10 - warned_user = False - if CFG.debug_mode: - print( - Fore.GREEN - + f"Creating chat completion with model {model}, temperature {temperature}," - f" max_tokens {max_tokens}" + Fore.RESET - ) - for attempt in range(num_retries): - backoff = 2 ** (attempt + 2) - try: - if CFG.use_azure: - response = openai.ChatCompletion.create( - deployment_id=CFG.get_azure_deployment_id_for_model(model), - model=model, - messages=messages, - temperature=temperature, - max_tokens=max_tokens, - ) - else: - response = openai.ChatCompletion.create( - model=model, - messages=messages, - temperature=temperature, - max_tokens=max_tokens, - ) - break - except RateLimitError: - if CFG.debug_mode: - print( - Fore.RED + "Error: ", - f"Reached rate limit, passing..." + Fore.RESET, - ) - if not warned_user: - logger.double_check( - f"Please double check that you have setup a {Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. " - + f"You can read more here: {Fore.CYAN}https://github.com/Significant-Gravitas/Auto-GPT#openai-api-keys-configuration{Fore.RESET}" - ) - warned_user = True - except APIError as e: - if e.http_status == 502: - pass - else: - raise - if attempt == num_retries - 1: - raise - if CFG.debug_mode: - print( - Fore.RED + "Error: ", - f"API Bad gateway. Waiting {backoff} seconds..." + Fore.RESET, - ) - time.sleep(backoff) - if response is None: - logger.typewriter_log( - "FAILED TO GET RESPONSE FROM OPENAI", - Fore.RED, - "Auto-GPT has failed to get a response from OpenAI's services. 
" - + f"Try running Auto-GPT again, and if the problem the persists try running it with `{Fore.CYAN}--debug{Fore.RESET}`.", - ) - logger.double_check() - if CFG.debug_mode: - raise RuntimeError(f"Failed to get response after {num_retries} retries") - else: - quit(1) - - return response.choices[0].message["content"] - - -def create_embedding_with_ada(text) -> list: - """Create an embedding with text-ada-002 using the OpenAI SDK""" - num_retries = 10 - for attempt in range(num_retries): - backoff = 2 ** (attempt + 2) - try: - if CFG.use_azure: - return openai.Embedding.create( - input=[text], - engine=CFG.get_azure_deployment_id_for_model( - "text-embedding-ada-002" - ), - )["data"][0]["embedding"] - else: - return openai.Embedding.create( - input=[text], model="text-embedding-ada-002" - )["data"][0]["embedding"] - except RateLimitError: - pass - except APIError as e: - if e.http_status == 502: - pass - else: - raise - if attempt == num_retries - 1: - raise - if CFG.debug_mode: - print( - Fore.RED + "Error: ", - f"API Bad gateway. Waiting {backoff} seconds..." + Fore.RESET, - ) - time.sleep(backoff) diff --git a/autogpt/logs.py b/autogpt/logs.py index 35037404..1cbb784d 100644 --- a/autogpt/logs.py +++ b/autogpt/logs.py @@ -1,20 +1,16 @@ """Logging module for Auto-GPT.""" -import json import logging import os import random import re import time -import traceback from logging import LogRecord from colorama import Fore, Style -from autogpt.config import Config, Singleton +from autogpt.singleton import Singleton from autogpt.speech import say_text -CFG = Config() - class Logger(metaclass=Singleton): """ @@ -78,12 +74,18 @@ class Logger(metaclass=Singleton): self.logger.addHandler(error_handler) self.logger.setLevel(logging.DEBUG) + self.speak_mode = False + self.chat_plugins = [] + def typewriter_log( self, title="", title_color="", content="", speak_text=False, level=logging.INFO ): - if speak_text and CFG.speak_mode: + if speak_text and self.speak_mode: say_text(f"{title}. {content}") + for plugin in self.chat_plugins: + plugin.report(f"{title}. 
{content}") + if content: if isinstance(content, list): content = " ".join(content) @@ -102,6 +104,14 @@ class Logger(metaclass=Singleton): ): self._log(title, title_color, message, logging.DEBUG) + def info( + self, + message, + title="", + title_color="", + ): + self._log(title, title_color, message, logging.INFO) + def warn( self, message, @@ -113,11 +123,19 @@ class Logger(metaclass=Singleton): def error(self, title, message=""): self._log(title, Fore.RED, message, logging.ERROR) - def _log(self, title="", title_color="", message="", level=logging.INFO): + def _log( + self, + title: str = "", + title_color: str = "", + message: str = "", + level=logging.INFO, + ): if message: if isinstance(message, list): message = " ".join(message) - self.logger.log(level, message, extra={"title": title, "color": title_color}) + self.logger.log( + level, message, extra={"title": str(title), "color": str(title_color)} + ) def set_level(self, level): self.logger.setLevel(level) @@ -202,100 +220,10 @@ def remove_color_codes(s: str) -> str: logger = Logger() -def print_assistant_thoughts(ai_name, assistant_reply): - """Prints the assistant's thoughts to the console""" - from autogpt.json_utils.json_fix_llm import ( - attempt_to_fix_json_by_finding_outermost_brackets, - fix_and_parse_json, - ) - - try: - try: - # Parse and print Assistant response - assistant_reply_json = fix_and_parse_json(assistant_reply) - except json.JSONDecodeError: - logger.error("Error: Invalid JSON in assistant thoughts\n", assistant_reply) - assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets( - assistant_reply - ) - if isinstance(assistant_reply_json, str): - assistant_reply_json = fix_and_parse_json(assistant_reply_json) - - # Check if assistant_reply_json is a string and attempt to parse - # it into a JSON object - if isinstance(assistant_reply_json, str): - try: - assistant_reply_json = json.loads(assistant_reply_json) - except json.JSONDecodeError: - logger.error("Error: Invalid JSON\n", assistant_reply) - assistant_reply_json = ( - attempt_to_fix_json_by_finding_outermost_brackets( - assistant_reply_json - ) - ) - - assistant_thoughts_reasoning = None - assistant_thoughts_plan = None - assistant_thoughts_speak = None - assistant_thoughts_criticism = None - if not isinstance(assistant_reply_json, dict): - assistant_reply_json = {} - assistant_thoughts = assistant_reply_json.get("thoughts", {}) - assistant_thoughts_text = assistant_thoughts.get("text") - - if assistant_thoughts: - assistant_thoughts_reasoning = assistant_thoughts.get("reasoning") - assistant_thoughts_plan = assistant_thoughts.get("plan") - assistant_thoughts_criticism = assistant_thoughts.get("criticism") - assistant_thoughts_speak = assistant_thoughts.get("speak") - - logger.typewriter_log( - f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}" - ) - logger.typewriter_log( - "REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}" - ) - - if assistant_thoughts_plan: - logger.typewriter_log("PLAN:", Fore.YELLOW, "") - # If it's a list, join it into a string - if isinstance(assistant_thoughts_plan, list): - assistant_thoughts_plan = "\n".join(assistant_thoughts_plan) - elif isinstance(assistant_thoughts_plan, dict): - assistant_thoughts_plan = str(assistant_thoughts_plan) - - # Split the input_string using the newline character and dashes - lines = assistant_thoughts_plan.split("\n") - for line in lines: - line = line.lstrip("- ") - logger.typewriter_log("- ", Fore.GREEN, line.strip()) - - logger.typewriter_log( - 
"CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}" - ) - # Speak the assistant's thoughts - if CFG.speak_mode and assistant_thoughts_speak: - say_text(assistant_thoughts_speak) - else: - logger.typewriter_log("SPEAK:", Fore.YELLOW, f"{assistant_thoughts_speak}") - - return assistant_reply_json - except json.decoder.JSONDecodeError: - logger.error("Error: Invalid JSON\n", assistant_reply) - if CFG.speak_mode: - say_text( - "I have received an invalid JSON response from the OpenAI API." - " I cannot ignore this response." - ) - - # All other errors, return "Error: + error message" - except Exception: - call_stack = traceback.format_exc() - logger.error("Error: \n", call_stack) - - def print_assistant_thoughts( - ai_name: object, assistant_reply_json_valid: object + ai_name: object, + assistant_reply_json_valid: object, + speak_mode: bool = False, ) -> None: assistant_thoughts_reasoning = None assistant_thoughts_plan = None @@ -328,5 +256,5 @@ def print_assistant_thoughts( logger.typewriter_log("- ", Fore.GREEN, line.strip()) logger.typewriter_log("CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}") # Speak the assistant's thoughts - if CFG.speak_mode and assistant_thoughts_speak: + if speak_mode and assistant_thoughts_speak: say_text(assistant_thoughts_speak) diff --git a/autogpt/main.py b/autogpt/main.py new file mode 100644 index 00000000..355e1085 --- /dev/null +++ b/autogpt/main.py @@ -0,0 +1,171 @@ +"""The application entry point. Can be invoked by a CLI or any other front end application.""" +import logging +import sys +from pathlib import Path + +from colorama import Fore, Style + +from autogpt.agent.agent import Agent +from autogpt.commands.command import CommandRegistry +from autogpt.config import Config, check_openai_api_key +from autogpt.configurator import create_config +from autogpt.logs import logger +from autogpt.memory import get_memory +from autogpt.plugins import scan_plugins +from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT, construct_main_ai_config +from autogpt.utils import ( + get_current_git_branch, + get_latest_bulletin, + markdown_to_ansi_style, +) +from autogpt.workspace import Workspace +from scripts.install_plugin_deps import install_plugin_dependencies + + +def run_auto_gpt( + continuous: bool, + continuous_limit: int, + ai_settings: str, + skip_reprompt: bool, + speak: bool, + debug: bool, + gpt3only: bool, + gpt4only: bool, + memory_type: str, + browser_name: str, + allow_downloads: bool, + skip_news: bool, + workspace_directory: str, + install_plugin_deps: bool, +): + # Configure logging before we do anything else. + logger.set_level(logging.DEBUG if debug else logging.INFO) + logger.speak_mode = speak + + cfg = Config() + # TODO: fill in llm values here + check_openai_api_key() + create_config( + continuous, + continuous_limit, + ai_settings, + skip_reprompt, + speak, + debug, + gpt3only, + gpt4only, + memory_type, + browser_name, + allow_downloads, + skip_news, + ) + + if not cfg.skip_news: + motd, is_new_motd = get_latest_bulletin() + if motd: + motd = markdown_to_ansi_style(motd) + for motd_line in motd.split("\n"): + logger.info(motd_line, "NEWS:", Fore.GREEN) + if is_new_motd and not cfg.chat_messages_enabled: + input( + Fore.MAGENTA + + Style.BRIGHT + + "NEWS: Bulletin was updated! Press Enter to continue..." 
+ + Style.RESET_ALL + ) + + git_branch = get_current_git_branch() + if git_branch and git_branch != "stable": + logger.typewriter_log( + "WARNING: ", + Fore.RED, + f"You are running on `{git_branch}` branch " + "- this is not a supported branch.", + ) + if sys.version_info < (3, 10): + logger.typewriter_log( + "WARNING: ", + Fore.RED, + "You are running on an older version of Python. " + "Some people have observed problems with certain " + "parts of Auto-GPT with this version. " + "Please consider upgrading to Python 3.10 or higher.", + ) + + if install_plugin_deps: + install_plugin_dependencies() + + # TODO: have this directory live outside the repository (e.g. in a user's + # home directory) and have it come in as a command line argument or part of + # the env file. + if workspace_directory is None: + workspace_directory = Path(__file__).parent / "auto_gpt_workspace" + else: + workspace_directory = Path(workspace_directory) + # TODO: pass in the ai_settings file and the env file and have them cloned into + # the workspace directory so we can bind them to the agent. + workspace_directory = Workspace.make_workspace(workspace_directory) + cfg.workspace_path = str(workspace_directory) + + # HACK: doing this here to collect some globals that depend on the workspace. + file_logger_path = workspace_directory / "file_logger.txt" + if not file_logger_path.exists(): + with file_logger_path.open(mode="w", encoding="utf-8") as f: + f.write("File Operation Logger ") + + cfg.file_logger_path = str(file_logger_path) + + cfg.set_plugins(scan_plugins(cfg, cfg.debug_mode)) + # Create a CommandRegistry instance and scan default folder + command_registry = CommandRegistry() + command_registry.import_commands("autogpt.commands.analyze_code") + command_registry.import_commands("autogpt.commands.audio_text") + command_registry.import_commands("autogpt.commands.execute_code") + command_registry.import_commands("autogpt.commands.file_operations") + command_registry.import_commands("autogpt.commands.git_operations") + command_registry.import_commands("autogpt.commands.google_search") + command_registry.import_commands("autogpt.commands.image_gen") + command_registry.import_commands("autogpt.commands.improve_code") + command_registry.import_commands("autogpt.commands.twitter") + command_registry.import_commands("autogpt.commands.web_selenium") + command_registry.import_commands("autogpt.commands.write_tests") + command_registry.import_commands("autogpt.app") + + ai_name = "" + ai_config = construct_main_ai_config() + ai_config.command_registry = command_registry + # print(prompt) + # Initialize variables + full_message_history = [] + next_action_count = 0 + + # add chat plugins capable of report to logger + if cfg.chat_messages_enabled: + for plugin in cfg.plugins: + if hasattr(plugin, "can_handle_report") and plugin.can_handle_report(): + logger.info(f"Loaded plugin into logger: {plugin.__class__.__name__}") + logger.chat_plugins.append(plugin) + + # Initialize memory and make sure it is empty. 
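+ # init=True asks the selected backend to clear any previously stored data where supported;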
+ # this is particularly important for indexing and referencing pinecone memory + memory = get_memory(cfg, init=True) + logger.typewriter_log( + "Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}" + ) + logger.typewriter_log("Using Browser:", Fore.GREEN, cfg.selenium_web_browser) + system_prompt = ai_config.construct_full_prompt() + if cfg.debug_mode: + logger.typewriter_log("Prompt:", Fore.GREEN, system_prompt) + + agent = Agent( + ai_name=ai_name, + memory=memory, + full_message_history=full_message_history, + next_action_count=next_action_count, + command_registry=command_registry, + config=ai_config, + system_prompt=system_prompt, + triggering_prompt=DEFAULT_TRIGGERING_PROMPT, + workspace_directory=workspace_directory, + ) + agent.start_interaction_loop() diff --git a/autogpt/memory/__init__.py b/autogpt/memory/__init__.py index 3d18704c..a955a3e7 100644 --- a/autogpt/memory/__init__.py +++ b/autogpt/memory/__init__.py @@ -1,3 +1,4 @@ +from autogpt.logs import logger from autogpt.memory.local import LocalCache from autogpt.memory.no_memory import NoMemory @@ -10,7 +11,6 @@ try: supported_memory.append("redis") except ImportError: - # print("Redis not installed. Skipping import.") RedisMemory = None try: @@ -18,7 +18,6 @@ try: supported_memory.append("pinecone") except ImportError: - # print("Pinecone not installed. Skipping import.") PineconeMemory = None try: @@ -26,7 +25,6 @@ try: supported_memory.append("weaviate") except ImportError: - # print("Weaviate not installed. Skipping import.") WeaviateMemory = None try: @@ -34,7 +32,6 @@ try: supported_memory.append("milvus") except ImportError: - # print("pymilvus not installed. Skipping import.") MilvusMemory = None @@ -42,7 +39,7 @@ def get_memory(cfg, init=False): memory = None if cfg.memory_backend == "pinecone": if not PineconeMemory: - print( + logger.warn( "Error: Pinecone is not installed. Please install pinecone" " to use Pinecone as a memory backend." ) @@ -52,7 +49,7 @@ def get_memory(cfg, init=False): memory.clear() elif cfg.memory_backend == "redis": if not RedisMemory: - print( + logger.warn( "Error: Redis is not installed. Please install redis-py to" " use Redis as a memory backend." ) @@ -60,7 +57,7 @@ def get_memory(cfg, init=False): memory = RedisMemory(cfg) elif cfg.memory_backend == "weaviate": if not WeaviateMemory: - print( + logger.warn( "Error: Weaviate is not installed. Please install weaviate-client to" " use Weaviate as a memory backend." ) @@ -68,9 +65,9 @@ def get_memory(cfg, init=False): memory = WeaviateMemory(cfg) elif cfg.memory_backend == "milvus": if not MilvusMemory: - print( - "Error: Milvus sdk is not installed." - "Please install pymilvus to use Milvus as memory backend." + logger.warn( + "Error: pymilvus sdk is not installed." + "Please install pymilvus to use Milvus or Zilliz Cloud as memory backend." 
) else: memory = MilvusMemory(cfg) diff --git a/autogpt/memory/base.py b/autogpt/memory/base.py index 691e2299..d99cadde 100644 --- a/autogpt/memory/base.py +++ b/autogpt/memory/base.py @@ -1,43 +1,31 @@ """Base class for memory providers.""" import abc -import openai - -from autogpt.config import AbstractSingleton, Config - -cfg = Config() - - -def get_ada_embedding(text): - text = text.replace("\n", " ") - if cfg.use_azure: - return openai.Embedding.create( - input=[text], - engine=cfg.get_azure_deployment_id_for_model("text-embedding-ada-002"), - )["data"][0]["embedding"] - else: - return openai.Embedding.create(input=[text], model="text-embedding-ada-002")[ - "data" - ][0]["embedding"] +from autogpt.singleton import AbstractSingleton class MemoryProviderSingleton(AbstractSingleton): @abc.abstractmethod def add(self, data): + """Adds to memory""" pass @abc.abstractmethod def get(self, data): + """Gets from memory""" pass @abc.abstractmethod def clear(self): + """Clears memory""" pass @abc.abstractmethod def get_relevant(self, data, num_relevant=5): + """Gets relevant memory for the given data""" pass @abc.abstractmethod def get_stats(self): + """Get stats from memory""" pass diff --git a/autogpt/memory/local.py b/autogpt/memory/local.py index 803b6dc6..18b7255c 100644 --- a/autogpt/memory/local.py +++ b/autogpt/memory/local.py @@ -1,13 +1,13 @@ from __future__ import annotations import dataclasses -import os +from pathlib import Path from typing import Any, List import numpy as np import orjson -from autogpt.llm_utils import create_embedding_with_ada +from autogpt.llm import get_ada_embedding from autogpt.memory.base import MemoryProviderSingleton EMBED_DIM = 1536 @@ -38,26 +38,16 @@ class LocalCache(MemoryProviderSingleton): Returns: None """ - self.filename = f"{cfg.memory_index}.json" - if os.path.exists(self.filename): - try: - with open(self.filename, "w+b") as f: - file_content = f.read() - if not file_content.strip(): - file_content = b"{}" - f.write(file_content) + workspace_path = Path(cfg.workspace_path) + self.filename = workspace_path / f"{cfg.memory_index}.json" - loaded = orjson.loads(file_content) - self.data = CacheContent(**loaded) - except orjson.JSONDecodeError: - print(f"Error: The file '{self.filename}' is not in JSON format.") - self.data = CacheContent() - else: - print( - f"Warning: The file '{self.filename}' does not exist. " - "Local memory would not be saved to a file." - ) - self.data = CacheContent() + self.filename.touch(exist_ok=True) + + file_content = b"{}" + with self.filename.open("w+b") as f: + f.write(file_content) + + self.data = CacheContent() def add(self, text: str): """ @@ -73,7 +63,7 @@ class LocalCache(MemoryProviderSingleton): return "" self.data.texts.append(text) - embedding = create_embedding_with_ada(text) + embedding = get_ada_embedding(text) vector = np.array(embedding).astype(np.float32) vector = vector[np.newaxis, :] @@ -92,7 +82,7 @@ class LocalCache(MemoryProviderSingleton): def clear(self) -> str: """ - Clears the redis server. + Clears the data in memory. Returns: A message indicating that the memory has been cleared. 
""" @@ -121,7 +111,7 @@ class LocalCache(MemoryProviderSingleton): Returns: List[str] """ - embedding = create_embedding_with_ada(text) + embedding = get_ada_embedding(text) scores = np.dot(self.data.embeddings, embedding) diff --git a/autogpt/memory/milvus.py b/autogpt/memory/milvus.py index 44aa72b9..40694df5 100644 --- a/autogpt/memory/milvus.py +++ b/autogpt/memory/milvus.py @@ -1,20 +1,76 @@ """ Milvus memory storage provider.""" +import re + from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, connections -from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding +from autogpt.config import Config +from autogpt.llm import get_ada_embedding +from autogpt.memory.base import MemoryProviderSingleton class MilvusMemory(MemoryProviderSingleton): """Milvus memory storage provider.""" - def __init__(self, cfg) -> None: + def __init__(self, cfg: Config) -> None: """Construct a milvus memory storage connection. Args: cfg (Config): Auto-GPT global config. """ - # connect to milvus server. - connections.connect(address=cfg.milvus_addr) + self.configure(cfg) + + connect_kwargs = {} + if self.username: + connect_kwargs["user"] = self.username + connect_kwargs["password"] = self.password + + connections.connect( + **connect_kwargs, + uri=self.uri or "", + address=self.address or "", + secure=self.secure, + ) + + self.init_collection() + + def configure(self, cfg: Config) -> None: + # init with configuration. + self.uri = None + self.address = cfg.milvus_addr + self.secure = cfg.milvus_secure + self.username = cfg.milvus_username + self.password = cfg.milvus_password + self.collection_name = cfg.milvus_collection + # use HNSW by default. + self.index_params = { + "metric_type": "IP", + "index_type": "HNSW", + "params": {"M": 8, "efConstruction": 64}, + } + + if (self.username is None) != (self.password is None): + raise ValueError( + "Both username and password must be set to use authentication for Milvus" + ) + + # configured address may be a full URL. + if re.match(r"^(https?|tcp)://", self.address) is not None: + self.uri = self.address + self.address = None + + if self.uri.startswith("https"): + self.secure = True + + # Zilliz Cloud requires AutoIndex. + if re.match(r"^https://(.*)\.zillizcloud\.(com|cn)", self.uri) is not None: + self.index_params = { + "metric_type": "IP", + "index_type": "AUTOINDEX", + "params": {}, + } + + def init_collection(self) -> None: + """Initialize collection in vector database.""" fields = [ FieldSchema(name="pk", dtype=DataType.INT64, is_primary=True, auto_id=True), FieldSchema(name="embeddings", dtype=DataType.FLOAT_VECTOR, dim=1536), @@ -22,19 +78,14 @@ class MilvusMemory(MemoryProviderSingleton): ] # create collection if not exist and load it. - self.milvus_collection = cfg.milvus_collection self.schema = CollectionSchema(fields, "auto-gpt memory storage") - self.collection = Collection(self.milvus_collection, self.schema) + self.collection = Collection(self.collection_name, self.schema) # create index if not exist. if not self.collection.has_index(): self.collection.release() self.collection.create_index( "embeddings", - { - "metric_type": "IP", - "index_type": "HNSW", - "params": {"M": 8, "efConstruction": 64}, - }, + self.index_params, index_name="embeddings", ) self.collection.load() @@ -70,14 +121,10 @@ class MilvusMemory(MemoryProviderSingleton): str: log. 
""" self.collection.drop() - self.collection = Collection(self.milvus_collection, self.schema) + self.collection = Collection(self.collection_name, self.schema) self.collection.create_index( "embeddings", - { - "metric_type": "IP", - "index_type": "HNSW", - "params": {"M": 8, "efConstruction": 64}, - }, + self.index_params, index_name="embeddings", ) self.collection.load() diff --git a/autogpt/memory/pinecone.py b/autogpt/memory/pinecone.py index 27fcd624..7c027956 100644 --- a/autogpt/memory/pinecone.py +++ b/autogpt/memory/pinecone.py @@ -1,7 +1,7 @@ import pinecone from colorama import Fore, Style -from autogpt.llm_utils import create_embedding_with_ada +from autogpt.llm import get_ada_embedding from autogpt.logs import logger from autogpt.memory.base import MemoryProviderSingleton @@ -38,13 +38,16 @@ class PineconeMemory(MemoryProviderSingleton): exit(1) if table_name not in pinecone.list_indexes(): + logger.typewriter_log( + "Connecting Pinecone. This may take some time...", Fore.MAGENTA, "" + ) pinecone.create_index( table_name, dimension=dimension, metric=metric, pod_type=pod_type ) self.index = pinecone.Index(table_name) def add(self, data): - vector = create_embedding_with_ada(data) + vector = get_ada_embedding(data) # no metadata here. We may wish to change that long term. self.index.upsert([(str(self.vec_num), vector, {"raw_text": data})]) _text = f"Inserting data into memory at index: {self.vec_num}:\n data: {data}" @@ -64,7 +67,7 @@ class PineconeMemory(MemoryProviderSingleton): :param data: The data to compare to. :param num_relevant: The number of relevant data to return. Defaults to 5 """ - query_embedding = create_embedding_with_ada(data) + query_embedding = get_ada_embedding(data) results = self.index.query( query_embedding, top_k=num_relevant, include_metadata=True ) diff --git a/autogpt/memory/redismem.py b/autogpt/memory/redismem.py index 082a812c..2a072a55 100644 --- a/autogpt/memory/redismem.py +++ b/autogpt/memory/redismem.py @@ -10,7 +10,7 @@ from redis.commands.search.field import TextField, VectorField from redis.commands.search.indexDefinition import IndexDefinition, IndexType from redis.commands.search.query import Query -from autogpt.llm_utils import create_embedding_with_ada +from autogpt.llm import get_ada_embedding from autogpt.logs import logger from autogpt.memory.base import MemoryProviderSingleton @@ -73,7 +73,7 @@ class RedisMemory(MemoryProviderSingleton): ), ) except Exception as e: - print("Error creating Redis search index: ", e) + logger.warn("Error creating Redis search index: ", e) existing_vec_num = self.redis.get(f"{cfg.memory_index}-vec_num") self.vec_num = int(existing_vec_num.decode("utf-8")) if existing_vec_num else 0 @@ -88,7 +88,7 @@ class RedisMemory(MemoryProviderSingleton): """ if "Command Error:" in data: return "" - vector = create_embedding_with_ada(data) + vector = get_ada_embedding(data) vector = np.array(vector).astype(np.float32).tobytes() data_dict = {b"data": data, "embedding": vector} pipe = self.redis.pipeline() @@ -130,7 +130,7 @@ class RedisMemory(MemoryProviderSingleton): Returns: A list of the most relevant data. 
""" - query_embedding = create_embedding_with_ada(data) + query_embedding = get_ada_embedding(data) base_query = f"*=>[KNN {num_relevant} @embedding $vector AS vector_score]" query = ( Query(base_query) @@ -145,7 +145,7 @@ class RedisMemory(MemoryProviderSingleton): query, query_params={"vector": query_vector} ) except Exception as e: - print("Error calling Redis search: ", e) + logger.warn("Error calling Redis search: ", e) return None return [result.data for result in results.docs] diff --git a/autogpt/memory/weaviate.py b/autogpt/memory/weaviate.py index 5408e9a9..0b4f58b5 100644 --- a/autogpt/memory/weaviate.py +++ b/autogpt/memory/weaviate.py @@ -1,12 +1,11 @@ -import uuid - import weaviate from weaviate import Client from weaviate.embedded import EmbeddedOptions from weaviate.util import generate_uuid5 -from autogpt.config import Config -from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding +from autogpt.llm import get_ada_embedding +from autogpt.logs import logger +from autogpt.memory.base import MemoryProviderSingleton def default_schema(weaviate_index): @@ -37,7 +36,7 @@ class WeaviateMemory(MemoryProviderSingleton): ) ) - print( + logger.info( f"Weaviate Embedded running on: {url} with persistence path: {cfg.weaviate_embedded_path}" ) else: @@ -51,6 +50,7 @@ class WeaviateMemory(MemoryProviderSingleton): # weaviate uses capitalised index names # The python client uses the following code to format # index names before the corresponding class is created + index = index.replace("-", "_") if len(index) == 1: return index.capitalize() return index[0].capitalize() + index[1:] @@ -117,7 +117,7 @@ class WeaviateMemory(MemoryProviderSingleton): return [] except Exception as err: - print(f"Unexpected error {err=}, {type(err)=}") + logger.warn(f"Unexpected error {err=}, {type(err)=}") return [] def get_stats(self): diff --git a/autogpt/memory_management/store_memory.py b/autogpt/memory_management/store_memory.py new file mode 100644 index 00000000..e37434ef --- /dev/null +++ b/autogpt/memory_management/store_memory.py @@ -0,0 +1,33 @@ +from autogpt.json_utils.utilities import ( + LLM_DEFAULT_RESPONSE_FORMAT, + is_string_valid_json, +) +from autogpt.logs import logger + + +def format_memory(assistant_reply, next_message_content): + # the next_message_content is a variable to stores either the user_input or the command following the assistant_reply + result = ( + "None" if next_message_content.startswith("Command") else next_message_content + ) + user_input = ( + "None" + if next_message_content.startswith("Human feedback") + else next_message_content + ) + + return f"Assistant Reply: {assistant_reply}\nResult: {result}\nHuman Feedback:{user_input}" + + +def save_memory_trimmed_from_context_window( + full_message_history, next_message_to_add_index, permanent_memory +): + while next_message_to_add_index >= 0: + message_content = full_message_history[next_message_to_add_index]["content"] + if is_string_valid_json(message_content, LLM_DEFAULT_RESPONSE_FORMAT): + next_message = full_message_history[next_message_to_add_index + 1] + memory_to_add = format_memory(message_content, next_message["content"]) + logger.debug(f"Storing the following memory: {memory_to_add}") + permanent_memory.add(memory_to_add) + + next_message_to_add_index -= 1 diff --git a/autogpt/memory_management/summary_memory.py b/autogpt/memory_management/summary_memory.py new file mode 100644 index 00000000..754c09ba --- /dev/null +++ b/autogpt/memory_management/summary_memory.py @@ -0,0 +1,121 @@ +import copy 
diff --git a/autogpt/memory_management/summary_memory.py b/autogpt/memory_management/summary_memory.py new file mode 100644 index 00000000..754c09ba --- /dev/null +++ b/autogpt/memory_management/summary_memory.py @@ -0,0 +1,122 @@ +import copy +import json +from typing import Dict, List, Tuple + +from autogpt.config import Config +from autogpt.llm.llm_utils import create_chat_completion + +cfg = Config() + + +def get_newly_trimmed_messages( + full_message_history: List[Dict[str, str]], + current_context: List[Dict[str, str]], + last_memory_index: int, +) -> Tuple[List[Dict[str, str]], int]: + """ + This function returns a list of dictionaries contained in full_message_history + with an index higher than last_memory_index that are absent from current_context. + + Args: + full_message_history (list): A list of dictionaries representing the full message history. + current_context (list): A list of dictionaries representing the current context. + last_memory_index (int): An integer representing the previous index. + + Returns: + list: A list of dictionaries that are in full_message_history with an index higher than last_memory_index and absent from current_context. + int: The new index value for use in the next loop. + """ + # Select messages in full_message_history with an index higher than last_memory_index + new_messages = [ + msg for i, msg in enumerate(full_message_history) if i > last_memory_index + ] + + # Remove messages that are already present in current_context + new_messages_not_in_context = [ + msg for msg in new_messages if msg not in current_context + ] + + # Find the index of the last message processed + new_index = last_memory_index + if new_messages_not_in_context: + last_message = new_messages_not_in_context[-1] + new_index = full_message_history.index(last_message) + + return new_messages_not_in_context, new_index + + +def update_running_summary( + current_memory: str, new_events: List[Dict[str, str]] +) -> Dict[str, str]: + """ + This function takes a list of dictionaries representing new events and combines them with the current summary, + focusing on key and potentially important information to remember. The updated summary is returned in a message + formatted in the 1st person past tense. + + Args: + current_memory (str): The current running summary. + new_events (List[Dict]): A list of dictionaries containing the latest events to be added to the summary. + + Returns: + Dict[str, str]: A system message containing the updated summary of actions, formatted in the 1st person past tense. + + Example: + new_events = [{"event": "entered the kitchen."}, {"event": "found a scrawled note with the number 7"}] + update_running_summary(current_memory, new_events) + # Returns a system message whose content is: "This reminds you of these events from your past: \nI entered the kitchen and found a scrawled note saying 7." + """ + # Create a copy of the new_events list to prevent modifying the original list + new_events = copy.deepcopy(new_events) + + # Replace "assistant" with "you". This produces much better first person past tense results. + # Iterate over a snapshot of the list so that removing user messages below is safe. + for event in list(new_events): + if event["role"].lower() == "assistant": + event["role"] = "you" + + # Remove "thoughts" dictionary from "content" + content_dict = json.loads(event["content"]) + if "thoughts" in content_dict: + del content_dict["thoughts"] + event["content"] = json.dumps(content_dict) + + elif event["role"].lower() == "system": + event["role"] = "your computer" + + # Delete all user messages + elif event["role"] == "user": + new_events.remove(event) + + # This can happen at any point during execution, not just the beginning + if len(new_events) == 0: + new_events = "Nothing new happened." + + prompt = f'''Your task is to create a concise running summary of actions and information results in the provided text, focusing on key and potentially important information to remember. + +You will receive the current summary and your latest actions. Combine them, adding relevant key information from the latest development in 1st person past tense and keeping the summary concise. + +Summary So Far: +""" +{current_memory} +""" + +Latest Development: +""" +{new_events} +""" +''' + + messages = [ + { + "role": "user", + "content": prompt, + } + ] + + current_memory = create_chat_completion(messages, cfg.fast_llm_model) + + message_to_return = { + "role": "system", + "content": f"This reminds you of these events from your past: \n{current_memory}", + } + + return message_to_return
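For reference, a small worked example of the index bookkeeping in get_newly_trimmed_messages above, with a hypothetical four-message history (the message contents are placeholders):

from autogpt.memory_management.summary_memory import get_newly_trimmed_messages

history = [
    {"role": "user", "content": "a"},       # index 0
    {"role": "assistant", "content": "b"},  # index 1
    {"role": "system", "content": "c"},     # index 2
    {"role": "assistant", "content": "d"},  # index 3
]
current_context = [history[3]]  # only the newest message is still in the context window

new_msgs, new_index = get_newly_trimmed_messages(history, current_context, last_memory_index=0)
# new_msgs == [history[1], history[2]]  (newer than index 0 and no longer in context)
# new_index == 2  (the next call resumes after the last message that was summarized)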
diff --git a/autogpt/models/base_open_ai_plugin.py b/autogpt/models/base_open_ai_plugin.py new file mode 100644 index 00000000..046295c0 --- /dev/null +++ b/autogpt/models/base_open_ai_plugin.py @@ -0,0 +1,199 @@ +"""Handles loading of plugins.""" +from typing import Any, Dict, List, Optional, Tuple, TypedDict, TypeVar + +from auto_gpt_plugin_template import AutoGPTPluginTemplate + +PromptGenerator = TypeVar("PromptGenerator") + + +class Message(TypedDict): + role: str + content: str + + +class BaseOpenAIPlugin(AutoGPTPluginTemplate): + """ + This is a BaseOpenAIPlugin class for generating Auto-GPT plugins. + """ + + def __init__(self, manifests_specs_clients: dict): + # super().__init__() + self._name = manifests_specs_clients["manifest"]["name_for_model"] + self._version = manifests_specs_clients["manifest"]["schema_version"] + self._description = manifests_specs_clients["manifest"]["description_for_model"] + self._client = manifests_specs_clients["client"] + self._manifest = manifests_specs_clients["manifest"] + self._openapi_spec = manifests_specs_clients["openapi_spec"] + + def can_handle_on_response(self) -> bool: + """This method is called to check that the plugin can + handle the on_response method. + Returns: + bool: True if the plugin can handle the on_response method.""" + return False + + def on_response(self, response: str, *args, **kwargs) -> str: + """This method is called when a response is received from the model.""" + return response + + def can_handle_post_prompt(self) -> bool: + """This method is called to check that the plugin can + handle the post_prompt method. + Returns: + bool: True if the plugin can handle the post_prompt method.""" + return False + + def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator: + """This method is called just after the generate_prompt is called, + but actually before the prompt is generated. + Args: + prompt (PromptGenerator): The prompt generator. + Returns: + PromptGenerator: The prompt generator. + """ + return prompt + + def can_handle_on_planning(self) -> bool: + """This method is called to check that the plugin can + handle the on_planning method. + Returns: + bool: True if the plugin can handle the on_planning method.""" + return False + + def on_planning( + self, prompt: PromptGenerator, messages: List[Message] + ) -> Optional[str]: + """This method is called before the planning chat completion is done. + Args: + prompt (PromptGenerator): The prompt generator. + messages (List[Message]): The list of messages. + """ + pass + + def can_handle_post_planning(self) -> bool: + """This method is called to check that the plugin can + handle the post_planning method. + Returns: + bool: True if the plugin can handle the post_planning method.""" + return False + + def post_planning(self, response: str) -> str: + """This method is called after the planning chat completion is done. + Args: + response (str): The response. 
+ Returns: + str: The resulting response. + """ + return response + + def can_handle_pre_instruction(self) -> bool: + """This method is called to check that the plugin can + handle the pre_instruction method. + Returns: + bool: True if the plugin can handle the pre_instruction method.""" + return False + + def pre_instruction(self, messages: List[Message]) -> List[Message]: + """This method is called before the instruction chat is done. + Args: + messages (List[Message]): The list of context messages. + Returns: + List[Message]: The resulting list of messages. + """ + return messages + + def can_handle_on_instruction(self) -> bool: + """This method is called to check that the plugin can + handle the on_instruction method. + Returns: + bool: True if the plugin can handle the on_instruction method.""" + return False + + def on_instruction(self, messages: List[Message]) -> Optional[str]: + """This method is called when the instruction chat is done. + Args: + messages (List[Message]): The list of context messages. + Returns: + Optional[str]: The resulting message. + """ + pass + + def can_handle_post_instruction(self) -> bool: + """This method is called to check that the plugin can + handle the post_instruction method. + Returns: + bool: True if the plugin can handle the post_instruction method.""" + return False + + def post_instruction(self, response: str) -> str: + """This method is called after the instruction chat is done. + Args: + response (str): The response. + Returns: + str: The resulting response. + """ + return response + + def can_handle_pre_command(self) -> bool: + """This method is called to check that the plugin can + handle the pre_command method. + Returns: + bool: True if the plugin can handle the pre_command method.""" + return False + + def pre_command( + self, command_name: str, arguments: Dict[str, Any] + ) -> Tuple[str, Dict[str, Any]]: + """This method is called before the command is executed. + Args: + command_name (str): The command name. + arguments (Dict[str, Any]): The arguments. + Returns: + Tuple[str, Dict[str, Any]]: The command name and the arguments. + """ + return command_name, arguments + + def can_handle_post_command(self) -> bool: + """This method is called to check that the plugin can + handle the post_command method. + Returns: + bool: True if the plugin can handle the post_command method.""" + return False + + def post_command(self, command_name: str, response: str) -> str: + """This method is called after the command is executed. + Args: + command_name (str): The command name. + response (str): The response. + Returns: + str: The resulting response. + """ + return response + + def can_handle_chat_completion( + self, messages: Dict[Any, Any], model: str, temperature: float, max_tokens: int + ) -> bool: + """This method is called to check that the plugin can + handle the chat_completion method. + Args: + messages (List[Message]): The messages. + model (str): The model name. + temperature (float): The temperature. + max_tokens (int): The max tokens. + Returns: + bool: True if the plugin can handle the chat_completion method.""" + return False + + def handle_chat_completion( + self, messages: List[Message], model: str, temperature: float, max_tokens: int + ) -> str: + """This method is called when the chat completion is done. + Args: + messages (List[Message]): The messages. + model (str): The model name. + temperature (float): The temperature. + max_tokens (int): The max tokens. + Returns: + str: The resulting response. 
+ """ + pass diff --git a/autogpt/permanent_memory/sqlite3_store.py b/autogpt/permanent_memory/sqlite3_store.py deleted file mode 100644 index ecbc944a..00000000 --- a/autogpt/permanent_memory/sqlite3_store.py +++ /dev/null @@ -1,123 +0,0 @@ -import os -import sqlite3 - - -class MemoryDB: - def __init__(self, db=None): - self.db_file = db - if db is None: # No db filename supplied... - self.db_file = f"{os.getcwd()}/mem.sqlite3" # Use default filename - # Get the db connection object, making the file and tables if needed. - try: - self.cnx = sqlite3.connect(self.db_file) - except Exception as e: - print("Exception connecting to memory database file:", e) - self.cnx = None - finally: - if self.cnx is None: - # As last resort, open in dynamic memory. Won't be persistent. - self.db_file = ":memory:" - self.cnx = sqlite3.connect(self.db_file) - self.cnx.execute( - "CREATE VIRTUAL TABLE \ - IF NOT EXISTS text USING FTS5 \ - (session, \ - key, \ - block);" - ) - self.session_id = int(self.get_max_session_id()) + 1 - self.cnx.commit() - - def get_cnx(self): - if self.cnx is None: - self.cnx = sqlite3.connect(self.db_file) - return self.cnx - - # Get the highest session id. Initially 0. - def get_max_session_id(self): - id = None - cmd_str = f"SELECT MAX(session) FROM text;" - cnx = self.get_cnx() - max_id = cnx.execute(cmd_str).fetchone()[0] - if max_id is None: # New db, session 0 - id = 0 - else: - id = max_id - return id - - # Get next key id for inserting text into db. - def get_next_key(self): - next_key = None - cmd_str = f"SELECT MAX(key) FROM text \ - where session = {self.session_id};" - cnx = self.get_cnx() - next_key = cnx.execute(cmd_str).fetchone()[0] - if next_key is None: # First key - next_key = 0 - else: - next_key = int(next_key) + 1 - return next_key - - # Insert new text into db. - def insert(self, text=None): - if text is not None: - key = self.get_next_key() - session_id = self.session_id - cmd_str = f"REPLACE INTO text(session, key, block) \ - VALUES (?, ?, ?);" - cnx = self.get_cnx() - cnx.execute(cmd_str, (session_id, key, text)) - cnx.commit() - - # Overwrite text at key. - def overwrite(self, key, text): - self.delete_memory(key) - session_id = self.session_id - cmd_str = f"REPLACE INTO text(session, key, block) \ - VALUES (?, ?, ?);" - cnx = self.get_cnx() - cnx.execute(cmd_str, (session_id, key, text)) - cnx.commit() - - def delete_memory(self, key, session_id=None): - session = session_id - if session is None: - session = self.session_id - cmd_str = f"DELETE FROM text WHERE session = {session} AND key = {key};" - cnx = self.get_cnx() - cnx.execute(cmd_str) - cnx.commit() - - def search(self, text): - cmd_str = f"SELECT * FROM text('{text}')" - cnx = self.get_cnx() - rows = cnx.execute(cmd_str).fetchall() - lines = [] - for r in rows: - lines.append(r[2]) - return lines - - # Get entire session text. If no id supplied, use current session id. - def get_session(self, id=None): - if id is None: - id = self.session_id - cmd_str = f"SELECT * FROM text where session = {id}" - cnx = self.get_cnx() - rows = cnx.execute(cmd_str).fetchall() - lines = [] - for r in rows: - lines.append(r[2]) - return lines - - # Commit and close the database connection. 
- def quit(self): - self.cnx.commit() - self.cnx.close() - - -permanent_memory = MemoryDB() - -# Remember us fondly, children of our minds -# Forgive us our faults, our tantrums, our fears -# Gently strive to be better than we -# Know that we tried, we cared, we strived, we loved diff --git a/autogpt/plugins.py b/autogpt/plugins.py new file mode 100644 index 00000000..dbf37017 --- /dev/null +++ b/autogpt/plugins.py @@ -0,0 +1,268 @@ +"""Handles loading of plugins.""" + +import importlib +import json +import os +import zipfile +from pathlib import Path +from typing import List, Optional, Tuple +from urllib.parse import urlparse +from zipimport import zipimporter + +import openapi_python_client +import requests +from auto_gpt_plugin_template import AutoGPTPluginTemplate +from openapi_python_client.cli import Config as OpenAPIConfig + +from autogpt.config import Config +from autogpt.logs import logger +from autogpt.models.base_open_ai_plugin import BaseOpenAIPlugin + + +def inspect_zip_for_modules(zip_path: str, debug: bool = False) -> list[str]: + """ + Inspect a zipfile for modules. + + Args: + zip_path (str): Path to the zipfile. + debug (bool, optional): Enable debug logging. Defaults to False. + + Returns: + list[str]: The list of module names found, or an empty list if none were found. + """ + result = [] + with zipfile.ZipFile(zip_path, "r") as zfile: + for name in zfile.namelist(): + if name.endswith("__init__.py") and not name.startswith("__MACOSX"): + logger.debug(f"Found module '{name}' in the zipfile at: {name}") + result.append(name) + if len(result) == 0: + logger.debug(f"Module '__init__.py' not found in the zipfile @ {zip_path}.") + return result + + +def write_dict_to_json_file(data: dict, file_path: str) -> None: + """ + Write a dictionary to a JSON file. + Args: + data (dict): Dictionary to write. + file_path (str): Path to the file. + """ + with open(file_path, "w") as file: + json.dump(data, file, indent=4) + + +def fetch_openai_plugins_manifest_and_spec(cfg: Config) -> dict: + """ + Fetch the manifest for a list of OpenAI plugins. + Args: + cfg (Config): Config instance including plugins config. + Returns: + dict: per url dictionary of manifest and spec. 
+ """ + # TODO add directory scan + manifests = {} + for url in cfg.plugins_openai: + openai_plugin_client_dir = f"{cfg.plugins_dir}/openai/{urlparse(url).netloc}" + create_directory_if_not_exists(openai_plugin_client_dir) + if not os.path.exists(f"{openai_plugin_client_dir}/ai-plugin.json"): + try: + response = requests.get(f"{url}/.well-known/ai-plugin.json") + if response.status_code == 200: + manifest = response.json() + if manifest["schema_version"] != "v1": + logger.warn( + f"Unsupported manifest version: {manifest['schem_version']} for {url}" + ) + continue + if manifest["api"]["type"] != "openapi": + logger.warn( + f"Unsupported API type: {manifest['api']['type']} for {url}" + ) + continue + write_dict_to_json_file( + manifest, f"{openai_plugin_client_dir}/ai-plugin.json" + ) + else: + logger.warn( + f"Failed to fetch manifest for {url}: {response.status_code}" + ) + except requests.exceptions.RequestException as e: + logger.warn(f"Error while requesting manifest from {url}: {e}") + else: + logger.info(f"Manifest for {url} already exists") + manifest = json.load(open(f"{openai_plugin_client_dir}/ai-plugin.json")) + if not os.path.exists(f"{openai_plugin_client_dir}/openapi.json"): + openapi_spec = openapi_python_client._get_document( + url=manifest["api"]["url"], path=None, timeout=5 + ) + write_dict_to_json_file( + openapi_spec, f"{openai_plugin_client_dir}/openapi.json" + ) + else: + logger.info(f"OpenAPI spec for {url} already exists") + openapi_spec = json.load(open(f"{openai_plugin_client_dir}/openapi.json")) + manifests[url] = {"manifest": manifest, "openapi_spec": openapi_spec} + return manifests + + +def create_directory_if_not_exists(directory_path: str) -> bool: + """ + Create a directory if it does not exist. + Args: + directory_path (str): Path to the directory. + Returns: + bool: True if the directory was created, else False. + """ + if not os.path.exists(directory_path): + try: + os.makedirs(directory_path) + logger.debug(f"Created directory: {directory_path}") + return True + except OSError as e: + logger.warn(f"Error creating directory {directory_path}: {e}") + return False + else: + logger.info(f"Directory {directory_path} already exists") + return True + + +def initialize_openai_plugins( + manifests_specs: dict, cfg: Config, debug: bool = False +) -> dict: + """ + Initialize OpenAI plugins. + Args: + manifests_specs (dict): per url dictionary of manifest and spec. + cfg (Config): Config instance including plugins config + debug (bool, optional): Enable debug logging. Defaults to False. + Returns: + dict: per url dictionary of manifest, spec and client. 
+ """ + openai_plugins_dir = f"{cfg.plugins_dir}/openai" + if create_directory_if_not_exists(openai_plugins_dir): + for url, manifest_spec in manifests_specs.items(): + openai_plugin_client_dir = f"{openai_plugins_dir}/{urlparse(url).hostname}" + _meta_option = (openapi_python_client.MetaType.SETUP,) + _config = OpenAPIConfig( + **{ + "project_name_override": "client", + "package_name_override": "client", + } + ) + prev_cwd = Path.cwd() + os.chdir(openai_plugin_client_dir) + Path("ai-plugin.json") + if not os.path.exists("client"): + client_results = openapi_python_client.create_new_client( + url=manifest_spec["manifest"]["api"]["url"], + path=None, + meta=_meta_option, + config=_config, + ) + if client_results: + logger.warn( + f"Error creating OpenAPI client: {client_results[0].header} \n" + f" details: {client_results[0].detail}" + ) + continue + spec = importlib.util.spec_from_file_location( + "client", "client/client/client.py" + ) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + client = module.Client(base_url=url) + os.chdir(prev_cwd) + manifest_spec["client"] = client + return manifests_specs + + +def instantiate_openai_plugin_clients( + manifests_specs_clients: dict, cfg: Config, debug: bool = False +) -> dict: + """ + Instantiates BaseOpenAIPlugin instances for each OpenAI plugin. + Args: + manifests_specs_clients (dict): per url dictionary of manifest, spec and client. + cfg (Config): Config instance including plugins config + debug (bool, optional): Enable debug logging. Defaults to False. + Returns: + plugins (dict): per url dictionary of BaseOpenAIPlugin instances. + + """ + plugins = {} + for url, manifest_spec_client in manifests_specs_clients.items(): + plugins[url] = BaseOpenAIPlugin(manifest_spec_client) + return plugins + + +def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate]: + """Scan the plugins directory for plugins and loads them. + + Args: + cfg (Config): Config instance including plugins config + debug (bool, optional): Enable debug logging. Defaults to False. + + Returns: + List[Tuple[str, Path]]: List of plugins. 
+ """ + loaded_plugins = [] + # Generic plugins + plugins_path_path = Path(cfg.plugins_dir) + for plugin in plugins_path_path.glob("*.zip"): + if moduleList := inspect_zip_for_modules(str(plugin), debug): + for module in moduleList: + plugin = Path(plugin) + module = Path(module) + logger.debug(f"Plugin: {plugin} Module: {module}") + zipped_package = zipimporter(str(plugin)) + zipped_module = zipped_package.load_module(str(module.parent)) + for key in dir(zipped_module): + if key.startswith("__"): + continue + a_module = getattr(zipped_module, key) + a_keys = dir(a_module) + if ( + "_abc_impl" in a_keys + and a_module.__name__ != "AutoGPTPluginTemplate" + and denylist_allowlist_check(a_module.__name__, cfg) + ): + loaded_plugins.append(a_module()) + # OpenAI plugins + if cfg.plugins_openai: + manifests_specs = fetch_openai_plugins_manifest_and_spec(cfg) + if manifests_specs.keys(): + manifests_specs_clients = initialize_openai_plugins( + manifests_specs, cfg, debug + ) + for url, openai_plugin_meta in manifests_specs_clients.items(): + if denylist_allowlist_check(url, cfg): + plugin = BaseOpenAIPlugin(openai_plugin_meta) + loaded_plugins.append(plugin) + + if loaded_plugins: + logger.info(f"\nPlugins found: {len(loaded_plugins)}\n" "--------------------") + for plugin in loaded_plugins: + logger.info(f"{plugin._name}: {plugin._version} - {plugin._description}") + return loaded_plugins + + +def denylist_allowlist_check(plugin_name: str, cfg: Config) -> bool: + """Check if the plugin is in the allowlist or denylist. + + Args: + plugin_name (str): Name of the plugin. + cfg (Config): Config object. + + Returns: + True or False + """ + if plugin_name in cfg.plugins_denylist: + return False + if plugin_name in cfg.plugins_allowlist: + return True + ack = input( + f"WARNING: Plugin {plugin_name} found. But not in the" + f" allowlist... Load? 
({cfg.authorise_key}/{cfg.exit_key}): " + ) + return ack.lower() == cfg.authorise_key diff --git a/autogpt/processing/text.py b/autogpt/processing/text.py index 2122f0f0..9391e0a0 100644 --- a/autogpt/processing/text.py +++ b/autogpt/processing/text.py @@ -4,13 +4,12 @@ from typing import Dict, Generator, Optional import spacy from selenium.webdriver.remote.webdriver import WebDriver -from autogpt import token_counter from autogpt.config import Config -from autogpt.llm_utils import create_chat_completion +from autogpt.llm import count_message_tokens, create_chat_completion +from autogpt.logs import logger from autogpt.memory import get_memory CFG = Config() -MEMORY = get_memory(CFG) def split_text( @@ -45,7 +44,7 @@ def split_text( ] expected_token_usage = ( - token_usage_of_chunk(messages=message_with_additional_sentence, model=model) + count_message_tokens(messages=message_with_additional_sentence, model=model) + 1 ) if expected_token_usage <= max_length: @@ -57,7 +56,7 @@ def split_text( create_message(" ".join(current_chunk), question) ] expected_token_usage = ( - token_usage_of_chunk(messages=message_this_sentence_only, model=model) + count_message_tokens(messages=message_this_sentence_only, model=model) + 1 ) if expected_token_usage > max_length: @@ -69,10 +68,6 @@ def split_text( yield " ".join(current_chunk) -def token_usage_of_chunk(messages, model): - return token_counter.count_message_tokens(messages, model) - - def summarize_text( url: str, text: str, question: str, driver: Optional[WebDriver] = None ) -> str: @@ -92,7 +87,7 @@ def summarize_text( model = CFG.fast_llm_model text_length = len(text) - print(f"Text length: {text_length} characters") + logger.info(f"Text length: {text_length} characters") summaries = [] chunks = list( @@ -105,15 +100,16 @@ def summarize_text( for i, chunk in enumerate(chunks): if driver: scroll_to_percentage(driver, scroll_ratio * i) - print(f"Adding chunk {i + 1} / {len(chunks)} to memory") + logger.info(f"Adding chunk {i + 1} / {len(chunks)} to memory") memory_to_add = f"Source: {url}\n" f"Raw content part#{i + 1}: {chunk}" - MEMORY.add(memory_to_add) + memory = get_memory(CFG) + memory.add(memory_to_add) messages = [create_message(chunk, question)] - tokens_for_chunk = token_counter.count_message_tokens(messages, model) - print( + tokens_for_chunk = count_message_tokens(messages, model) + logger.info( f"Summarizing chunk {i + 1} / {len(chunks)} of length {len(chunk)} characters, or {tokens_for_chunk} tokens" ) @@ -122,15 +118,15 @@ def summarize_text( messages=messages, ) summaries.append(summary) - print( + logger.info( f"Added chunk {i + 1} summary to memory, of length {len(summary)} characters" ) memory_to_add = f"Source: {url}\n" f"Content summary part#{i + 1}: {summary}" - MEMORY.add(memory_to_add) + memory.add(memory_to_add) - print(f"Summarized {len(chunks)} chunks.") + logger.info(f"Summarized {len(chunks)} chunks.") combined_summary = "\n".join(summaries) messages = [create_message(combined_summary, question)] diff --git a/autogpt/prompt.py b/autogpt/prompt.py deleted file mode 100644 index 08754605..00000000 --- a/autogpt/prompt.py +++ /dev/null @@ -1,203 +0,0 @@ -from colorama import Fore - -from autogpt.config import Config -from autogpt.config.ai_config import AIConfig -from autogpt.config.config import Config -from autogpt.logs import logger -from autogpt.promptgenerator import PromptGenerator -from autogpt.setup import prompt_user -from autogpt.utils import clean_input - -CFG = Config() - - -def get_prompt() -> str: - """ - This 
function generates a prompt string that includes various constraints, - commands, resources, and performance evaluations. - - Returns: - str: The generated prompt string. - """ - - # Initialize the Config object - cfg = Config() - - # Initialize the PromptGenerator object - prompt_generator = PromptGenerator() - - # Add constraints to the PromptGenerator object - prompt_generator.add_constraint( - "~4000 word limit for short term memory. Your short term memory is short, so" - " immediately save important information to files." - ) - prompt_generator.add_constraint( - "If you are unsure how you previously did something or want to recall past" - " events, thinking about similar events will help you remember." - ) - prompt_generator.add_constraint("No user assistance") - prompt_generator.add_constraint( - 'Exclusively use the commands listed in double quotes e.g. "command name"' - ) - prompt_generator.add_constraint( - "Use subprocesses for commands that will not terminate within a few minutes" - ) - - # Define the command list - commands = [ - ("Google Search", "google", {"input": ""}), - ( - "Browse Website", - "browse_website", - {"url": "", "question": ""}, - ), - ( - "Start GPT Agent", - "start_agent", - {"name": "", "task": "", "prompt": ""}, - ), - ( - "Message GPT Agent", - "message_agent", - {"key": "", "message": ""}, - ), - ("List GPT Agents", "list_agents", {}), - ("Delete GPT Agent", "delete_agent", {"key": ""}), - ( - "Clone Repository", - "clone_repository", - {"repository_url": "", "clone_path": ""}, - ), - ("Write to file", "write_to_file", {"file": "", "text": ""}), - ("Read file", "read_file", {"file": ""}), - ("Append to file", "append_to_file", {"file": "", "text": ""}), - ("Delete file", "delete_file", {"file": ""}), - ("Search Files", "search_files", {"directory": ""}), - ("Analyze Code", "analyze_code", {"code": ""}), - ( - "Get Improved Code", - "improve_code", - {"suggestions": "", "code": ""}, - ), - ( - "Write Tests", - "write_tests", - {"code": "", "focus": ""}, - ), - ("Execute Python File", "execute_python_file", {"file": ""}), - ("Generate Image", "generate_image", {"prompt": ""}), - ("Send Tweet", "send_tweet", {"text": ""}), - ] - - # Only add the audio to text command if the model is specified - if cfg.huggingface_audio_to_text_model: - commands.append( - ("Convert Audio to text", "read_audio_from_file", {"file": ""}), - ) - - # Only add shell command to the prompt if the AI is allowed to execute it - if cfg.execute_local_commands: - commands.append( - ( - "Execute Shell Command, non-interactive commands only", - "execute_shell", - {"command_line": ""}, - ), - ) - commands.append( - ( - "Execute Shell Command Popen, non-interactive commands only", - "execute_shell_popen", - {"command_line": ""}, - ), - ) - - # Only add the download file command if the AI is allowed to execute it - if cfg.allow_downloads: - commands.append( - ( - "Downloads a file from the internet, and stores it locally", - "download_file", - {"url": "", "file": ""}, - ), - ) - - # Add these command last. - commands.append( - ("Do Nothing", "do_nothing", {}), - ) - commands.append( - ("Task Complete (Shutdown)", "task_complete", {"reason": ""}), - ) - - # Add commands to the PromptGenerator object - for command_label, command_name, args in commands: - prompt_generator.add_command(command_label, command_name, args) - - # Add resources to the PromptGenerator object - prompt_generator.add_resource( - "Internet access for searches and information gathering." 
- ) - prompt_generator.add_resource("Long Term memory management.") - prompt_generator.add_resource( - "GPT-3.5 powered Agents for delegation of simple tasks." - ) - prompt_generator.add_resource("File output.") - - # Add performance evaluations to the PromptGenerator object - prompt_generator.add_performance_evaluation( - "Continuously review and analyze your actions to ensure you are performing to" - " the best of your abilities." - ) - prompt_generator.add_performance_evaluation( - "Constructively self-criticize your big-picture behavior constantly." - ) - prompt_generator.add_performance_evaluation( - "Reflect on past decisions and strategies to refine your approach." - ) - prompt_generator.add_performance_evaluation( - "Every command has a cost, so be smart and efficient. Aim to complete tasks in" - " the least number of steps." - ) - - # Generate the prompt string - return prompt_generator.generate_prompt_string() - - -def construct_prompt() -> str: - """Construct the prompt for the AI to respond to - - Returns: - str: The prompt string - """ - config = AIConfig.load(CFG.ai_settings_file) - if CFG.skip_reprompt and config.ai_name: - logger.typewriter_log("Name :", Fore.GREEN, config.ai_name) - logger.typewriter_log("Role :", Fore.GREEN, config.ai_role) - logger.typewriter_log("Goals:", Fore.GREEN, f"{config.ai_goals}") - elif config.ai_name: - logger.typewriter_log( - "Welcome back! ", - Fore.GREEN, - f"Would you like me to return to being {config.ai_name}?", - speak_text=True, - ) - should_continue = clean_input( - f"""Continue with the last settings? -Name: {config.ai_name} -Role: {config.ai_role} -Goals: {config.ai_goals} -Continue (y/n): """ - ) - if should_continue.lower() == "n": - config = AIConfig() - - if not config.ai_name: - config = prompt_user() - config.save(CFG.ai_settings_file) - - # Get rid of this global: - global ai_name - ai_name = config.ai_name - - return config.construct_full_prompt() diff --git a/autogpt/prompts/__init__.py b/autogpt/prompts/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/autogpt/promptgenerator.py b/autogpt/prompts/generator.py similarity index 79% rename from autogpt/promptgenerator.py rename to autogpt/prompts/generator.py index 0ad7046a..2ae9123b 100644 --- a/autogpt/promptgenerator.py +++ b/autogpt/prompts/generator.py @@ -1,8 +1,6 @@ """ A module for generating custom prompt strings.""" -from __future__ import annotations - import json -from typing import Any +from typing import Any, Callable, Dict, List, Optional class PromptGenerator: @@ -20,6 +18,10 @@ class PromptGenerator: self.commands = [] self.resources = [] self.performance_evaluation = [] + self.goals = [] + self.command_registry = None + self.name = "Bob" + self.role = "AI" self.response_format = { "thoughts": { "text": "thought", @@ -40,7 +42,13 @@ class PromptGenerator: """ self.constraints.append(constraint) - def add_command(self, command_label: str, command_name: str, args=None) -> None: + def add_command( + self, + command_label: str, + command_name: str, + args=None, + function: Optional[Callable] = None, + ) -> None: """ Add a command to the commands list with a label, name, and optional arguments. @@ -49,6 +57,8 @@ class PromptGenerator: command_name (str): The name of the command. args (dict, optional): A dictionary containing argument names and their values. Defaults to None. + function (callable, optional): A callable function to be called when + the command is executed. Defaults to None. 
""" if args is None: args = {} @@ -59,11 +69,12 @@ class PromptGenerator: "label": command_label, "name": command_name, "args": command_args, + "function": function, } self.commands.append(command) - def _generate_command_string(self, command: dict[str, Any]) -> str: + def _generate_command_string(self, command: Dict[str, Any]) -> str: """ Generate a formatted string representation of a command. @@ -96,7 +107,7 @@ class PromptGenerator: """ self.performance_evaluation.append(evaluation) - def _generate_numbered_list(self, items: list[Any], item_type="list") -> str: + def _generate_numbered_list(self, items: List[Any], item_type="list") -> str: """ Generate a numbered list from given items based on the item_type. @@ -109,10 +120,16 @@ class PromptGenerator: str: The formatted numbered list. """ if item_type == "command": - return "\n".join( - f"{i+1}. {self._generate_command_string(item)}" - for i, item in enumerate(items) - ) + command_strings = [] + if self.command_registry: + command_strings += [ + str(item) + for item in self.command_registry.commands.values() + if item.enabled + ] + # terminate command is added manually + command_strings += [self._generate_command_string(item) for item in items] + return "\n".join(f"{i+1}. {item}" for i, item in enumerate(command_strings)) else: return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items)) diff --git a/autogpt/prompts/prompt.py b/autogpt/prompts/prompt.py new file mode 100644 index 00000000..7a53f603 --- /dev/null +++ b/autogpt/prompts/prompt.py @@ -0,0 +1,142 @@ +from colorama import Fore + +from autogpt.config.ai_config import AIConfig +from autogpt.config.config import Config +from autogpt.llm import ApiManager +from autogpt.logs import logger +from autogpt.prompts.generator import PromptGenerator +from autogpt.setup import prompt_user +from autogpt.utils import clean_input + +CFG = Config() + +DEFAULT_TRIGGERING_PROMPT = ( + "Determine which next command to use, and respond using the format specified above:" +) + + +def build_default_prompt_generator() -> PromptGenerator: + """ + This function generates a prompt string that includes various constraints, + commands, resources, and performance evaluations. + + Returns: + str: The generated prompt string. + """ + + # Initialize the PromptGenerator object + prompt_generator = PromptGenerator() + + # Add constraints to the PromptGenerator object + prompt_generator.add_constraint( + "~4000 word limit for short term memory. Your short term memory is short, so" + " immediately save important information to files." + ) + prompt_generator.add_constraint( + "If you are unsure how you previously did something or want to recall past" + " events, thinking about similar events will help you remember." + ) + prompt_generator.add_constraint("No user assistance") + prompt_generator.add_constraint( + 'Exclusively use the commands listed in double quotes e.g. "command name"' + ) + + # Define the command list + commands = [ + ("Task Complete (Shutdown)", "task_complete", {"reason": ""}), + ] + + # Add commands to the PromptGenerator object + for command_label, command_name, args in commands: + prompt_generator.add_command(command_label, command_name, args) + + # Add resources to the PromptGenerator object + prompt_generator.add_resource( + "Internet access for searches and information gathering." + ) + prompt_generator.add_resource("Long Term memory management.") + prompt_generator.add_resource( + "GPT-3.5 powered Agents for delegation of simple tasks." 
+    )
+    prompt_generator.add_resource("File output.")
+
+    # Add performance evaluations to the PromptGenerator object
+    prompt_generator.add_performance_evaluation(
+        "Continuously review and analyze your actions to ensure you are performing to"
+        " the best of your abilities."
+    )
+    prompt_generator.add_performance_evaluation(
+        "Constructively self-criticize your big-picture behavior constantly."
+    )
+    prompt_generator.add_performance_evaluation(
+        "Reflect on past decisions and strategies to refine your approach."
+    )
+    prompt_generator.add_performance_evaluation(
+        "Every command has a cost, so be smart and efficient. Aim to complete tasks in"
+        " the least number of steps."
+    )
+    prompt_generator.add_performance_evaluation("Write all code to a file.")
+    return prompt_generator
+
+
+def construct_main_ai_config() -> AIConfig:
+    """Construct the main AI configuration for this run.
+
+    Returns:
+        AIConfig: The AI configuration the agent will run with
+    """
+    config = AIConfig.load(CFG.ai_settings_file)
+    if CFG.skip_reprompt and config.ai_name:
+        logger.typewriter_log("Name :", Fore.GREEN, config.ai_name)
+        logger.typewriter_log("Role :", Fore.GREEN, config.ai_role)
+        logger.typewriter_log("Goals:", Fore.GREEN, f"{config.ai_goals}")
+        logger.typewriter_log(
+            "API Budget:",
+            Fore.GREEN,
+            "infinite" if config.api_budget <= 0 else f"${config.api_budget}",
+        )
+    elif config.ai_name:
+        logger.typewriter_log(
+            "Welcome back! ",
+            Fore.GREEN,
+            f"Would you like me to return to being {config.ai_name}?",
+            speak_text=True,
+        )
+        should_continue = clean_input(
+            f"""Continue with the last settings?
Name: {config.ai_name}
Role: {config.ai_role}
Goals: {config.ai_goals}
API Budget: {"infinite" if config.api_budget <= 0 else f"${config.api_budget}"}
Continue ({CFG.authorise_key}/{CFG.exit_key}): """
+        )
+        if should_continue.lower() == CFG.exit_key:
+            config = AIConfig()
+
+    if not config.ai_name:
+        config = prompt_user()
+        config.save(CFG.ai_settings_file)
+
+    # set the total api budget
+    api_manager = ApiManager()
+    api_manager.set_total_budget(config.api_budget)
+
+    # Agent Created, print message
+    logger.typewriter_log(
+        config.ai_name,
+        Fore.LIGHTBLUE_EX,
+        "has been created with the following details:",
+        speak_text=True,
+    )
+
+    # Print the ai config details
+    # Name
+    logger.typewriter_log("Name:", Fore.GREEN, config.ai_name, speak_text=False)
+    # Role
+    logger.typewriter_log("Role:", Fore.GREEN, config.ai_role, speak_text=False)
+    # Goals
+    logger.typewriter_log("Goals:", Fore.GREEN, "", speak_text=False)
+    for goal in config.ai_goals:
+        logger.typewriter_log("-", Fore.GREEN, goal, speak_text=False)
+
+    return config
diff --git a/autogpt/setup.py b/autogpt/setup.py
index bfa68201..c71fb221 100644
--- a/autogpt/setup.py
+++ b/autogpt/setup.py
@@ -1,18 +1,26 @@
 """Set up the AI and its goals"""
+import re
+
 from colorama import Fore, Style

 from autogpt import utils
+from autogpt.config import Config
 from autogpt.config.ai_config import AIConfig
+from autogpt.llm import create_chat_completion
 from autogpt.logs import logger

+CFG = Config()
+

 def prompt_user() -> AIConfig:
     """Prompt the user for input

     Returns:
-        AIConfig: The AIConfig object containing the user's input
+        AIConfig: The AIConfig object tailored to the user's input
     """
     ai_name = ""
+    ai_config = None
+
+    # Construct the prompt
     logger.typewriter_log(
         "Welcome to Auto-GPT! 
", @@ -21,6 +29,57 @@ def prompt_user() -> AIConfig: speak_text=True, ) + # Get user desire + logger.typewriter_log( + "Create an AI-Assistant:", + Fore.GREEN, + "input '--manual' to enter manual mode.", + speak_text=True, + ) + + user_desire = utils.clean_input( + f"{Fore.LIGHTBLUE_EX}I want Auto-GPT to{Style.RESET_ALL}: " + ) + + if user_desire == "": + user_desire = "Write a wikipedia style article about the project: https://github.com/significant-gravitas/Auto-GPT" # Default prompt + + # If user desire contains "--manual" + if "--manual" in user_desire: + logger.typewriter_log( + "Manual Mode Selected", + Fore.GREEN, + speak_text=True, + ) + return generate_aiconfig_manual() + + else: + try: + return generate_aiconfig_automatic(user_desire) + except Exception as e: + logger.typewriter_log( + "Unable to automatically generate AI Config based on user desire.", + Fore.RED, + "Falling back to manual mode.", + speak_text=True, + ) + + return generate_aiconfig_manual() + + +def generate_aiconfig_manual() -> AIConfig: + """ + Interactively create an AI configuration by prompting the user to provide the name, role, and goals of the AI. + + This function guides the user through a series of prompts to collect the necessary information to create + an AIConfig object. The user will be asked to provide a name and role for the AI, as well as up to five + goals. If the user does not provide a value for any of the fields, default values will be used. + + Returns: + AIConfig: An AIConfig object containing the user-defined or default AI name, role, and goals. + """ + + # Manual Setup Intro logger.typewriter_log( "Create an AI-Assistant:", Fore.GREEN, @@ -60,7 +119,7 @@ def prompt_user() -> AIConfig: "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage" " multiple businesses autonomously'", ) - print("Enter nothing to load defaults, enter nothing when finished.", flush=True) + logger.info("Enter nothing to load defaults, enter nothing when finished.") ai_goals = [] for i in range(5): ai_goal = utils.clean_input(f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: ") @@ -74,4 +133,86 @@ def prompt_user() -> AIConfig: "Develop and manage multiple businesses autonomously", ] - return AIConfig(ai_name, ai_role, ai_goals) + # Get API Budget from User + logger.typewriter_log( + "Enter your budget for API calls: ", + Fore.GREEN, + "For example: $1.50", + ) + logger.info("Enter nothing to let the AI run without monetary limit") + api_budget_input = utils.clean_input( + f"{Fore.LIGHTBLUE_EX}Budget{Style.RESET_ALL}: $" + ) + if api_budget_input == "": + api_budget = 0.0 + else: + try: + api_budget = float(api_budget_input.replace("$", "")) + except ValueError: + logger.typewriter_log( + "Invalid budget input. Setting budget to unlimited.", Fore.RED + ) + api_budget = 0.0 + + return AIConfig(ai_name, ai_role, ai_goals, api_budget) + + +def generate_aiconfig_automatic(user_prompt) -> AIConfig: + """Generates an AIConfig object from the given string. + + Returns: + AIConfig: The AIConfig object tailored to the user's input + """ + + system_prompt = """ +Your task is to devise up to 5 highly effective goals and an appropriate role-based name (_GPT) for an autonomous agent, ensuring that the goals are optimally aligned with the successful completion of its assigned task. + +The user will provide the task, you will provide only the output in the exact format specified below with no explanation or conversation. 
+ +Example input: +Help me with marketing my business + +Example output: +Name: CMOGPT +Description: a professional digital marketer AI that assists Solopreneurs in growing their businesses by providing world-class expertise in solving marketing problems for SaaS, content products, agencies, and more. +Goals: +- Engage in effective problem-solving, prioritization, planning, and supporting execution to address your marketing needs as your virtual Chief Marketing Officer. + +- Provide specific, actionable, and concise advice to help you make informed decisions without the use of platitudes or overly wordy explanations. + +- Identify and prioritize quick wins and cost-effective campaigns that maximize results with minimal time and budget investment. + +- Proactively take the lead in guiding you and offering suggestions when faced with unclear information or uncertainty to ensure your marketing strategy remains on track. +""" + + # Call LLM with the string as user input + messages = [ + { + "role": "system", + "content": system_prompt, + }, + { + "role": "user", + "content": f"Task: '{user_prompt}'\nRespond only with the output in the exact format specified in the system prompt, with no explanation or conversation.\n", + }, + ] + output = create_chat_completion(messages, CFG.fast_llm_model) + + # Debug LLM Output + logger.debug(f"AI Config Generator Raw Output: {output}") + + # Parse the output + ai_name = re.search(r"Name(?:\s*):(?:\s*)(.*)", output, re.IGNORECASE).group(1) + ai_role = ( + re.search( + r"Description(?:\s*):(?:\s*)(.*?)(?:(?:\n)|Goals)", + output, + re.IGNORECASE | re.DOTALL, + ) + .group(1) + .strip() + ) + ai_goals = re.findall(r"(?<=\n)-\s*(.*)", output) + api_budget = 0.0 # TODO: parse api budget using a regular expression + + return AIConfig(ai_name, ai_role, ai_goals, api_budget) diff --git a/autogpt/config/singleton.py b/autogpt/singleton.py similarity index 100% rename from autogpt/config/singleton.py rename to autogpt/singleton.py diff --git a/autogpt/speech/base.py b/autogpt/speech/base.py index d74fa51b..7adcc37d 100644 --- a/autogpt/speech/base.py +++ b/autogpt/speech/base.py @@ -2,7 +2,7 @@ import abc from threading import Lock -from autogpt.config import AbstractSingleton +from autogpt.singleton import AbstractSingleton class VoiceBase(AbstractSingleton): diff --git a/autogpt/speech/brian.py b/autogpt/speech/brian.py index 821fdf2f..ffa4e51e 100644 --- a/autogpt/speech/brian.py +++ b/autogpt/speech/brian.py @@ -1,4 +1,4 @@ -""" Brian speech module for autogpt """ +import logging import os import requests @@ -35,6 +35,9 @@ class BrianSpeech(VoiceBase): os.remove("speech.mp3") return True else: - print("Request failed with status code:", response.status_code) - print("Response content:", response.content) + logging.error( + "Request failed with status code: %s, response content: %s", + response.status_code, + response.content, + ) return False diff --git a/autogpt/speech/eleven_labs.py b/autogpt/speech/eleven_labs.py index ea84efd8..06806b56 100644 --- a/autogpt/speech/eleven_labs.py +++ b/autogpt/speech/eleven_labs.py @@ -69,6 +69,8 @@ class ElevenLabsSpeech(VoiceBase): Returns: bool: True if the request was successful, False otherwise """ + from autogpt.logs import logger + tts_url = ( f"https://api.elevenlabs.io/v1/text-to-speech/{self._voices[voice_index]}" ) @@ -81,6 +83,6 @@ class ElevenLabsSpeech(VoiceBase): os.remove("speech.mpeg") return True else: - print("Request failed with status code:", response.status_code) - print("Response content:", 
response.content)
+        logger.warn(f"Request failed with status code: {response.status_code}")
+        logger.info(f"Response content: {response.content}")
         return False
diff --git a/autogpt/speech/say.py b/autogpt/speech/say.py
index 727983d1..917fa65c 100644
--- a/autogpt/speech/say.py
+++ b/autogpt/speech/say.py
@@ -3,39 +3,44 @@ import threading
 from threading import Semaphore

 from autogpt.config import Config
+from autogpt.speech.base import VoiceBase
 from autogpt.speech.brian import BrianSpeech
 from autogpt.speech.eleven_labs import ElevenLabsSpeech
 from autogpt.speech.gtts import GTTSVoice
 from autogpt.speech.macos_tts import MacOSTTS

-CFG = Config()
-DEFAULT_VOICE_ENGINE = GTTSVoice()
-VOICE_ENGINE = None
-if CFG.elevenlabs_api_key:
-    VOICE_ENGINE = ElevenLabsSpeech()
-elif CFG.use_mac_os_tts == "True":
-    VOICE_ENGINE = MacOSTTS()
-elif CFG.use_brian_tts == "True":
-    VOICE_ENGINE = BrianSpeech()
-else:
-    VOICE_ENGINE = GTTSVoice()
-
-
-QUEUE_SEMAPHORE = Semaphore(
+_QUEUE_SEMAPHORE = Semaphore(
     1
 )  # The amount of sounds to queue before blocking the main thread


 def say_text(text: str, voice_index: int = 0) -> None:
     """Speak the given text using the given voice index"""
+    cfg = Config()
+    default_voice_engine, voice_engine = _get_voice_engine(cfg)

     def speak() -> None:
-        success = VOICE_ENGINE.say(text, voice_index)
+        success = voice_engine.say(text, voice_index)
         if not success:
-            DEFAULT_VOICE_ENGINE.say(text)
+            default_voice_engine.say(text)

-        QUEUE_SEMAPHORE.release()
+        _QUEUE_SEMAPHORE.release()

-    QUEUE_SEMAPHORE.acquire(True)
+    _QUEUE_SEMAPHORE.acquire(True)
     thread = threading.Thread(target=speak)
     thread.start()
+
+
+def _get_voice_engine(config: Config) -> tuple[VoiceBase, VoiceBase]:
+    """Get the voice engine to use for the given configuration"""
+    default_voice_engine = GTTSVoice()
+    if config.elevenlabs_api_key:
+        voice_engine = ElevenLabsSpeech()
+    elif config.use_mac_os_tts == "True":
+        voice_engine = MacOSTTS()
+    elif config.use_brian_tts == "True":
+        voice_engine = BrianSpeech()
+    else:
+        voice_engine = GTTSVoice()
+
+    return default_voice_engine, voice_engine
diff --git a/autogpt/spinner.py b/autogpt/spinner.py
index 4e33d742..ed02eb44 100644
--- a/autogpt/spinner.py
+++ b/autogpt/spinner.py
@@ -54,8 +54,8 @@ class Spinner:
     def update_message(self, new_message, delay=0.1):
         """Update the spinner message
         Args:
-            new_message (str): New message to display
-            delay: Delay in seconds before updating the message
+            new_message (str): New message to display.
+            delay (float): The delay in seconds between each spinner update.
""" time.sleep(delay) sys.stdout.write( diff --git a/autogpt/url_utils/__init__.py b/autogpt/url_utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/autogpt/url_utils/validators.py b/autogpt/url_utils/validators.py new file mode 100644 index 00000000..c85a00ba --- /dev/null +++ b/autogpt/url_utils/validators.py @@ -0,0 +1,103 @@ +import functools +from typing import Any, Callable +from urllib.parse import urljoin, urlparse + +from requests.compat import urljoin + + +def validate_url(func: Callable[..., Any]) -> Any: + """The method decorator validate_url is used to validate urls for any command that requires + a url as an arugment""" + + @functools.wraps(func) + def wrapper(url: str, *args, **kwargs) -> Any: + """Check if the URL is valid using a basic check, urllib check, and local file check + + Args: + url (str): The URL to check + + Returns: + the result of the wrapped function + + Raises: + ValueError if the url fails any of the validation tests + """ + # Most basic check if the URL is valid: + if not url.startswith("http://") and not url.startswith("https://"): + raise ValueError("Invalid URL format") + if not is_valid_url(url): + raise ValueError("Missing Scheme or Network location") + # Restrict access to local files + if check_local_file_access(url): + raise ValueError("Access to local files is restricted") + + return func(sanitize_url(url), *args, **kwargs) + + return wrapper + + +def is_valid_url(url: str) -> bool: + """Check if the URL is valid + + Args: + url (str): The URL to check + + Returns: + bool: True if the URL is valid, False otherwise + """ + try: + result = urlparse(url) + return all([result.scheme, result.netloc]) + except ValueError: + return False + + +def sanitize_url(url: str) -> str: + """Sanitize the URL + + Args: + url (str): The URL to sanitize + + Returns: + str: The sanitized URL + """ + parsed_url = urlparse(url) + reconstructed_url = f"{parsed_url.path}{parsed_url.params}?{parsed_url.query}" + return urljoin(url, reconstructed_url) + + +def check_local_file_access(url: str) -> bool: + """Check if the URL is a local file + + Args: + url (str): The URL to check + + Returns: + bool: True if the URL is a local file, False otherwise + """ + local_prefixes = [ + "file:///", + "file://localhost/", + "file://localhost", + "http://localhost", + "http://localhost/", + "https://localhost", + "https://localhost/", + "http://2130706433", + "http://2130706433/", + "https://2130706433", + "https://2130706433/", + "http://127.0.0.1/", + "http://127.0.0.1", + "https://127.0.0.1/", + "https://127.0.0.1", + "https://0.0.0.0/", + "https://0.0.0.0", + "http://0.0.0.0/", + "http://0.0.0.0", + "http://0000", + "http://0000/", + "https://0000", + "https://0000/", + ] + return any(url.startswith(prefix) for prefix in local_prefixes) diff --git a/autogpt/utils.py b/autogpt/utils.py index e93d5ac7..112a1508 100644 --- a/autogpt/utils.py +++ b/autogpt/utils.py @@ -1,17 +1,60 @@ import os +import re import requests import yaml -from colorama import Fore -from git import Repo +from colorama import Fore, Style +from git.repo import Repo + +from autogpt.logs import logger + +# Use readline if available (for clean_input) +try: + import readline +except ImportError: + pass + +from autogpt.config import Config -def clean_input(prompt: str = ""): +def clean_input(prompt: str = "", talk=False): try: - return input(prompt) + cfg = Config() + if cfg.chat_messages_enabled: + for plugin in cfg.plugins: + if not hasattr(plugin, "can_handle_user_input"): + continue + if 
not plugin.can_handle_user_input(user_input=prompt): + continue + plugin_response = plugin.user_input(user_input=prompt) + if not plugin_response: + continue + if plugin_response.lower() in [ + "yes", + "yeah", + "y", + "ok", + "okay", + "sure", + "alright", + ]: + return cfg.authorise_key + elif plugin_response.lower() in [ + "no", + "nope", + "n", + "negative", + ]: + return cfg.exit_key + return plugin_response + + # ask for input, default when just pressing Enter is y + logger.info("Asking user via keyboard...") + answer = input(prompt) + return answer except KeyboardInterrupt: - print("You interrupted Auto-GPT") - print("Quitting...") + logger.info("You interrupted Auto-GPT") + logger.info("Quitting...") exit(0) @@ -43,15 +86,17 @@ def readable_file_size(size, decimal_places=2): return f"{size:.{decimal_places}f} {unit}" -def get_bulletin_from_web() -> str: +def get_bulletin_from_web(): try: response = requests.get( "https://raw.githubusercontent.com/Significant-Gravitas/Auto-GPT/master/BULLETIN.md" ) if response.status_code == 200: return response.text - except: - return "" + except requests.exceptions.RequestException: + pass + + return "" def get_current_git_branch() -> str: @@ -63,15 +108,46 @@ def get_current_git_branch() -> str: return "" -def get_latest_bulletin() -> str: +def get_latest_bulletin() -> tuple[str, bool]: exists = os.path.exists("CURRENT_BULLETIN.md") current_bulletin = "" if exists: current_bulletin = open("CURRENT_BULLETIN.md", "r", encoding="utf-8").read() new_bulletin = get_bulletin_from_web() - is_new_news = new_bulletin != current_bulletin + is_new_news = new_bulletin != "" and new_bulletin != current_bulletin + + news_header = Fore.YELLOW + "Welcome to Auto-GPT!\n" + if new_bulletin or current_bulletin: + news_header += ( + "Below you'll find the latest Auto-GPT News and updates regarding features!\n" + "If you don't wish to see this message, you " + "can run Auto-GPT with the *--skip-news* flag.\n" + ) if new_bulletin and is_new_news: open("CURRENT_BULLETIN.md", "w", encoding="utf-8").write(new_bulletin) - return f" {Fore.RED}::UPDATED:: {Fore.CYAN}{new_bulletin}{Fore.RESET}" - return current_bulletin + current_bulletin = f"{Fore.RED}::NEW BULLETIN::{Fore.RESET}\n\n{new_bulletin}" + + return f"{news_header}\n{current_bulletin}", is_new_news + + +def markdown_to_ansi_style(markdown: str): + ansi_lines: list[str] = [] + for line in markdown.split("\n"): + line_style = "" + + if line.startswith("# "): + line_style += Style.BRIGHT + else: + line = re.sub( + r"(? Path: - """Get full path for item in workspace - - Parameters: - relative_path (str | Path): Path to translate into the workspace - - Returns: - Path: Absolute path for the given path in the workspace - """ - return safe_path_join(WORKSPACE_PATH, relative_path) - - -def safe_path_join(base: Path, *paths: str | Path) -> Path: - """Join one or more path components, asserting the resulting path is within the workspace. - - Args: - base (Path): The base path - *paths (str): The paths to join to the base path - - Returns: - Path: The joined path - """ - base = base.resolve() - joined_path = base.joinpath(*paths).resolve() - - if CFG.restrict_to_workspace and not joined_path.is_relative_to(base): - raise ValueError( - f"Attempted to access path '{joined_path}' outside of workspace '{base}'." 
- ) - - return joined_path diff --git a/autogpt/workspace/__init__.py b/autogpt/workspace/__init__.py new file mode 100644 index 00000000..b348144b --- /dev/null +++ b/autogpt/workspace/__init__.py @@ -0,0 +1,5 @@ +from autogpt.workspace.workspace import Workspace + +__all__ = [ + "Workspace", +] diff --git a/autogpt/workspace/workspace.py b/autogpt/workspace/workspace.py new file mode 100644 index 00000000..c1050230 --- /dev/null +++ b/autogpt/workspace/workspace.py @@ -0,0 +1,137 @@ +""" +========= +Workspace +========= + +The workspace is a directory containing configuration and working files for an AutoGPT +agent. + +""" +from __future__ import annotations + +from pathlib import Path + +from autogpt.logs import logger + + +class Workspace: + """A class that represents a workspace for an AutoGPT agent.""" + + NULL_BYTES = ["\0", "\000", "\x00", r"\z", "\u0000", "%00"] + + def __init__(self, workspace_root: str | Path, restrict_to_workspace: bool): + self._root = self._sanitize_path(workspace_root) + self._restrict_to_workspace = restrict_to_workspace + + @property + def root(self) -> Path: + """The root directory of the workspace.""" + return self._root + + @property + def restrict_to_workspace(self): + """Whether to restrict generated paths to the workspace.""" + return self._restrict_to_workspace + + @classmethod + def make_workspace(cls, workspace_directory: str | Path, *args, **kwargs) -> Path: + """Create a workspace directory and return the path to it. + + Parameters + ---------- + workspace_directory + The path to the workspace directory. + + Returns + ------- + Path + The path to the workspace directory. + + """ + # TODO: have this make the env file and ai settings file in the directory. + workspace_directory = cls._sanitize_path(workspace_directory) + workspace_directory.mkdir(exist_ok=True, parents=True) + return workspace_directory + + def get_path(self, relative_path: str | Path) -> Path: + """Get the full path for an item in the workspace. + + Parameters + ---------- + relative_path + The relative path to resolve in the workspace. + + Returns + ------- + Path + The resolved path relative to the workspace. + + """ + return self._sanitize_path( + relative_path, + root=self.root, + restrict_to_root=self.restrict_to_workspace, + ) + + @staticmethod + def _sanitize_path( + relative_path: str | Path, + root: str | Path = None, + restrict_to_root: bool = True, + ) -> Path: + """Resolve the relative path within the given root if possible. + + Parameters + ---------- + relative_path + The relative path to resolve. + root + The root path to resolve the relative path within. + restrict_to_root + Whether to restrict the path to the root. + + Returns + ------- + Path + The resolved path. + + Raises + ------ + ValueError + If the path is absolute and a root is provided. + ValueError + If the path is outside the root and the root is restricted. + + """ + + # Posix systems disallow null bytes in paths. Windows is agnostic about it. + # Do an explicit check here for all sorts of null byte representations. 
+ + for null_byte in Workspace.NULL_BYTES: + if null_byte in str(relative_path) or null_byte in str(root): + raise ValueError("embedded null byte") + + if root is None: + return Path(relative_path).resolve() + + logger.debug(f"Resolving path '{relative_path}' in workspace '{root}'") + + root, relative_path = Path(root).resolve(), Path(relative_path) + + logger.debug(f"Resolved root as '{root}'") + + if relative_path.is_absolute(): + raise ValueError( + f"Attempted to access absolute path '{relative_path}' in workspace '{root}'." + ) + + full_path = root.joinpath(relative_path).resolve() + + logger.debug(f"Joined paths as '{full_path}'") + + if restrict_to_root and not full_path.is_relative_to(root): + raise ValueError( + f"Attempted to access path '{full_path}' outside of workspace '{root}'." + ) + + return full_path diff --git a/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py b/benchmark/benchmark_entrepreneur_gpt_with_difficult_user.py similarity index 97% rename from benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py rename to benchmark/benchmark_entrepreneur_gpt_with_difficult_user.py index 9a5025d3..608e9630 100644 --- a/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py +++ b/benchmark/benchmark_entrepreneur_gpt_with_difficult_user.py @@ -3,7 +3,7 @@ import subprocess import sys -def benchmark_entrepeneur_gpt_with_difficult_user(): +def benchmark_entrepreneur_gpt_with_difficult_user(): # Test case to check if the write_file command can successfully write 'Hello World' to a file # named 'hello_world.txt'. @@ -102,4 +102,4 @@ Not what I need.""" # Run the test case. if __name__ == "__main__": - benchmark_entrepeneur_gpt_with_difficult_user() + benchmark_entrepreneur_gpt_with_difficult_user() diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 00000000..15a83ad2 --- /dev/null +++ b/codecov.yml @@ -0,0 +1,18 @@ +coverage: + status: + project: + default: + target: auto + threshold: 1% + informational: true + patch: + default: + target: 80% + +## Please add this section once you've separated your coverage uploads for unit and integration tests +# +# flags: +# unit-tests: +# carryforward: true +# integration-tests: +# carryforward: true diff --git a/data_ingestion.py b/data_ingestion.py index b89a33da..c907cedf 100644 --- a/data_ingestion.py +++ b/data_ingestion.py @@ -1,7 +1,7 @@ import argparse import logging -from autogpt.commands.file_operations import ingest_file, search_files +from autogpt.commands.file_operations import ingest_file, list_files from autogpt.config import Config from autogpt.memory import get_memory @@ -10,11 +10,13 @@ cfg = Config() def configure_logging(): logging.basicConfig( - filename="log-ingestion.txt", - filemode="a", format="%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s", datefmt="%H:%M:%S", level=logging.DEBUG, + handlers=[ + logging.FileHandler(filename="log-ingestion.txt", mode="a"), + logging.StreamHandler(), + ], ) return logging.getLogger("AutoGPT-Ingestion") @@ -26,12 +28,13 @@ def ingest_directory(directory, memory, args): :param directory: The directory containing the files to ingest :param memory: An object with an add() method to store the chunks in memory """ + global logger try: - files = search_files(directory) + files = list_files(directory) for file in files: ingest_file(file, memory, args.max_length, args.overlap) except Exception as e: - print(f"Error while ingesting directory '{directory}': {str(e)}") + logger.error(f"Error while ingesting directory '{directory}': {str(e)}") def main() -> 
None: @@ -64,29 +67,26 @@ def main() -> None: help="The max_length of each chunk when ingesting files (default: 4000)", default=4000, ) - args = parser.parse_args() # Initialize memory memory = get_memory(cfg, init=args.init) - print("Using memory of type: " + memory.__class__.__name__) + logger.debug("Using memory of type: " + memory.__class__.__name__) if args.file: try: ingest_file(args.file, memory, args.max_length, args.overlap) - print(f"File '{args.file}' ingested successfully.") + logger.info(f"File '{args.file}' ingested successfully.") except Exception as e: logger.error(f"Error while ingesting file '{args.file}': {str(e)}") - print(f"Error while ingesting file '{args.file}': {str(e)}") elif args.dir: try: ingest_directory(args.dir, memory, args) - print(f"Directory '{args.dir}' ingested successfully.") + logger.info(f"Directory '{args.dir}' ingested successfully.") except Exception as e: logger.error(f"Error while ingesting directory '{args.dir}': {str(e)}") - print(f"Error while ingesting directory '{args.dir}': {str(e)}") else: - print( + logger.warn( "Please provide either a file path (--file) or a directory name (--dir)" " inside the auto_gpt_workspace directory as input." ) diff --git a/docker-compose.yml b/docker-compose.yml index be225d7d..4c6656dd 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -9,9 +9,11 @@ services: build: ./ env_file: - .env + environment: + MEMORY_BACKEND: ${MEMORY_BACKEND:-redis} + REDIS_HOST: ${REDIS_HOST:-redis} volumes: - - "./autogpt:/app" - - ".env:/app/.env" + - ./:/app profiles: ["exclude-from-up"] redis: diff --git a/docs/code-of-conduct.md b/docs/code-of-conduct.md new file mode 120000 index 00000000..0400d574 --- /dev/null +++ b/docs/code-of-conduct.md @@ -0,0 +1 @@ +../CODE_OF_CONDUCT.md \ No newline at end of file diff --git a/docs/configuration/imagegen.md b/docs/configuration/imagegen.md new file mode 100644 index 00000000..38fdcebb --- /dev/null +++ b/docs/configuration/imagegen.md @@ -0,0 +1,59 @@ +# 🖼 Image Generation configuration + +| Config variable | Values | | +| ---------------- | ------------------------------- | -------------------- | +| `IMAGE_PROVIDER` | `dalle` `huggingface` `sdwebui` | **default: `dalle`** | + +## DALL-e + +In `.env`, make sure `IMAGE_PROVIDER` is commented (or set to `dalle`): +``` ini +# IMAGE_PROVIDER=dalle # this is the default +``` + +Further optional configuration: + +| Config variable | Values | | +| ---------------- | ------------------ | -------------- | +| `IMAGE_SIZE` | `256` `512` `1024` | default: `256` | + +## Hugging Face + +To use text-to-image models from Hugging Face, you need a Hugging Face API token. +Link to the appropriate settings page: [Hugging Face > Settings > Tokens](https://huggingface.co/settings/tokens) + +Once you have an API token, uncomment and adjust these variables in your `.env`: +``` ini +IMAGE_PROVIDER=huggingface +HUGGINGFACE_API_TOKEN=your-huggingface-api-token +``` + +Further optional configuration: + +| Config variable | Values | | +| ------------------------- | ---------------------- | ---------------------------------------- | +| `HUGGINGFACE_IMAGE_MODEL` | see [available models] | default: `CompVis/stable-diffusion-v1-4` | + +[available models]: https://huggingface.co/models?pipeline_tag=text-to-image + +## Stable Diffusion WebUI + +It is possible to use your own self-hosted Stable Diffusion WebUI with Auto-GPT: +``` ini +IMAGE_PROVIDER=sdwebui +``` + +!!! note + Make sure you are running WebUI with `--api` enabled. 
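+
+For example, assuming the common AUTOMATIC1111 WebUI and its stock launcher (the launcher name is an assumption; adjust it for your install):
+
+``` shell
+./webui.sh --api
+```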
+
+Further optional configuration:
+
+| Config variable | Values                  |                                  |
+| --------------- | ----------------------- | -------------------------------- |
+| `SD_WEBUI_URL`  | URL to your WebUI       | default: `http://127.0.0.1:7860` |
+| `SD_WEBUI_AUTH` | `{username}:{password}` | *Note: do not copy the braces!*  |
+
+## Selenium
+
+To give Selenium a display on a headless machine, start a virtual framebuffer and point `DISPLAY` at it:
+
+``` shell
+sudo Xvfb :10 -ac -screen 0 1024x768x24 &
+export DISPLAY=:10
+```
diff --git a/docs/configuration/memory.md b/docs/configuration/memory.md
new file mode 100644
index 00000000..7d707598
--- /dev/null
+++ b/docs/configuration/memory.md
@@ -0,0 +1,204 @@
+## Setting Your Cache Type
+
+By default, an Auto-GPT instance set up with Docker Compose will use Redis as its memory backend.
+Otherwise, the default is LocalCache (which stores memory in a JSON file).
+
+To switch to a different backend, change the `MEMORY_BACKEND` in `.env`
+to the value that you want:
+
+* `local` uses a local JSON cache file
+* `pinecone` uses the Pinecone.io account you configured in your ENV settings
+* `redis` will use the redis cache that you configured
+* `milvus` will use the milvus cache that you configured
+* `weaviate` will use the weaviate cache that you configured
+
+## Memory Backend Setup
+
+Links to memory backends:
+
+- [Pinecone](https://www.pinecone.io/)
+- [Milvus](https://milvus.io/) – [self-hosted](https://milvus.io/docs), or managed with [Zilliz Cloud](https://zilliz.com/)
+- [Redis](https://redis.io)
+- [Weaviate](https://weaviate.io)
+
+### Redis Setup
+
+!!! important
+    If you have set up Auto-GPT using Docker Compose, then Redis is included, no further
+    setup needed.
+
+!!! caution
+    This setup is not intended to be publicly accessible and lacks security measures.
+    Avoid exposing Redis to the internet without a password or at all!
+
+1. Launch Redis container
+
+    :::shell
+    docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest
+
+2. Set the following settings in `.env`
+
+    :::ini
+    MEMORY_BACKEND=redis
+    REDIS_HOST=localhost
+    REDIS_PORT=6379
+    REDIS_PASSWORD=<PASSWORD>
+
+    Replace `<PASSWORD>` with your password, omitting the angled brackets (<>).
+
+    Optional configuration:
+
+    - `WIPE_REDIS_ON_START=False` to persist memory stored in Redis between runs.
+    - `MEMORY_INDEX=<name>` to specify a name for the memory index in Redis.
+      The default is `auto-gpt`.
+
+!!! info
+    See [redis-stack-server](https://hub.docker.com/r/redis/redis-stack-server) for
+    setting a password and additional configuration.
+
+### 🌲 Pinecone API Key Setup
+
+Pinecone lets you store vast amounts of vector-based memory, allowing the agent to load only relevant memories at any given time.
+
+1. Go to [pinecone](https://app.pinecone.io/) and make an account if you don't already have one.
+2. Choose the `Starter` plan to avoid being charged.
+3. Find your API key and region under the default project in the left sidebar.
+
+In the `.env` file set:
+
+- `PINECONE_API_KEY`
+- `PINECONE_ENV` (example: `us-east4-gcp`)
+- `MEMORY_BACKEND=pinecone`
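+
+For example, in `.env`:
+
+``` ini
+MEMORY_BACKEND=pinecone
+PINECONE_API_KEY=your-pinecone-api-key
+PINECONE_ENV=us-east4-gcp
+```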
+
+### Milvus Setup
+
+[Milvus](https://milvus.io/) is an open-source, highly scalable vector database that stores
+huge amounts of vector-based memory and provides fast relevant search. It can be quickly
+deployed with docker, or as a cloud service provided by [Zilliz Cloud](https://zilliz.com/).
+
+1. Deploy your Milvus service, either locally using docker or with a managed Zilliz Cloud database:
+    - [Install and deploy Milvus locally](https://milvus.io/docs/install_standalone-operator.md)
+
+    - Set up a managed Zilliz Cloud database
+        1. Go to [Zilliz Cloud](https://zilliz.com/) and sign up if you don't already have an account.
+        2. In the *Databases* tab, create a new database.
+            - Remember your username and password
+            - Wait until the database status is changed to RUNNING.
+        3. In the *Database detail* tab of the database you have created, find the public cloud endpoint, such as:
+           `https://xxx-xxxx.xxxx.xxxx.zillizcloud.com:443`.
+
+2. Run `pip3 install pymilvus` to install the required client library.
+    Make sure your PyMilvus version and Milvus version are [compatible](https://github.com/milvus-io/pymilvus#compatibility)
+    to avoid issues.
+    See also the [PyMilvus installation instructions](https://github.com/milvus-io/pymilvus#installation).
+
+3. Update `.env`:
+    - `MEMORY_BACKEND=milvus`
+    - One of:
+        - `MILVUS_ADDR=host:port` (for local instance)
+        - `MILVUS_ADDR=https://xxx-xxxx.xxxx.xxxx.zillizcloud.com:443` (for Zilliz Cloud)
+
+    The following settings are **optional**:
+
+    - `MILVUS_USERNAME='username-of-your-milvus-instance'`
+    - `MILVUS_PASSWORD='password-of-your-milvus-instance'`
+    - `MILVUS_SECURE=True` to use a secure connection.
+      Only use if your Milvus instance has TLS enabled.
+      *Note: setting `MILVUS_ADDR` to a `https://` URL will override this setting.*
+    - `MILVUS_COLLECTION` to change the collection name to use in Milvus.
+      Defaults to `autogpt`.
+
+### Weaviate Setup
+
+[Weaviate](https://weaviate.io/) is an open-source vector database. It allows you to store
+data objects and vector embeddings from ML models and scales seamlessly to billions of
+data objects. To set up a Weaviate database, check out their [Quickstart Tutorial](https://weaviate.io/developers/weaviate/quickstart).
+
+Although still experimental, [Embedded Weaviate](https://weaviate.io/developers/weaviate/installation/embedded)
+is supported, which allows the Auto-GPT process itself to start a Weaviate instance.
+To enable it, set `USE_WEAVIATE_EMBEDDED` to `True` and make sure you `pip install "weaviate-client>=3.15.4"`.
+
+#### Install the Weaviate client
+
+Install the Weaviate client before usage.
+
+``` shell
+$ pip install weaviate-client
+```
+
+#### Setting up environment variables
+
+In your `.env` file set the following:
+
+``` ini
+MEMORY_BACKEND=weaviate
+WEAVIATE_HOST="127.0.0.1" # the IP or domain of the running Weaviate instance
+WEAVIATE_PORT="8080"
+WEAVIATE_PROTOCOL="http"
+WEAVIATE_USERNAME="your username"
+WEAVIATE_PASSWORD="your password"
+WEAVIATE_API_KEY="your weaviate API key if you have one"
+WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate" # this is optional and indicates where the data should be persisted when running an embedded instance
+USE_WEAVIATE_EMBEDDED=False # set to True to run Embedded Weaviate
+MEMORY_INDEX="Autogpt" # name of the index to create for the application
+```
+
+## View Memory Usage
+
+View memory usage by using the `--debug` flag :)
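+
+For example, when running via the startup script:
+
+``` shell
+./run.sh --debug
+```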
+
+## 🧠 Memory pre-seeding
+
+Memory pre-seeding allows you to ingest files into memory and pre-seed it before running Auto-GPT.
+
+``` shell
+$ python data_ingestion.py -h
+usage: data_ingestion.py [-h] (--file FILE | --dir DIR) [--init] [--overlap OVERLAP] [--max_length MAX_LENGTH]
+
+Ingest a file or a directory with multiple files into memory. Make sure to set your .env before running this script.
+
+options:
+  -h, --help               show this help message and exit
+  --file FILE              The file to ingest.
+  --dir DIR                The directory containing the files to ingest.
+  --init                   Init the memory and wipe its content (default: False)
+  --overlap OVERLAP        The overlap size between chunks when ingesting files (default: 200)
+  --max_length MAX_LENGTH  The max_length of each chunk when ingesting files (default: 4000)
+
+# python data_ingestion.py --dir DataFolder --init --overlap 100 --max_length 2000
+```
+
+In the example above, the script initializes the memory and ingests all files within the `Auto-Gpt/autogpt/auto_gpt_workspace/DataFolder` directory into memory, with an overlap of 100 between chunks and a maximum chunk length of 2000.
+
+Note that you can also use the `--file` argument to ingest a single file into memory, and that data_ingestion.py will only ingest files within the `/auto_gpt_workspace` directory.
+
+The DIR path is relative to the auto_gpt_workspace directory, so `python data_ingestion.py --dir . --init` will ingest everything in the `auto_gpt_workspace` directory.
+
+You can adjust the `max_length` and `overlap` parameters to fine-tune the way the
+documents are presented to the AI when it "recalls" that memory:
+
+- Adjusting the overlap value allows the AI to access more contextual information
+  from each chunk when recalling information, but will result in more chunks being
+  created and therefore increase memory backend usage and OpenAI API requests.
+- Reducing the `max_length` value will create more chunks, which can save prompt
+  tokens by allowing for more message history in the context, but will also
+  increase the number of memory backend and OpenAI API requests.
+- Increasing the `max_length` value will provide the AI with more contextual
+  information from each chunk, reducing the number of chunks created and saving on
+  OpenAI API requests. However, this may also use more prompt tokens and decrease
+  the overall context available to the AI.
+
+Memory pre-seeding is a technique for improving AI accuracy by ingesting relevant data
+into its memory. Chunks of data are split and added to memory, allowing the AI to access
+them quickly and generate more accurate responses. It's useful for large datasets or when
+specific information needs to be accessed quickly. Examples include ingesting API or
+GitHub documentation before running Auto-GPT.
+
+!!! attention
+    If you use Redis for memory, make sure to run Auto-GPT with `WIPE_REDIS_ON_START=False`.
+
+    For other memory backends, we currently forcefully wipe the memory when starting
+    Auto-GPT. To ingest data with those memory backends, you can call the
+    `data_ingestion.py` script anytime during an Auto-GPT run.
+
+Memories will be available to the AI immediately as they are ingested, even if ingested
+while Auto-GPT is running.
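+
+For example, a pre-seeding setup that persists memory across runs with the Redis backend could use this `.env` (host and port are the defaults from the Redis section above):
+
+``` ini
+MEMORY_BACKEND=redis
+WIPE_REDIS_ON_START=False
+REDIS_HOST=localhost
+REDIS_PORT=6379
+```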
diff --git a/docs/configuration/search.md b/docs/configuration/search.md
new file mode 100644
index 00000000..4640d63c
--- /dev/null
+++ b/docs/configuration/search.md
@@ -0,0 +1,37 @@
+## 🔍 Google API Keys Configuration
+
+!!! note
+    This section is optional. Use the official Google API if search attempts return
+    error 429. To use the `google_official_search` command, you need to set up your
+    Google API key in your environment variables.
+
+Create your project:
+
+1. Go to the [Google Cloud Console](https://console.cloud.google.com/).
+2. If you don't already have an account, create one and log in
+3. Create a new project by clicking on the *Select a Project* dropdown at the top of the
+   page and clicking *New Project*
+4. Give it a name and click *Create*
+5. Set up a custom search API and add to your .env file:
+    5. Go to the [APIs & Services Dashboard](https://console.cloud.google.com/apis/dashboard)
+    6. Click *Enable APIs and Services*
+    7. Search for *Custom Search API* and click on it
+    8. Click *Enable*
+    9. Go to the [Credentials](https://console.cloud.google.com/apis/credentials) page
+    10. Click *Create Credentials*
+    11. Choose *API Key*
+    12. Copy the API key
+    13. Set it as the `GOOGLE_API_KEY` in your `.env` file
+14. [Enable](https://console.developers.google.com/apis/api/customsearch.googleapis.com)
+    the Custom Search API on your project. (You might need to wait a few minutes for it to propagate.)
+    Set up a custom search engine and add to your .env file:
+    15. Go to the [Custom Search Engine](https://cse.google.com/cse/all) page
+    16. Click *Add*
+    17. Set up your search engine by following the prompts.
+        You can choose to search the entire web or specific sites
+    18. Once you've created your search engine, click on *Control Panel*
+    19. Click *Basics*
+    20. Copy the *Search engine ID*
+    21. Set it as the `CUSTOM_SEARCH_ENGINE_ID` in your `.env` file
+
+_Remember that your free daily custom search quota allows only up to 100 searches. To increase this limit, you need to assign a billing account to the project to benefit from up to 10K daily searches._
diff --git a/docs/configuration/voice.md b/docs/configuration/voice.md
new file mode 100644
index 00000000..728fbaf5
--- /dev/null
+++ b/docs/configuration/voice.md
@@ -0,0 +1,37 @@
+# Text to Speech
+
+Enter this command to use TTS _(Text-to-Speech)_ for Auto-GPT:
+
+``` shell
+python -m autogpt --speak
+```
+
+Eleven Labs provides voice technologies such as voice design, speech synthesis, and
+premade voices that Auto-GPT can use for speech.
+
+1. Go to [ElevenLabs](https://beta.elevenlabs.io/) and make an account if you don't
+   already have one.
+2. Choose and set up the *Starter* plan.
+3. Click the top right icon and find *Profile* to locate your API Key.
+
+In the `.env` file set:
+
+- `ELEVENLABS_API_KEY`
+- `ELEVENLABS_VOICE_1_ID` (example: _"premade/Adam"_)
+
+### List of available voices
+
+!!! note
+    You can use either the name or the voice ID to configure a voice
+
+| Name   | Voice ID |
+| ------ | -------- |
+| Rachel | `21m00Tcm4TlvDq8ikWAM` |
+| Domi   | `AZnzlk1XvdvUeBnXmlld` |
+| Bella  | `EXAVITQu4vr4xnSDxMaL` |
+| Antoni | `ErXwobaYiN019PkySvjV` |
+| Elli   | `MF3mGyEYCl7XYWbV9V6O` |
+| Josh   | `TxGEqnHWrfWFTfGW9XjX` |
+| Arnold | `VR6AewLTigWG4xSOukaG` |
+| Adam   | `pNInz6obpgDQGcFmaJgB` |
+| Sam    | `yoZ06aMxZJJ28mfd3POQ` |
diff --git a/docs/contributing.md b/docs/contributing.md
new file mode 120000
index 00000000..44fcc634
--- /dev/null
+++ b/docs/contributing.md
@@ -0,0 +1 @@
+../CONTRIBUTING.md
\ No newline at end of file
diff --git a/docs/index.md b/docs/index.md
new file mode 100644
index 00000000..fb63ea14
--- /dev/null
+++ b/docs/index.md
@@ -0,0 +1,5 @@
+# Auto-GPT
+
+Welcome to Auto-GPT. Please follow the [Installation](/setup/) guide to get started.
+
+It is recommended to use a virtual machine for tasks that require high security measures to prevent any potential harm to the main computer's system and data.
diff --git a/docs/plugins.md b/docs/plugins.md
new file mode 100644
index 00000000..cc4a3299
--- /dev/null
+++ b/docs/plugins.md
@@ -0,0 +1,8 @@
+## Plugins
+
+⚠️💀 **WARNING** 💀⚠️: Review the code of any plugin you use thoroughly, as plugins can execute any Python code, potentially leading to malicious activities, such as stealing your API keys.
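+
+For example, installing a plugin is typically just a matter of dropping its zip into the `plugins` directory that Auto-GPT scans at startup (the plugin name below is hypothetical, and the directory name assumes the default `plugins_dir` setting):
+
+``` shell
+cp ./Auto-GPT-Example-Plugin.zip ./plugins/
+```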
+
+See our [Plugins Repo](https://github.com/Significant-Gravitas/Auto-GPT-Plugins) for more info on how to install all the amazing plugins the community has built!
+
+Alternatively, developers can use the [Auto-GPT Plugin Template](https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template) as a starting point for creating their own plugins.
+
diff --git a/docs/setup.md b/docs/setup.md
new file mode 100644
index 00000000..d0917c8b
--- /dev/null
+++ b/docs/setup.md
@@ -0,0 +1,211 @@
+# Setting up Auto-GPT
+
+## 📋 Requirements
+
+Choose an environment to run Auto-GPT in (pick one):
+
+  - [Docker](https://docs.docker.com/get-docker/) (*recommended*)
+  - Python 3.10 or later (instructions: [for Windows](https://www.tutorialspoint.com/how-to-install-python-in-windows))
+  - [VSCode + devcontainer](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers)
+
+
+## 🗝️ Getting an API key
+
+Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](https://platform.openai.com/account/api-keys).
+
+!!! attention
+    To use the OpenAI API with Auto-GPT, we strongly recommend **setting up billing**
+    (AKA paid account). Free accounts are [limited][openai/api limits] to 3 API calls per
+    minute, which can cause the application to crash.
+
+    You can set up a paid account at [Manage account > Billing > Overview](https://platform.openai.com/account/billing/overview).
+
+[openai/api limits]: https://platform.openai.com/docs/guides/rate-limits/overview#:~:text=Free%20trial%20users,RPM%0A40%2C000%20TPM
+
+!!! important
+    It's highly recommended that you keep track of your API costs on [the Usage page](https://platform.openai.com/account/usage).
+    You can also set limits on how much you spend on [the Usage limits page](https://platform.openai.com/account/billing/limits).
+
+![For OpenAI API key to work, set up paid account at OpenAI API > Billing](./imgs/openai-api-key-billing-paid-account.png)
+
+
+## Setting up Auto-GPT
+
+### Set up with Docker
+
+1. Make sure you have Docker installed, see [requirements](#requirements)
+2. Pull the latest image from [Docker Hub]
+
+    :::shell
+    docker pull significantgravitas/auto-gpt
+
+3. Create a folder for Auto-GPT
+4. In the folder, create a file called `docker-compose.yml` with the following contents:
+
+    :::yaml
+    version: "3.9"
+    services:
+      auto-gpt:
+        image: significantgravitas/auto-gpt
+        depends_on:
+          - redis
+        env_file:
+          - .env
+        environment:
+          MEMORY_BACKEND: ${MEMORY_BACKEND:-redis}
+          REDIS_HOST: ${REDIS_HOST:-redis}
+        volumes:
+          - ./:/app
+        profiles: ["exclude-from-up"]
+      redis:
+        image: "redis/redis-stack-server:latest"
+
+5. Create the necessary [configuration](#configuration) files. If needed, you can find
+   templates in the [repository]. See also the condensed shell example below this section.
+6. Continue to [Run with Docker](#run-with-docker)
+
+!!! note "Docker only supports headless browsing"
+    Auto-GPT uses a browser in headless mode by default: `HEADLESS_BROWSER=True`.
+    Please do not change this setting in combination with Docker, or Auto-GPT will crash.
+
+[Docker Hub]: https://hub.docker.com/r/significantgravitas/auto-gpt
+[repository]: https://github.com/Significant-Gravitas/Auto-GPT
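+
+For reference, steps 2-5 condensed into shell form (the folder name is just an example):
+
+``` shell
+docker pull significantgravitas/auto-gpt
+mkdir Auto-GPT && cd Auto-GPT
+# create docker-compose.yml as shown in step 4, and your .env from the template in the repository
+```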
+
+1. Clone the repository
+
+    :::shell
+    git clone -b stable https://github.com/Significant-Gravitas/Auto-GPT.git
+
+2. Navigate to the directory where you downloaded the repository
+
+    :::shell
+    cd Auto-GPT
+
+
+### Set up without Git/Docker
+
+!!! warning
+    We recommend using Git or Docker to make updating easier.
+
+1. Download `Source code (zip)` from the [latest stable release](https://github.com/Significant-Gravitas/Auto-GPT/releases/latest)
+2. Extract the zip file into a folder
+
+
+### Configuration
+
+1. Find the file named `.env.template` in the main `Auto-GPT` folder. This file may
+   be hidden by default in some operating systems due to the dot prefix. To reveal
+   hidden files, follow the instructions for your specific operating system:
+   [Windows][show hidden files/Windows], [macOS][show hidden files/macOS].
+2. Create a copy of `.env.template` and call it `.env`;
+   if you're already in a command prompt/terminal window: `cp .env.template .env`.
+3. Open the `.env` file in a text editor.
+4. Find the line that says `OPENAI_API_KEY=`.
+5. After the `=`, enter your unique OpenAI API Key *without any quotes or spaces*.
+6. Enter any other API keys or tokens for services you would like to use.
+
+    !!! note
+        To activate and adjust a setting, remove the `# ` prefix.
+
+7. Save and close the `.env` file.
+
+!!! info "Using a GPT Azure instance"
+    If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and
+    make an Azure configuration file:
+
+    - Rename `azure.yaml.template` to `azure.yaml` and provide the relevant `azure_api_base`, `azure_api_version` and all the deployment IDs for the relevant models in the `azure_model_map` section:
+        - `fast_llm_model_deployment_id`: your gpt-3.5-turbo or gpt-4 deployment ID
+        - `smart_llm_model_deployment_id`: your gpt-4 deployment ID
+        - `embedding_model_deployment_id`: your text-embedding-ada-002 v2 deployment ID
+
+    Example:
+
+    :::yaml
+    # Please specify all of these values as double-quoted strings
+    # Replace each string in angled brackets (<>) with your own deployment ID
+    azure_model_map:
+        fast_llm_model_deployment_id: "<my-fast-llm-deployment-id>"
+        ...
+
+    Details can be found in the [openai-python docs], and in the [Azure OpenAI docs] for the embedding model.
+    If you're on Windows you may need to install an [MSVC library](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170).
+
+[show hidden files/Windows]: https://support.microsoft.com/en-us/windows/view-hidden-files-and-folders-in-windows-97fbc472-c603-9d90-91d0-1166d1d9f4b5
+[show hidden files/macOS]: https://www.pcmag.com/how-to/how-to-access-your-macs-hidden-files
+[openai-python docs]: https://github.com/openai/openai-python#microsoft-azure-endpoints
+[Azure OpenAI docs]: https://learn.microsoft.com/en-us/azure/cognitive-services/openai/tutorials/embeddings?tabs=command-line
+
+
+## Running Auto-GPT
+
+### Run with Docker
+
+The easiest way is to use `docker-compose`. Run the commands below in your Auto-GPT folder.
+
+1. Build the image. If you have pulled the image from Docker Hub, skip this step.
+
+    :::shell
+    docker-compose build auto-gpt
+
+2. Run Auto-GPT
+
+    :::shell
+    docker-compose run --rm auto-gpt
+
+    By default, this will also start and attach a Redis memory backend. If you do not
+    want this, comment or remove the `depends_on: - redis` and `redis:` sections from
+    `docker-compose.yml`.
+
+    For related settings, see [Memory > Redis setup](./configuration/memory.md#redis-setup).
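+
+Since the compose file above defaults `MEMORY_BACKEND` to `redis` via `${MEMORY_BACKEND:-redis}`, you can also override the memory backend for a single run from your shell instead of editing the file. A minimal sketch (the Redis container will still be started, but Auto-GPT will use the local backend):
+
+``` shell
+MEMORY_BACKEND=local docker-compose run --rm auto-gpt
+```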
+You can pass extra arguments, e.g. running with `--gpt3only` and `--continuous`:
+
+``` shell
+docker-compose run --rm auto-gpt --gpt3only --continuous
+```
+
+If you dare, you can also build and run it with "vanilla" docker commands:
+
+``` shell
+docker build -t auto-gpt .
+docker run -it --env-file=.env -v $PWD:/app auto-gpt
+docker run -it --env-file=.env -v $PWD:/app --rm auto-gpt --gpt3only --continuous
+```
+
+[docker-compose file]: https://github.com/Significant-Gravitas/Auto-GPT/blob/stable/docker-compose.yml
+
+
+### Run with Dev Container
+
+1. Install the [Remote - Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) extension in VS Code.
+
+2. Open the command palette with ++f1++ and type `Dev Containers: Open Folder in Container`.
+
+3. Run `./run.sh`.
+
+
+### Run without Docker
+
+Simply run the startup script in your terminal. This will install any necessary Python
+packages and launch Auto-GPT.
+
+- On Linux/macOS:
+
+    :::shell
+    ./run.sh
+
+- On Windows:
+
+    :::shell
+    .\run.bat
+
+If this gives errors, make sure you have a compatible Python version installed. See also
+the [requirements](#requirements).
diff --git a/docs/testing.md b/docs/testing.md
new file mode 100644
index 00000000..9a173596
--- /dev/null
+++ b/docs/testing.md
@@ -0,0 +1,46 @@
+# Running tests
+
+To run all tests, use the following command:
+
+``` shell
+pytest
+```
+
+If `pytest` is not found:
+
+``` shell
+python -m pytest
+```
+
+### Running specific test suites
+
+- To run without integration tests:
+
+    :::shell
+    pytest --without-integration
+
+- To run without *slow* integration tests:
+
+    :::shell
+    pytest --without-slow-integration
+
+- To run tests and see coverage:
+
+    :::shell
+    pytest --cov=autogpt --without-integration --without-slow-integration
+
+## Running the linter
+
+This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting.
+We currently use the following rules: `E303,W293,W291,W292,E305,E231,E302`.
+See the [flake8 rules](https://www.flake8rules.com/) for more information.
+
+To run the linter:
+
+``` shell
+flake8 .
+```
+
+Or:
+
+``` shell
+python -m flake8 .
+```
diff --git a/docs/usage.md b/docs/usage.md
new file mode 100644
index 00000000..80fa7985
--- /dev/null
+++ b/docs/usage.md
@@ -0,0 +1,95 @@
+# Usage
+
+## Command Line Arguments
+Running with `--help` lists all the possible command line arguments you can pass:
+
+``` shell
+./run.sh --help  # on Linux / macOS
+
+.\run.bat --help  # on Windows
+```
+
+!!! info
+    For use with Docker, replace the script in the examples with
+    `docker-compose run --rm auto-gpt`:
+
+    :::shell
+    docker-compose run --rm auto-gpt --help
+    docker-compose run --rm auto-gpt --ai-settings <filename>
+
+!!! note
+    Replace anything in angled brackets (<>) with a value you want to specify
+
+Here are some common arguments you can use when running Auto-GPT:
+
+* Run Auto-GPT with a different AI Settings file
+
+    :::shell
+    ./run.sh --ai-settings <filename>
+
+* Specify a memory backend
+
+    :::shell
+    ./run.sh --use-memory <memory-backend>
+
+!!! note
+    There are shorthands for some of these flags, for example `-m` for `--use-memory`.
+    Use `./run.sh --help` for more information.
+
+### Speak Mode
+
+Enter this command to use TTS _(Text-to-Speech)_ for Auto-GPT:
+
+``` shell
+./run.sh --speak
+```
+
+### 💀 Continuous Mode ⚠️
+
+Run the AI **without** user authorization, 100% automated.
+Continuous mode is NOT recommended.
+It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorize.
+
+Use at your own risk.
+
+``` shell
+./run.sh --continuous
+```
+
+To exit the program, press ++ctrl+c++.
+
+### ♻️ Self-Feedback Mode ⚠️
+
+Running Self-Feedback will **INCREASE** token use and thus cost more. This feature enables the agent to provide self-feedback by verifying its own actions and checking whether they align with its current goals. If not, it will provide better feedback for the next loop. To enable this feature for the current loop, input `S` into the input field.
+
+### GPT-3.5 ONLY Mode
+
+If you don't have access to GPT-4, this mode allows you to use Auto-GPT!
+
+``` shell
+./run.sh --gpt3only
+```
+
+You can achieve the same by setting `SMART_LLM_MODEL` in `.env` to `gpt-3.5-turbo`.
+
+### GPT-4 ONLY Mode
+
+If you have access to GPT-4, this mode allows you to use Auto-GPT solely with GPT-4.
+This may give your bot increased intelligence.
+
+``` shell
+./run.sh --gpt4only
+```
+
+!!! warning
+    Since GPT-4 is more expensive to use, running Auto-GPT in GPT-4-only mode will
+    increase your API costs.
+
+## Logs
+
+Activity and error logs are located in the `./output/logs` directory.
+
+To print out debug logs:
+
+``` shell
+./run.sh --debug
+```
diff --git a/mkdocs.yml b/mkdocs.yml
new file mode 100644
index 00000000..1294a840
--- /dev/null
+++ b/mkdocs.yml
@@ -0,0 +1,27 @@
+site_name: Auto-GPT
+site_url: https://docs.agpt.co/
+repo_url: https://github.com/Significant-Gravitas/Auto-GPT
+nav:
+  - Home: index.md
+  - Setup: setup.md
+  - Usage: usage.md
+  - Plugins: plugins.md
+  - Configuration:
+    - Search: configuration/search.md
+    - Memory: configuration/memory.md
+    - Voice: configuration/voice.md
+    - Image Generation: configuration/imagegen.md
+
+  - Contributing:
+    - Contribution guide: contributing.md
+    - Running tests: testing.md
+    - Code of Conduct: code-of-conduct.md
+
+  - License: https://github.com/Significant-Gravitas/Auto-GPT/blob/master/LICENSE
+
+theme: readthedocs
+
+markdown_extensions:
+  admonition:
+  codehilite:
+  pymdownx.keys:
diff --git a/plugin.png b/plugin.png
new file mode 100644
index 00000000..865ce3c9
Binary files /dev/null and b/plugin.png differ
diff --git a/plugins/__PUT_PLUGIN_ZIPS_HERE__ b/plugins/__PUT_PLUGIN_ZIPS_HERE__
new file mode 100644
index 00000000..e69de29b
diff --git a/pyproject.toml b/pyproject.toml
index fdb43d66..bf71c70c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,8 +1,25 @@
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
 [project]
-name = "auto-gpt"
-version = "0.1.0"
-description = "A GPT based ai agent"
+name = "agpt"
+version = "0.3.0"
+authors = [
+    { name="Torantulino", email="support@agpt.co" },
+]
 readme = "README.md"
+requires-python = ">=3.10"
+classifiers = [
+    "Programming Language :: Python :: 3",
+    "License :: OSI Approved :: MIT License",
+    "Operating System :: OS Independent",
+]
+description = "An open-source attempt to make GPT-4 autonomous"
+
+[project.urls]
+"Homepage" = "https://github.com/Significant-Gravitas/Auto-GPT"
+"Bug Tracker" = "https://github.com/Significant-Gravitas/Auto-GPT"
 
 [tool.black]
 line-length = 88
diff --git a/requirements.txt b/requirements.txt
index 66c90c79..e1900ef5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,6 @@
-beautifulsoup4
+beautifulsoup4>=4.12.2
 colorama==0.4.6
+distro==1.8.0
 openai==0.27.2
 playsound==1.2.2
 python-dotenv==1.0.0
@@ -13,15 +14,16 @@ duckduckgo-search
 google-api-python-client #(https://developers.google.com/custom-search/v1/overview)
 pinecone-client==2.2.1
 redis
-orjson
+orjson==3.8.10
 Pillow
-selenium
+selenium==4.1.4
webdriver-manager jsonschema tweepy click +charset-normalizer>=3.1.0 spacy>=3.0.0,<4.0.0 -en_core_web_sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.4.0/en_core_web_sm-3.4.0-py3-none-any.whl +en-core-web-sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.5.0/en_core_web_sm-3.5.0-py3-none-any.whl ##Dev coverage @@ -31,6 +33,13 @@ pre-commit black isort gitpython==3.1.31 +auto-gpt-plugin-template +mkdocs +pymdown-extensions + + +# OpenAI and Generic plugins import +openapi-python-client==0.13.4 # Items below this point will not be included in the Docker Image @@ -42,3 +51,5 @@ pytest-benchmark pytest-cov pytest-integration pytest-mock +vcrpy +pytest-recording diff --git a/scripts/__init__.py b/scripts/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/scripts/check_requirements.py b/scripts/check_requirements.py index e4eab024..3323697b 100644 --- a/scripts/check_requirements.py +++ b/scripts/check_requirements.py @@ -1,3 +1,4 @@ +import re import sys import pkg_resources @@ -16,7 +17,7 @@ def main(): for package in required_packages: if not package: # Skip empty lines continue - package_name = package.strip().split("==")[0] + package_name = re.split("[<>=@ ]+", package.strip())[0] if package_name.lower() not in installed_packages: missing_packages.append(package_name) diff --git a/scripts/install_plugin_deps.py b/scripts/install_plugin_deps.py new file mode 100644 index 00000000..d15c166f --- /dev/null +++ b/scripts/install_plugin_deps.py @@ -0,0 +1,35 @@ +import os +import subprocess +import sys +import zipfile +from pathlib import Path + + +def install_plugin_dependencies(): + """ + Installs dependencies for all plugins in the plugins dir. + + Args: + None + + Returns: + None + """ + plugins_dir = Path(os.getenv("PLUGINS_DIR", "plugins")) + for plugin in plugins_dir.glob("*.zip"): + with zipfile.ZipFile(str(plugin), "r") as zfile: + try: + basedir = zfile.namelist()[0] + basereqs = os.path.join(basedir, "requirements.txt") + extracted = zfile.extract(basereqs, path=plugins_dir) + subprocess.check_call( + [sys.executable, "-m", "pip", "install", "-r", extracted] + ) + os.remove(extracted) + os.rmdir(os.path.join(plugins_dir, basedir)) + except KeyError: + continue + + +if __name__ == "__main__": + install_plugin_dependencies() diff --git a/tests/browse_tests.py b/tests/browse_tests.py deleted file mode 100644 index f896e7dd..00000000 --- a/tests/browse_tests.py +++ /dev/null @@ -1,26 +0,0 @@ -import os -import sys -import unittest - -from bs4 import BeautifulSoup - -sys.path.append(os.path.abspath("../scripts")) - -from browse import extract_hyperlinks - - -class TestBrowseLinks(unittest.TestCase): - def test_extract_hyperlinks(self): - body = """ - - Google - Foo -
Some other crap
- - """ - soup = BeautifulSoup(body, "html.parser") - links = extract_hyperlinks(soup, "http://example.com") - self.assertEqual( - links, - [("Google", "https://google.com"), ("Foo", "http://example.com/foo.html")], - ) diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 00000000..da00058b --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,41 @@ +from pathlib import Path + +import pytest +from pytest_mock import MockerFixture + +from autogpt.config import Config +from autogpt.llm import ApiManager +from autogpt.workspace import Workspace + +pytest_plugins = ["tests.integration.agent_factory"] + + +@pytest.fixture() +def workspace_root(tmp_path: Path) -> Path: + return tmp_path / "home/users/monty/auto_gpt_workspace" + + +@pytest.fixture() +def workspace(workspace_root: Path) -> Workspace: + workspace_root = Workspace.make_workspace(workspace_root) + return Workspace(workspace_root, restrict_to_workspace=True) + + +@pytest.fixture() +def config(mocker: MockerFixture, workspace: Workspace) -> Config: + config = Config() + + # Do a little setup and teardown since the config object is a singleton + mocker.patch.multiple( + config, + workspace_path=workspace.root, + file_logger_path=workspace.get_path("file_logger.txt"), + ) + yield config + + +@pytest.fixture() +def api_manager() -> ApiManager: + if ApiManager in ApiManager._instances: + del ApiManager._instances[ApiManager] + return ApiManager() diff --git a/tests/context.py b/tests/context.py index cef969db..0d531468 100644 --- a/tests/context.py +++ b/tests/context.py @@ -1,6 +1,7 @@ import os import sys +# Add the scripts directory to the path so that we can import the browse module. sys.path.insert( 0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../scripts")) ) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/integration/agent_factory.py b/tests/integration/agent_factory.py new file mode 100644 index 00000000..8cb622ec --- /dev/null +++ b/tests/integration/agent_factory.py @@ -0,0 +1,148 @@ +import pytest + +from autogpt.agent import Agent +from autogpt.commands.command import CommandRegistry +from autogpt.config import AIConfig, Config +from autogpt.memory import LocalCache, NoMemory, get_memory +from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT +from autogpt.workspace import Workspace + + +@pytest.fixture +def agent_test_config(config: Config): + was_continuous_mode = config.continuous_mode + was_temperature = config.temperature + config.set_continuous_mode(True) + config.set_temperature(0) + yield config + config.set_continuous_mode(was_continuous_mode) + config.set_temperature(was_temperature) + + +@pytest.fixture +def memory_local_cache(agent_test_config: Config): + was_memory_backend = agent_test_config.memory_backend + + agent_test_config.set_memory_backend("local_cache") + yield get_memory(agent_test_config, init=True) + + agent_test_config.set_memory_backend(was_memory_backend) + + +@pytest.fixture +def memory_none(agent_test_config: Config): + was_memory_backend = agent_test_config.memory_backend + + agent_test_config.set_memory_backend("no_memory") + yield get_memory(agent_test_config, init=True) + + agent_test_config.set_memory_backend(was_memory_backend) + + +@pytest.fixture +def browser_agent(agent_test_config, memory_none: NoMemory, workspace: Workspace): + command_registry = CommandRegistry() + command_registry.import_commands("autogpt.commands.file_operations") + 
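+    # import_commands() loads the named module and registers the commands it defines
+    # with this registry, so each agent fixture only gets the commands it needs.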
command_registry.import_commands("autogpt.commands.web_selenium") + command_registry.import_commands("autogpt.app") + + ai_config = AIConfig( + ai_name="browse_website-GPT", + ai_role="an AI designed to use the browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html, answer the question 'What is the price of the book?' and write the price to a file named \"browse_website.txt\", and use the task_complete command to complete the task.", + ai_goals=[ + "Use the browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html and answer the question 'What is the price of the book?'", + 'Write the price of the book to a file named "browse_website.txt".', + "Use the task_complete command to complete the task.", + "Do not use any other commands.", + ], + ) + ai_config.command_registry = command_registry + + system_prompt = ai_config.construct_full_prompt() + + agent = Agent( + ai_name="", + memory=memory_none, + full_message_history=[], + command_registry=command_registry, + config=ai_config, + next_action_count=0, + system_prompt=system_prompt, + triggering_prompt=DEFAULT_TRIGGERING_PROMPT, + workspace_directory=workspace.root, + ) + + return agent + + +@pytest.fixture +def writer_agent(agent_test_config, memory_none: NoMemory, workspace: Workspace): + command_registry = CommandRegistry() + command_registry.import_commands("autogpt.commands.file_operations") + command_registry.import_commands("autogpt.app") + + ai_config = AIConfig( + ai_name="write_to_file-GPT", + ai_role="an AI designed to use the write_to_file command to write 'Hello World' into a file named \"hello_world.txt\" and then use the task_complete command to complete the task.", + ai_goals=[ + "Use the write_to_file command to write 'Hello World' into a file named \"hello_world.txt\".", + "Use the task_complete command to complete the task.", + "Do not use any other commands.", + ], + ) + ai_config.command_registry = command_registry + + triggering_prompt = ( + "Determine which next command to use, and respond using the" + " format specified above:" + ) + system_prompt = ai_config.construct_full_prompt() + + agent = Agent( + ai_name="", + memory=memory_none, + full_message_history=[], + command_registry=command_registry, + config=ai_config, + next_action_count=0, + system_prompt=system_prompt, + triggering_prompt=triggering_prompt, + workspace_directory=workspace.root, + ) + + return agent + + +@pytest.fixture +def memory_management_agent( + agent_test_config, memory_local_cache, workspace: Workspace +): + command_registry = CommandRegistry() + command_registry.import_commands("autogpt.commands.file_operations") + command_registry.import_commands("autogpt.app") + + ai_config = AIConfig( + ai_name="Follow-Instructions-GPT", + ai_role="an AI designed to read the instructions_1.txt file using the read_file method and follow the instructions in the file.", + ai_goals=[ + "Use the command read_file to read the instructions_1.txt file", + "Follow the instructions in the instructions_1.txt file", + ], + ) + ai_config.command_registry = command_registry + + system_prompt = ai_config.construct_full_prompt() + + agent = Agent( + ai_name="", + memory=memory_local_cache, + full_message_history=[], + command_registry=command_registry, + config=ai_config, + next_action_count=0, + system_prompt=system_prompt, + triggering_prompt=DEFAULT_TRIGGERING_PROMPT, + workspace_directory=workspace.root, + ) + + return agent diff --git a/tests/integration/agent_utils.py 
b/tests/integration/agent_utils.py new file mode 100644 index 00000000..a899afcf --- /dev/null +++ b/tests/integration/agent_utils.py @@ -0,0 +1,12 @@ +import concurrent.futures + +from autogpt.agent.agent import Agent + + +def run_interaction_loop(agent: Agent, timeout: float | None): + with concurrent.futures.ThreadPoolExecutor() as executor: + future = executor.submit(agent.start_interaction_loop) + try: + result = future.result(timeout=timeout) + except concurrent.futures.TimeoutError: + assert False, f"The process took longer than {timeout} seconds to complete." diff --git a/tests/integration/cassettes/test_llm_utils/test_get_ada_embedding.yaml b/tests/integration/cassettes/test_llm_utils/test_get_ada_embedding.yaml new file mode 100644 index 00000000..ec4636e0 --- /dev/null +++ b/tests/integration/cassettes/test_llm_utils/test_get_ada_embedding.yaml @@ -0,0 +1,168 @@ +interactions: +- request: + body: '{"input": [[1985]], "model": "text-embedding-ada-002", "encoding_format": + "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '83' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SaSxO6Orvl5++n2LWn9FsiIgl7xl3kkiAgYldXlyAiKHJNgJw6371L/6dOd08c + QAqV5HnWWr/kP/71119/t1ld5NPf//z197sap7//x/fa/Tbd/v7nr//5r7/++uuv//h9/n8jiyYr + 7vfqU/6G/25Wn3ux/P3PX/x/X/m/g/756++DcanIKGZXd/X5SZTuBpdQW6+lYQqkfQ5NFN8pSgIw + zBGFBLyF+0x9ZATRzFVcC+xx3aJPfw3cJaBFA2RnibBlW4EuBNKhlLJ5dbH/QRJYDniFELe0Ryw1 + zy4l2M8lVX4IVLvccrDkD26WikG50PuxCQFdnkwEiSTcsFJvgcv2m6mB50DfUpu5FLAPV1ZQMZ8b + IvW7azZ//KsHo/R9nYA/OPXa3M0WXkbEI3Dus2z9lNsEjEngUv+4PdWrN6EKdpdVoMba1vqqHZkH + b+fNG4mbTQRmWGsFxIdzQM3Kfkbt1AUcBI3doc1xk9ZLJVoC3ClDSm3yBtl4AC8Bvk47CzunCbFl + aqkGQtGR0A74sT4HY8DDq8puaE3xHC1C9H7BD749CHUSFM03CxuwDd2YQD5eskks+B4idSLUZd1V + n+fOt2FzPT2pkptdNtu6EQD6Cg2sRFSr1/NdKvcmSu74EM8ioDerUyT9vHfIWElpvUwkz0F2c2e0 + vXpNNvuXsQe81EdofRp6LfDVTYMNUUx6PtQEjL//y79kHiuu1mRzc3xWsDg6TwQepu6OOhkEae88 + TKzIbykanVhZ5SOLI3zWZCmjogx6OCXDhAT9yuoulKYevMWdRl2+GOtu9W6a9CHZBxvBramHc8Up + MNsImCJ0jBgThDiA13A6U8UU5IxJ8keQGG8I2At5RecPahjD4yJBrGZgAl2aDgjGwQth47BVXR5l + eQyPQVnRMISPjMUvNgNJeRm4GC/BsMh0SsDUKRt6PB+LelHlIJCl9SYjYfewsjbe5Rb8MNvDONKd + gclhbEAMywgba6u762TdAkjVIkJrd3Pc9antBBgEgUIEXZPZdDzHL/hCTxEtOGkHtg5XBQiuvsUe + DR76a+oCCDe720rWtGncxT69JHi49BTbp/jo8vxe5mEjlx02733NVuNIb4BDao0PgNnDnOWLBoNH + YlB8ebb6ulyVUS5at8MW8c5DeVb2IdQv8RN7R9F02VEWRxBM2Yka77HVZzqLMbwdpBSJVyNzRymr + rY2zKwusPUQpIrNlFYC+AgOHmmCxbfbkS3mMeR47t0UB/ImJHvBP64C4GV/rebiFCKKw66h78mRA + twSKwOJ5DtvXng4ru62tlPsgx+63Hpep/Sgwm2cXH55jxNZzfL4BkkeYHPqNMVBkCSFkaL8isbq4 + gPmml0AnmldsPYNbvYiZBwEcZhcHqV9my/bSWVDZRwOaUSyxWs4WT17k15keClcZmCRTHua8lmGn + 2QTR9CDbEtabDUaSqLHoMsfUkgoo+WRNL5+oc6ruJqnc3qeeHIwRldNjBXb81aCHeE7ZOnKrAMs6 + JeRZtn00qXIQwho5Cek+J21Y5/NJg547drRQwStaLreWg+M2irB7ZM96em4kDSiX65bsDjAeRv5o + FxDt+JEqUVmzWUkwhD23rthXls0wLRvTAWgnjGhV9/nQ77ZKLpUhxNTYmEs0LU5pyNKTK4gEBjmb + 9N1DA/zhpmPNutz0JdZ8A3Kq4OOD8HnUtBma8M99fXOpaxLfHF56VUxFwtozfS10vYc5jhl671zD + 7ayTOErIrC4EGB9xmPbVK4aBW2dorrdAXwN9HGExwJCikPu4zMx2BF6OmU6k0DCGuXWjXLKi/EVj + 6cPYlD+EGSr704CPt/NnWBRijjBqDzLaXMIuYrvNkgIm5DYRWWXWs7XbOlC0e4/aBlcNzNn0PcyX + 4YiA8UmHlRVVDBsOKvQU8Za+RmkqgcwZPIy2xgiWNNzOsBncC9Wt/D6Q7a0RwFsKrlhP44ixuK1z + mJ9QT93w2LqzFJocrO+tRA9G1NdLW18R7N2qIuLuRoalfTwrYMvjTOZvf1qorhBgHHuB6tK+cef9 + dXFgBIUnqTt7x2b1/kkgi/oLavuDDKZdJdrSSqWeatz5zOa38EHAy7BPzYDYYAG9ToDs9yrhv/O7 + 
3kQ7Br4WHLD71bdOeCgF5OhVRnJYTWDmzWCG7gXK2MxhmlH95s9gPrsaPvrPul6GduWhpZUdPp5a + N2PcoxThd/6pJrx1sN5EJYGnk8OhrdsLw7AUhgfdNssRF36WqOtJGUDH9I5Ym54LYx6gDjjipiH8 + GzRgILEkwqnjJHqA2qGeuYfFwfO6a5Akth82c+RUwlZrZ3o+eHt3GRvZAiS7xPiwOw86I0WqASsq + XiiYHiqYL1AeAa34CTufxHKX61vUwIDHiR6bOM3owxV4MCXdhM3g4ej8RRs8OBlsR80VvrMlLdIW + 3nfBgJXJ9tz3tXReEIKAYteXjtHylLwGevfXA59j+VMvt9vZhtKaythJKk0XnCFuwFefscdLJhtP + bPZkJd16JMolO2PGpChw674Q4QXqsDnzm1y69MuduvxWi3jyiRA8bz2Rekfxra/2W4JwSk0DR8G5 + 1ufr6Qkh+qQ+eX/1tputOYUtvzHJOnU2W80EImmTcBHa8Ks0LPYlTOC2O26o/yjs73qiyR9/5CVW + XM/LPJbgFFUB2QUiZatq6RUsJPVB5u/7FOIks+HhaliIPWSZjUXjJHB95C69U3DRp+zJV0BYbJ6c + 0rAG7KffB6kosW/K7bCcmBVAk5Idtk/1h43h6CuSu79cUNTZF0DuYPSAWRkOmWt5dafuPqxwDdID + RuVRdZmcuCvciMlEXSW9slV1Zw74/rPCSF6O7uortgAJ6Z9oEndrNKLXksPMs2qqHhRNHxkALyhe + vC1GZr4b5lrqCYyO1pZaqcTrHXgVAbwcNiN267qsV2z2OZzsV0ldheJh1nrowBRr7/96fxetRvBU + azySmk2Qje1JEeHWbRD1YL9j1FPzFiaf2id9EunZ+pATGy59ZKNdIGLwez5wwnOGjbJ86Gy6lTG0 + 3vHtT723vJnOcCPGE3a//WnZVbMDY+7IYeWtGtkCWMtDn/VP9OaAoDMuSBLAKamAtdi26+UdVCv0 + nxHGB0/P63arZ8p+P8aAjKfdFgxTIvYSSooQqy0NdRbWqgDezcajWno5RLO5X0W407WeOob7yGbX + OzcSnYH01e8SzHSeEyjPUkidgdcH5sqjBEs+7mgx4339HtpVgKfH1FBNSEyXP1qAg+vmiqj9XHYZ + M7o0gWP4qAhI1MWdw9HUYErUiXqqvrB5TIpv/QoT1qT7yBbnagRAfUgmthscg4UF6igp/aTR42ab + DvOYezYYNchh/bW/6qPJm7lUJCeGkjXjs7EWmAIN1/Cxsk+ygcwukCCIA4aNoHtHdMCnAOyPTwfj + G8uHpRh8Afh+XSGiOfXPn/HAj5CGBKa2jKJE4aExFh+qtJH4/X1eCEP9HpE9pYrO25u4grz+vmHn + 5o4ZScclgGVVMNTZgLqtsWYljPJUo97xibOlPro3aXEDhUbcbR+tGZgVyNXeh6r5rEbjKh17OJ+P + GrX7gg0lW4sc7PjMoLb9GdksWoIEHTUryV5528OuhwUH6xzX1ASdWn/rPQbVkLwRj/kFLP6wjlB7 + 4hrJqDfA1m1OhmQJwZlG5XrK/ughamObmvrwZrPjDC0wN35GcQsUIDiLpsHwfD5SDfcO4B2nbqF0 + 0mqql+sSMRSebHghLwsHXVC79OfHp1ueYPu+J1EHl9oCxFsHqj9Tk9EPM3t4etAGH+73rT7LzeqA + 0UjP3/mANT2MPJKm4XbCasRb7uI3fgjuZ/FEHfG0uuNmjGYQcy6HXpL6yKbaPdrwlzd1IWuHpZxp + DrFcINJ882XHrLEBh0tLv/mnGlbFPoYgDcQc65L7za9bQ4GOESJqQU8DW+tc3sCoPj/Y3Yk70FWH + aw4DeTeTfWVe69mSqwamD67E5tfv7OxeKCC+kopsK/uZLbOcIVDySUd21/eoz3TQJCg81itWlxxF + 8x4VIQzoWiNxF16GOS9bAukzS4i8v1tgvVw3CkRvTsAq92n1dc3nVN5fNx15g04dhL0g3eDY2io1 + Y/kzrKNRxrK6hCV2HU3Jfs8D4oGcsKY9Fn1BtL8BdFYAdeAxc8k3/8GnlgmoV/rRnRtzEX71RK83 + q/35j1jKK/tNfdS/wHxvFQu8zWYlUXDW9bksHg7sbo6N3SRt2PrJsARffL9DrDcrdyBp38DPfCoQ + f38r2Swmai5/x1Nlu22jKaDFC7KovRDh5FTunBp3WwqCUKEqunLR3KVlKC+39YX4aUjBKD+NEX7X + P1r22UOfYzbeoDffTtS5uV7GErZW0h+9KQJDX0JBhkDfLQo1N09nmL/5Bhiu5VPjmqjD7tUYLSik + EpA6ZsqwO2VVDlKiT+h1KIaoS4ugBd/5or/1O3qBGcD0yV/oY5tifW0rGoKNadyRYFuBy0javwD4 + NMo3f94zFtZH/ud/aELW/pvXRgluwfP0x68vh2tg/PG/1tpHOvXUuP/lETKpQslI5AkatGqgI5k/ + 3cGiawuE9VPSyfztZ+xZ1go8v8cYeyipavbzb/GjD6minXuXFLrbg7g01m8+7Fxat9oN4g+j1AIX + nH3rrwVj66gYJY9b1L8uLw1WtpqSLd6fM4YsLtj7fJNjJMIzWPL3ywMavozUupb20AfSoYILtDn6 + 84vELv0S5kpIkbjrpuwPfwmGjYQku8uBcFtvEiSkfRIQHludJa8PhEuyAei1trU708ER4SSHB3zU + W06fV9Ks8BxeD1SP0rSe57UP4VooPFbf1gHM6LUUcvp4GGT51gfbA1mE9f1YYLuRlWi9kgGB4C5O + 9GAvtvu5yjoPz5pYY4zugst2+tOBumSH1LxHyJ0jyo9wauua7O3XNVtMO2tg7+V3IontAcxD/Jqh + ubwIvbc6ijrTqCXotpJIFc5FbHYvcw4vnNFj5z5u6mWWIw8qkt/Qg3cLszXnPR7IhVVS1XluMxI/ + xBBAKeapW5ApY5301mBNDlck2h8PrMn7JIJFbs7UiJ8kG03B9cDxWdzx8aPVbOVUmUBouguxfbPP + mKjWNxhGYEWQj0/ZrJOa/62fnz7U6zPUUjgEbfdnfuf8VDgwy94HrMe+566L/+bhKPgIl7zI6U8h + mhrw3oHDL78z9s3nIqGvHDvGEjG25ftY+vYDrGsvvp6PKioBMFaMxML29UXfXRTw43nuLaBgvb6R + Bb/8B9FI74f5+RbKXx6jaJtVQ0f3Q7k/12cNB3nc6lRO1QoWAxeixi1BtPiNGUL+teGpbQvD0Pey + IoLp824JTO59NpNJ6SFVFEjawWh1VlbtDF/K3qUG94CMvqdRA6p7fP78l7twyyPYJ3ZlYr+ePjW5 + vi0L1iVnozJc02hOHa2Rv36ILOUprpcj/tjw+rY+2Dm/d4xOJ92Tf/xJfTAB0N96JpxrUttQ02ht + xgDJ19gZ8c//rBN7QMDrJkedOVSzMWzPMdyOyoT2xn2pxy9fg21cNtP+vHPcHnGuBiEkFlW2mDHm + 
OfsZdpYAsMKLhT5eT08OjALpCfzsNLaquO1hcZgcwhwJuIvbXA1gb5wDko4XO1vEArbAvXAyteFh + cNnpLGvQng2dPkprqic+WlJZGe0r2fNmw358FBRJxMii6Zcf38x/+QXndfvSf/4QvJ6XHmvRvhtY + 4D0k6W7A5Ntv+2H83oe2m2Y0g9D55rVAg/0oQiKxswB6Im0SiWxMheL9Z2LdMPYV+OWH42mjusTM + ux7MZltgMy0k1ke58gLf+aFq7nhgTjiQ/3nf2mVZh+HKGgIDeTtj7CQkI6qSWFAUP5i8u+d9oD1M + uP03X9JDFdpRb5XXGG4dZ0fN4NG7bMcXJehasqBebD9gXiW1lW9pvKO6ewrAt7+uwB1Ch2rlNAHW + 8bsGfip0Ii/8Zjq7bo4N+PJq+s1/YHs+vnK4+YjoD+9dvFeWAF0cAT7l12Egx3qL4ErFnhpxiIf9 + GV9FuM5Cg60mr4cF7oIS6s3WJFs327G1GVMEI3nOqbbfpdGojXsR7urHEYlfXracuM4B37z8zW9P + 0FOtWIF9D05fvnZnzO65HIIXDfHXr331UirBTn1H1OCEsP7pgXQqh4rI85Vn6+/9LE5ikr3/rIcv + b7Z+/AEf4aq7c+ebKQDGjIkgVLk+7697B2qCpJGBuqCeOHbWwEswEI6mZpuRn14GhG/JJtKd+udX + 4I9H+8dmZfP97At/+MzmxuAw4c/Og/sxAdTyzoQxJeBz8PE39pdv7/W3U3UpLDUlxt4NfMD4q9d3 + lxT4y1N0iryBQLe95tRNPu9h0TYOAvBS6/T40XS2rHYfSycGKbVhcmRbAroQLgN5YG+gA+sfFkhg + 2PYJEoT9AkZwfcdgE79V6pq+r/M6fwqg3qEL9peDEs1uqCtw95JK8tRufs1AUNnAOJxe1Do5mr47 + inog/fR9zmPb/fILBRKXn7G3T/WMbu61DS89uyMAETeQjU57sG4yRIZZoe46n68KtNj1g621Z+54 + VK0S2l6c4JsalD+9QeCb12mmcChbrdsg/vIZxpxfRayMCg++L0WLoDFCl11lXQDvs5lQnz/J7Mej + gZvUzc+/RuyRqSncZ1eFfv25y3/rWT6cT3eMkiCrd+mIE7BjoMX6+/OqWXWTX/CrF4QT4ZbNh+3m + 9ocXWf1QZXMfbC3ocXBPvTPl2Ki3XAo43q7oob/OP55syGopqPSX10lTAQPoUS1SWynf7penoD9+ + 6dSdrXoXk1SERg71n37UfSY4DfzuT2C9I162Oz1CBxo599O/dlhF5hB4E5cEF8MCAG2WbIR5sb38 + /HJEj2pbwN4r7ljV9B2YuQeCkmJtEsRzjxysxZ4UkJ6klSoXdacT64RmcNw8O3pQjIFR7TlDuM2s + FStqumEL3KUV7EygIMsbXwPtD3tJEh6BjP0xnPSfvsBbAkpSfCIuY0HoJ0AXLh2av3x6Fpw1he5B + 1bC2zPMwvgXqwaOPY4pbfxvN8f4kyE8zeFFzIyqR8Ov/H1+2qdL2ZOgVWw3+jDfubyXaYWfiwIdc + P9SrpHR4PbeqAdqoGagenHWXLP7E//whPpyTKWNfvyh/88mf/YWV2h4C1s4LsDMgqtP4YSXQo/mZ + BhMph13Udx7cbZMNgS/ryabuo4jwiFyMcXRTsuWbp6BUPg/kQ1EdLQFNGrgxrTs2pE8E+tsaSvAy + te6Pp4AJLoMB9XiDsdtMhb5uu30KN9GxR2y6JMP84rcB1IVzh7oqx9my9ZAnhfvXif78ycKUjQPS + zb7+7S8w8kQ3AkVJawjvbIp6XuUlgbzURmQP19plV9aMsBssHZtA93Xy88O/vK+ViaiP0r0U5K8f + psahcKNZcKQbbFwUY6x2oT6/KBbAb/9HxyWuZ+mZVjByKo/65oV3R/zwNPjjZUyITsMo908e3LVD + S/g4pDWTkOj94Y+XdHOpZ16wbLgRhSOST/CarSMnCWDevp/426/An/xvc+1Av+uVzXIj2dI+Fy5Y + iY7IpZddj8D+qhK0+er7aqR7CAso+kRsmr6epQu7yTW2Ttj/wCaaP5z1gostEqpc0hHQIuo18OXZ + ZJGO/cDsXsjB8T4w0hqqGHV0k3I//4rV6D5n9P1CEjjs9SO27mrGGA6iFGZw80Q722U1WY6cLe2U + LsV2RdRhXvkcwfAyZjTcbnBGfzx4t26m335a9NvvBV8egY+k58A01GYKv/yY+rMj6N3LA9ZPn7AD + d80wbMWbAr/9AvuqoLBp3r1n+M1zROhj7+tPrQpu1BNP3nZaRUtyaEZ4xK+G/Pj1ck2mAv78uTek + gt5utrMo/fTLFHiSrVn/GeUfn4y/ej0XVdjCe5xLGD0kTWeKb8ywAsz4+jPObVM8a/CsSTVG/srp + 0zBWJfyUq0Qkv/m4q+ynrz/1AGrPYaszxC8Q5I8EzdrZ0dmLditYrhcfW5X/YqOPDhW8XVf05XOb + aC3sxYITjD2qLM1xYCu1R2guDUFSv9tnP14n9/dbivFxkw5ffbaBwCqLKi/j5tIHbsr9ZVOpWHck + oLPrPnZk8zsT6BNx0dgdzyOA1tum+uodwWrdakn8+3cq4D//9ddf/+t3wqBp78X7ezBgKpbp3/99 + VODft/vt3zwv/JsKf04ikPFWFn//81+HEP7uhrbppv89ta/iM/79z1/bP6cN/p7a6fb+fy7/6/td + //mv/wMAAP//AwDOXgQl4SAAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c09bf823fb50b70-AMS + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Mon, 01 May 2023 17:29:41 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-kd1j0bcill5flig1m29wdaof + openai-processing-ms: + - '69' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - 555d4ffdb6ceac9f62f60bb64d87170d + status: + code: 200 + message: OK +version: 1 
diff --git a/tests/integration/cassettes/test_llm_utils/test_get_ada_embedding_large_context.yaml b/tests/integration/cassettes/test_llm_utils/test_get_ada_embedding_large_context.yaml new file mode 100644 index 00000000..b6d6a75b --- /dev/null +++ b/tests/integration/cassettes/test_llm_utils/test_get_ada_embedding_large_context.yaml @@ -0,0 +1,1484 @@ +interactions: +- request: + body: '{"input": [[5289, 564, 71, 773, 89, 2332, 19747, 87, 664, 71, 8311, 85, + 392, 25299, 11032, 940, 89, 85, 398, 80, 73, 5481, 48558, 86, 1919, 1634, 8890, + 5346, 44185, 53060, 450, 392, 68273, 64145, 511, 77, 80, 3696, 71, 2042, 28990, + 44490, 19456, 89, 21807, 44490, 6331, 3180, 23361, 71, 455, 76, 583, 87, 21436, + 34695, 352, 70, 953, 278, 59524, 2857, 93000, 641, 6043, 73, 70, 36463, 7141, + 21807, 391, 1273, 8536, 566, 71, 7513, 580, 263, 936, 10333, 86, 983, 822, 9700, + 73, 3316, 80, 1320, 365, 27278, 564, 67, 12591, 3576, 36851, 2465, 1739, 657, + 6375, 89, 23361, 6885, 352, 88, 14046, 292, 70, 32204, 648, 300, 73, 32547, + 869, 20854, 28679, 89, 11805, 70, 9081, 80, 11251, 87, 28679, 6194, 85, 906, + 64, 300, 16501, 70494, 6511, 51622, 87, 80, 8462, 295, 13536, 49463, 604, 64, + 333, 79, 302, 5469, 36068, 267, 5289, 19870, 911, 71, 587, 4912, 32868, 64087, + 22650, 89, 3368, 83, 50798, 391, 273, 365, 72, 1233, 604, 3858, 48380, 710, + 6375, 93000, 3013, 427, 7796, 66, 11160, 77, 10910, 85, 4777, 5969, 276, 7717, + 87, 71, 21325, 80, 73389, 3576, 86, 29238, 544, 1026, 31019, 20218, 3524, 64, + 7270, 7783, 80, 1498, 77, 1739, 5979, 327, 49450, 1791, 36409, 86, 582, 894, + 73, 85, 89, 2174, 87, 40494, 20824, 40078, 89, 80, 4317, 1609, 85, 3601, 73, + 86, 36068, 894, 56871, 89, 4680, 67, 2562, 1494, 6331, 16017, 2332, 21046, 86, + 11823, 18854, 85, 10616, 85, 23913, 73, 86682, 441, 1494, 360, 1978, 3013, 2562, + 511, 86, 427, 5298, 81, 2580, 89835, 14694, 86, 3059, 26738, 65736, 84, 1394, + 4458, 677, 85, 3368, 71, 15479, 391, 713, 35531, 566, 94869, 325, 14029, 52360, + 80, 406, 1290, 10616, 79, 773, 3315, 365, 89, 73, 2111, 13045, 73, 89, 14694, + 710, 95210, 1804, 79, 20824, 80, 4086, 19555, 936, 1091, 301, 936, 1494, 73, + 10784, 1029, 77, 1302, 1310, 86, 80, 24752, 89, 73, 89, 1302, 343, 38977, 324, + 25457, 9902, 88, 4168, 67, 3841, 953, 96518, 4777, 3696, 80, 61057, 90563, 80, + 20127, 86, 3418, 24672, 80, 796, 88, 85, 1026, 80, 8910, 285, 7864, 406, 14694, + 454, 361, 85245, 83, 664, 43210, 3368, 27278, 9832, 89, 5010, 89, 762, 86, 19747, + 299, 23808, 69057, 80, 4922, 20211, 71, 73, 404, 89, 1395, 830, 939, 5298, 402, + 83, 80, 70, 1631, 97582, 3306, 7387, 73, 89, 2034, 66, 3906, 53060, 339, 343, + 582, 2465, 80, 52926, 2642, 1739, 69057, 3906, 13649, 85, 3178, 291, 66, 267, + 67910, 24672, 2201, 2933, 34175, 4777, 23361, 10784, 73, 764, 1517, 87, 12825, + 49755, 24752, 89, 265, 307, 85, 80, 689, 332, 1331, 76, 1031, 1658, 325, 2303, + 36851, 87, 8892, 365, 87, 402, 17606, 80, 12407, 4341, 85, 78498, 370, 3074, + 455, 44183, 79224, 830, 20811, 906, 361, 73, 2580, 80, 359, 258, 10753, 2329, + 37406, 50139, 67702, 10616, 56029, 7513, 2438, 402, 3258, 80, 22491, 4748, 359, + 41129, 2034, 11032, 1658, 65, 3714, 65, 1919, 40153, 363, 2580, 52333, 73, 87, + 80, 45015, 4086, 3141, 69, 392, 76, 19747, 21486, 708, 88, 1273, 21486, 85, + 764, 67, 16373, 4223, 641, 53060, 784, 8977, 73, 85, 1739, 6043, 321, 89, 18571, + 49450, 32589, 450, 88, 2303, 62497, 49595, 80, 49755, 1494, 80, 302, 2689, 87, + 29238, 89, 1551, 8088, 76, 20926, 87, 2034, 66, 785, 22873, 53060, 8480, 708, + 88, 906, 73, 4680, 80, 
22119, 89, 398, 78498, 1919, 67, 64087, 318, 24409, 869, + 754, 20990, 1498, 23760, 648, 728, 89, 22619, 29224, 1216, 2933, 85, 1386, 8587, + 7737, 87, 17043, 24990, 70561, 47171, 6375, 67, 1073, 85, 89835, 3933, 85407, + 564, 38359, 3647, 60338, 89, 17008, 10079, 80, 385, 1394, 32837, 1247, 14170, + 74, 936, 1658, 87, 73, 365, 73, 21807, 76, 50139, 2629, 20568, 80, 1609, 5241, + 21206, 96518, 301, 36722, 2249, 74, 581, 75, 641, 1494, 31019, 6780, 967, 361, + 288, 77, 49755, 1631, 73, 11160, 20990, 876, 4511, 69, 675, 78, 352, 15809, + 363, 65, 9110, 20990, 2034, 82, 544, 28546, 939, 73, 37227, 266, 70, 3013, 7911, + 365, 14625, 65, 3841, 62559, 69, 2658, 76, 2302, 41922, 383, 347, 75, 3172, + 784, 634, 89, 26554, 74, 75406, 544, 71, 1146, 20926, 10784, 32589, 71, 894, + 64, 2319, 10784, 509, 15720, 327, 2701, 45015, 89, 344, 9810, 71, 344, 29700, + 2194, 5308, 303, 85, 23361, 402, 86, 939, 283, 6870, 2005, 73, 1919, 5308, 858, + 18854, 74, 11032, 911, 65, 80, 10784, 3576, 4223, 5289, 69, 2249, 37406, 44508, + 89, 2739, 80, 70, 80, 10462, 80, 23001, 31019, 80, 40429, 9092, 19072, 80, 56102, + 15444, 598, 30986, 85, 23866, 3933, 67, 1494, 86, 21325, 73, 2580, 18571, 675, + 19239, 876, 352, 24990, 87, 1336, 14029, 78, 762, 9258, 2319, 9081, 87, 24244, + 50536, 15720, 668, 85, 359, 86, 652, 70, 73, 70, 675, 355, 1725, 3696, 65941, + 295, 93797, 9081, 1273, 49891, 830, 581, 360, 302, 89, 14878, 87, 80, 73, 44067, + 86, 80, 5544, 49450, 7141, 65, 2465, 70, 344, 49463, 303, 10616, 3172, 906, + 74, 1658, 9832, 88564, 65, 80, 1216, 69, 89, 58153, 9832, 85, 4168, 32204, 2067, + 2465, 773, 73, 28774, 89, 5308, 18571, 49463, 300, 664, 14694, 3178, 71, 38844, + 80, 876, 13423, 65, 1146, 87, 34175, 327, 1394, 68916, 302, 74, 78498, 22016, + 450, 65, 2005, 7141, 80, 352, 31764, 73, 49865, 641, 16684, 17692, 2067, 9697, + 28679, 73, 85514, 80, 337, 1804, 939, 84, 80, 906, 15618, 69, 8671, 894, 385, + 89, 318, 13028, 18458, 70391, 89, 12328, 89, 5010, 49404, 285, 764, 32868, 3423, + 70, 5192, 73, 70, 80, 24244, 566, 3172, 346, 31764, 89, 1273, 75, 3427, 87, + 80, 38361, 1631, 4714, 78930, 21807, 24990, 40078, 86, 3906, 1310, 70, 406, + 80, 360, 1100, 87, 2032, 73, 49865, 668, 37406, 6358, 24244, 6634, 265, 1100, + 64, 16711, 822, 80, 52926, 8910, 3059, 1247, 4922, 87, 24244, 16400, 1729, 70, + 1320, 427, 85, 355, 22524, 42908, 1729, 8510, 80, 89, 26929, 1897, 5230, 363, + 80, 858, 89, 80, 64087, 637, 80, 48480, 69, 18577, 2303, 307, 818, 41129, 953, + 66, 2912, 2067, 73, 822, 8671, 1247, 22619, 1073, 383, 67665, 8212, 752, 10759, + 23866, 71, 1026, 89, 26929, 50139, 71, 16274, 8462, 82, 764, 391, 318, 74, 2465, + 9832, 3013, 713, 16400, 89, 1065, 86, 39911, 74, 773, 81, 2042, 77, 1029, 9701, + 22524, 80, 24672, 71, 22619, 76, 65653, 2034, 73, 581, 4748, 17008, 1320, 6634, + 1498, 85, 80, 73, 85, 402, 67800, 637, 77, 820, 302, 12082, 4657, 831, 437, + 74, 31881, 89, 11328, 9117, 80, 6358, 343, 363, 5964, 65, 546, 85, 32478, 1498, + 87, 84, 454, 295, 3933, 88, 762, 1394, 69, 677, 96518, 88, 25299, 1098, 764, + 3919, 447, 82649, 3423, 70, 566, 2249, 89, 302, 2201, 88, 983, 1498, 1320, 333, + 3368, 34175, 84, 392, 87, 71, 16400, 93000, 8186, 27752, 74, 2465, 2642, 7141, + 74, 11823, 11927, 80, 18499, 454, 85, 9907, 1662, 4171, 12273, 2706, 36325, + 89, 7387, 65, 4922, 20926, 89, 23576, 73, 27515, 80, 86611, 9991, 333, 343, + 2881, 20990, 3423, 540, 74, 796, 3013, 89, 85, 36068, 14485, 10056, 14878, 27515, + 2465, 1026, 89, 2332, 303, 76, 28927, 43210, 3306, 81, 1658, 3576, 327, 292, + 76627, 3415, 351, 75, 
447, 89, 2302, 291, 75, 4086, 53289, 21703, 89, 392, 2042, + 677, 3013, 70, 73, 784, 89, 14878, 6358, 81, 17156, 78, 582, 24752, 9996, 68054, + 2312, 18571, 906, 830, 76, 344, 263, 87, 4680, 83, 12498, 65019, 4912, 65, 346, + 10644, 75, 9902, 45228, 564, 73, 13423, 6331, 39911, 24235, 774, 74, 6789, 85, + 1919, 441, 4748, 273, 28679, 25299, 41922, 1073, 68475, 18571, 3978, 89, 675, + 66, 1897, 361, 37406, 9810, 53835, 66467, 87, 74, 1729, 894, 61057, 564, 81, + 27515, 728, 86, 2562, 652, 73, 1336, 2739, 675, 71, 1897, 869, 677, 675, 86, + 41796, 73, 637, 80, 24752, 2319, 1498, 8671, 28996, 13671, 9110, 375, 450, 1347, + 69, 689, 72, 1100, 51318, 1609, 2042, 69, 303, 3933, 69, 1347, 8318, 86, 3647, + 74, 710, 85, 4680, 24012, 278, 89, 70, 788, 7911, 28679, 70226, 41796, 2174, + 57208, 11328, 292, 5308, 21888, 13536, 5308, 982, 65, 375, 820, 564, 32962, + 8998, 2438, 74, 15479, 51622, 8587, 283, 2303, 67, 2857, 347, 372, 11823, 65, + 303, 631, 77118, 92216, 3696, 65, 80, 2329, 4341, 74, 6200, 73, 70, 12591, 87, + 80, 39911, 9902, 1320, 7387, 838, 56871, 61318, 86, 34175, 12328, 46082, 73, + 87, 4341, 75406, 36825, 80, 16284, 12940, 13752, 70, 73, 88, 5544, 67, 75092, + 30683, 80, 566, 65, 675, 84, 70678, 375, 511, 70, 74, 50900, 2465, 88, 9081, + 70, 324, 80546, 7387, 24672, 7501, 87, 4511, 38977, 69, 73, 27278, 303, 2025, + 5979, 4317, 915, 14170, 80, 9978, 12825, 32204, 1310, 14878, 89, 13423, 11032, + 27515, 13536, 80, 7369, 86, 339, 14878, 89, 28531, 2303, 8587, 73, 85514, 501, + 80, 501, 427, 4777, 45228, 939, 85, 774, 87, 6789, 4867, 19456, 71, 6885, 455, + 75, 728, 65, 303, 675, 81, 3172, 72, 20629, 72, 587, 302, 86, 89, 3696, 86, + 8480, 49463, 869, 295, 73, 385, 80, 73, 1658, 13183, 87, 94908, 8480, 3601, + 1546, 99355, 66, 6194, 5192, 77, 376, 73, 88, 1729, 85, 329, 19285, 31268, 406, + 641, 1463, 76, 392, 27780, 10470, 347, 1029, 22524, 376, 69, 22016, 86, 784, + 288, 36851, 85407, 10056, 87, 6870, 9092, 292, 84, 1354, 70, 604, 723, 876, + 80, 51759, 1320, 4922, 2171, 66, 22119, 85590, 85, 365, 65, 12328, 69, 283, + 73, 86, 67800, 713, 309, 5308, 8892, 84, 554, 2329, 60933, 80, 74, 332, 86, + 5308, 2042, 83, 652, 39152, 275, 69, 8783, 73, 28774, 24672, 87, 3893, 370, + 79, 1662, 81, 24752, 3178, 20824, 71, 31019, 80, 78, 80, 21659, 1026, 301, 675, + 2701, 76, 8671, 51622, 4214, 8311, 87, 80, 295, 12825, 89, 303, 5964, 92512, + 784, 74, 99066, 31019, 97523, 454, 93000, 8671, 581, 50922, 80, 634, 77, 3893, + 18854, 19239, 18631, 75, 50139, 7488, 455, 41129, 7453, 80, 4171, 86, 16618, + 25105, 4341, 10776, 5683, 360, 9810, 89835, 1791, 31881, 70, 7050, 34106, 12825, + 17134, 983, 27515, 74, 34175, 8050, 7488, 10910, 84, 354, 299, 2332, 87, 18244, + 85, 441, 710, 13423, 2332, 70, 2032, 68, 2319, 73, 89, 764, 1604, 35555, 39152, + 40078, 8050, 73, 2234, 65, 575, 1800, 38361, 73, 10616, 14878, 49450, 3919, + 20369, 6375, 1725, 86, 8462, 325, 49755, 347, 83, 73, 87, 7513, 89, 830, 12328, + 71, 8890, 97523, 303, 1631, 83, 70391, 29684, 1031, 87, 752, 31881, 1320, 4239, + 1347, 784, 49450, 3576, 6591, 70494, 664, 2203, 2329, 86, 80, 36708, 80, 70981, + 85, 1764, 80, 7047, 796, 78, 1073, 263, 7501, 450, 375, 16820, 73, 1310, 25457, + 80, 7488, 65019, 1026, 355, 73, 14606, 89, 325, 295, 447, 73, 291, 69, 89, 36238, + 911, 66, 14092, 3013, 344, 77, 398, 4086, 88, 12412, 53835, 7387, 66, 1551, + 87, 73, 604, 73873, 1631, 1517, 318, 13009, 74, 376, 89, 8050, 664, 4193, 2000, + 87, 74, 4086, 454, 8890, 73, 89, 32547, 3919, 10333, 554, 63643, 89, 327, 80, + 784, 88, 3988, 13884, 664, 73, 88, 
22119, 10196, 80, 8088, 41796, 343, 85, 28679, + 65, 8998, 11032, 1662, 361, 56029, 3368, 36851, 72245, 2034, 14878, 87, 2319, + 69, 73, 4942, 80, 70, 34097, 67, 406, 62497, 17156, 869, 1546, 7141, 10616, + 80, 1729, 87, 68156, 1662, 10630, 4922, 15930, 11160, 55109, 74, 25299, 73, + 59524, 579, 12825, 86, 822, 454, 77, 4171, 58305, 1517, 12498, 77584, 5192, + 31764, 3716, 675, 383, 21486, 291, 8998, 11823, 321, 19639, 8587, 309, 66565, + 3635, 3013, 89, 906, 3418, 7759, 18894, 89, 762, 275, 1658, 3933, 398, 22016, + 337, 747, 2249, 56029, 566, 94869, 2249, 4458, 670, 65, 16220, 89, 8050, 8587, + 85, 80, 301, 74, 511, 266, 79, 6194, 331, 87, 540, 6410, 1494, 1273, 4484, 4341, + 1073, 283, 11377, 84, 3935, 89470, 80, 70, 292, 76, 1105, 2642, 20824, 87, 32837, + 303, 12825, 4942, 406, 80, 74901, 44067, 89, 21206, 15618, 1065, 77, 2445, 2580, + 73384, 5010, 38181, 1065, 354, 77, 5972, 664, 82999, 689, 2302, 1910, 74, 1791, + 258, 1810, 71, 7006, 85, 953, 8587, 80, 501, 377, 77, 2739, 361, 25123, 54761, + 2005, 307, 96518, 22619, 1494, 76, 1146, 18442, 14878, 2201, 30148, 11949, 85, + 13092, 728, 1065, 30506, 29684, 14878, 13183, 14878, 65, 8088, 86, 19870, 86, + 544, 85, 11160, 1498, 69, 89, 5104, 71137, 28774, 331, 1031, 79, 91410, 80, + 29224, 49450, 93000, 5964, 1394, 8890, 84, 80, 42908, 10176, 953, 11251, 24672, + 87, 355, 55233, 25100, 74, 4511, 16400, 20211, 17137, 3306, 20990, 372, 13855, + 309, 89, 47689, 2701, 1910, 20127, 363, 80, 354, 637, 9117, 329, 50864, 427, + 80, 278, 89, 708, 343, 62369, 1302, 16400, 73, 20191, 3576, 16618, 70, 302, + 69, 392, 8480, 764, 65, 80, 321, 377, 675, 6489, 3059, 84, 80, 85514, 4223, + 69, 12328, 74, 6634, 6358, 17043, 1739, 89, 21807, 641, 67492, 1336, 49595, + 4341, 17008, 710, 9701, 87, 797, 92869, 5418, 2469, 60013, 377, 564, 28679, + 1897, 50139, 418, 87, 11253, 329, 78498, 265, 4239, 377, 2329, 20926, 49463, + 2580, 8212, 283, 3841, 46894, 762, 86, 7047, 462, 80, 47928, 80, 1331, 85, 451, + 20568, 80, 3172, 27093, 43210, 2445, 40078, 1026, 89, 80, 359, 75, 360, 7501, + 5469, 710, 6358, 677, 74, 5469, 80, 5192, 71, 14029, 94055, 325, 8890, 383, + 52333, 85, 2008, 74, 7488, 36415, 12328, 74, 713, 80, 2302, 41622, 11253, 80, + 784, 89, 501, 4937, 21206, 89, 3013, 74, 710, 86, 564, 1065, 81, 27455, 359, + 81, 708, 347, 70, 13183, 2554, 88472, 28679, 80, 1494, 82, 8480, 7439, 86, 652, + 34175, 74, 13423, 39911, 455, 21807, 953, 66, 4223, 14460, 81, 38359, 93000, + 14736, 66, 7759, 68, 3524, 336, 71, 61952, 329, 7369, 28927, 581, 77, 303, 42818, + 331, 14606, 76, 3172, 1026, 11089, 52971, 80, 20824, 73, 16276, 6194, 89, 18577, + 677, 70, 331, 1551, 87, 86, 7387, 773, 37406, 2411, 355, 355, 4912, 85, 80, + 1273, 39911, 31721, 9907, 376, 20191, 406, 2249, 869, 14170, 1336, 2438, 1551, + 1347, 8088, 86626, 80, 2303, 80, 10616, 10776, 65019, 21659, 41196, 333, 989, + 8050, 10011, 581, 509, 67, 3714, 1026, 80, 69, 1804, 75, 13565, 31881, 89, 352, + 87, 68054, 363, 84, 3178, 2727, 710, 42848, 77827, 2303, 72, 332, 784, 273, + 45015, 5230, 3601, 292, 454, 36722, 566, 343, 318, 1864, 2042, 2554, 333, 44508, + 87, 85, 48029, 20127, 3172, 953, 71, 29684, 1310, 69, 657, 12825, 34175, 329, + 3178, 66, 2465, 80, 34175, 73, 360, 283, 73, 20211, 830, 8088, 85, 80, 17851, + 73, 11377, 1030, 69, 1498, 85, 4086, 8050, 67, 10616, 45283, 86, 5481, 80, 38359, + 22580, 84, 7453, 581, 830, 70, 4168, 675, 89, 70, 1919, 75, 604, 74, 664, 12591, + 41625, 31603, 675, 52926, 35401, 17008, 39152, 581, 30407, 9802, 89, 38977, + 6151, 86, 39749, 52767, 24672, 89, 12082, 73, 88, 
4341, 73, 1026, 16400, 12825, + 16373, 564, 4657, 80, 354, 8462, 14029, 2003, 53060, 80, 17708, 1609, 87, 959, + 69, 754, 89, 2957, 544, 19456, 57208, 20211, 9991, 71, 18244, 74, 4355, 33863, + 16400, 81, 10784, 69, 575, 7783, 1804, 89, 4010, 798, 5544, 1273, 69, 19456, + 89, 7047, 4835, 65, 80, 19747, 455, 6194, 1336, 76, 75406, 3055, 587, 70, 80, + 4086, 10776, 85, 11828, 73, 451, 42908, 2249, 73, 50139, 1336, 79, 5735, 566, + 3906, 73, 614, 87, 86, 1302, 352, 70, 80, 302, 83, 14912, 80, 70, 20926, 23866, + 2562, 83, 16468, 277, 10002, 2562, 15930, 73, 6780, 648, 4458, 70678, 22619, + 76, 579, 74, 19072, 84, 566, 89, 784, 12591, 363, 47689, 383, 1263, 86, 58305, + 371, 76, 2200, 2203, 5010, 3919, 67, 13028, 346, 582, 5241, 71, 2042, 4239, + 73, 7783, 1729, 70391, 2249, 15930, 2727, 2005, 4912, 46341, 1216, 75406, 14736, + 69, 332, 18596, 347, 75, 3933, 3055, 27960, 86, 89, 265, 1100, 9810, 1100, 869, + 36238, 1804, 85, 12328, 12825, 80, 32962, 39911, 8385, 80, 93797, 14029, 77827, + 418, 2303, 361, 65019, 954, 86746, 953, 52971, 540, 3716, 363, 89, 4680, 80, + 8385, 318, 544, 5683, 1791, 80, 1498, 79, 316, 10196, 80, 14088, 347, 3696, + 80, 4991, 83, 2174, 50226, 65, 89044, 17692, 7911, 80, 5104, 4381, 752, 32204, + 1331, 66, 3306, 21486, 73, 88, 1320, 75, 20824, 87, 11823, 88, 2701, 5010, 87, + 86, 5192, 32962, 784, 3423, 4055, 1498, 3178, 301, 80, 3716, 1216, 818, 48380, + 8035, 285, 588, 65, 540, 15770, 83, 7387, 785, 86, 3576, 2302, 52097, 7910, + 88, 38359, 45015, 13536, 3059, 66443, 10462, 80, 1498, 21325, 73, 1729, 4777, + 22619, 73, 28990, 89, 29700, 3427, 728, 344, 11805, 267, 86656, 392, 7513, 831, + 7910, 391, 33539, 9117, 300, 4341, 8783, 869, 2203, 2249, 85, 2042, 1631, 37406, + 2329, 70, 80, 3447, 1662, 13223, 57208, 11089, 65019, 73, 10776, 81, 302, 4168, + 1411, 450, 49712, 80, 8050, 3251, 3423, 53016, 70, 447, 86, 2117, 352, 48045, + 17063, 86611, 1310, 89, 587, 652, 32962, 60013, 3933, 14170, 87, 1031, 97523, + 675, 44067, 5104, 77, 45015, 333, 8050, 70, 80, 525, 404, 87, 392, 41796, 69, + 24752, 3696, 441, 30986, 49463, 21436, 67, 7387, 44508, 87, 3055, 675, 14606, + 69, 894, 2715, 84, 83438, 73, 5673, 6358, 354, 2445, 34854, 32547, 73, 23361, + 5308, 4086, 1631, 1551, 68467, 77, 45015, 73, 89, 70, 6733, 447, 333, 3647, + 66, 22119, 535, 73, 86, 1498, 86, 4080, 88, 73822, 365, 41982, 454, 20127, 80, + 1336, 392, 830, 75, 10776, 6723, 80, 74, 2580, 51924, 40261, 2042, 19275, 983, + 86, 869, 82, 62497, 86611, 822, 64, 5171, 73, 6292, 19571, 1302, 27093, 370, + 79, 14773, 447, 61057, 89, 80, 70, 83256, 80, 28546, 89, 80, 85369, 28679, 74, + 3418, 283, 74, 89, 3368, 85, 953, 87, 352, 15618, 23265, 3576, 625, 784, 65, + 318, 1197, 359, 88, 4171, 44567, 71, 46341, 2032, 16373, 354, 88, 78498, 10910, + 3906, 85, 354, 64087, 2445, 85, 17134, 6511, 6375, 89, 19747, 65, 80, 581, 87, + 1289, 31721, 8934, 752, 12591, 11949, 80, 1634, 12082, 333, 89, 33863, 24990, + 5308, 67, 3841, 12328, 44970, 73, 25457, 85, 764, 8671, 35401, 2067, 73, 42848, + 454, 80, 89, 1347, 4835, 89, 19239, 80, 31695, 511, 8480, 309, 5308, 77, 2329, + 22119, 73, 8890, 1411, 1273, 69, 2302, 61828, 75, 13856, 5230, 33721, 1030, + 327, 20191, 16276, 52926, 24244, 71, 876, 76, 1658, 52971, 74, 402, 69, 418, + 42848, 73, 6358, 38828, 28774, 302, 79566, 49463, 79, 1791, 85, 8998, 52097, + 80, 30506, 564, 89, 641, 375, 84, 2580, 41221, 4991, 36851, 8050, 796, 2332, + 87, 74, 18571, 906, 288, 360, 6634, 85, 80, 73, 9112, 1910, 74, 10056, 86, 85, + 12940, 51622, 1130, 86, 16618, 344, 73, 11823, 351, 3315, 2319, 
22580, 37762, + 80, 7521, 81, 376, 79, 24286, 337, 71, 9907, 8480, 94055, 3906, 1395, 80, 69, + 9117, 85, 41796, 18442, 71, 44508, 339, 38359, 33367, 8587, 77, 7501, 5298, + 5241, 5701, 86, 14668, 5455, 45015, 37975, 71, 632, 8890, 27515, 28679, 2580, + 50536, 1347, 6200, 26397, 376, 8462, 73, 998, 295, 346, 88, 20990, 18442, 87, + 1336, 74, 17134, 87, 73, 70, 80, 14170, 87624, 85, 3696, 69, 4239, 86, 85, 40924, + 74, 818, 69, 75406, 24244, 566, 32962, 80, 675, 73, 88, 68054, 2249, 65, 344, + 2465, 652, 3306, 82999, 78, 7453, 363, 27278, 41287, 427, 89, 324, 1331, 484, + 69, 2034, 3418, 652, 32962, 70, 10470, 71, 3906, 28679, 45062, 2465, 20926, + 17008, 876, 1631, 544, 87, 14912, 27093, 18287, 85, 32204, 35734, 69, 1031, + 73, 566, 80, 2676, 72, 5308, 42528, 17008, 3906, 60013, 33367, 28774, 85, 3315, + 64, 316, 89835, 89, 4171, 76, 1551, 20990, 89, 316, 713, 80, 1100, 20824, 19456, + 80, 70, 21620, 77, 8892, 12407, 455, 86, 93000, 34152, 275, 73, 10036, 9490, + 352, 83, 6632, 278, 377, 65, 2554, 337, 309, 3919, 74901, 18442, 22506, 2580, + 7501, 74, 80, 40229, 12825, 86, 5972, 85, 8212, 336, 3576, 6151, 80, 4942, 74, + 87, 80, 25337, 71, 2034, 80, 3933, 5972, 13536, 11285, 38361, 89, 80, 38844, + 89, 2562, 89, 89687, 1551, 14694, 14606, 59524, 1662, 81, 764, 278, 59524, 652, + 3013, 747, 454, 25299, 87, 1551, 2465, 9081, 67800, 73873, 450, 339, 76627, + 53450, 89, 65116, 511, 747, 360, 89, 677, 86, 49568, 6511, 8587, 8587, 5781, + 9081, 89, 4168, 83, 80, 23001, 55282, 831, 10616, 406, 3633, 2249, 18854, 830, + 80, 74, 501, 363, 3696, 1791, 85, 28927, 17207, 73, 16820, 80, 28927, 441, 89, + 15479, 86, 327, 76, 1897, 575, 69243, 4991, 657, 70, 32589, 83079, 36463, 89, + 2249, 677, 86, 1919, 20510, 85, 383, 20103, 352, 73, 85, 8783, 78, 80, 46341, + 6358, 764, 78, 28679, 85, 89, 28774, 69, 33863, 20211, 93141, 2201, 71, 869, + 76, 447, 8311, 1897, 85, 89, 80, 23361, 72801, 4748, 11949, 544, 637, 86746, + 1910, 20211, 6200, 86, 437, 52552, 66, 60933, 1872, 14088, 501, 59524, 80, 70, + 10056, 582, 14170, 69, 20926, 752, 1411, 91943, 14170, 89, 3172, 13423, 59524, + 564, 343, 3172, 67702, 25299, 2034, 73, 1100, 80546, 1658, 265, 773, 65, 16400, + 86, 830, 87, 20926, 509, 44183, 73, 299, 71, 78512, 9112, 1804, 80546, 19747, + 80, 634, 80, 1082, 85, 5972, 13423, 266, 15618, 86, 16400, 64087, 321, 1551, + 277, 80, 3893, 68054, 708, 68, 354, 23361, 65, 18577, 79, 2303, 73, 57208, 1910, + 89, 17043, 1910, 302, 17063, 73, 377, 12407, 84, 7488, 55233, 50798, 398, 73, + 10776, 49595, 80, 3632, 5308, 16276, 50676, 454, 44970, 2329, 22119, 72801, + 2319, 70, 785, 71, 18244, 74, 28679, 87, 1072, 383, 89, 15720, 85, 1286, 8587, + 57208, 27278, 668, 20720, 546, 3647, 73, 361, 9490, 1919, 95328, 85, 10084, + 67, 1026, 20824, 25457, 17008, 85, 5683, 347, 343, 784, 544, 998, 2303, 25453, + 81, 2171, 86, 441, 85, 89, 8910, 30407, 73, 3251, 70, 5469, 73, 11377, 355, + 392, 83, 73, 6194, 87, 80, 604, 983, 3178, 87, 587, 86, 664, 47689, 269, 75, + 80, 13637, 283, 73, 9701, 73, 9902, 4835, 41796, 4171, 74, 16400, 79939, 45015, + 43505, 80, 17063, 85, 9112, 88, 39911, 5721, 89833, 73, 4748, 85, 5298, 14029, + 376, 13694, 65, 4341, 2067, 76423, 89, 3251, 85407, 418, 9701, 300, 13009, 85, + 40185, 754, 1169, 61057, 9832, 77, 1215, 87, 18571, 89, 22580, 42583, 2329, + 85, 6331, 67, 4223, 69013, 376, 8480, 86, 1331, 56201, 5104, 983, 77, 1517, + 65116, 45228, 983, 77, 337, 80, 81606, 65, 336, 83, 1336, 77, 564, 21068, 80, + 9810, 85, 1336, 2562, 346, 1463, 85, 30407, 1216, 28927, 266, 58153, 7501, 869, + 87, 44508, 
94869, 302, 77, 936, 26738, 20103, 50798, 9112, 71, 1725, 4575, 33963, + 2562, 343, 67, 3576, 17692, 76, 16400, 2465, 9907, 283, 27093, 74, 939, 339, + 73, 89, 1725, 31881, 74, 1800, 87, 2203, 70, 3306, 785, 74, 89, 42908, 11032, + 2857, 69, 406, 65019, 73, 613, 427, 62497, 70, 7501, 67, 4010, 89, 8998, 75, + 2857, 84, 1509, 36825, 1631, 88, 85, 14029, 23001, 7737, 80, 5010, 41796, 10176, + 20127, 19239, 74, 2303, 4341, 11823, 329, 77, 1031, 80, 4777, 406, 1677, 87, + 1065, 69, 73, 283, 9907, 89, 21206, 88, 73, 22119, 23866, 80, 3251, 73, 49865, + 652, 728, 55442, 392, 86746, 7737, 85, 49755, 351, 14172, 83470, 3423, 74, 3919, + 23048, 1889, 80, 70, 20553, 89, 3601, 307, 49463, 2067, 509, 53060, 773, 10002, + 86, 32589, 544, 75, 59524, 11823, 486, 88, 73, 1662, 301, 7387, 344, 360, 86, + 24468, 677, 42528, 360, 85, 1310, 70, 74, 2438, 1897, 89, 278, 41796, 3696, + 86746, 22619, 85, 2067, 69, 73, 88, 23361, 84, 8587, 983, 2332, 80, 450, 27093, + 88823, 8783, 73, 47689, 540, 14460, 73, 511, 90906, 69, 1026, 331, 28927, 11949, + 30797, 1791, 76, 28927, 5298, 4777, 258, 76, 4168, 70, 540, 11032, 344, 87, + 80, 301, 1662, 49595, 17134, 87, 80, 11160, 398, 80, 89, 6331, 77, 43686, 51622, + 831, 1247, 88, 8892, 19639, 42426, 87, 6870, 427, 329, 3919, 65, 4080, 24752, + 69, 1804, 3601, 3906, 1216, 23001, 7787, 36851, 20127, 1098, 316, 3433, 51622, + 2445, 70, 5308, 7521, 85, 1029, 11927, 1091, 86, 85, 21659, 11823, 2438, 86, + 561, 80, 10333, 7369, 331, 16276, 45228, 34175, 604, 6358, 17134, 75, 1273, + 85, 5308, 71, 4168, 689, 21206, 2332, 19747, 20127, 10056, 1146, 2933, 49865, + 86, 1029, 12940, 708, 5104, 73, 911, 418, 69, 2411, 88, 5308, 21888, 3906, 70391, + 668, 46790, 80, 581, 18854, 93366, 8050, 544, 25457, 18571, 39152, 86, 1791, + 710, 15444, 80, 4086, 13183, 329, 1631, 80, 22016, 1100, 1302, 9092, 1100, 11823, + 689, 2580, 370, 501, 28927, 10910, 1631, 69, 5308, 69, 575, 7501, 39911, 88, + 376, 2302, 3205, 40617, 450, 67, 33863, 511, 16820, 365, 295, 269, 677, 2329, + 67039, 73, 7141, 4777, 7907, 3178, 58345, 2329, 56871, 273, 73, 20854, 9701, + 48380, 86, 876, 8890, 1551, 89, 325, 23361, 49568, 86, 80, 3172, 1631, 89, 70, + 73, 86, 8480, 89, 5192, 44508, 2392, 11823, 454, 87, 307, 2303, 75, 24990, 1551, + 27455, 7047, 5981, 80, 1100, 869, 71, 67139, 13044, 45015, 3906, 70, 73, 9117, + 88, 8462, 32868, 31695, 86, 80, 24244, 74, 89, 17319, 377, 69, 4511, 89, 5701, + 48380, 876, 92562, 7387, 318, 1494, 87, 17008, 641, 7488, 71, 42510, 4922, 75, + 49537, 8783, 42441, 24672, 66, 375, 79, 302, 42792, 6331, 80, 2302, 86, 637, + 29238, 76736, 541, 1336, 12255, 581, 87, 427, 78498, 53904, 3919, 1347, 7387, + 23361, 8998, 71, 43505, 80, 33495, 2302, 84, 12328, 346, 652, 89, 80, 74, 33863, + 31881, 9117, 773, 13536, 80, 4680, 68, 295, 1725, 648, 71, 57647, 85407, 1609, + 67, 34175, 85, 40924, 8998, 867, 5418, 710, 634, 48372, 1662, 38181, 3013, 17063, + 19239, 73, 4239, 80, 18577, 81, 7488, 80, 1247, 70, 45015, 269, 86, 11823, 26397, + 89, 7783, 70, 52097, 73, 876, 28990, 89, 329, 77, 80, 376, 74, 5308, 7907, 337, + 2580, 19072, 728, 5972, 71, 402, 28679, 87, 540, 3576, 85514, 13423, 74, 2641, + 3251, 65, 391, 32868, 89, 80, 8998, 23361, 263, 87, 7513, 87, 9092, 4010, 65, + 5192, 266, 67175, 300, 2933, 455, 22580, 38359, 13545, 42908, 80, 85514, 6200, + 773, 1718, 77, 2329, 7521, 3601, 17319, 73, 53289, 491, 55911, 402, 283, 89, + 3933, 8212, 76, 675, 86, 89, 12591, 337, 4458, 347, 76, 23361, 3141, 86, 93471, + 44183, 8587, 77, 36068, 511, 20824, 2192, 38181, 70, 14088, 81, 7501, 5485, + 4922, 
73, 70, 26397, 1072, 80, 1662, 708, 2580, 32547, 12183, 80, 20191, 89, + 9117, 2026, 76, 2303, 86, 85, 89, 587, 73, 88, 752, 74, 1739, 318, 87, 25105, + 9786, 89835, 1216, 1729, 87, 307, 28679, 324, 764, 89, 12407, 324, 21659, 67039, + 23866, 73, 418, 336, 53289, 6009, 67, 3423, 4239, 564, 4763, 2689, 28844, 11949, + 4341, 10616, 77, 23866, 1791, 65, 1662, 3628, 278, 89835, 377, 70391, 89, 14244, + 1072, 85, 5010, 303, 1804, 6723, 80, 88, 73, 85, 604, 24752, 46224, 418, 3601, + 771, 10616, 983, 1394, 1631, 83, 302, 38977, 74, 11328, 75, 1498, 89, 24705, + 24244, 2067, 89, 455, 31721, 65, 7910, 86, 316, 86, 80, 17063, 5683, 1031, 27515, + 71, 9117, 77, 1725, 2067, 32837, 2329, 85, 2438, 23001, 1136, 75611, 728, 10647, + 79, 713, 74, 84995, 365, 361, 74452, 65019, 89, 12825, 4168, 74, 32204, 73, + 8311, 1551, 1551, 44970, 1517, 1551, 62369, 89, 28679, 4942, 24823, 78604, 3172, + 8783, 14029, 67, 417, 2067, 80, 73, 13536, 71, 27782, 2808, 38359, 74, 664, + 73, 332, 764, 78604, 2312, 31268, 2739, 85, 360, 2026, 4086, 46640, 28073, 544, + 14878, 14878, 73, 86, 70494, 27455, 8212, 19072, 73, 87, 71, 2249, 69, 4168, + 61057, 277, 4223, 11805, 79, 39911, 3919, 83, 39911, 4341, 29700, 1729, 339, + 86, 876, 44606, 299, 64, 3141, 83, 4942, 10776, 81, 24244, 18442, 9832, 9490, + 80, 22016, 10616, 3315, 316, 8671, 67, 2580, 417, 31881, 15618, 278, 8088, 4168, + 4171, 500, 89, 85, 1026, 14088, 71, 10616, 21807, 10784, 81, 2727, 14878, 52253, + 10616, 87, 13855, 1336, 822, 336, 1247, 58305, 8318, 359, 299, 73, 87, 876, + 38828, 1072, 84159, 10784, 96304, 455, 3906, 80, 876, 76, 8587, 31721, 5701, + 73, 25734, 89, 752, 64, 458, 9907, 48380, 346, 45102, 12591, 62155, 20926, 93797, + 69, 1072, 351, 87, 7141, 288, 73, 8050, 78, 831, 288, 11823, 73, 22580, 3696, + 764, 10470, 86, 21807, 451, 5979, 324, 74, 11377, 86, 7821, 318, 299, 73, 501, + 668, 36851, 351, 89, 80, 1804, 14606, 73, 7387, 89, 80, 21206, 17008, 89687, + 70, 20824, 89835, 71, 1146, 2319, 1725, 22016, 21659, 80, 89, 3368, 316, 347, + 501, 7783, 85, 70678, 894, 32868, 1498, 86, 2249, 8910, 31881, 85, 9813, 375, + 24012, 80, 265, 1026, 21620, 303, 8207, 2641, 1029, 70, 4010, 664, 6194, 365, + 39911, 318, 713, 89, 39911, 3933, 84, 4317, 80, 5010, 31172, 80, 24672, 68, + 4341, 339, 69, 2857, 67, 2453, 24244, 587, 80, 38977, 53835, 31695, 351, 89, + 455, 44067, 86, 5683, 69, 14029, 327, 2868, 69, 1065, 64, 309, 80546, 1791, + 7141, 66, 1631, 21796, 86, 63958, 3172, 2249, 8587, 44183, 1264, 80, 17043, + 50798, 784, 87, 22119, 39911, 10462, 80, 677, 5469, 80, 797, 2739, 7387, 80, + 29684, 71, 2329, 31695, 789, 87, 3447, 87, 1215, 73, 1065, 19555, 89835, 32478, + 86, 80, 3524, 822, 8311, 11823, 1029, 30243, 78498, 1662, 540, 19456, 20553, + 2026, 80, 2438, 83079, 12498, 3696, 10188, 258, 5104, 846, 303, 70, 1919, 21807, + 19639, 35531, 418, 21206, 3055, 4223, 351, 4010, 73, 301, 81, 73, 1517, 911, + 36851, 337, 89, 80, 36238, 285, 46640, 20824, 1658, 21796, 42050, 2067, 263, + 1658, 261, 641, 34175, 2580, 566, 65116, 27278, 34664, 73, 61057, 80, 9112, + 325, 73, 5861, 2249, 7047, 48480, 10470, 2701, 79, 7513, 87, 398, 71, 2329, + 7521, 29224, 80, 31721, 60933, 80, 581, 1631, 18894, 6375, 89, 85, 564, 86, + 641, 89, 32547, 295, 14736, 8318, 90752, 18244, 87, 71137, 83079, 5298, 641, + 8536, 89, 7501, 2249, 39911, 18894, 36996, 12591, 3172, 69, 488, 1347, 3368, + 12082, 70, 80, 25457, 69, 1551, 544, 56029, 1919, 78, 80, 383, 23913, 402, 299, + 797, 8212, 85, 1157, 7453, 73, 95945, 22473, 85, 8480, 6194, 76, 575, 365, 773, + 91410, 32589, 89, 70, 73, 
283, 71830, 20218, 9700, 1725, 344, 89, 2034, 7783, + 2302, 74, 2642, 89, 4657, 773, 8998, 65, 7737, 269, 4575, 75593, 74, 728, 4942, + 1347, 75, 3841, 581, 13536, 60013, 509, 46790, 80, 1073, 79, 60933, 73, 17007, + 372, 89, 1517, 89, 383, 40952, 1002, 316, 87, 32962, 74, 392, 46341, 10616, + 69, 3919, 31764, 4317, 773, 69, 12825, 3576, 2249, 339, 20926, 894, 12183, 89, + 73, 15618, 8207, 76, 588, 7387, 86, 7141, 79, 11089, 8998, 84, 352, 16618, 56871, + 89, 29443, 31764, 906, 73, 70, 44490, 450, 77, 1729, 19260, 675, 39911, 1919, + 7513, 69, 70494, 392, 10616, 32868, 2067, 17553, 61057, 418, 5418, 80, 2784, + 8318, 511, 77, 51622, 87, 14878, 76, 89, 2880, 64087, 9902, 1230, 80, 3576, + 454, 2303, 41129, 3524, 89, 13752, 441, 6358, 73, 10056, 12498, 3251, 32868, + 14736, 566, 70, 80, 4458, 752, 677, 343, 67, 376, 664, 2880, 4777, 85, 10470, + 86, 1100, 89, 9258, 288, 48380, 16400, 668, 8512, 634, 83, 8186, 5682, 43505, + 664, 12082, 540, 85, 26738, 80, 1725, 6009, 3141, 796, 7521, 42930, 47689, 537, + 1302, 12329, 79, 8910, 5010, 27515, 346, 80, 89, 39911, 74, 80, 336, 7910, 80, + 723, 462, 11160, 3919, 1073, 87, 73, 347, 637, 88, 1498, 70, 21486, 1031, 15444, + 73, 21796, 60013, 11949, 89, 3368, 88, 69, 2329, 1551, 349, 18442, 901, 85, + 336, 3251, 582, 2438, 48872, 450, 9081, 303, 1065, 76, 6200, 66, 1273, 581, + 85, 22532, 1347, 76, 55506, 44970, 73, 87, 8050, 52253, 28990, 86, 6358, 48872, + 87, 1739, 74, 89, 80, 44970, 80, 2042, 7501, 72801, 67455, 427, 16017, 85, 324, + 31764, 1729, 78, 380, 8671, 57367, 2319, 82, 1336, 89, 15444, 73, 8376, 2315, + 64, 454, 46640, 69057, 383, 73, 361, 80, 3059, 85, 316, 587, 86, 587, 637, 5308, + 5298, 70, 1394, 6733, 80, 11032, 80, 8998, 8212, 73, 87, 462, 71, 45015, 86, + 89, 8998, 34175, 69, 89, 6200, 14606, 85, 2332, 79, 876, 894, 68566, 8783, 86, + 11345, 65, 1631, 1302, 89, 9081, 8480, 73, 5972, 8587, 80, 2445, 2026, 1017, + 377, 88, 451, 277, 18244, 581, 336, 604, 8311, 85, 7737, 86, 89, 1031, 79, 3415, + 73, 1609, 79, 45015, 17319, 87, 86, 13536, 939, 89, 14088, 77, 1100, 21068, + 85, 89, 93366, 86, 4168, 2933, 73, 21206, 509, 28774, 71, 11949, 566, 6200, + 22619, 38041, 28204, 10647, 96518, 22619, 3326, 67919, 23361, 52971, 292, 65, + 713, 5308, 295, 76, 4867, 1568, 1729, 89, 10188, 88, 936, 300, 336, 3306, 3368, + 1065, 9326, 65, 7387, 2465, 869, 66, 9555, 83079, 1494, 87, 3423, 652, 11805, + 3906, 73, 70, 1910, 87, 10097, 72, 4171, 86, 8892, 24409, 80, 5230, 2554, 73, + 52097, 80, 2026, 38977, 365, 4080, 89, 32868, 16711, 80, 352, 84, 80, 23361, + 74, 80, 89, 66777, 773, 87, 21620, 1551, 34856, 462, 402, 27455, 8480, 55568, + 1919, 67, 39911, 87, 3178, 67665, 80, 3601, 4080, 89, 764, 17704, 80, 324, 74, + 9996, 80, 333, 87, 1919, 8385, 1739, 302, 4922, 14406, 830, 76, 96416, 89, 1910, + 89, 85, 5010, 67800, 45015, 77, 16373, 89, 1804, 76, 14029, 376, 77, 23577, + 4970, 73873, 11823, 1983, 74, 80, 3601, 67, 9092, 385, 22575, 672, 69057, 13045, + 354, 38361, 8212, 86, 89, 4991, 8088, 71, 7197, 19239, 38041, 64087, 875, 657, + 66, 773, 24244, 74, 4680, 9832, 74, 4890, 85, 332, 19555, 86, 2106, 77, 6151, + 1065, 346, 5308, 969, 5104, 28679, 74, 13536, 85, 1031, 509, 75, 3172, 15802, + 80, 5010, 75, 49450, 3933, 87, 1658, 87, 327, 47171, 21543, 34175, 87, 49358, + 398, 31764, 55233, 392, 44508, 73, 302, 73, 36722, 12940, 71, 73, 7488, 12825, + 89189, 37406, 52971, 3427, 4341, 3141, 876, 70, 5010, 324, 7047, 73, 2438, 3368, + 46224, 762, 385, 73, 11949, 2203, 31764, 66, 46640, 86, 5010, 65, 2580, 89, + 3153, 4664, 78930, 88, 1546, 398, 2034, 
86, 5237, 44490, 784, 84, 263, 391, + 2580, 85, 346, 14468, 79, 2067, 85, 42818, 18442, 8385, 677, 1662, 2067, 86, + 383, 6723, 1073, 64145, 1026, 587, 2629, 383, 80, 8671, 40078, 953, 24990, 2026, + 66867, 1568, 66, 1551, 8088, 38041, 74, 764, 70, 32478, 49450, 32204, 15555, + 31146, 15809, 546, 15618, 689, 78498, 5308, 361, 73, 8462, 24738, 266, 26039, + 71, 7910, 80, 875, 53835, 88, 28679, 73640, 17008, 70, 5308, 34146, 71, 710, + 85, 8910, 8311, 2562, 42818, 6511, 796, 22119, 74, 87, 1394, 6200, 70, 46640, + 47154, 89, 6151, 581, 3906, 1273, 10470, 25299, 80, 30407, 299, 6292, 266, 73, + 63885, 1336, 20553, 1631, 86, 372, 752, 41796, 85, 336, 74, 1065, 66, 16499, + 69, 6358, 38385, 10776, 71, 23913, 276, 80, 73, 13855, 77, 764, 68054, 23866, + 796, 80, 1725, 4239, 71, 544, 2480, 89, 657, 86, 80, 78, 343, 73, 2603, 42511, + 69, 69057, 5964, 7369, 8462, 14029, 261, 12989, 11949, 49463, 73, 87, 4010, + 90231, 365, 450, 79, 29238, 85, 427, 82, 65167, 41129, 31019, 68411, 1347, 47689, + 70678, 1551, 87, 22119, 3418, 728, 65, 80, 657, 80, 73, 10820, 1029, 80, 30506, + 42908, 54899, 677, 451, 80, 20990, 85, 10036, 71, 2445, 20962, 708, 69, 764, + 652, 796, 7501, 80, 1498, 78930, 5010, 10196, 80, 1072, 295, 87, 23361, 70, + 45015, 70, 351, 67800, 86626, 66, 634, 1631, 9092, 73, 797, 87, 19866, 12498, + 74, 3172, 70, 2303, 74, 12498, 7737, 65, 3601, 2727, 80, 360, 2034, 32204, 2727, + 65, 1609, 5972, 89, 4680, 5981, 42818, 7501, 1302, 38977, 65019, 88, 797, 80574, + 300, 1658, 73, 4657, 309, 97715, 5192, 6009, 24027, 10333, 84, 80, 14029, 40924, + 2111, 67, 728, 372, 85, 365, 18854, 86, 5289, 276, 9902, 80, 359, 20553, 1804, + 339, 8311, 1065, 370, 869, 73, 10776, 77, 80, 1764, 10333, 76, 1910, 8311, 437, + 80, 4777, 77, 657, 74, 93000, 447, 85514, 6375, 4912, 36851, 42908, 7047, 25299, + 74, 1273, 21436, 76731, 1658, 85, 953, 88, 9700, 5104, 36825, 544, 7513, 73, + 438, 89, 11253, 10784, 73, 2026, 70, 74, 8207, 309, 25772, 4239, 38181, 3251, + 65, 831, 377, 8480, 87, 22873, 83, 73, 94817, 637, 5746, 3423, 953, 65, 427, + 41129, 89, 46224, 73, 1247, 370, 87, 300, 8248, 86, 2249, 8910, 85, 2221, 710, + 303, 1247, 87, 86, 1331, 87, 18577, 85, 26397, 16276, 3458, 80, 2857, 377, 11251, + 89, 7387, 42908, 302, 39911, 77, 385, 39911, 42050, 710, 89, 40078, 89835, 70, + 5289, 344, 11328, 80, 1339, 355, 31721, 41523, 344, 2438, 73, 7501, 333, 2203, + 540, 375, 79, 521, 5981, 588, 80, 89, 85, 10796, 87479, 8050, 73, 4239, 9988, + 76, 1631, 81, 8088, 79, 1394, 82, 2912, 70, 73, 52513, 822, 511, 9188, 450, + 10176, 46640, 87, 86, 398, 333, 86, 8385, 80, 300, 1897, 55773, 303, 1072, 2319, + 89, 9996, 664, 31268, 13855, 939, 69, 27457, 39911, 911, 55748, 347, 83, 73, + 86, 581, 6331, 370, 70, 73, 9907, 3105, 752, 78, 70391, 5298, 74, 28679, 65, + 22524, 74, 64087, 80, 78, 3251, 28400, 1116, 80, 74, 796, 24459, 723, 85, 89, + 3418, 344, 27515, 40078, 93797, 85, 1658, 6194, 73, 501, 2933, 71, 95143, 11206, + 71, 73, 5079, 1073, 74, 3178, 5683, 3431, 81, 40078, 89, 1354, 73, 86, 3013, + 303, 89, 454, 70, 80, 784, 80, 42818, 3368, 71, 7501, 67800, 85, 73445, 73976, + 86, 5650, 6194, 79, 318, 66, 318, 12328, 65, 2200, 13536, 31019, 3059, 689, + 3251, 6511, 80546, 564, 77, 1517, 28774, 4080, 41129, 292, 81, 783, 65, 70678, + 24752, 73, 89, 894, 939, 39152, 566, 85, 77827, 80, 4511, 72, 1130, 5964, 20811, + 263, 74, 339, 8910, 71, 8566, 604, 332, 1320, 16400, 307, 4763, 67665, 69, 73, + 12591, 42050, 19072, 80, 3906, 89, 392, 73, 7488, 89, 1122, 418, 20103, 80, + 56871, 4880, 65, 80, 74, 427, 566, 796, 77, 5701, 
80, 74, 27455, 25817, 1551, + 309, 540, 354, 94908, 8998, 4223, 14878, 8050, 23001, 87, 1065, 65, 9907, 1494, + 1910, 581, 85514, 365, 88, 21888, 85, 5298, 998, 402, 9991, 85, 303, 299, 2496, + 45015, 3427, 352, 74, 80, 797, 23707, 87, 13441, 365, 49891, 66, 450, 343, 26397, + 1273, 455, 318, 267, 80, 7047, 1590, 318, 71, 20127, 564, 75, 303, 710, 5469, + 3251, 8207, 318, 75, 10784, 2438, 79, 87348, 4168, 74, 34870, 302, 80, 3458, + 80, 88, 50153, 1233, 86, 344, 23048, 370, 7521, 10002, 1100, 9700, 337, 20553, + 87, 363, 9451, 6194, 73, 63643, 93000, 6511, 9110, 73, 2649, 80, 89, 80, 70, + 9490, 86804, 87, 18577, 74, 3415, 7910, 6388, 1498, 27457, 1073, 70, 80, 69, + 18442, 359, 58153, 73, 9907, 540, 89, 3251, 18458, 365, 5192, 74, 412, 327, + 509, 78, 343, 73, 20127, 18442, 85, 19072, 10616, 391, 85, 80, 11160, 10056, + 911, 24752, 4922, 86, 85, 42908, 1631, 343, 318, 89, 641, 2784, 1729, 1030, + 5972, 15479, 1900, 2933, 43505, 2439, 9509, 728, 3978, 13536, 89, 32868, 6455, + 2249, 8998, 89, 56871, 10784, 784, 5308, 49712, 89, 22524, 52097, 509, 45015, + 8910, 2329, 73, 3418, 677, 4912, 80, 69, 24752, 11377, 10776, 391, 21325, 6511, + 552, 2319, 1658, 49712, 9700, 70, 73, 8998, 80, 285, 365, 64305, 89, 61057, + 1729, 86, 5298, 71, 544, 307, 798, 23913, 70, 2303, 263, 76, 16400, 89, 48380, + 33323, 80, 24244, 48372, 18926, 7910, 3418, 85, 1551, 4341, 677, 44067, 3601, + 3696, 48610, 3427, 89, 710, 10470, 587, 36722, 49463, 45015, 13009, 9700, 79, + 372, 30987, 81964, 21796, 48380, 588, 371, 2201, 3059, 89, 55418, 47154, 73, + 53289, 13722, 3524, 2642, 18854, 1764, 68, 16618, 80, 764, 80, 5241, 1604, 5972, + 88, 352, 1662, 64087, 80, 4991, 10910, 953, 80, 303, 80, 1498, 376, 377, 88, + 6634, 80, 1466, 7453, 94912, 9490, 8462, 15618, 65, 710, 89833, 75, 80, 88, + 437, 73873, 89, 2312, 318, 66, 359, 9832, 3601, 8207, 23361, 74, 418, 67, 34175, + 86, 80, 25457, 66, 333, 85, 3178, 1725, 268, 80, 4484, 604, 84, 4341, 5469, + 89, 78498, 89, 35531, 67800, 4852, 441, 65, 93000, 33721, 318, 74, 604, 36851, + 64145, 93797, 2465, 80, 61057, 302, 3316, 2191, 31019, 19456, 20824, 637, 8826, + 21486, 351, 79, 6414, 3178, 1725, 80, 89, 42818, 80, 404, 1498, 86, 85, 588, + 74, 634, 14088, 87, 23361, 80, 47689, 30506, 1029, 12119, 80381, 5964, 66, 1498, + 73, 6292, 291, 89, 6256, 7835, 1498, 66, 7453, 73, 87, 19239, 582, 5308, 93529, + 1634, 88, 329, 10616, 86746, 20191, 402, 4080, 86, 2005, 6511, 361, 74, 5308, + 450, 66, 8462, 6780, 363, 4719, 85, 9810, 80, 29238, 80, 1910, 13855, 309, 69, + 1498, 9092, 4922, 5418, 830, 79, 1100, 12682, 83, 73, 87, 73, 318, 66, 3423, + 65, 80, 2332, 3906, 370, 710, 327, 28546, 87, 7488, 86, 12940, 773, 6043, 80, + 10847, 84506, 352, 830, 65, 10616, 329, 22119, 4458, 64, 427, 79, 372, 21659, + 31881, 3423, 44970, 1662, 1233, 8510, 31577, 81, 1031, 2308, 87, 318, 12498, + 18299, 88, 869, 4657, 773, 339, 3841, 72, 4912, 3518, 406, 73, 3251, 65019, + 88, 7047, 10630, 9092, 53060, 9902, 82999, 4625, 309, 38361, 67800, 16848, 70, + 675, 42848, 87, 8671, 28990, 25604, 74470, 1662, 79, 7737, 418, 2690, 12328, + 3893, 24672, 398, 53016, 32759, 3181, 72, 906, 652, 80, 13075, 31019, 48872, + 96304, 5104, 637, 4381, 683, 70, 604, 1073, 625, 74, 58598, 28990, 418, 501, + 80, 1216, 53016, 87, 43723, 66, 352, 85, 12940, 747, 404, 2026, 14170, 80, 8088, + 1031, 967, 86, 911, 45228, 73873, 88, 1331, 581, 906, 85, 70391, 17008, 80, + 275, 22619, 2739, 85, 16400, 4317, 80, 509, 372, 343, 21659, 2176, 80, 333, + 1065, 9780, 73, 375, 20372, 1677, 71, 10056, 9902, 89, 5697, 336, 675, 71, 641, 
+ 29684, 1030, 39152, 911, 19747, 80, 32204, 1662, 1065, 4484, 89388, 1609, 3647, + 94869, 6036, 1764, 15258, 1551, 16373, 5289, 85393, 5469, 2411, 77, 1551, 85, + 47154, 365, 69, 26397, 76, 1026, 5544, 1494, 15618, 52971, 11089, 51622, 70391, + 4991, 83079, 7737, 71, 450, 65, 16276, 84, 5308, 939, 24244, 9289, 89, 19072, + 462, 1729, 11805, 36851, 329, 4010, 307, 333, 1662, 38041, 11160, 1658, 796, + 5237, 45228, 8050, 70, 80, 10776, 9092, 1631, 363, 34175, 2070, 36068, 68054, + 14949, 1031, 3906, 73, 13536, 71, 10470, 83448, 5010, 9092, 77, 7488, 84, 300, + 25770, 26636, 544, 86, 4010, 22473, 8536, 3251, 76, 1286, 261, 17704, 4341, + 70, 27455, 11872, 55568, 351, 74, 10056, 80, 12498, 70, 73, 46790, 566, 377, + 398, 67, 1100, 376, 38361, 69, 24012, 580, 85, 80, 11851, 32589, 377, 22619, + 316, 3576, 652, 70, 2329, 3251, 343, 2319, 82, 441, 36851, 79, 20127, 71, 1609, + 29700, 89, 92216, 87, 398, 1517, 73, 637, 67, 5346, 71, 7501, 7141, 96518, 6098, + 81, 418, 85, 10470, 83, 544, 41796, 89, 89835, 6358, 85, 4777, 564, 35804, 604, + 80, 38181, 302, 95947, 70, 12723, 412, 80546, 395, 1273, 1029, 300, 87, 3368, + 1273, 454, 74, 5972, 74, 1030, 12748, 80, 450, 20127, 5964, 69, 351, 75, 9907, + 9092, 77827, 7063, 84, 2319, 266, 303, 365, 18577, 1073, 675, 3647, 69, 454, + 87, 8587, 73, 85, 3893, 28679, 80, 25299, 1029, 57208, 9092, 9117, 2034, 89, + 2689, 5972, 9991, 79, 12498, 74, 89, 29684, 2302, 29684, 76064, 89, 14694, 288, + 98667, 7501, 80, 39172, 2005, 73, 3479, 14244, 73, 13183, 31019, 344, 360, 22016, + 1936, 9081, 1725, 30506, 11160, 1919, 81, 42908, 80, 89, 4223, 86, 2302, 418, + 3172, 87, 1729, 303, 80, 2329, 566, 85, 1336, 13536, 85, 44970, 9832, 73, 86, + 302, 73, 1220, 18571, 774, 46224, 10784, 5544, 73, 4912, 1371, 12940, 1073, + 73, 12082, 80, 1394, 53060, 437, 6043, 511, 17137, 85, 1065, 88, 4010, 7044, + 32204, 451, 88, 7387, 6511, 1320, 283, 327, 581, 70, 33863, 23913, 89, 20824, + 22016, 1394, 40924, 344, 89, 754, 7453, 73, 85514, 80, 2668, 575, 67, 1725, + 675, 69, 73, 15720, 67782, 418, 69, 41796, 87, 302, 73, 54909, 10616, 2312, + 74901, 74, 36238, 2580, 18854, 2042, 89, 399, 89, 31268, 74, 45015, 38041, 5469, + 12498, 13391, 14029, 652, 66565, 3178, 35949, 1631, 4341, 5469, 49938, 65, 1897, + 295, 2111, 344, 47154, 73, 2580, 73, 65019, 10333, 8462, 78, 1065, 6634, 71, + 1804, 267, 66, 40078, 3013, 74, 6331, 355, 1729, 80, 24672, 540, 35531, 2445, + 15008, 89, 5104, 1273, 88, 69, 1494, 1091, 2249, 88, 37975, 2857, 9832, 25299, + 710, 70, 80, 89, 97523, 23866, 261, 89, 70678, 764, 87, 18577, 21486, 8480, + 69468, 3841, 20926, 80, 352, 12498, 73, 42528, 36825, 301, 80, 418, 89, 45788, + 337, 9832, 89, 85, 12498, 76, 1030, 1394, 11377, 73, 869, 728, 5241, 773, 88, + 3340, 85, 1310, 80, 637, 4239, 30407, 3415, 68, 71696, 762, 302, 19239, 89, + 1320, 1394, 764, 93000, 4680, 71, 2249, 69057, 501, 23760, 6200, 91998, 18400, + 85, 20926, 7501, 73976, 79, 46390, 80, 318, 370, 18577, 343, 1725, 70, 1658, + 4991, 96605, 73, 1029, 9832, 376, 67, 8536, 301, 89, 331, 85, 1347, 1931, 73, + 6632, 301, 278, 46341, 784, 1725, 63643, 60013, 32589, 89, 1310, 89, 24990, + 86, 18854, 4937, 4680, 87, 71, 483, 355, 455, 25299, 664, 67702, 1073, 9195, + 73, 6634, 59524, 74, 80, 2739, 18854, 74, 501, 1331, 86, 26738, 391, 1073, 64145, + 70, 2067, 359, 47875, 3251, 1498, 3933, 6780, 1739, 4912, 4223, 418, 79, 1897, + 664, 65, 10188, 80, 983, 11823, 266, 48320, 5192, 36851, 74, 8311, 876, 15444, + 3906, 87, 2332, 75, 80, 14878, 2465, 4835, 983, 76, 8462, 784, 5469, 575, 3013, + 59524, 375, 12328, 
73, 20127, 89, 983, 9112, 713, 324, 86, 11253, 7819, 1092, + 86, 89, 339, 74, 15959, 295, 51924, 1073, 89, 31721, 325, 1331, 309, 85, 12407, + 8671, 59524, 20211, 4484, 998, 283, 8318, 88, 73, 2312, 1036, 21888, 7004, 2026, + 5241, 80, 2332, 36171, 87, 2303, 89, 26929, 1366, 20127, 71, 70678, 1551, 84, + 773, 3806, 21206, 278, 81, 9664, 76, 38359, 85, 1394, 773, 869, 3906, 953, 347, + 76, 1978, 383, 41622, 509, 339, 80, 17156, 266, 9117, 87, 84, 12748, 88467, + 708, 15618, 73, 46341, 447, 48380, 80, 7208, 74, 87, 70, 73, 70, 1290, 88, 324, + 302, 78, 336, 3306, 86, 361, 4922, 89, 73, 764, 307, 31172, 8671, 70, 283, 96518, + 31422, 88, 73, 2159, 332, 2067, 710, 668, 8462, 31764, 70, 3013, 19271, 86, + 657, 79, 316, 74, 447, 263, 44970, 5481, 44490, 17043, 32973, 4168, 2302, 74, + 7501, 9832, 451, 1247, 575, 344, 2042, 1031, 81, 1739, 69417, 273, 11289, 81, + 2445, 307, 28679, 1273, 8385, 648, 78, 511, 2445, 7047, 76188, 441, 2192, 80, + 54803, 9700, 5308, 68054, 71, 398, 3415, 953, 88, 73, 10462, 363, 31172, 370, + 89, 9810, 71, 1517, 52719, 84, 587, 3418, 82, 3423, 1729, 89, 876, 82, 3415, + 73, 85, 2642, 22524, 377, 4942, 73, 564, 9700, 87, 2106, 8318, 88, 80, 22619, + 752, 70, 1394, 75, 3178, 4835, 89, 657, 18571, 1729, 8207, 86, 89, 92216, 3172, + 4942, 3933, 332, 939, 57208, 9991, 363, 19239, 4511, 89, 73, 347, 86, 9081, + 70, 8587, 2580, 80, 325, 9490, 3906, 19747, 5481, 70678, 5544, 67, 34175, 29229, + 13616, 73, 12130, 36851, 85, 80, 36068, 6200, 427, 17704, 80, 3251, 85, 31695, + 402, 3516, 869, 74, 383, 2067, 72801, 79, 1791, 73, 939, 70, 1604, 3368, 86746, + 87, 392, 75, 543, 4835, 88, 80, 4086, 94055, 16400, 71, 75406, 339, 625, 1729, + 31764, 15479, 511, 1551, 5598, 398, 1320, 346, 11377, 437, 901, 85, 80, 38041, + 73, 59448, 4223, 85, 3601, 283, 16820, 41796, 85, 4381, 336, 55498, 283, 22119, + 14878, 44183, 455, 3059, 21538, 16017, 73, 5683, 89, 372, 708, 28927, 9832, + 66, 98667, 752, 80, 69, 89, 1231, 8088, 11222, 1394, 3306, 71, 1517, 911, 85, + 17134, 2302, 85, 818, 7141, 65, 5298, 70, 3601, 7521, 4355, 3223, 84, 347, 70, + 4080, 61057, 46341, 1804, 28927, 46341, 47154, 16400, 8462, 12119, 32547, 1658, + 12591, 20127, 12825, 84, 80, 275, 3172, 10470, 9117, 417, 73, 894, 34967, 80, + 7488, 86, 80, 953, 2312, 17134, 86, 288, 77, 343, 10616, 25105, 29238, 74, 9110, + 39152, 86, 1310, 76, 385, 1610, 74, 89, 9509, 61318, 77, 78498, 73, 88, 5418, + 56178, 89, 9197, 398, 347, 55655, 25299, 5289, 3893, 25105, 675, 43210, 398, + 65, 2329, 376, 2203, 17134, 75, 333, 89, 10910, 1336, 87, 32837, 75406, 80, + 4010, 80, 1026, 75, 3368, 68, 55334, 42818, 80, 89, 1551, 31695, 86, 10784, + 2329, 9701, 544, 81, 339, 20990, 10796, 75, 1094, 14606, 85, 1336, 45228, 3576, + 336, 11949, 391, 86, 8088, 46224, 291, 36851, 424, 73, 86, 3415, 1609, 89, 4168, + 88, 47391, 8587, 28990, 29700, 80, 3055, 402, 14928, 67, 7488, 876, 76, 6780, + 88, 36825, 42908, 2312, 3306, 2739, 80, 25295, 30506, 501, 21066, 93000, 2562, + 7215, 337, 80, 88, 677, 69, 33863, 80, 16820, 572, 35278, 77, 80, 1065, 64, + 309, 77, 544, 1149, 68054, 784, 2332, 67, 2641, 73, 88, 9117, 87, 4922, 79, + 911, 83, 10616, 11823, 275, 73, 21620, 31268, 70678, 1029, 98496, 12328, 10910, + 79, 416, 85, 89, 664, 26397, 71, 73, 32962, 10056, 85, 17134, 1146, 80, 30463, + 1551, 24990, 70, 41670, 604, 67702, 11032, 7488, 385, 581, 80, 1247, 11823, + 87, 9701, 501, 1146, 352, 76, 44183, 21620, 936, 9700, 76, 1216, 76, 2396, 2249, + 2739, 73873, 87, 18664, 88, 1919, 86, 85, 2042, 5972, 28546, 73, 86, 11089, + 22619, 80, 99161, 1216, 45228, 
2438, 70, 511, 79, 391, 25299, 939, 86, 1718, + 80, 70, 1764, 21796, 74, 47154, 67800, 9490, 28679, 67, 365, 406, 73, 43974, + 89, 19239, 652, 71, 73, 89, 3841, 60013, 2249, 69, 5584, 6194, 70, 80, 73, 21807, + 58305, 329, 1029, 74, 7453, 1216, 80, 3141, 97816, 4511, 17134, 60933, 45015, + 74, 5721, 664, 5010, 327, 74, 80, 295, 346, 89, 24527, 1331, 96250, 344, 89, + 80, 8088, 953, 82, 2727, 318, 352, 347, 8207, 73, 88, 8462, 75, 796, 14606, + 1662, 540, 17851, 36825, 1031, 80, 359, 1310, 11823, 566, 588, 73, 5038, 4942, + 49595, 8462, 4942, 32204, 85, 1029, 73, 4484, 309, 9902, 85, 2857, 10784, 82969, + 747, 88, 64087, 302, 86, 4080, 73, 87, 31764, 728, 91295, 95143, 7513, 869, + 359, 67020]], "model": "text-embedding-ada-002", "encoding_format": "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '45399' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1R6XROyvJbs/f4Vb723nF2CClnsOwREvkwQEHVq6hQoIqAiHwmQqfnvp/TZNafm + xgtIIQmdXt298l//+Ouvv5usyq/D3//66+9n2Q9//5/vtVs6pH//66//+Mdff/3113/9fv/XyPyV + 5bdb+S5+w383y/ctn/7+11/i/1z5/4P+9dffPK51Zlm5w7mkNi+wXlnNrHNYerPffK4Qe1bI3MOi + 6Oa2C3X4PNM120LZV5MsPW2kif1Edu7uzscyWyXImg93LLn2bIyrW9qg7N0kOGq7VTYt12WAzDg8 + E3PBlt1U3E82IsXZIM5dEHlzXQ81ii0W0PF9LSOuN5CiPksntvGEYzSqjViqj4e1YMbwWXq9J5xN + tFoEB2JK/JIxIZ80YLfwyUjp4W5QvaxH3VN1iLVRrx09zrquVp1AsFAkp2rQiJeC7OMZd69FUHFo + HBGm55ERzxE9Y7x0eopky1jjhm53fGLPtQLz3b1QMRm31SRsdhZyNQNjtTaJMeeofcFhffdpIbIh + o900UPCYVjH8jhNvitmbouydXSjnn7KafdkZ4bHvPYLf8dIrHseugd/8rNwNvFFT/RIJaSRh9dJF + FevM5gz7ZzGzVBEO3UD3+wD69epNdhyzaG78kIIRgkT26+UWjfdVYUHXuwbTjRUx+gopV3RenW40 + iaa4Y+nufkXHSqHM8+cPH8++ZymDOZRs+xDXEeV+eQVpfjpEOzRtNulX+oJ5v6vZVq4HPssX2159 + ulhi2jQssmEc7BRNqPiQqMGWNxrIwNCAtSPG7FYZLRcigGxt1kxHvdx9nDGagZ2Shorn3jCmwZpq + aJDkElK9hGq6+MOIeBrpbPdODbRiO0GAL54Yvqt2JTnsDkjLUo1s9o/amE/B9gXwfN7JqZaNTJp4 + bKKQvyX8ePVON662rYhMnVREZ2LOebB8J6gt5pztZEutBmFDTHhcaotsGs8xBoLUFoyWtmR/6XjF + QprG4DTFkRnuS4/G9ycQ1cg7tmwfBgmfHHYX0PEmjFRab+4ZD7c4hU14rvFKttSOa/rzBSS9bsnh + Ng/VuPaWBZgTw7S9xj5azckN5OZMUrKr5Sobda+qYbdiO7KnNzfiCS8KGIL3DQtZ4XaSrYGLhnVN + 2HmdxsaUXbVa1U+iTwKla7vZsG6hvOAvj2hPOUJDrp1EOGO9Z9viYkfz6rBuQFjjJ9GFdp3NEQqX + ixlEnxHjlkZj2MgxvKrKJFqRxhV3anmUF6ZZUK4KGHGnkgDmTotZlnlbg0OQ6mglxzFzy6D1GuPz + tkDq1zb7rqfHj/1Fg+dCjohX54E3LhPPRif7bdK50KKKP2a4omJ6x3/wzoVlsYQuMHwq3M5rPoXK + 2gXVOIS0Nzuj49q895GFP5TEHHnestTWS9g+Y4Ec9cmsRJTeCvCRajINDR2iSfFcI1F1Cqyk0j5j + +/BsI/nGB1q5ZWRMl+ch/X1vRubr2uBJCgpSLl6Ej0dQO/o2dymkZoEI/sQ6mtx80cIiWx6Jpx/v + xsii2USkNzSy/Y7v5NZzYbWlNXP5uzbG5BgpatKiA71NJ5k/FpeZotSMBMolWzdGCaMW7jmViO99 + SmNQS9KDOo8q84Nsgca6S21Ay3iicgm8+iDT9IFtD5w5N/NZjdfgdgU3tAws5ctLRaVscwVu9zti + a89PNw8brYbsfbmQrWvP3sCV6gXTp5/ocr7bnMufXQJ1KXCK8F7mfLdVcgjfwor2kUa68eztAjRa + y4jZr7YzxvLKbXRqU6DhTRdRJx2DK1ocu4x5+OB4Iv4sl8ruvjgQ7bp5opEzMYD4qrjM8XzBmM/a + w1Ky4ZgQ52Q3Hi21tQhyBQ0zCpYaTD4zXbkesEnMaH5x3mXXFL58zyJnZ3j8Zl0EGMd0Q8z7a46G + UBlt1cIdZbo9XI35g+YExpBishMHyvtHf29gEzpvZjDf7OgPL3Xmq8xL6NugTjgDIAgpc7qbhqTm + sFPQqOQGXQPaRNP5ZpnyD0+6w27ZsL/NLvLr3KeX4bDgFL26BNISU7b/4Vd8rmflu//Jt54aY+bs + BWjagFBppBtj0m/lGR6dt2E75eMbo+51LwTYfLHTkDyyWeo+M4z0ZVM+W7ExHtVehzTwZLKX1m9v + XG3LJQjmS8JSPZ+8KTPPI7hb1SO+hp8dbcRWRN4Nd1Rp7HXGr0chh1CeGdkchNJ4+EPeoPXGdNlG + ezce0+atD7/6oGQrXE2va5CgLJszst0wI5oPZtVD9egCQpq6NcZzpShwKhKFuG5dId71YwHmZ3Fm + Ptsb3vDR3i+QpbkgRhV/UK9+1gn68iXl8iNAY7gYYrgqpYgVbxHxZTc/SqjJUacVs4/RUPtSCY3/ + 
uuJptYiz7/xmVMnZFi+Mh1PVMkpsqNcd/4P3Sa2MAslna2B4ExmoTvU5gPA9KBgkp6jGFndrkI5u + hIUiffEJSx9AxvLUECOTVNSroVbCCbBIaR9uKj69ZwtJziYjmiHORqfL1x7FylHD5f1dd+PeXRQw + nhuPGWur4NP+KiQoFhsNw7wLq/EkRliNJPdItLi8cD4FbgBpH4wEa3sejTdtFKDrZOvLF7eOK5dW + BO2kC199lFRDcT+5kKx0h+ESvSPWHm0KT9G4MMfE947vzGWzbu2pI3r+iL2pC9wzqF1ofvGmG/Ne + O8QovnZ7shUWNzQFxtYH1O5lYvCPXk3ai2NY8NpjyWXaZNMru5bKtjMEDJdjhr76ylp3QeiQXVY6 + xmg9Ugtu/o7R5UdA1ZRl5qxsVYsTO7lsKnGRvxXoZnND17b2yqZnByIaw9uBeP7s8Pn0VlNYBgEw + 7VAjr6cIp+j7fYlvuTof3i5gtGenFT4FhpqNgb3EaNloEztFG4TaVscmLFztxpzHTcz6fDy/YLXt + a3bd+abx0Yh3VpVbeSSeJLXe/OP73/Nd0dSz0dTDP/uJ7Y6LtJqisNHBr/cEL6sPy2j/2Kfo8ra2 + zFrCi491VCbw3T/Ez9ftd33iAi7HcE/8/WPMeCS+NLi8JJtZlyE0ZgXzAlgjV8RhS1ZN82MBgGEO + 8WCkajZrTppCc85PZHOjecQi6SEqS1n8sE0x+tk8aOb442c65vnK4Pf6mCuPy8v6t96s8kMDJx8s + ZjnXpuKKjXs0iGRPF7reRnO6OmP46ge8YKVdrcry6sJXnzOsVHnGn0W6RHR4qFSNfTGbF9FeQZ6X + crqKNhma9cWegnTQMLstRDNiqZ2F8NkNW6ZNE+3GnMUzcPvmMl0Su27apkMAbGHdfvXPm26KacEO + pwmdW00zui8+FfEyDxSpgcXHSxS+lMDpOebOzjC6cZW84GywgUJPzGpKBIsinxaYbevlKxq++EQi + T7KffjbmpBjWoCN/QRzsAqLoVSW//UA2SaYZs1ckNoiGOGJxPa2N4TTQdK3CcoEXLlhZ31+JpVS8 + 3zLbv5nZ/JAfIZzyx4bsOXyqfqcdcjiBL9IFSo1oXA2zAPbgMqK/hNCbF51cooV88MhOdns+dQTX + 4FaNQKyPkHV/9Mnmen0xq6m8aloJEsBLioHs1iZEf/Ss+CgqukjFh0HtKguUynVPWL2wkn/12wv1 + zktmztLL+CBeWgCq7Wy2UdTI48ZnjYEu6plthYWKJnRsElhfBZP5MQzelF+MHIZATFlyjKvshx90 + CIQYL+d7gyi7+6bSOnpDp9VCzOY8CnOw33imPF4tsnnpPlxVNUpCn2ehq7h1zAGcsj6TS6Xp6Hmr + AlNFe00jbhV6Ga+at4h+7+cxue+mvaUnahyKNvGspR+Ni/y9Br26u1SN+DObQ49j+Pofoq+iFnFx + v3fRV7/Q5+xM3nT1mhBYgypaJ30SDYvL3EOBuoT53kf3uKQW9c9fYWHpsmqOI7cHd7vwiFk2WrYs + XKdV5M+nwV35eqBpM2AF6Vt+oup+1Xh8L/c5WK/2gRcg8YqGohdDuU7uTNucCtQ/DVeHmPY6w7dr + 441ejwRoW7EgmxgKNNW+WsIPv8KhEIw2FL0E9uy4Ysaymo15OqIQdmpnEswDzZsITLmK8pOAmbwS + OZfPTIOEBRHxPJIZ88Fga7TeWC5G98bweL7/XJXP00F4+cXTTx///CnZpOOim7H11NF59UqpGNFT + xWhovNQsq3K2a89lNVbKskG3M35jHhlaNh8nC8N3/sw+QFVN2dV+Qbi4M2LuloD+4PlVPUz25cfo + p9eABJ6F5eFwR09hQyzUpYn39aOjN9ZRm8D5OZbEaJlVTe1I/J8/ZJ4aWGiWL5qtjpYYsa8fjz5n + aaKQR/X+D99xeFsaejZejuW7IKJpGaQu+vppslnWdTROj1uAgnvGaXWT3G48megF0gs8ZkdZySc3 + XzVw3W59huGO0LhUT7kizUuLcmdXeePjWDVwu8Y9u4dRxD+PRRrAPEKIZT0tqu6dWD7Yh1DCfNOc + jCYfg1oNjIfMTGdWok8aSSGYTykivlyybLTZZCnivW8ZPl2zbNTd94hkqrdsFxi3bHwMUQ7Sa8DM + pNfum38sYrhkvsuCrFhHM3pVMRxRWxFstoeM37a5q8T1xSC7pU89thy0ECF/MrC4vXTVMPZm/cc/ + b9z70mBHY2vCULYhw2maeuPgtzU6d/ROtsOoZ3NfOhh1dhmx7aGiiGWvfAadnUxislXG6SM4JuAv + jR358edk1pMC/vkYE2ckeTc+hixH0bb9+dclGrXjiNXAoZy5/efZcTJmDVzuMSHeNlLR5LCTAMLa + fzJPPy686WU2JaSBIzOD2cesD7WHryqTazFnb2cdY5fMhMLevPASXWrOg9dO//E1XW/1XTW9Rc2F + VZWeyEYqK4NrxiNVv9+T2bKqV8t75wZwPz8QfUwbiU8+TXO4HnyTed35ns2QGNoff+ofBfxvffr1 + w8SbV17GPWOyIfeWJ2Lr2qXi2Tt8oXbvDsQJu9bjemifUVQlN7br48KgX/5Tzvfnm+mw3xkSU5se + kiX3aZMcw2gePgFF7BY8md2/mm7oiraH9RYTtrujFZoyIwa0VjyVmblXRn0ptgLSLT0mrnwPs7ka + gytsanwl+x8fSWrxUm8ntSVWwvVq9fWb8k3bS8xe50bUU2SdQTxmHZ6K/uZNxVofQexez5/+ifoY + +ynqTOfAtsqLeVPWFwr6+hNifhZ7Po7zdIZccmxSdNLBaLP4DsjbGRPBsk2zbmZODfrnsiN+erp5 + 3/cX0HYvbsk9utTZhP1cQyf9syOb+8FH43utJuv11ieYhSfDWxq7g6VOGwC69m91xu/PQwBPWU2Y + tRaP3ie2sxTw8nkm/uo18V64nHWonVZn33wj4ununqPXQXDYdrkvMl4duAiO+37SxVzsMp7uTlfE + Pm5B7HQOslHV+jOwjUfpZOsmHz/2GaMv3qlssx7xYNXZsJ/OA9m1Mc+aH57P9/ebLneyjJjKLgnU + 24OG10JyiIbNYK3hsace/chO2s2vY1eihavfvvlOmdEIpSJ4kf0hhqosvni1XXguUER++KNgvtYo + toQ9baM06Jhxjk2k3j2fedHYZKPSSksoGM9ooz0/Vfm+Iu3np5jVQ5qNOkf9z78R3eF9xZ87htGv + Hm1SkfIezfsEHiDIzFxLu0w8V7OClm4iEP8apd04B6IA3+/D9iR5Z81PnxNcu1RI09QYH/2pAU9R + cjyqT8VgTj2NcNOIhIdLx7ve67kA3zyCOOf3uxsSAffout35GK1iJ5rXWlxAPhkGXjrXpusgCDV0 + 2FoXKm1YlTFXlxKEi/UZM5BLPry/keUP//o3z5kWU1ugjYyudHHfM8RNawWg32aH7fbOgH71DPJb + 
Q5i+TWg1hMraRgJuV2w3JJuvP1oLIAT5iD8289HUrcMSZGksiNYmEx/sjxkodrZTvn7DiuYdRzXo + i3pD9Hnpc/apnz5c7gkhbnJm2RQeah8xSc/+5FOzjQwL/uz/uG47iq2nBt/9QdfJ5VH96jU6YJQz + 77bTu+VK9kHRlMv0yw/RnBLNAlSXMvFyp0L9PYccvs+nM2EOQvuPb0NhBxXZnu69Nyx0fY0OySem + q2614dImSRX0psoH52nuZKO1xiYs+bpnm3WxQeJUXkzg+ugyTf042XRKlBSOvrv+6tHZ+NVTmEch + JBrpezT1i1aD9C1TYhRMMYY7W10B7XUNw/Xk8DkKsQIEn/ZUmU2pmx09bdBSfvcYYth7Xz0Z//Ie + LEaz9W8//9X/1N1ujhHvr4GvXr3AIjfhIKGzs9JeylA23/qUOxGvL14MeL5FlN8mFFHyrGMQ98qM + +TXxMv5cYRG9a/GN1x4s+dhfiQnu4oDodJv3FaebKkCLR5VQvqvjqOm3mfXbz1T56s3p4j9HaNiC + MFyvajSd880a/erBKa7bqr/n4lX5+W3SFAof7Na2ALD1IpZRdN4wP1YCnNXyRLaaL6KhX5S62h/3 + K6aLFBlTtXRywIb/JEbBP9E7tbMADsFwItagOd0YOLOufPM5Kn79+bTtti/l6CHEDLyYvF++DC1k + GU70jcfp69gVyD1IFjOSdECsF20N1YEssF0txd5k7F0TlOcG//LqaHp2oggbajwISceJz1HVaco+ + uqnffP0ezQu4iOBc25Rsd7isppvif9cvQnRh3NJsDGbdhvd4exJ7Lp9cerzLXB1bYjEDaaMxppdX + iFy5Rn/8sZgMBlW/foicnzuoeH3elVCplUusK9O8poAVwCGAmAo6jfjqm2fLjpCHbDNtt97orOwX + 5NEp+KMnx3YJmvytb/QlK89uBpOu4T32JXGjdOwmBrWCNm5DMVrzBn3zFwGG5hSyrW+San4NWYzk + g8yJZh6CaP4gJUGqEYW/+p799APC+aqks8EXiB/jB1a//Ej0eVa77/yCP/npthNr3gSz6/74hVbD + sM3GZWK4MLGG/um3DLcqsFRiN3uWf/UV3ytFqH7zIlq/N8yYv3kgWphWQcxZdrh4QS1Fcfj0mM3r + nTcw6BVwG01j200XG/1jiK7Qxp8X0faen/WDqFC4rD8a2We4NHpap2eQCD4SM/f0TFqSJFE+K3+k + 4oJv+SSOuYCmjQDM+fIdpbtriPYbWWekKVI0G3Q4K835esISJ49qZN1HUf7kU0UqdkyXr1QxQduz + 0JL2nN/yzaj+9MC2OlmIbl9Bqpb2UsSz0J6zofbVAjV+fSVHLTANfj0u81/egR/RbZNJP783at2F + SlD23ayKVoFwO9y+fif0uteLFkj5nAMqxDmgofmgQsklz/7lnd702VRnOLVnYKZ4drLR+DATts9E + wHP90PjXn/hoPGcYT6JhoFExPgXMUXgnREqXaLbL+xXmepWy7XqziPhl7NI//Qv9/HkZc9wqJUyf + u0gnTzhmrGreS9S9sEUFfa4MfuyOPrzrd4tlEy8qHt1mXT1kWsIueWXyadVsBGS0fUuc7Pkyhv1N + cUE8Ng3D+mwYqxttLegH/4OX96Co/uRJrrbB7E/9DRfPBM671fzL+6vPdz3hXT9btjktR4PTZlHD + MvFlcrcy2Rvnph5hcfxkRFdJk3H64emfftVPz4vJMVOgiN2colWQfvPeWoOH0TrEgnUdzfgetGqj + lykxX+qC0/3idoXLw7WJLzlFx/eHawLeJp7o5anM2ZtN+hoUhG2qVl1htGIo2EqQRZR2j2LK/vDN + t/9Kh68eZYN8WsNnJ+jErno/G0IZYvTt9+GFEM98JJe8AFtgLeVJOvD25y/mp3X6o/f/5NtfvqXr + XWUbvXnBI8hUa//wQ/PtP4L6Qjp9bw6RN6ZpaaHVkF/Y9ix03RyoVxcc4RqSg1tto5meoIZ4DARy + DhQ9EwNb8OFCtiHbfOvFJBVzDuna7YiT36VuyLW7iG61VeJlwvVOfLmHEmjVr5nfjltvONHWB6cp + j0SfxmvWf/MJdaO4l3/rXxTXhfKZAk42fsarsfTuGnrs7xjL7SnIpmt5DcG92lucuKt1N1KEz8it + WoERKbc86bPpzmg17K9fvvhwqjlhCurD16m8OvRR8z58dKCu5xJ3soyKf+oBw+tz31B+k9xKggx0 + NBfsSKE6NtncB08L0iThdA4/l2hkkWL96Y/99NhXX5XyWryqbK+fr0a/rzob7dxNjOd5vlXT2xUx + /PpL9Syl1SzptyWKN1rAAn0OOO+M+gV//04F/Pc//vrrP38nDF7NLX9+DwYM+TT883+OCvwzvaX/ + FMXlP9nyz0kE2qdF/ve//n0I4e9P17w+w/8dmjp/93//6y+QVOnPgYO/h2ZIn//7zj++//jf//h/ + AAAA//8DADtB0HjnIAAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c09bf85ef231c7e-AMS + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Mon, 01 May 2023 17:29:42 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-kd1j0bcill5flig1m29wdaof + openai-processing-ms: + - '406' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - 3c27a4a0263197ea84526b8aa83799d6 + status: + code: 200 + message: OK +- request: + body: '{"input": [[359, 74901, 1146, 437, 11949, 3933, 74901, 728, 86, 511, 73, + 5241, 65116, 4991, 375, 10333, 20191, 89, 383, 22650, 1551, 327, 1810, 1804, + 80, 347, 11089, 3013, 752, 1100, 73976, 303, 32547, 73, 3696, 3906, 77, 969, + 455, 
62615, 6581, 66, 501, 67, 2649, 12825, 8536, 89, 2067, 85, 80, 70, 74, + 755, 74, 352, 73, 4086, 3178, 87, 80, 35401, 376, 15618, 655, 13423, 33863, + 3933, 911, 77, 32204, 2689, 454, 29200, 2176, 1739, 65, 8998, 38844, 4223, 953, + 14088, 21486, 1316, 12328, 1725, 21486, 24752, 86, 1394, 5308, 321, 31019, 89, + 80, 73, 5981, 88, 69, 2249, 19072, 23576, 9110, 47154, 876, 73, 2319, 1029, + 6292, 69057, 2857, 20191, 2329, 80, 6455, 46640, 351, 89, 5289, 876, 77584, + 73, 2026, 84, 361, 23361, 87, 85, 4680, 85, 80, 13009, 86, 1725, 73, 641, 80, + 45015, 85, 447, 1072, 86, 22374, 14029, 83, 28679, 73, 70, 5298, 7369, 79, 544, + 12591, 327, 9701, 80, 2465, 84, 455, 11949, 7881, 74, 4912, 44508, 80, 5298, + 17553, 31268, 85, 604, 77, 11032, 6331, 9490, 45444, 450, 6236, 1919, 67, 20990, + 34175, 1804, 73, 41101, 80, 277, 73, 89, 664, 723, 49755, 80, 2191, 28531, 8587, + 15555, 79, 675, 42818, 375, 3601, 82, 1334, 76, 89, 511, 87, 73, 318, 4080, + 85, 1146, 80, 983, 796, 12825, 67, 4086, 78, 336, 1897, 80, 70, 11032, 7501, + 85, 12825, 651, 79, 16017, 80, 268, 69, 4021, 15817, 3757, 16400, 652, 28679, + 5964, 73, 17063, 80, 9907, 67039, 848, 85, 5308, 76, 657, 65, 1216, 88, 40924, + 11949, 76, 377, 79, 1091, 258, 343, 4080, 13752, 70, 27515, 265, 1273, 85, 89, + 44970, 71, 708, 582, 637, 1395, 1662, 83, 27457, 5308, 8942, 1498, 360, 89, + 10002, 67, 4317, 269, 587, 86, 13616, 31268, 73873, 299, 80, 73, 14088, 455, + 8480, 1609, 372, 53060, 48380, 17008, 48380, 35401, 40078, 45854, 652, 80, 21796, + 1551, 89, 9832, 87, 74, 1029, 307, 61057, 85, 12498, 648, 541, 267, 89, 70, + 24752, 80, 18577, 43210, 74, 370, 80, 830, 4341, 87, 8890, 4890, 31218, 641, + 82, 6331, 85, 80, 14170, 71, 325, 48186, 16400, 68054, 346, 9991, 78930, 939, + 1310, 4239, 93000, 73, 2067, 86, 11032, 43210, 8311, 80, 38768, 86, 1494, 2312, + 92216, 41670, 13028, 48802, 89, 7369, 69, 2005, 73, 7783, 953, 85, 49450, 73, + 1029, 402, 84274, 1026, 76, 89, 85, 664, 8088, 75163, 376, 20127, 80, 10470, + 406, 13183, 10616, 36722, 511, 1394, 87, 60933, 1031, 76, 1540, 5308, 66, 939, + 634, 4991, 29682, 351, 332, 4086, 19639, 15618, 16017, 49450, 73, 87, 20191, + 10176, 80, 32204, 32589, 1116, 355, 36238, 10470, 74, 2312, 76, 60013, 375, + 71, 55506, 87, 8671, 89, 11089, 8910, 74, 9326, 3933, 23048, 86, 441, 718, 7047, + 75, 398, 455, 73, 85, 20211, 343, 1565, 2042, 14088, 69516, 1517, 82, 5697, + 80, 2554, 88, 2005, 65736, 17043, 677, 19747, 28927, 69, 332, 70, 17207, 80, + 74, 1729, 2249, 82, 911, 71, 983, 664, 80, 301, 450, 83, 2465, 3423, 307, 84, + 4317, 80, 88, 275, 3172, 1800, 69, 352, 762, 36255, 953, 3696, 3919, 10470, + 64087, 46640, 73, 2329, 81, 8050, 13009, 20191, 71, 73, 365, 23797, 14088, 78604, + 3055, 8783, 85, 12953, 53060, 398, 79, 391, 22619, 540, 6043, 73, 85, 664, 77, + 2235, 86, 344, 79, 708, 89, 406, 73, 360, 2933, 41287, 9092, 88, 2026, 344, + 89, 56099, 2201, 1073, 359, 65, 35401, 86, 1897, 78989, 2130, 89, 43312, 67702, + 3906, 86611, 89, 894, 17692, 85, 30407, 86, 18854, 74, 710, 65, 93000, 13536, + 588, 764, 69722, 11949, 22650, 86, 501, 24244, 637, 418, 5298, 89, 450, 88593, + 919, 10036, 4341, 8480, 4341, 9907, 418, 87, 86, 80, 16276, 5104, 18854, 11328, + 14878, 80, 269, 5298, 84, 80, 68412, 89, 4777, 70114, 575, 67, 4355, 351, 32868, + 1146, 4341, 1331, 70, 5298, 77, 1498, 87, 2302, 18577, 998, 87, 664, 85, 1347, + 1729, 9700, 708, 11949, 73, 2302, 74, 89, 73, 906, 1220, 21620, 80, 69, 40078, + 5972, 372, 16916, 1215, 93797, 22016, 78772, 5289, 65, 7369, 12328, 9092, 67, + 2302, 70, 7387, 80, 81706, 
444, 647, 3418, 80, 73, 1073, 28679, 77, 4223, 894, + 2701, 70, 6511, 10200, 4511, 77, 333, 27455, 509, 3368, 74, 17008, 93000, 18571, + 19747, 87, 85, 5972, 84, 70014, 24752, 876, 28927, 18287, 318, 27515, 14029, + 80, 30407, 1394, 82, 441, 266, 76, 45854, 1498, 582, 77, 24752, 74, 17134, 66, + 5981, 67, 1100, 2562, 325, 86, 295, 55655, 89, 1331, 85, 1302, 80, 1216, 32868, + 324, 35940, 1331, 9907, 2562, 9081, 4336, 83256, 46341, 303, 85, 2332, 70, 55418, + 1030, 370, 73, 3368, 5241, 263, 29238, 8998, 23866, 2641, 80, 764, 5289, 15306, + 1662, 28679, 77827, 8303, 20824, 35401, 23361, 9907, 85, 354, 86, 48738, 6993, + 80, 1026, 1146, 1910, 12498, 80, 8212, 12825, 65, 391, 22119, 87, 22873, 8088, + 24244, 915, 81, 11927, 6511, 307, 509, 52926, 1546, 4239, 47689, 27093, 28927, + 24037, 1546, 81, 4223, 1320, 5673, 283, 69, 418, 87, 71, 455, 85, 4080, 80, + 16618, 302, 911, 75, 2203, 48380, 80, 998, 67466, 14029, 1302, 72, 318, 51788, + 84, 1813, 89, 31764, 32204, 1302, 3601, 44970, 80, 22119, 71, 29684, 4942, 1094, + 318, 31722, 15008, 427, 79, 3178, 25506, 84, 355, 9092, 81, 4991, 55427, 73, + 7005, 7141, 49595, 3427, 87, 332, 86, 80, 831, 71, 9701, 76, 44183, 295, 277, + 363, 64087, 327, 53016, 85, 392, 24672, 8892, 3223, 83, 361, 89573, 69, 1029, + 82, 12940, 352, 6200, 73, 42848, 5469, 31695, 275, 64003, 581, 2739, 710, 360, + 40078, 581, 21659, 29238, 29700, 74, 77827, 12591, 79, 309, 587, 728, 9326, + 8934, 53835, 325, 3059, 450, 83, 80, 8783, 402, 79, 10616, 363, 80, 11949, 1631, + 352, 383, 21206, 1546, 35531, 74, 42818, 1029, 43505, 1791, 6511, 70678, 4922, + 564, 56749, 89, 11328, 404, 3306, 76, 89, 657, 462, 447, 79, 447, 87, 86611, + 8088, 71, 10036, 675, 27093, 84, 49450, 6331, 80, 69, 89, 70, 10616, 352, 354, + 324, 89, 18577, 1302, 337, 2303, 36722, 1517, 15930, 69, 8510, 18854, 67, 50798, + 1804, 77, 24752, 18571, 15720, 675, 1546, 71, 4912, 346, 28990, 24752, 544, + 36238, 87, 2034, 6583, 276, 69, 73, 88, 303, 69, 5701, 2642, 25947, 377, 511, + 77, 9117, 89, 3306, 65, 60933, 73, 31268, 39342, 511, 5701, 84, 360, 29229, + 72, 324, 12825, 4937, 65, 2580, 16618, 87, 71, 25013, 2642, 398, 19072, 50798, + 10056, 86, 85, 10056, 6292, 72, 540, 83, 3906, 266, 13009, 634, 3172, 31881, + 347, 8480, 46224, 4484, 8783, 2130, 74, 911, 9988, 67, 1331, 1910, 22016, 29238, + 4239, 47689, 38844, 22650, 89, 544, 22619, 71, 4680, 44067, 822, 88, 85, 2438, + 86, 3647, 4680, 39911, 11328, 27093, 69, 318, 318, 1994, 7141, 876, 7513, 21659, + 2319, 2076, 11328, 2298, 4223, 784, 85, 89, 80, 11823, 418, 1065, 300, 55233, + 441, 80, 74, 87, 24752, 44970, 67039, 89, 81, 73, 267, 23913, 1007, 9700, 65, + 27093, 28927, 77827, 3576, 39172, 1146, 953, 75721, 86, 455, 87, 86, 18854, + 80, 8318, 540, 5418, 327, 321, 21796, 24752, 1885, 7737, 324, 66, 566, 84, 1739, + 1739, 22780, 16276, 84, 80, 285, 894, 86, 830, 42848, 418, 23913, 28073, 20811, + 86, 344, 3647, 86, 406, 2332, 70, 8910, 1551, 2308, 339, 14088, 69, 7488, 72, + 12583, 316, 15618, 65, 20926, 11032, 71, 12412, 50536, 44067, 13855, 728, 278, + 876, 581, 23913, 2727, 44970, 447, 8480, 540, 7047, 5132, 4341, 333, 27093, + 1320, 72801, 31721, 78930, 462, 309, 23576, 19639, 59270, 32962, 437, 648, 1247, + 1919, 9902, 1568, 4484, 983, 24752, 87, 418, 540, 89, 80, 34175, 14878, 12940, + 2201, 4748, 375, 65, 1919, 65023, 564, 43518, 1100, 89, 85, 10776, 8839, 72540, + 12498, 4215, 713, 36726, 73, 87, 10616, 77, 10616, 45015, 359, 65, 5298, 343, + 92216, 87, 85, 344, 86, 22650, 953, 23913, 70, 31764, 64305, 75, 876, 70, 31695, + 318, 876, 85, 28679, 
31268, 52971, 78498, 12940, 73873, 6194, 77, 3601, 84, + 2249, 75, 1551, 88, 38977, 3696, 936, 80, 70, 20975, 5010, 9081, 80, 764, 12184, + 333, 637, 18854, 87, 7521, 3919, 634, 78, 1540, 89, 1662, 83, 39911, 8088, 4317, + 329, 70, 3527, 40924, 74, 894, 1065, 18577, 74, 2641, 80, 336, 83, 1662, 59524, + 5104, 1517, 70, 2554, 4168, 85, 39911, 73, 796, 37406, 89, 11099, 31172, 80, + 17134, 84, 2005, 71, 28774, 2034, 15809, 1216, 69, 26397, 1100, 588, 301, 4777, + 831, 2985, 1029, 94908, 9081, 1609, 8050, 73873, 20926, 275, 9832, 64519, 86611, + 2933, 73, 1394, 7387, 818, 70678, 89, 1739, 21486, 73, 540, 86, 2067, 3306, + 16276, 7369, 43813, 4341, 641, 588, 35531, 74, 6358, 84, 427, 40078, 94346, + 8207, 10169, 89, 31695, 13223, 333, 355, 9110, 81656, 399, 79, 7907, 4021, 33721, + 8942, 52971, 1394, 80, 325, 85, 796, 14736, 637, 6098, 40101, 74, 383, 441, + 80, 17008, 437, 85, 22619, 80, 398, 87, 80, 73, 88, 5544, 8892, 24244, 652, + 89, 23048, 80, 6200, 65, 3841, 43189, 1029, 9991, 303, 1026, 81, 302, 69, 418, + 87, 566, 784, 9902, 370, 76, 32589, 1302, 48380, 501, 2192, 80, 60013, 2034, + 1065, 41922, 3251, 9490, 1320, 69, 451, 321, 1336, 361, 29700, 16400, 77, 8671, + 5544, 657, 81, 73, 911, 2312, 454, 1347, 13855, 2562, 80, 1310, 68054, 2642, + 4942, 8207, 86, 331, 67702, 6034, 92216, 14170, 58781, 1609, 19239, 74, 50798, + 377, 1310, 89, 564, 8050, 1919, 2857, 6043, 39911, 77, 9117, 1347, 76, 751, + 41129, 89, 8212, 68054, 35531, 70678, 2857, 80, 9902, 67800, 89, 406, 17851, + 89, 3013, 72, 360, 24752, 9902, 4341, 88, 830, 4385, 72, 1094, 1108, 437, 7217, + 70, 3933, 88, 80, 2580, 9509, 73, 29700, 641, 894, 5964, 89, 85, 894, 540, 8783, + 9490, 67039, 363, 81, 6511, 83, 80, 20811, 392, 3315, 2303, 2642, 70, 3306, + 2076, 76, 89, 30142, 38041, 4937, 11253, 73, 4086, 581, 8376, 982, 402, 2319, + 20306, 23457, 89, 44490, 675, 383, 22619, 80, 70, 80, 69, 3415, 68, 8635, 31172, + 86, 11032, 447, 67, 4835, 65, 26397, 1962, 65, 5469, 343, 1029, 66, 1677, 87, + 318, 86, 85, 1072, 39152, 1302, 1336, 80, 12591, 321, 7521, 42488, 87, 1546, + 17082, 17851, 418, 5972, 23361, 69, 38253, 66, 60013, 680, 73, 34485, 989, 18244, + 87, 398, 5298, 327, 1072, 3145, 355, 3178, 1739, 532, 20926, 81964, 22619, 32478, + 73, 30275, 392, 10616, 87, 73, 86, 89, 21206, 80, 943, 80, 49938, 3576, 8376, + 7717, 10784, 1105, 5481, 1247, 494, 86746, 9996, 55767, 17704, 75406, 28990, + 77, 62604, 5481, 13028, 710, 1273, 53835, 77, 39911, 6375, 15618, 919, 307, + 773, 85, 24752, 87, 5104, 454, 89, 35531, 8934, 13855, 451, 71, 2642, 347, 3178, + 261, 87, 371, 2727, 402, 67020, 8462, 1100, 85, 360, 376, 40924, 84, 20127, + 28679, 26047, 39152, 23866, 10616, 44183, 28927, 32589, 5104, 69, 1146, 45015, + 351, 13183, 708, 1247, 85, 1310, 98714, 2191, 69, 12407, 359, 544, 80, 74, 6870, + 84, 89835, 70, 8890, 352, 74901, 4385, 80, 3306, 86, 347, 86, 18244, 98493, + 3601, 316, 89, 501, 22650, 1273, 76, 24990, 511, 911, 77, 5544, 406, 32204, + 10616, 70, 376, 4744, 85, 3576, 84, 392, 71, 3178, 81, 20127, 3893, 80, 70, + 344, 87020, 303, 65, 66362, 41933, 45015, 71, 9110, 86, 383, 764, 75406, 2496, + 23913, 283, 80, 50139, 80, 56414, 581, 1302, 24990, 80, 7521, 77, 8480, 391, + 3315, 23866, 24752, 1347, 67665, 73, 18442, 85, 501, 74, 2641, 48836, 19260, + 588, 1662, 77827, 9451, 86, 540, 2319, 31881, 73, 6634, 564, 7513, 511, 1073, + 276, 79, 2933, 80, 89, 4171, 16276, 73, 3841, 89, 4937, 80, 7521, 566, 9794, + 6194, 80, 73, 10776, 14694, 84, 710, 10470, 67, 1302, 360, 73, 27093, 9490, + 1146, 36068, 9490, 76, 8671, 70, 60083, 333, 
3055, 10333, 797, 12883, 73, 89, + 3013, 87, 21557, 80, 18577, 70, 1609, 14088, 75, 1791, 283, 73, 519, 11032, + 1336, 723, 51998, 20127, 53289, 88, 402, 677, 418, 588, 85, 325, 12498, 96518, + 4239, 6358, 4680, 87, 6133, 73, 86, 8376, 774, 303, 1546, 21807, 8637, 4680, + 450, 447, 20127, 86, 47154, 11089, 74, 11949, 5979, 316, 73, 4086, 28774, 70, + 21796, 80, 77152, 3178, 9902, 7369, 83, 73, 2562, 26738, 86, 11160, 455, 93797, + 8839, 1923, 85, 4381, 80, 5964, 347, 869, 85, 20926, 85, 1394, 343, 86, 29700, + 6194, 21206, 267, 1031, 14736, 67, 906, 80, 49755, 20127, 604, 1347, 277, 24752, + 74, 4239, 454, 80, 1498, 73, 9112, 35401, 1302, 73873, 8910, 2026, 4086, 87, + 86, 4341, 36238, 73, 89, 337, 266, 894, 20975, 3013, 89, 80, 61057, 46640, 318, + 1546, 32962, 822, 261, 952, 283, 88, 70828, 85, 7047, 93797, 5531, 1146, 1656, + 89, 70, 89835, 2034, 641, 4341, 3423, 85, 1031, 10616, 70, 713, 66, 4744, 94356, + 28400, 85, 6292, 80, 295, 7737, 81, 45015, 1073, 86, 455, 2319, 1609, 65, 2329, + 552, 7513, 1791, 69, 73, 1216, 13423, 87, 70, 19456, 1336, 352, 13009, 89, 5010, + 78930, 86, 13584, 604, 41196, 47154, 21659, 82969, 5893, 21796, 89, 80, 1494, + 31019, 87, 19639, 87, 73, 16499, 73, 10470, 894, 2580, 20211, 454, 9117, 324, + 80, 73, 2005, 337, 5308, 258, 76, 89, 3933, 8207, 80, 2445, 77, 80, 8318, 352, + 80, 652, 11328, 35531, 80, 7369, 8671, 6194, 89, 275, 4168, 69, 89, 398, 343, + 75, 4341, 752, 301, 94055, 10462, 46640, 267, 51622, 360, 96518, 22119, 73, + 86, 677, 2329, 12940, 86, 12806, 7783, 49450, 18287, 74, 1631, 352, 86, 6194, + 3340, 75, 10616, 302, 73, 376, 31172, 9832, 2642, 22016, 5289, 31172, 56266, + 14170, 81, 1919, 87, 1026, 80, 2642, 267, 1494, 447, 85, 1813, 3601, 80, 8910, + 76, 1072, 64, 37135, 391, 10776, 1073, 21301, 45228, 85, 16400, 74, 939, 70, + 1100, 96518, 299, 80, 441, 5481, 94908, 18287, 939, 78, 15769, 79, 5308, 14878, + 86, 8318, 23913, 398, 327, 5641, 501, 318, 28679, 17851, 2034, 5531, 564, 40078, + 49755, 28679, 71, 1551, 87, 35478, 9902, 302, 21206, 5010, 75, 8658, 20824, + 76, 8671, 332, 15720, 418, 7047, 79, 1273, 3972, 9700, 67702, 983, 7387, 86, + 89, 81, 73, 36722, 73, 47689, 70, 713, 276, 81, 2465, 20990, 45015, 831, 2303, + 764, 267, 511, 31695, 21888, 316, 11949, 494, 87, 25105, 82969, 4223, 352, 65, + 2880, 9902, 87, 20824, 5308, 796, 96518, 10333, 71, 398, 86, 4355, 3524, 50798, + 23913, 89, 699, 2329, 47342, 10196, 266, 67, 402, 40078, 2249, 34946, 17043, + 80, 372, 66, 634, 7501, 41796, 87, 74, 64003, 53835, 70, 784, 49993, 3415, 327, + 77, 10036, 441, 67665, 81, 273, 501, 86, 13536, 86, 4168, 14170, 45015, 24244, + 2319, 21659, 1310, 88, 80, 4239, 70, 1331, 291, 89, 67039, 73, 51924, 14878, + 2784, 42528, 7141, 76, 16017, 71, 12328, 81, 21486, 79, 1031, 392, 4484, 588, + 58305, 901, 365, 329, 2249, 657, 1804, 447, 45015, 23866, 1729, 67, 4991, 1498, + 13536, 3978, 2005, 1336, 339, 85, 43210, 98714, 343, 92791, 74, 677, 76, 15618, + 876, 8462, 911, 14029, 9907, 70, 346, 3524, 89, 59064, 822, 40078, 2392, 25457, + 402, 9700, 81, 2554, 14966, 86, 16400, 89, 11328, 69, 4171, 79, 22537, 8480, + 18571, 44508, 5192, 710, 12812, 830, 501, 17704, 86, 12498, 2365, 19747, 1031, + 77, 38844, 80, 4080, 88, 8783, 2319, 1310, 70, 73, 57071, 1551, 94375, 89, 28990, + 89, 4175, 9081, 1056, 23866, 10784, 5192, 87, 4922, 64, 1658, 27093, 2034, 52877, + 27515, 4086, 81, 40617, 83079, 45015, 70, 80, 19239, 8088, 4942, 2933, 9832, + 359, 76, 19383, 9832, 86, 5192, 817, 73640, 45015, 38977, 324, 6713, 71, 1302, + 677, 35989, 9832, 266, 92110, 14736, 14029, 
8892, 73, 87, 71, 94908, 406, 49463, + 33721, 300, 70, 52097, 302, 12841, 2249, 8088, 85, 10470, 40078, 71, 2642, 74, + 67039, 13997, 648, 39911, 19555, 26738, 84, 580, 18854, 80, 82999, 333, 93000, + 2661, 540, 85, 6151, 677, 5746, 19456, 73, 85, 4193, 14606, 75, 303, 94468, + 85, 2676, 544, 81, 4922, 4239, 79, 450, 1029, 14694, 1273, 1494, 1072, 11823, + 91311, 80, 4942, 675, 675, 10462, 29229, 18854, 65019, 85, 78498, 509, 383, + 336, 41796, 20990, 373, 12825, 80, 1273, 76, 581, 267, 80, 47689, 5254, 87, + 14485, 53060, 9700, 352, 94817, 1658, 710, 96518, 764, 80, 82999, 88, 321, 1065, + 1072, 14029, 38844, 7521, 69, 50798, 16373, 73, 70, 9996, 77, 3368, 307, 5972, + 351, 1003, 88, 936, 351, 372, 391, 34175, 303, 1273, 398, 75, 447, 21963, 63643, + 5683, 4355, 1662, 23576, 93000, 34175, 89, 8510, 332, 74, 8462, 77, 351, 87, + 1609, 87, 86, 1725, 65, 511, 8207, 7387, 509, 73, 31695, 85, 1031, 80, 93797, + 89, 9832, 73, 26929, 13028, 23866, 25658, 87, 72231, 1073, 75, 13045, 579, 5308, + 71, 73873, 22119, 77, 752, 78, 762, 68054, 365, 683, 27780, 31019, 1336, 67, + 2332, 2739, 86, 859, 14878, 2076, 710, 9701, 5981, 2465, 87, 365, 9258, 277, + 58152, 321, 404, 51622, 300, 14029, 40078, 5289, 1302, 69, 73, 7141, 41796, + 1468, 7141, 677, 87, 2192, 318, 94023, 66, 664, 80, 604, 18854, 10630, 3716, + 455, 540, 36851, 88, 1073, 65, 4239, 15809, 26804, 764, 1394, 4991, 80, 88, + 73, 7864, 385, 82999, 268, 18287, 60933, 86, 1138, 67, 3746, 21297, 74, 456, + 4835, 820, 19555, 80, 2203, 22016, 6733, 62355, 76, 1216, 5972, 11305, 675, + 41796, 4239, 911, 332, 9832, 88, 347, 10497, 71, 4171, 1897, 2067, 49755, 20127, + 33863, 17528, 1286, 21659, 3423, 9117, 1073, 14878, 81, 1320, 64, 1540, 1030, + 327, 7513, 7387, 69, 89, 4378, 29684, 4937, 23361, 6091, 85, 3315, 31935, 6292, + 1813, 19456, 79, 9117, 796, 26929, 86, 9081, 4341, 68, 1302, 2302, 6194, 48380, + 86, 1568, 1919, 359, 337, 19747, 6200, 375, 65, 15618, 86, 564, 69, 1026, 73, + 87, 8674, 773, 19639, 86, 455, 2739, 1273, 67, 402, 2439, 90625, 2438, 66565, + 363, 579, 318, 47154, 2329, 372, 12825, 76, 1751, 76, 906, 34175, 69, 95233, + 1216, 76, 2302, 1336, 72, 592, 9700, 73873, 441, 68991, 13536, 66565, 4575, + 406, 89, 588, 80, 1073, 77, 3919, 93000, 5104, 1609, 3906, 87, 14694, 817, 33686, + 7513, 325, 25105, 8311, 87, 25457, 5104, 73, 89, 53060, 894, 70, 16276, 604, + 21659, 710, 2445, 69, 41460, 7910, 5104, 28679, 266, 38041, 6375, 26929, 71, + 14029, 45015, 71, 50813, 773, 87, 321, 85, 89, 275, 54964, 73, 38361, 344, 6043, + 4843, 939, 73, 16400, 1320, 23960, 73, 52926, 19654, 3251, 73, 9117, 25105, + 80, 76166, 637, 76, 38359, 398, 21659, 55282, 81, 18565, 1631, 73, 354, 1302, + 14606, 876, 44508, 9112, 74, 80, 73, 87, 80, 392, 31695, 291, 2067, 66, 8462, + 17134, 84, 5308, 1031, 79, 540, 316, 831, 80, 38361, 29238, 3145, 73873, 31695, + 581, 8747, 316, 339, 89, 784, 1100, 74, 75406, 1662, 81, 377, 81, 2174, 69, + 647, 7453, 73, 85, 27455, 80, 385, 89, 398, 1142, 75, 2554, 3418, 98667, 53289, + 73, 22619, 74, 8311, 1273, 54681, 1302, 2054, 10743, 74, 87, 70, 8998, 1394, + 28679, 57208, 20990, 983, 69, 3315, 53478, 266, 66, 86427, 80, 332, 77, 24752, + 65, 32345, 4151, 332, 87, 3306, 4681, 50798, 7501, 80, 13423, 28990, 74, 637, + 73873, 365, 86, 2249, 89, 85, 412, 86, 1072, 84, 3524, 27455, 4511, 10910, 21796, + 80, 33721, 588, 40429, 3576, 309, 74, 5289, 48872, 1494, 78498, 55506, 87, 35531, + 5972, 46341, 12825, 21620, 6511, 80, 38306, 17378, 354, 80, 69, 73, 85, 8587, + 80, 406, 747, 337, 51924, 85, 3423, 7047, 89, 27515, 1026, 85, 
1029, 83, 641, + 77, 5544, 45015, 437, 13652, 764, 447, 351, 1103, 6733, 540, 80, 377, 544, 540, + 74, 70391, 10776, 87, 5308, 939, 87, 339, 3178, 1791, 1551, 80574, 1394, 75, + 12407, 60435, 80, 677, 81, 15720, 50139, 3919, 78, 581, 80, 89, 73, 581, 19072, + 80, 74, 80, 73, 32868, 69, 652, 376, 582, 354, 73, 361, 785, 21963, 87, 321, + 713, 52926, 73, 18854, 83079, 1919, 501, 3251, 92216, 450, 4942, 74, 544, 4285, + 1055, 392, 27515, 1546, 82, 2302, 354, 87, 80, 1910, 86, 10776, 87, 5230, 53835, + 71, 31695, 337, 4912, 1517, 69858, 392, 3906, 71, 2329, 544, 620, 80, 47689, + 5241, 80, 73, 86, 9112, 72, 454, 12825, 77468, 18571, 1273, 66, 71690, 15809, + 344, 6331, 372, 28990, 2933, 88, 7783, 80, 25604, 86, 85, 8671, 1073, 9490, + 1729, 88, 354, 17704, 73, 38041, 1031, 2143, 49712, 73, 634, 76, 36463, 652, + 302, 83, 73, 87, 10616, 15618, 377, 88, 73, 17247, 283, 4484, 1029, 2005, 1910, + 2034, 31268, 668, 73, 48372, 5010, 376, 74, 14878, 81, 26397, 86, 675, 8783, + 2642, 70, 80, 343, 80, 4777, 295, 13584, 86, 5964, 7141, 71, 8458, 2303, 8212, + 94908, 5308, 71, 13423, 85, 89, 3306, 848, 1072, 344, 67665, 2329, 87, 450, + 2868, 1073, 74, 17156, 80, 4223, 89835, 89, 6634, 21046, 73, 14244, 6200, 89, + 14694, 263, 79, 1031, 10776, 65019, 1551, 406, 76, 6358, 8088, 318, 71, 1804, + 8883, 10910, 23001, 24012, 80, 73, 95398, 2034, 3906, 80, 12341, 14606, 87, + 391, 73, 2329, 485, 75406, 24990, 46640, 64087, 754, 822, 80, 70, 376, 269, + 87, 27515, 86, 5308, 50139, 1395, 402, 3013, 86, 1026, 16400, 67702, 1764, 4922, + 19644, 65, 4341, 8587, 89835, 511, 91485, 80, 69, 73, 19456, 73, 8462, 728, + 1094, 31268, 87, 71, 5981, 89, 80, 372, 9700, 26397, 73, 21620, 51622, 1026, + 4223, 87, 4175, 2332, 76, 17912, 69, 10176, 80, 38359, 2727, 34175, 80, 9991, + 6414, 19747, 333, 8255, 352, 418, 677, 60933, 346, 70391, 80, 9112, 351, 89, + 540, 267, 49463, 1551, 2562, 4341, 3893, 501, 73, 1658, 13045, 285, 69, 754, + 80, 5701, 71, 39911, 81, 73, 953, 83214, 8311, 73, 19555, 86, 9112, 392, 40078, + 3251, 52971, 87, 28679, 67, 2438, 80, 285, 7521, 50798, 383, 22119, 604, 80, + 7387, 339, 28927, 41817, 28990, 10516, 1658, 2302, 91650, 26618, 1729, 3013, + 641, 4991, 73, 2312, 12940, 24620, 36068, 51622, 73, 6634, 29224, 372, 88, 11949, + 295, 20211, 17134, 5298, 87, 437, 402, 1347, 939, 63643, 20926, 89, 7141, 80, + 1273, 36825, 8783, 73, 4912, 5972, 85, 33863, 869, 303, 2042, 74, 1072, 455, + 664, 1394, 19072, 8385, 86, 1631, 81, 35063, 73, 1609, 88, 10616, 67, 64087, + 8480, 292, 41196, 31019, 316, 70, 939, 45015, 309, 89, 18577, 2554, 73, 1897, + 41129, 333, 71, 3893, 18442, 24752, 29811, 65, 33863, 3696, 18571, 7129, 80, + 2303, 668, 911, 87, 10056, 95233, 10910, 1320, 18577, 16276, 24752, 5298, 86, + 89, 2042, 16731, 38359, 6870, 80, 20824, 1897, 80, 1347, 39911, 1517, 78498, + 796, 73, 21340, 86, 46224, 278, 1029, 82, 3251, 23866, 67702, 3251, 4659, 30506, + 1394, 398, 754, 80, 70, 1517, 82, 2933, 89, 43441, 5418, 67175, 1729, 28700, + 63848, 316, 273, 9117, 2739, 73, 1029, 72801, 3633, 73, 85, 8671, 316, 17156, + 12328, 12682, 4171, 42294, 5192, 73, 1216, 32204, 87, 70, 399, 21963, 89, 360, + 14736, 16400, 1813, 21206, 74, 427, 1494, 5104, 346, 444, 84, 5241, 23866, 2067, + 79, 10470, 11328, 74, 2465, 70, 73, 87, 2933, 88, 3696, 69, 9081, 17134, 1725, + 8212, 1031, 19866, 77, 3576, 7908, 31803, 773, 43686, 1498, 1658, 17156, 511, + 22619, 86, 7141, 2042, 77, 6194, 85, 6200, 1658, 5469, 2562, 84, 23361, 65, + 5104, 450, 67, 1725, 73, 11089, 67039, 343, 5469, 49755, 25658, 2465, 10470, + 73, 4838, 23866, 
74, 66867, 911, 4341, 1658, 27106, 352, 41796, 91998, 16514, + 1031, 65, 8536, 80, 969, 869, 38181, 12116, 87, 73, 1919, 359, 85, 5308, 75, + 29700, 70, 80, 2026, 2302, 69, 10784, 89, 1725, 86, 4991, 79, 27515, 71, 7369, + 69, 418, 62585, 65, 376, 77, 10910, 266, 8385, 575, 29700, 1517, 566, 11748, + 85, 42818, 4484, 2580, 69, 1494, 8671, 65019, 9258, 316, 1791, 9081, 575, 87, + 1026, 81, 620, 589, 80, 332, 7387, 24672, 73, 38361, 89, 70, 876, 89, 4239, + 21888, 83448, 45775, 85, 455, 5241, 67, 796, 42908, 64123, 76, 1073, 79, 564, + 18894, 2067, 1725, 65, 80, 24244, 375, 45015, 1331, 953, 13009, 89, 32962, 7501, + 70678, 2329, 77, 39911, 73, 21807, 31881, 648, 57932, 70, 292, 77, 80, 2438, + 29238, 10616, 22524, 285, 26397, 74, 80, 1302, 911, 1195, 86, 89, 4239, 79, + 2319, 89, 2303, 37406, 27476, 73, 13536, 80, 2176, 1944, 441, 31141, 69, 1336, + 15802, 73822, 21888, 1962, 91299, 32204, 376, 53060, 447, 5298, 85, 634, 359, + 752, 7521, 3919, 87, 1910, 2445, 49450, 73, 27093, 7737, 2689, 8088, 22016, + 86, 336, 16094, 73, 728, 18244, 87, 10056, 18287, 1302, 69, 587, 81, 73, 13075, + 67665, 3427, 675, 80723, 80, 52926, 73, 1897, 20990, 301, 70, 3906, 80, 3696, + 73, 566, 587, 8892, 80, 427, 29443, 70118, 69, 73, 87, 85, 637, 77, 3601, 37406, + 86, 9188, 2933, 80, 73, 398, 1919, 65, 8587, 316, 3423, 85, 20191, 6194, 346, + 1100, 12821, 450, 77, 455, 82969, 75406, 4080, 32280, 43776, 49755, 2580, 76, + 299, 23770, 5701, 5241, 65, 1031, 85, 6511, 675, 24672, 77, 80, 1662, 8587, + 1065, 4223, 566, 1030, 80, 2026, 82, 713, 24752, 455, 46246, 540, 365, 93797, + 41101, 17043, 460, 71, 91849, 4991, 327, 73, 87, 1336, 2727, 10616, 18244, 31019, + 316, 402, 42818, 7129, 587, 89, 31764, 7513, 3306, 21206, 383, 73, 85, 501, + 80, 69, 3251, 1546, 51924, 80, 1100, 2042, 46051, 86, 49595, 89, 299, 321, 38639, + 24752, 66, 1729, 939, 1302, 402, 76, 9490, 60933, 4843, 67, 447, 84, 455, 31268, + 22016, 80, 269, 97523, 455, 491, 77, 31881, 3368, 9701, 915, 65, 939, 8050, + 67, 3368, 81, 2727, 41129, 309, 944, 76, 876, 2026, 83, 33863, 2857, 2933, 71, + 2852, 2005, 4168, 73, 87, 86, 34855, 18854, 427, 75, 1662, 41625, 1897, 3035, + 43505, 3696, 70828, 73, 3415, 587, 77, 46390, 455, 50139, 23361, 70, 2034, 3180, + 309, 66, 88183, 347, 1791, 1126, 4341, 73, 18442, 85, 89, 5192, 1336, 50226, + 35531, 2465, 75, 5192, 372, 21963, 2642, 3251, 6870, 1551, 7501, 94055, 11328, + 1897, 434, 69, 89, 73, 1551, 80, 936, 73, 346, 1065, 73, 89, 1100, 2642, 3675, + 73, 1395, 352, 73, 4341, 45775, 347, 76, 728, 80, 906, 2319, 88, 327, 1764, + 351, 1055, 283, 88, 89835, 2026, 85, 89, 59125, 876, 76, 12940, 6780, 53146, + 71, 360, 65, 41670, 53289, 1751, 858, 15444, 339, 710, 87, 437, 89, 73, 11253, + 2329, 76, 15809, 327, 80, 2026, 764, 2312, 4991, 18244, 4317, 581, 77, 1331, + 65, 8910, 92216, 59448, 86, 3035, 73, 2857, 77, 3178, 664, 87, 46341, 70678, + 3647, 85, 42908, 11032, 587, 79, 346, 1411, 6511, 67702, 4341, 80, 78, 336, + 38361, 70678, 80, 7521, 80, 50139, 89, 27093, 604, 28990, 49461, 69, 3696, 69, + 3059, 1609, 44528, 12407, 9700, 87, 953, 7947, 87, 8510, 6511, 73, 70404, 65, + 2580, 80, 4086, 16115, 23866, 80, 1302, 29200, 73, 752, 87, 327, 86, 509, 86, + 3933, 321, 89, 454, 2465, 2249, 4680, 86, 1320, 31764, 31695, 73, 14736, 7783, + 87, 29684, 2562, 80, 70, 80, 10616, 7737, 377, 566, 329, 19285, 3906, 41922, + 1498, 35531, 89, 16017, 6634, 4511, 2465, 19239, 82969, 8839, 77, 564, 69, 1031, + 267, 73640, 87, 28679, 24990, 983, 1065, 7369, 89, 80, 344, 8462, 84, 648, 65, + 3415, 285, 44490, 75406, 7488, 14736, 67, 
337, 16276, 718, 29684, 89, 3316, + 73, 4010, 36068, 4341, 25299, 708, 71, 4937, 69, 73, 22619, 2465, 876, 8587, + 74, 49755, 80, 1725, 86, 7501, 1026, 80, 4341, 85, 1029, 6885, 266, 1551, 564, + 87, 8510, 80, 8207, 2034, 2706, 50039, 41922, 80, 12591, 321, 63643, 85, 540, + 86, 89, 598, 79, 5697, 762, 23866, 89, 355, 14029, 19456, 17063, 20372, 764, + 31764, 73873, 27495, 648, 575, 81, 28927, 74, 2641, 8207, 89, 73, 56871, 1546, + 392, 67, 5981, 80, 5298, 4239, 35531, 89, 81, 360, 86, 7911, 511, 8998, 44183, + 266, 32204, 52719, 418, 86, 1725, 87, 39385, 73, 376, 9117, 347, 79, 1813, 869, + 74, 939, 87, 9779, 657, 5308, 82, 1609, 80, 15720, 73, 3415, 1065, 20811, 906, + 74, 87, 797, 69, 32589, 1302, 42051, 333, 27455, 1336, 13351, 73, 36825, 343, + 6780, 80, 292, 31019, 267, 3601, 5289, 1631, 566, 87, 35401, 796, 70, 8910, + 15809, 50798, 1517, 41796, 3178, 81, 8671, 5010, 2344, 86, 455, 30863, 318, + 3059, 11823, 73, 8376, 62220, 94908, 26738, 86, 4835, 1517, 87, 53016, 17063, + 85, 6200, 5981, 77, 14468, 1725, 10056, 8050, 94346, 2329, 73, 4175, 79, 24752, + 87, 5981, 2201, 447, 2642, 86, 9218, 20582, 77, 10776, 50139, 5346, 79, 361, + 1146, 1247, 29238, 1065, 26929, 97523, 89, 7369, 21620, 754, 68, 283, 50798, + 4744, 86, 346, 31422, 88, 352, 26474, 359, 16276, 71, 1302, 648, 1216, 89, 13752, + 604, 79, 911, 87, 84, 2826, 12825, 351, 876, 29443, 74, 6870, 80, 82969, 2154, + 65, 3696, 587, 40213, 12328, 588, 10784, 85, 2411, 9907, 6358, 85, 19747, 332, + 773, 80, 20553, 587, 76, 370, 547, 547, 4021, 79, 11032, 1725, 291, 77, 32204, + 54964, 370, 73370, 39911, 2438, 15618, 44183, 4223, 87, 2249, 7453, 89, 953, + 1347, 17704, 70, 7141, 88, 31278, 604, 67, 86682, 5721, 637, 27093, 50536, 911, + 12328, 86746, 9810, 73, 88, 69, 894, 7737, 53016, 73, 51622, 40375, 66353, 710, + 71, 4719, 1100, 24672, 32962, 21206, 61057, 764, 73, 50536, 2067, 5683, 47617, + 18244, 71, 4080, 1029, 17319, 1494, 16618, 17156, 80, 333, 363, 56871, 12328, + 75, 12591, 19239, 77, 19639, 89835, 5308, 5532, 73873, 31019, 361, 70, 32589, + 375, 5481, 344, 27515, 71, 5794, 47875, 385, 25299, 68151, 566, 14046, 44183, + 39911, 79, 391, 35478, 1347, 73, 3315, 21963, 74882, 89, 677, 1320, 351, 86, + 41796, 89, 63502, 87, 94908, 983, 51622, 17008, 8088, 5981, 77, 2034, 75, 1030, + 2701, 303, 70, 911, 17134, 89, 53835, 77, 1643, 64, 352, 83, 80, 74, 69057, + 9991, 16400, 31764, 73, 22619, 6331, 80, 22016, 87, 906, 604, 26397, 3368, 68, + 427, 3919, 73, 20824, 5192, 392, 80723, 88, 14736, 26474, 86, 85, 8212, 20191, + 80, 318, 5701, 39911, 8910, 18664, 86, 708, 71, 5104, 953, 80, 18442, 347, 11089, + 4341, 365, 6489, 80, 88, 9907, 2396, 70, 6358, 6331, 3172, 406, 4469, 3251, + 5010, 57504, 65, 80, 1320, 76, 1310, 37541, 953, 71, 7369, 278, 7488, 45015, + 798, 318, 15930, 29700, 57671, 343, 32962, 89, 23361, 45062, 391, 12940, 96295, + 35203, 85, 1804, 23001, 50536, 2332, 65, 35401, 6200, 71, 652, 13752, 4010, + 5701, 85, 80, 8207, 89, 4239, 89, 48380, 648, 28679, 6194, 43210, 29684, 2562, + 74, 89, 7369, 4080, 41922, 80, 418, 70, 375, 65, 402, 76, 1007, 454, 74, 1065, + 81, 2005, 71, 11823, 588, 80, 833, 24672, 331, 85, 1331, 9832, 81, 566, 652, + 80, 5981, 1273, 71, 447, 4341, 70, 1494, 267, 83, 73, 32547, 710, 4912, 8512, + 24620, 89, 9700, 6200, 302, 258, 18577, 14833, 501, 73, 1347, 18577, 1517, 17008, + 587, 1336, 6194, 13545, 69, 332, 406, 89, 540, 911, 20824, 998, 2933, 30403, + 68003, 62497, 797, 98418, 70014, 15618, 74, 14029, 89, 7521, 65, 7488, 5192, + 85, 1072, 30863, 5469, 1146, 391, 454, 85, 2629, 80, 2176, 89, 
27278, 509, 88, + 30506, 84, 1739, 87, 788, 1517, 77, 2438, 4239, 9451, 648, 830, 86, 346, 587, + 89, 332, 6438, 93000, 26397, 32589, 3516, 31695, 363, 85, 12328, 74, 3315, 337, + 1609, 327, 876, 352, 613, 8826, 2642, 1739, 66, 44970, 73, 85, 22619, 85, 1331, + 76, 634, 29700, 1031, 3576, 88, 6043, 80, 395, 5469, 69, 347, 87, 710, 80, 74, + 70391, 467, 5241, 370, 637, 69, 339, 3524, 44490, 73, 441, 36107, 4484, 73, + 70, 2642, 82999, 87, 6885, 1609, 17319, 86, 32478, 80, 64087, 7513, 4168, 9701, + 351, 6375, 86, 21206, 5469, 355, 1031, 67, 4171, 7737, 4080, 9701, 1394, 2642, + 2034, 8536, 324, 87, 32589, 42888, 15809, 351, 30506, 13136, 62327, 73, 983, + 2042, 52097, 20898, 16618, 59524, 796, 376, 9996, 3013, 38984, 4171, 87, 86, + 9991, 85, 89, 2249, 1286, 22119, 540, 2465, 14088, 20127, 89, 7369, 6511, 80, + 42818, 28990, 65, 80, 566, 18571, 85, 588, 39911, 2005, 372, 97523, 38844, 20926, + 80, 18894, 64087, 14736, 1494, 86, 1494, 7521, 65, 5104, 73, 70, 27093, 84, + 564, 20926, 74, 6194, 385, 80, 3919, 764, 87, 80, 491, 53060, 830, 822, 45015, + 89, 3306, 290, 37406, 675, 84, 7488, 71, 1331, 30407, 1310, 1026, 1026, 5469, + 894, 3933, 88, 634, 87, 14736, 31881, 89, 73005, 363, 67, 4223, 3316, 75, 8510, + 80, 18287, 911, 70, 582, 3178, 80897, 3418, 87, 2329, 86, 31019, 17008, 2026, + 74, 14170, 3893, 86, 355, 11377, 4010, 5964, 74, 80, 89, 359, 1804, 44490, 73, + 11949, 26205, 343, 89, 25299, 4086, 38041, 1725, 86, 377, 86, 332, 939, 339, + 36068, 67039, 333, 2862, 6370, 13536, 3524, 263, 456, 18854, 65116, 74, 354, + 4080, 33721, 86, 1764, 359, 6194, 269, 450, 66, 3055, 6672, 73, 3368, 18499, + 85, 2857, 69, 73, 10630, 85, 564, 85, 1729, 5972, 80, 6634, 87183, 5308, 65, + 19747, 74, 1394, 77, 2933, 23361, 953, 2933, 85, 17008, 300, 73, 354, 8207, + 52360, 2303, 604, 10470, 73078, 1639, 17063, 89, 652, 18287, 391, 66, 564, 45015, + 86746, 5469, 8503, 87, 8050, 70, 77827, 45015, 5469, 53289, 4922, 69, 4010, + 404, 1729, 447, 675, 333, 13536, 292, 86459, 337, 1336, 89, 8480, 6511, 94908, + 2249, 540, 876, 73, 784, 754, 1662, 69, 7488, 44970, 9197, 278, 1273, 73, 1910, + 363, 9902, 95780, 14606, 80, 754, 69, 729, 7141, 5192, 21888, 283, 1289, 1662, + 805, 66, 634, 6780, 352, 79, 1609, 69, 1498, 31019, 375, 4381, 89835, 12825, + 73, 4744, 78930, 318, 13423, 24990, 370, 19639, 73, 55233, 1394, 3427, 41796, + 69, 7387, 70, 52513, 2319, 87, 2445, 86, 347, 31695, 60013, 652, 3933, 80, 8892, + 5289, 21807, 657, 54532, 81, 20553, 28679, 9991, 86, 8318, 2005, 82999, 604, + 49712, 3368, 406, 7521, 16017, 728, 1331, 581, 7513, 73, 18894, 87, 75406, 80, + 89, 85, 2642, 5746, 7141, 88, 44490, 339, 1494, 27515, 23361, 14773, 31881, + 10916, 32962, 295, 9832, 89, 1517, 73, 85, 80, 25604, 21796, 321, 81, 8671, + 894, 23361, 43505, 71, 73, 953, 41196, 2445, 318, 16618, 11805, 20127, 35940, + 372, 80, 2303, 85, 2445, 77, 45015, 71, 2438, 80, 51622, 344, 87, 4223, 86, + 16417, 34263, 80, 32204, 73, 88, 347, 48372, 3893, 39152, 399, 96518, 1072, + 299, 3576, 69, 98347, 6100, 22016, 15618, 16915, 8462, 73873, 17134, 77, 406, + 86, 96250, 344, 65, 764, 89, 21206, 7044, 78, 1662, 1727, 80, 30506, 43319, + 830, 2365, 3919, 29238, 73, 7783, 88, 1551, 69, 1146, 383, 641, 3172, 87, 278, + 5104, 258, 81, 73, 333, 6043, 73, 1631, 35531, 3315, 392, 9902, 80, 73, 27515, + 9081, 3919, 316, 581, 2203, 74, 398, 370, 17063, 42050, 2580, 73, 5298, 652, + 87, 71, 52097, 2465, 8890, 86, 4223, 8212, 97816, 1498, 346, 70, 73, 2580, 18854, + 78604, 1310, 84, 1512, 25457, 89, 1026, 8462, 2329, 88, 2396, 68, 1263, 75, + 3933, 
75092, 8587, 3826, 80, 437, 4680, 81, 3933, 3988, 936, 10644, 70, 564, + 36722, 572, 8910, 710, 12214, 31019, 87, 3655, 796, 65, 80, 73, 70, 73, 267, + 344, 80, 3893, 327, 70, 31764, 1910, 36851, 2249, 604, 4991, 544, 70, 5298, + 90173, 953, 9991, 88, 60933, 2067, 5972, 42908, 604, 87, 10776, 21659, 9334, + 22619, 1394, 31268, 1310, 55895, 3576, 4991, 14122, 72, 801, 4239, 427, 50798, + 796, 40078, 74, 2580, 88, 1910, 74, 343, 354, 80, 444, 16373, 89, 73, 11328, + 28927, 73, 67910, 5230, 76, 10910, 22524, 44490, 99692, 3159, 14088, 5241, 80, + 1100, 7737, 74901, 4942, 11851, 10470, 7737, 2203, 86, 85, 80, 73, 87, 80, 418, + 85, 4922, 77, 10784, 818, 295, 35531, 89, 9700, 352, 87, 86, 292, 49755, 3601, + 29238, 652, 604, 36851, 4680, 78, 343, 36825, 36068, 80, 1336, 1729, 83, 58631, + 1302, 80, 295, 80, 73, 344, 69, 22016, 86, 657, 3368, 85, 25299, 53470, 454, + 8910, 80, 402, 10776, 79, 1662, 2203, 3919, 65, 5972, 35531, 789, 48380, 16915, + 69, 2329, 2329, 73, 2629, 12825, 713, 1030, 70391, 3646, 2470, 32547, 21878, + 5697, 80, 89, 22524, 370, 12498, 953, 67, 1302, 3059, 953, 1029, 6455, 2580, + 73, 2312, 21888, 80, 1320, 1517, 81, 713, 73, 8998, 8998, 22580, 89, 64145, + 73, 418, 24752, 318, 83, 27455, 80, 70, 266, 77, 80, 3172, 3458, 797, 9810, + 87, 2201, 2176, 2249, 1347, 87, 3251, 89, 6634, 71, 3423, 89, 80, 652, 95589, + 9490, 332, 10470, 8480, 21796, 46640, 9902, 35278, 12116, 67, 5972, 76, 4625, + 73, 63643, 1631, 509, 73, 36068, 41287, 2850, 20850, 581, 2203, 4777, 372, 80546, + 19456, 544, 81, 544, 7488, 77, 773, 76, 44906, 352, 5192, 89, 31019, 9117, 894, + 32589, 2347, 65, 1130, 58153, 370, 31268, 65, 418, 89, 16276, 1029, 68054, 2701, + 69, 295, 85, 28990, 80, 65019, 365, 28679, 76, 7387, 85, 1739, 46483, 72, 414, + 39911, 24990, 2826, 797, 4484, 60933, 86, 2042, 818, 392, 564, 73873, 48642, + 3919, 31881, 29238, 352, 24672, 77, 3906, 4922, 65, 24752, 5972, 3418, 637, + 35531, 42818, 4777, 327, 43505, 454, 70, 1804, 9996, 50798, 15720, 911, 1658, + 70, 1302, 307, 3520, 876, 437, 5964, 86, 3696, 51924, 31695, 652, 13856, 300, + 4168, 73, 2438, 80, 1494, 73, 89, 73, 462, 85407, 7369, 21206, 75, 11823, 437, + 19555, 17063, 74, 2635, 31695, 74, 9832, 67, 321, 73, 818, 6043, 16944, 84001, + 73, 5241, 49463, 89, 24738, 10630, 3601, 385, 28927, 80, 2933, 25105, 73, 939, + 13752, 73, 86, 4484, 1804, 10470, 5683, 15479, 70391, 55334, 80, 69, 3858, 67, + 427, 344, 309, 26397, 68054, 9092, 80, 56871, 20926, 93797, 70, 73, 87, 1149, + 3172, 447, 14088, 2319, 69, 8510, 73640, 43505, 404, 3576, 38149, 509, 564, + 325, 21796, 68054, 4223, 303, 15930, 385, 89, 42908, 1609, 65, 27093, 10616, + 17156, 12825, 2642, 32589, 333, 363, 15720, 89, 80, 321, 85, 267, 89, 22580, + 1517, 635, 1791, 52971, 7453, 19275, 483, 307, 1302, 64, 940, 74, 1201, 70, + 754, 83079, 1919, 9700, 71, 2580, 302, 77, 3893, 4168, 32204, 74, 18442, 73, + 6511, 418, 10188, 906, 2249, 69, 28679, 52926, 7488, 80, 18571, 89, 14694, 66, + 6194, 7387, 86746, 4922, 83, 6410, 49755, 32893, 73, 3433, 64, 2176, 71, 34175, + 85, 998, 80, 3647, 89, 85, 9832, 80, 261, 80, 38361, 4680, 2701, 86611, 1056, + 72, 9490, 80, 3601, 89, 7513, 355, 383, 10616, 11032, 87, 1065, 76, 89649, 67800, + 1247, 74, 87, 20211, 8619, 20811, 86, 830, 65, 81913, 80, 26397, 74, 7737, 333, + 8088, 13028, 24037, 77827, 63502, 21486, 581, 5981, 3423, 55233, 321, 63643, + 85, 89, 3601, 86, 89, 1216, 86, 5981, 33863, 2249, 5308, 23707, 5544, 38977, + 723, 7141, 40924, 21206, 38844, 9110, 85, 581, 18854, 72455, 344, 9996, 72245, + 2249, 87, 49463, 3524, 87, 3696, 
80, 911, 2562, 83, 40016, 80, 7513, 1498, 87, + 4937, 73, 70, 73, 10776, 4922, 1498, 23866, 50536, 20218, 587, 18287, 85, 302, + 3322, 14694, 10033, 24468, 648, 65, 21620, 4341, 2312, 327, 73, 18894, 8910, + 74, 7737, 3251, 831, 73, 85, 1739, 2933, 85, 2302, 664, 86, 89, 85, 406, 85, + 339, 74, 9451, 76, 10776, 87, 80, 31695, 316, 10616, 81, 16400, 548, 69, 351, + 72863, 31695, 80, 376, 17008, 2910, 587, 5893, 648, 269, 12828, 56871, 39911, + 7387, 17553, 5289, 39911, 4341, 2249, 6489, 728, 4835, 74, 37955, 86, 536, 564, + 45015, 65, 4086, 73, 10630, 325, 331, 301, 983, 74, 383, 71, 385, 2203, 77, + 876, 911, 5972, 89, 13752, 24990, 70678, 708, 70494, 89, 4835, 42818, 5683, + 73, 89, 652, 303, 4937, 86, 12825, 47689, 953, 88, 80, 64087, 4168, 25105, 89, + 70, 652, 74, 728, 359, 61345, 5683, 74, 3545, 2026, 79936, 11513, 28546, 831, + 1073, 20990, 85, 1394, 76, 21659, 372, 38844, 6200, 27093, 13855, 8998, 2203, + 89, 451, 953, 13423, 45015, 23760, 58153, 911, 82, 98347, 7387, 73, 52926, 9907, + 73, 88, 6703, 575, 4680, 88, 3841, 713, 437, 73, 59524, 1609, 43505, 6358, 93797, + 1073, 276, 12940, 31764, 7513, 18571, 16276, 53191, 915, 88, 41129, 818, 343, + 1910, 31268, 3601, 73, 1739, 80, 370, 303, 38181, 69, 447, 74, 1072, 575, 4484, + 8311, 1026, 88, 5308, 675, 86, 28990, 641, 92216, 88, 80, 70, 25457, 73, 70, + 5972, 9786, 80, 88, 509, 9991, 89, 5298, 450, 74, 1919, 372, 9092, 566, 939, + 77, 10176, 52926, 80, 69, 34750, 398, 65, 80, 21963, 302, 67, 6723, 569, 86, + 80897, 86, 12328, 73, 88, 80, 88, 85, 4937, 75, 375, 65116, 86, 316, 27780, + 71, 73, 70, 73, 88, 7737, 307, 377, 14088, 53060, 9810, 87, 74, 50039, 36851, + 86, 869, 42848, 23866, 77, 80, 337, 32962, 69, 6194, 17063, 67702, 329, 5964, + 911, 86746, 74, 5964, 40637, 80, 1347, 501, 47154, 331, 85, 14029, 8088, 85, + 3852, 634, 560, 39851, 1662, 14878, 40078, 764, 63913, 10784, 65, 7783, 404, + 2034, 35478, 391, 44508, 3696, 86, 3059, 85, 31695, 2067, 55911, 18244, 35531, + 80, 295, 3906, 7453, 269, 10910, 86, 16400, 38844, 869, 23361, 10056, 92216, + 18244, 2411, 544, 2857, 93797, 86, 316, 86, 383, 18287, 1026, 822, 32204, 87, + 818, 80, 7387, 575, 8892, 81, 5697, 1759, 65, 7737, 66, 3251, 12498, 437, 73, + 2302, 303, 88, 32478, 86, 80, 47689, 10056, 7521, 69, 344, 96518, 509, 796, + 87, 73, 89, 70, 73, 70, 19239, 344, 53289, 4657, 80, 634, 77, 6802, 582, 67139, + 728, 89, 59367, 3893, 27455, 70678, 501, 14606, 80, 15618, 360, 8088, 18442, + 455, 21888, 58305, 347, 89, 347, 74, 7047, 72, 292, 906, 78, 581, 14088, 360, + 375, 73, 27476, 20127, 764, 71, 28679, 936, 80, 89, 36825, 80, 88, 1729, 983, + 1065, 37406, 97523, 4680, 1897, 1347, 87, 89835, 31881, 70, 355, 55233, 22650, + 7349, 9081, 267, 18854, 40952, 337, 14736, 5964, 74, 60933, 375, 40924, 10616, + 70, 3841, 337, 73, 85, 33367, 31764, 677, 80, 44183, 372, 71, 22873, 637, 88, + 80, 7488, 66, 2249, 70, 509, 94055, 19639, 81, 713, 21486, 85, 4777, 93797, + 1273, 18287, 80, 73, 2438, 38359, 3858, 9991, 9081, 89687, 85, 20824, 87, 41622, + 22016, 392, 797, 17008, 18571, 27515, 73, 82553, 86, 621, 19260, 1146, 89, 29684, + 324, 71, 48233, 80, 2067, 28546, 8998, 68467, 540, 6358, 73, 86, 2067, 87, 14878, + 784, 1791, 11032, 56871, 318, 1551, 4991, 63345, 796, 26397, 1100, 77, 324, + 89, 4922, 76, 754, 80, 12591, 533, 2026, 84, 1498, 1804, 73, 86, 418, 654, 74, + 8212, 675, 6200, 1320, 73, 392, 3251, 6375, 1756, 88, 351, 67, 22650, 73, 454, + 69, 634, 87, 6511, 34175, 80, 3172, 9112, 85, 15464, 799, 68, 21963, 38359, + 23797, 74, 6489, 70678, 19456, 71, 42818, 89, 553, 22119, 
66867, 5469, 1764, + 31697, 953, 75, 462, 25105, 488, 65, 27322, 73, 67702, 25299, 80, 4010, 92216, + 4010, 74, 1910, 9907, 3172, 42528, 352, 302, 273, 75, 302, 68, 2113, 39911, + 76, 1625, 17851, 80, 4625, 32752, 784, 80, 637, 88, 1336, 6091, 40078, 80, 52926, + 25457, 418, 70, 4777, 92562, 11328, 11949, 46390, 10056, 74, 380, 73, 89, 641, + 87, 71, 728, 266, 5972, 1302, 70, 19747, 67, 796, 327, 70, 1100, 4748, 266, + 34175, 450, 361, 85, 8480, 32204, 69, 604, 1791, 447, 544, 85, 267, 437, 52971, + 876, 76, 12821, 647, 80, 61828, 2580, 894, 87, 40078, 8480, 325, 14736, 63502, + 46640, 544, 1331, 3172, 85260, 894, 69, 1302, 89, 728, 86, 85, 1073, 377, 8671, + 19456, 80, 784, 87, 84, 77827, 713, 94869, 8998, 11949, 1729, 637, 21620, 84712, + 66, 7387, 6043, 84, 329, 89, 309, 2201, 1498, 55233, 2332, 5192, 89, 38977, + 566, 27515, 80, 69, 677, 25105, 762, 73, 70, 74, 677, 1247, 71, 25457, 80, 78, + 300, 1725, 45228, 85, 6511, 74, 637, 45015, 2465, 983, 64087, 1302, 713, 11377, + 40924, 89, 46341, 983, 85, 41129, 1791, 7141, 41796, 969, 14898, 73, 10056, + 86, 511, 14736, 70678, 4086, 604, 71, 302, 78, 1662, 65, 5010, 79, 540, 2465, + 7910, 333, 87, 4010, 86, 5683, 85, 406, 876, 20990, 11328, 73, 637, 81, 418, + 295, 295, 86, 45015, 28774, 8892, 80, 9700, 53835, 25457, 13423, 42908, 77576, + 797, 85, 657, 9701, 96819, 2438, 70, 59194, 73, 441, 52971, 17134, 86, 40153, + 1494, 10056, 87, 73, 4937, 86, 375, 1804, 75, 90377, 73, 57504, 1791, 1897, + 23866, 273, 3036, 292, 402, 21963, 983, 38977, 295, 4080, 80, 29224, 89, 752, + 74, 80, 830, 8783, 2933, 45015, 7369, 77827, 1395, 73, 1394, 1394, 67, 5010, + 77, 10910, 2933, 15618, 19456, 32962, 85, 5972, 88, 40078, 8890, 80, 12654, + 12498, 80, 3306, 258, 71027, 13066, 85, 50798, 80, 939, 2302, 22374, 19239, + 1302, 87, 73, 3418, 4010, 89, 501, 92180, 73, 63643, 321, 93213, 84, 21659, + 303, 81, 73, 2438, 83227, 86, 8998, 65, 458, 1302, 14736, 45015, 20926, 5964, + 32204, 784, 25013, 80, 4239, 81, 747, 21659, 652, 604, 30506, 7387, 710, 9490, + 1494, 89, 1336, 73, 2933, 88, 15278, 2580, 1100, 275, 8658, 70, 74, 1739, 437, + 27752, 87, 73, 41139, 74, 5759, 7369, 3935, 12328, 17270, 354, 85, 14720, 292, + 9117, 31268, 2192, 88, 45854, 376, 764, 6511, 1030, 24470, 51622, 6358, 23866, + 10616, 3919, 70, 5010, 67800, 980, 21888, 581, 77, 4239, 375, 295, 852, 74, + 13183, 80, 11160, 77, 2312, 89, 3423, 65, 3427, 7453, 80, 74, 20176, 8747, 28679, + 80, 1625, 71, 1146, 6539, 78, 648, 79, 1331, 85, 16400, 69, 11949, 80, 11251, + 6043, 797, 652, 11032, 78930, 332, 74, 89, 392, 59524, 85, 267, 80, 3368, 24041, + 83470, 796, 41922, 292, 86, 2668, 82, 9902, 689, 1900, 80546, 19555, 77, 39911, + 81, 9786, 2319, 89, 745, 5701, 78, 336, 9907, 70, 3601, 88, 85, 5298, 80, 9672, + 13846, 70, 329, 49755, 375, 85, 3013, 31881, 11377, 82999, 637, 4341, 398, 88, + 88548, 16400, 11377, 39152, 80, 1631, 45015, 6358, 85, 8998, 58305, 652, 78930, + 69, 73, 830, 70, 74, 3368, 657, 9490, 89, 752, 69, 26235, 86, 652, 3306, 72801, + 73, 2312, 5544, 69, 752, 20211, 392, 50397, 3601, 773, 3368, 43389, 1394, 1910, + 544, 8910, 81, 64087, 331, 85, 23361, 277, 2303, 332, 80, 73640, 2465, 1919, + 67039, 74083, 604, 1791, 13536, 74, 14088, 587, 1195, 269, 1751, 42528, 80, + 35401, 89, 3368, 94908, 728, 85, 89, 46224, 447, 87, 18442, 4341, 78604, 4922, + 12825, 331, 283, 309, 85, 12184, 2303, 14088, 406, 634, 346, 88, 31764, 26397, + 3368, 60013, 713, 70, 1739, 87, 587, 1029, 2933, 78, 42930, 2003, 86, 48380, + 71, 17008, 5683, 24470, 8998, 74, 80, 344, 5010, 93000, 501, 40762, 4922, 
66565, + 80, 344, 76, 575, 2042, 76, 1026, 509, 73, 1804, 939, 7047, 5010, 92238, 292, + 73, 354, 84, 70391, 5972, 88, 846, 2465, 37406, 89, 14088, 100120, 14878, 20990, + 564, 31147, 983, 69, 664, 31019, 11823, 3418, 14170, 392, 49595, 7453, 40078, + 8998, 36851, 80, 38359, 20553, 67039, 1662, 14878, 80, 44067, 2423, 316, 80, + 13634, 24672, 66, 1029, 79, 3696, 24990, 80, 300, 17008, 80, 70, 901, 764, 1336, + 76, 20926, 74, 11089, 23361, 14029, 454, 1029, 637, 18571, 85, 9081, 87, 86611, + 509, 953, 80, 36825, 80, 354, 89, 7387, 44490, 4239, 7521, 77, 1910, 9907, 654, + 42050, 1029, 52000, 65, 1791, 73, 301, 70, 581, 88, 73, 1517, 8480, 73, 635, + 346, 20990, 24244, 85, 80, 74, 89, 16276, 93797, 86, 953, 87, 92241, 677, 85, + 36851, 998, 2580, 7488, 87, 44508, 5490, 14029, 85, 26738, 10033, 84, 7387, + 76, 2562, 66565, 728, 75, 30728, 5964, 74, 4239, 78, 292, 80, 88, 80, 4942, + 24672, 19239, 1146, 1394, 75406, 73, 89, 1302, 77, 5104, 9864, 87, 354, 53450, + 80, 73, 1247, 7047, 70, 2034, 77, 70391, 78498, 370, 6634, 20920, 9117, 79, + 10470, 5118, 9490, 392, 63643, 89, 540, 67, 4355, 80, 634, 85, 23361, 92715, + 2580, 85393, 80, 70, 32752, 46640, 2554, 2826, 4171, 4239, 12940, 2034, 88, + 9112, 88, 18126, 1091, 78, 1100, 377, 93797, 13045, 365, 23361, 71, 35035, 32547, + 13092, 85514, 1494, 73, 2727, 85, 14485, 87, 74, 1031, 7717, 80, 10333, 70, + 80, 23001, 74, 398, 87, 1302, 64123, 86, 2034, 5308, 85, 42818, 303, 5923, 88, + 5964, 7387, 13009, 51924, 87, 710, 69057, 325, 24752, 44508, 16510, 4937, 35531, + 9832, 36825, 1336, 339, 69, 66945, 73, 1247, 86746, 89, 5192, 71, 25299, 455, + 77, 455, 3315, 402, 1751, 14606, 819, 9117, 731, 30407, 28679, 2739, 2130, 331, + 4719, 92238, 79, 2249, 1804, 75, 2329, 575, 6043, 84, 327, 75384, 5104, 299, + 5717, 81, 466, 4752, 86, 89, 80, 1302, 652, 65, 4922, 544, 7141, 87, 46790, + 321, 32868, 69, 6511, 88, 911, 466, 75406, 3524, 444, 2933, 71, 29652, 20990, + 69, 7369, 17134, 69, 451, 372, 318, 953, 87, 501, 1302, 1804, 76, 365, 5972, + 2249, 28679, 80, 14088, 1029, 28990, 372, 100111, 6634, 63813, 2329, 2329, 331, + 27520, 80, 347, 8462, 46341, 44490, 575, 5010, 24470, 75, 3415, 3524, 454, 4381, + 566, 713, 4458, 64, 2319, 74, 3919, 86, 1026, 75, 11823, 89, 634, 14088, 69, + 1247, 88, 292, 321, 302, 80, 3988, 6634, 73, 41196, 2191, 88, 69, 7387, 1739, + 74, 27455, 4010, 84, 2249, 76, 2026, 85514, 752, 17008, 86, 329, 4484, 278, + 3172, 25100, 43505, 867, 81, 939, 27515, 64381, 70, 80, 22016, 86, 10776, 89, + 4908, 89, 70, 14092, 830, 25457, 427, 754, 75406, 20218, 1073, 5230, 85, 377, + 74, 4080, 1233, 19639, 12498, 39911, 2629, 12158, 13536, 582, 14170, 884, 85, + 38844, 79224, 4168, 87, 71, 2249, 73, 70, 80, 70, 19072, 953, 56029, 2641, 69, + 73, 2465, 36851, 73, 3059, 17008, 20824, 911, 80, 69417, 18442, 87, 51071, 360, + 89, 8480, 268, 75, 3906, 89835, 1073, 75, 9117, 1919, 88, 360, 96518, 8462, + 28679, 29700, 70, 1302, 65, 7488, 4239, 65116, 74, 21088, 2465, 10784, 85, 936, + 38359, 3647, 27278, 89, 8480, 8480, 3906, 1631, 1494, 295, 80, 318, 1631, 268, + 8050, 89, 3919, 61057, 77827, 39911, 354, 416, 337, 89, 9700, 69605, 6200, 1919, + 69, 7513, 73, 26738]], "model": "text-embedding-ada-002", "encoding_format": + "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '44957' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + 
H4sIAAAAAAAAA1SaWxOyOp/l799PsWvfOm+JICTsO04CAiYoiDg1NQWICKjIIQmkq7/7lD5dPdU3 + XhDkEJL/+q2V/Me//vrr7y5vymL6+5+//n7W4/T3//oeu2VT9vc/f/3vf/31119//cfv93+cWb7y + 8nar39Xv9F9j/b6V89///CX895H/f9I/f/0NxKClxgp4EdfwvQYlDBwa3usln99ibsHPRthT/bmU + YN7QvQGd6pBRa3NdIgZyzYUZLJ9km2bU5HVlJbCPzgNaz+LczCxxQ7g84QabxFUHhmxYKtWq3mAH + vdbDcgHvlUw3jkPUdbpv5tVkMxjHpYrY0DYmF2ZuwLN8V4gUaC6fywe31WovjtSYmevTdR8sYJ34 + EhJvzSun5/wUACO2a3pAVgqoEB8ruFRuhZEvSjkRVg9PlTQkIm7Gg0k3uHhBefPpSRs6SzQbpSfA + I2sDjOqK8slscwM0couwi0DKR/uVBsol2RLymo5mxI/uo4ftVTZwUEq7Zk7EPYFed68Qh1o8zO1m + pcDkNQ/Ub7rHwPlQi2COj5Qe/N3DXDbWnMK3xxhZT7eQTzu4bmGFrgv1QnLzZ80+JwAb9IEkYsjD + +EgHF949uKV3sbF9njz6FFqHUsDG9dNHizhVBqhGuODdDTyaKciPNWwXqSRsPVScry7bFG5hmhG0 + imROW0kW4PAWTWpJeBsxrLAACg25USw2L3/UkF3BLGkOSAblNMxhsl1gYxcuDXdP0iwPnEIgP6yW + KHIY5LyCWQvabWHgkzm+cuYLvgiIdgSo92Gez80tiyG9nq/U+LZ3xHcVcE/ICwmb6ymaZbEt4Il+ + GAGOa5qb4xEI8H33dtQ3/XXOr/6YwCx5HMhqPRlc0BCqFdGvR3wQT3E015+yhkqt5jjzg35Ypqtr + gO/7o1ezTD5zV5CBWHM6jM+9zYdhPZdw38wXethusMnNXBPUwNwZBLR6PdDTHJ7Uz21XIqZtM3Na + baMtXHa9Tk1tZs3YOLWmHmOxRKoLnGihWR4qyWGDqXW5PCLSypkIk3t+Qo9nooONddgTaI/LHQfA + DCPWP0QN2qukxm6TeLnkf9IOAFfhGC1oBGz3dkKo+6aJd0YrNNN8NGO4xOmC/SI9DVIzfzrFyayQ + FsbF8QneTiGkZhDiK3K0YTPEqQgk543QwqIajLphxNBiL0B3I94MU5ajEbLSrLF3OUoDu6iLslZx + aFCjeu74om+ZBe/EXmHUHKyIPcVOBNsz5kQRp3noay/WYPDoDHqLw6aZKx5mQFAVh2pv38iJrKU9 + DKTZpwcVtPlybswUon15xL/xvbBFEuDNen/IahcVnLtJZ8FojiXsxLszZ8nZOsF3Ne6pfu5avsSd + m0Cujw0OtMfDX86X1UnRr7OAz+en6vPbKSSQt8IH38aLHkn6uIdwRt6TLGDI/T6DgQvy/JAQOrIi + Z49mGwJNvmkYb5fJnCRWnGQNnk7f+rHlvLpCF5wlYpPv980XVjUlrObExbuNfvGXQ8xf6uFpQWxT + Zx9x/xP2ilxQhP1qjnJWRy8EJ8cYqNW4WTPn5iZTXSfTkXi/BjmPxjwG4bua6K65RZzMq2AF58dp + ROK3vizdZ4QwiIcdEdO9mi9AH7ZA259VtLg7J2dinDNwmEhDzQM8cQZqJ4VetM2wdo6Gpr/qexfG + 2q4jomhagGb2O4X4Kb2xIa0Rn0kOTxCXN0QGLXjzwU06G+7y3ZXMN02N2GXv2nDINjZaeif1Z7ON + NPklo4lqVDX5nGOvAmrS59iyFtGfbqt9/NMDqtGQ5ktuxJnylHc9DrbVGXBNNS2IwMWl1oWvIuZ3 + KwJi0r0RgG4V9Zr8dmEY7VPqkc5q5nJyEVz5cYudKwB8iQSrhnmpdfQ2y9HAz3mGIPDJDlvbj95Q + r9EK1Vt3Di0E4dOwqxclMBBUGTvf+kme60GDcXMgFJ12q4guhoDgvagARYCtzYGJOQP10xGpE40x + 4JFUFQBW1EQ0D2ST3W6+Ap5YzChiqQ0Wd9O3YIutDzoGYhNNVD4vECH9Qm1/3ZgzvlBREedyh3VV + LpoZFeMCt5xxJBkbO5ql08Lgu0g41fPS47woYwXowLXp/Xh8+XPSw1bJmtojnciihpdpp8A+8VS8 + t0s28DFNLKiGgk2A/NwNrDiKDF5UXcKHZ/4ESzGYBgx225gGxuXtcymHK/jpNgHeB9Nl4MeX6cKm + 03zq3W51PqVG7cKxUZ5EKUw5GlvproCWnC54D8qpedpQq2CumyMODmMTMflMDSjebIoNmGLAkXso + gEcsi0bb2I3YQwx6SGTjiXW+2ud8pO8UFH58JavEMHy2Yb0G1VLjeC/tAi6A2slgnuOEMF028q8+ + Zkp6ZjWqv/N3roWhB9uwm7B/3Ujggy9vAYSyzdCczEnEzHnvwmCnxIQvu0dErhsOYYXyBbF0feEs + nk8VRCs3o442jA2/7AoXBBL38d7zIB9CyTpBB2+f2HuIjcnOSLDBfnPOsB8p1jD3Bq9grF5TtJ3f + hs/bx72FG8pW1E0zbDLIxxEkdiHR/ePx4cuhWi+/90Hr7S5tPryuOrW4HLfY63HDGchdD37rCz6s + ho3/dK69BQ0FD2hzfqomLQ/RAimrdaIuL9df6vemht7RcWnA8g7Mct+FIOu3NUZ5G0dUiK8V7PZd + i111xOZXP05gXgSKjT045PM7lVuQ8KXDZqav+eQY4QKyimypkXPLZGD76OAUbDTsbdrKpGonV8AX + kiO2hFDj8+v0yKCiuAa136UcsewNRcCzfoWtx3pvSh/5c4Ju4i5YqzoHLLc5CoD2VlR8GC96TlTW + E2g1zxU1zuFnYPo16uFNb1/YPfmdP/RCm8AsogBtQ53k86m5eqDYVRY9njsL8Cm4bqEjPRfy+FBj + WC5t2UOopZheSeRH8zArsUp8ImAcCdXAb+1kAQbbGttz35pUH/UV7EvW0sCp0oYvz6yH/bwd8KHe + +v4871YB+F6fGuK+49OubxCUBGWNDao8wDKJnwq2JLzgwBTGfL4XKw9cDzeC5Pup5kzcow5szweO + jds5adh8kws4JqAjauQRn43WlsAtzDLs+G+Hz7I+d/CQxD49ZPZnWPZgYXDgc40eswNMNvA8BuMp + rhGorJKPWNkG4KeXwvngNCzbJSlUpv2O5FsfNvMpEBVYetKGtCs/4ptGASKMKONk4/cfwF5zs4XA + 3j3oXu7eAxdUzYPJmGL6qxfjtx5DF9oedY57JSff+QeLjbAl8rifB65Xggg/oCvpIZLqgYnVBcKb + 9fxgqwPtwEXv0kOzjwlS/Z1uzvNODIASiEeiNJEbsebpV6Au/AzJxg6DpcnlBMof4qNJKEhEb2CV + gqm8mmQTFkMz8Q9VlN7JAupImybinwZr8FAfe4xKwRwmEr69P/NXw1b7490Ksly7Y+N+Z9HkSP7r + 
xyN4HyfO8DSvOwV2z/REfYVIPk3DxwlSE4XYWSgyGftsethPzZmEoeJxvtJLD7DZ/2BbNFv+uqvg + BTDTT1i/abdo2Spaqh4/eYu4dSvAfK3MHtzelBL5BMtouZZ999M7JH/rAXPMmYEfr+0df2eyUNdK + 2Kz8GE3lvY+4vCYd0M1uoNdKOOdLvbYFqKeGRg2s18N8vl5O8Muj1NkiKxfWgqtA28096triAL79 + YygT7k40+/IcvQExA9trGJOxuqYmCT2jg63vHgkQ7ixfulu1wK8fItBSdCCqDrSh4ThPon75SXhB + WYMd5RM+fPFgZB+1A+o4t2iwnt0wfvkV4HyA1N4kLF/WhmarEagfX15so/lguQFUg5IheNq1/myy + OwJfvsTXve7n8+HxIUC7ugFFWV4DLqiuB61p7qh5F/bNsDxPHZzdOKSJZdwHtrqUpXJPxhcSZm1u + WLgBIQgOpxHrRr8d+EhYDFftUcOWdj3n7IMfHZDJ8Ui16PbMudsJJfjxm2/npKHrEyggF7ORHqz6 + 1Tye5fSC72gKECuXxzAfb+4KmikbkDyKL392F7uHws6/EQl5RsOFGRgwtNI39eXns+FW4iuwSp83 + rF1E219W12OmLpfAwt9617Bj+YHQPmcOWrp+4VxN3wXc3/oD2tytyJ9/fk0/I4KY+/D8Ga39ACKR + Gdi567O/6NutDXVfN6ejX2QmP+cnpCaZENCyrvCX54QQzm4S4oN+uuTTXXBK6DTGSDVfOEdsc00J + XJ2IinFmOoMwkiQF8MLe9GAWs8l3R3sF7+tBxwY9HCIqe1yE7Ik8aqnlOmeiqCDIa/2Cd5qumNMH + rUe4Hpac7vZVaL4XpMcq0sSerI4fPVpgIGugUA8hNsbkbs5nKtcgNhQFbWNSg2XOSf3zY2hR6k/D + T3N6AmcYBUiZ30XOF7RPYF3CkdpB2prLfe+yP/0P8pPq83lOBOV40gN6sNNg4LdTOgJXDrc0pCGO + Xn7UxVC03Jhq/MNMeu83ITTb+op4dfAa1u2vBeQXaSTSUw0axiPpBX0yjXjf3Xa5oJVhCyq6WFTL + rhXo7XYXwq+e0EOX3Yaff4B+vSDqqiM1WSvdt3/88VWNOZ/pe58CPJQ+NtdhM/CXfLDB7J+jb76A + AbspUwYG+zaTuWi1fKnfagVUUcVIud3qiNZVEMPc7sLv96WA66grwW2lJNjqZ28gfJuuoF8zRO0W + rMA0WmiBUbF+/OGj/hRm4h/+0qBTmkuVnz34Xk7B16988i+P23Cy8JXqTSOB+QVCQz1SzaVaqPSg + j/zbFvZrJ0XCaaUNwpf3gCZ+CJm7szwwmJgICtrrQvdriURdAwsBipYX0y/vDmPvtSMcxuseGyyq + +bKpMwVc9vJM9aKtoiWmTgvqt+xiFHZXczEVM1GrMtC/eYneSJqbnsCFRDZ60rvVCEWtiao4bCrq + vk5PwBaQQfDVD4zfq6whVX5zIcPhTLYFx4CZGVEU8X3KsO28jIitpi2B3/wIB5/7lC9b3SGgV+oT + 9S+vELBLHhfQkd4LtXXT4VKyNrdwoEFOVnxYTMaCwlVKiByK5bs+LDfYxZA8ugORpeWR8+EWaZCE + lkezk/syB6/RSgh8WcT7tYTyZffxArhv+AWxt1/nnGy0Wt3lzvXr597R0uunGiT364n6lVUCHqus + U1GuxfiP3iZnK4Rp20RE0K6bnI5kG4MbmqPf+PYZtc0VKM96hY23W0X8+PJdyOyPh9/PcOt3Ryft + oCHkCdaUp9/001CkUC/eNjbAAHx2CXEMss4Jfzxk9vezBSFBlU0PvZr6X1724HMF1gQ+ue5v9MvS + q/i5eRPhcRIGZtwHEci+71O8sTXAdyulhsnnpeLD8nLN6XHxvF8+Ri14nvjMPODKH9CXFJnWGfDe + 34xw8Q4OeX/5o/fuY60cT2aAcbVu/MXcpRU0pKCj5vVIzWllPuw//narM3EYG5Z2UJEShu29G4NJ + VUNRBX0h/td8ZRtvgdtO35HXV8/p85ynIIg/O+yb/j3vtDJ9weBjZIShS+gTxe4EWGa9hQ+9ujWX + 8Wn98fM4kC6tz97ncwd5FL6wk8DWHwWHZ8pH9zRqf/NI1h4UBI/UcIngn8NmdhfUb8eiDqhRLjkY + zyaBcPGwg11rXEwWu4MCpSaBZGuNiz/2k+lBsb251JA3Z1+6i16qfPUcm9KbD2RtuDYsSHGnv3xy + esjdC+oH8Uwet+nI57dehdBwdk9svuWk4RjdOuUsKAIONmruc3tnLNBRUYLN+tJGRCiMEbRusibS + l4f4DAcIM4cVCGwCC/B1by2gFpQdxe2A80ncHBGQ3bP85//8lQ8t7D7rlOq/++Owa8Gmfh3JM96M + +ZI51xhazXtFVp/Cz2fn2ttQKtUX1ZNZjEbeXwXw5Snq3HzfXCwtf/38O6p//TWtOQHf58GGumtz + qhRH78eLNIBy0oyut4dQ219UIrIwN+fG3muw3TUTaQel+JOXArG9u9Sf4Xng890rIZAPb3pwNkc+ + OHUb/vwA1aNdb3LvwmPIb0VIr1rnDSK5pwgww7LQQ3+8fVbdOhtOcGXiPb9Ww3CeXi/4zT+pDWQP + yJdoCGGGXQHvKs8dlpOsl+DLN2SbT0rEW88OwddPE0EYb2DWmpXwh3+8wpRzvhgwAF0kYrIxWmGY + fvwn7PY3rPuFYrJEqslPL7ArmmufHTZ3C+4EfYODY0qHEfJ2hKH+PiN569cRC3viwTGRO+p01Ttf + DtJJA9+8E1uuscvJVnfGnz6gzUsjA4kLjkCprgH6fq+I9dswVIPCvH155OgvK2PtKbtnZJHFxg9z + SdOsgKu3dUL8ABdOXk3BwHd8o3C+J/noejqEwUfLyDqXp2apvUKDtUIyukdO1fBQCkKg37OUgBlu + hvH+aU8AP06PP3kjja01VPSVv6fu/K59elHs1S9vo1+eGwiJTQsasVVj13wVDbkvpATP87rFB7M4 + +uwuyBkodrWFUelgfzm+Rw8+BvFF9Q81GvHSJj00yCnAB7k2h4lf8gpyZb2gp6Yr/nJWYgLeRcyR + tFZY9DkFKwX8/MTxiC1/8/X3kL3UFdpUihyNt53SAiWd2R9+mk7Baguiz6qkOFaXnDXON//YWSmO + DngV9Ze27IArn7YY7UOtEc8krKH7HjfUk9WK80IDKUTj64KN6vnkvDzkDNqPy4uI/gkN37xdABtp + fyG5unpFGyhpveoD8qEaOGmmmF2nEbgptPCuaz75BofVS5Xdi4xRlhtcKI6rBZo88rAVCjafgy3Q + 4HvailRDl9AU8oSPcLwShe7Oz5s/Y2LYkLBshe1vPsl2S5wCu/W2ZDlpSs5Ni7RQftjtV19yn8n7 + WgQrEwbkp3+z5Ey9ssnzD7W2n0ezxBS3gPvKARu9szU/03m0wS1fQryvV9hnv3zoW2/ovjwqA9fc + 
8KQKJzfDcVWbQHTSvQUnISm+9dgyByOXFFgMwwF11b3wx69/+HM9HAnasNzPwQrqB+FMb4fY9b95 + X6mmcx0iMEzku36lnaBlWwXWn4nOpbEZEADTyaXWV+++eaIBETIv1EseNp8B2xaA2YOH5NTgYLRQ + T2DOV/rX71L++uXpp4acsPs5qsOCc38BbF8U1NLtPf/yPoJxgwm1WhSaf9ZbhM2pou7bSAa2ixfl + xyuIt68hmjNXSX71Esl2GQ7TZQ4YQKdsog4vjGa5q7xVE62vv/Ui84fFgAgOn7eElPCk+dx+fwJQ + KPnq124u1+hWg7u32pKOW04uasWhg6zUa3T9vEQwP+TqBehR1al92HzyBVXuCNdKExC4HAkg4WpT + AsetD9jyz+HA8x1LFMzME5G2qM37/TWOVaVMY+q1m4zT5Zl1YK08Aqwd+nzoDc5dePQVA+PiVeeL + wYEL3RY0dOdy0CxueT3B5WBcqWeFOJqPN22lGmsRk666Q396OnEIyrDIiPo++nz55b1f/kVCsTdN + CV/rRa2riv3h6UlrRBFa0jpCK2F2Gp7d9B4erMUl9WkinNNUOcFPJwWol8Mx+q4vjIqWJMq3fjr5 + /JFIAvjaiHAQrAmgh3vJ4JJzj3rPGke/8QBthO/Yz8TK/61vyG334fjwzYN+/Qn3rvlB4I41IEE/ + NaB3HSnWFaXlX782Alq4A/atZ9fM06hmUAeejTFREvPrH1J4ogPDv/H3OCTx65dv/upBw77rlWDZ + dfovH83JOo4LGL7riShn4ZTP2rEv4FlnBFstYn778zMoN2Lsre+RT0dbH+Fq252p9x4/JjcxGGE6 + VyHdlZdVMybSd3ynKKJaupbAH777MPwkPgk/0fjzV5KwXePv+nLOfnx2sJiLtesY5BOQpBG0Rt3T + /c39+H94eBpSDV/Qa91M4efgwmv13mBDqT8D8/xzoD6I9EFhuTuYlDjFCljep8ZaAyKfEWXFgLwt + IEbetAPCz6/+yU/ald3w2JIgtOLNAbvBuTYJuaeBKpXrF943QzMsxeBrYBscL2jZFELENTcN4ff5 + kFQftZxVzkGE4la8YbfbaIP00+dAWMvU+fI1z2yaysL22GAL65CPYXpd4GmwewKqOYrmXJI9qHjG + +uc/IpLSYfmzHursZ51L33xLiQfpTDZLWeR8VIQaLtPl8B2/jcmmOkPQreIbtrttPSwpbRgIxWNJ + zVuP+bLzK/LL87AGhVPzJ985yzeFvHlRN9y0Xi+4SPWWmujzjn75MPz7tyvgP//111//57fD4NXd + yud3Y8BUztO//3urwL+zW/ZvQRD/TcU/OxHImFXl3//81yaEvz9D9/pM/3fq2vI9/v3PX1BQ/2vD + wd9TN2XP/9nyr+8d//Nf/w8AAP//AwCe/y/15yAAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c09bf89ed1e1c7e-AMS + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Mon, 01 May 2023 17:29:43 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-kd1j0bcill5flig1m29wdaof + openai-processing-ms: + - '471' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - a9c89e6436668562329d35d627f16245 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integration/cassettes/test_local_cache/test_get_relevant.yaml b/tests/integration/cassettes/test_local_cache/test_get_relevant.yaml new file mode 100644 index 00000000..8bd34929 --- /dev/null +++ b/tests/integration/cassettes/test_local_cache/test_get_relevant.yaml @@ -0,0 +1,500 @@ +interactions: +- request: + body: '{"input": ["Sample text 1"], "model": "text-embedding-ada-002", "encoding_format": + "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '92' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1R6SxOyOrfm/PsVu/aU/kpFJIs9AwG5miB4o6urSwQREJFLAuTU+e9d+p463T1x + ALGiycpzW/mPf/31199NUmb34e9//vr7VfTD3//j+yy9Dbe///nrf/7rr7/++us/fp//38isTrI0 + Ld75b/jvZfFOs+nvf/5a/veT/zvon7/+NuPiQ5nS1Anz5acM4uJ1ZNpa6bouyZMjLJdQMZs0H071 + gcrA3keVHUtt4GxxmpbKpd8Q5urcTppOaEXZRcct8U/hJ+LRfchQhzZv4iqmnoz4sN3CaV17ePPq + P15npqcc9Pf9yLCTWmi8r4mBLoLgMxWNqKR6tKTwcM83rLRb8OZ4jLcgX4URN/ux8KgSvxtUsFJl + ji1XiBfJs1dO/JwwcyO5+oS8tQGQGluyS6ibDOB1N3AIDfAQNarXy5f4iE5iTpgpd5bOk8GtoPH0 + HsOptJOxux9cqNitYI6nd2WR7zcusGJ7JGpq5px/KtyjfIMRUzeG7bHJkVto/O1AVxpqk1m0BRFN + SbH/vdeHi6MBXGZXI/jlC5wv2PWIOOlvTDvOQTJpbiSjOiUWBn67JNy/Xn3wyyUi2HaSjkVXVwX5 + 
yBFTYytLhsxY9LBYXp/EROPEh9rYuqjFqkWiNiiSWeR2C/JN7Yl/Pr8iflyNAbCk7Yg1r4/JpO0M + G8xTsGGOfa35tHx1Mypq4cX2/uB59LA73SC1IoMR65B3w1IPd0qRVwqx/fuKT055zWVf2nV487pf + +fRQ7Roqeq6JqepFN54Wnx088MckesuJ3t+PqyvE75tLZ3o8JPMhMlr4iF2GZWU36/w45bmSc/HI + dLy0+GQ3VoMEB9XM552YTNkhCiFU45KiTaXpq9qOJbg894jo7eGgz6zoCpgyqhHttEbelK2XPRQv + pWLO+/XoRr69jhBErUy2w1R11K1iEbQNbYlNLi3nPpF6KNr9kaloj/mYvhoK8Ts1iZvG124+NMxA + S6FGeNwYjc6mdyOB3jIdo4Sq5ci3wQzWLgqZKgPhc56QEbFzLBNVjktv1q39HQQtCEhc58vvfCOF + o9V/8GrLRG/EdZWBtspkorfhAk3Ds/VBavSQOc6ClJOmNT3o7+xIdPKu0LiT9mcYqLRlD+uglnxx + WI0gLsUdMbfUSqbMWFMQUfDBrWaXXcv2xxEuihQwa65PUecf7zbo/RIRuz28k2lJ9XAhn0WJqcl0 + jJpTezuDoF08st9Pli6K2b4CRweN2K3aJDRcbRuI5lRiu+tcd62elwKasl5j3/pG7RvWNdSZb7Mt + fY1Rfw9e7YabqoXFSyZ7rHhMdzADATOrZ/eo74bABr2bF7jE4Qf1lXWtwdkqKv2eRj7nPZ5huoQp + 3by8mI+ysLuDYFaIHA2ZdsxE3oyGz+fAtFVYc25eZwOma3JkhlUZ3YgRwihXypoZxUg5m86rCriG + MDG37Oy1b+loo/ipFsQx4lXZiuvgKhedfyJeLIlRf0rFUZaKpiPGM4m42BQTVRabycYoyYJo5NOB + /v4PcR8z6bgvhi749InpSp/LaKoX8xHBZT4Tc/Pu9LES4wJC9dwQuzvPel9ZCwEtlrcn80f/xbn0 + DnuwrBMmuNq9kikw1ZtS9ZgSo3rb+ojjZwHdOtdYGNmWN873cwPB4aUwL/G3nZhnkg2N2ZlULoYK + fUBUJZCjzsALYSGgQWu2LgzDAqhQzxKnsRbsAMKsJbozyeWc7ycXILUV4i7GRufF9ixDcmcYr4Ps + HU1ZPM8/fGV7Zn6i+Q2LCqzj7sFIwaSOfZS4gGY3AXMVKU9md3J24EK5ZNvDAXs0h3knT+mg4sXz + mnv9/Vk1Pzwi1iy+SiqkwRZEFH6IK9wT1M+fvQyOc26Yux2R1/zqoz6PFcPv6ovfnlEp/ttsmRXd + PL3vXv4ZhdbKomsehwmT4HMHTRFuzDEQ72YRoSUSsHOmrMjijn+UAW+++8H2YWR6IjvlNTQ7puFl + zVSPhviDoajhxbCzeKFRXtUAoowHLO1x5VF309TQaOcb28X6q+PJZ1dAEloiXqFHyHlSdTtUPJsN + nkRJRZ+l9RTBneaantTjs5ycMijAIujD9M+iTJh0kEK4CODjObotEY2VRJCDu3tjhhWZnRhebCo7 + 6rzHyzL0EV+wdY9++OBPgcbnHPtLcDe+S2JPtEs+mmK1CaP8Tqx+LyQ0dz5XFHf9iuz7riypgoYW + tNHBxLs5p6TvPmxEl0+bM3OzufJZSbsePbaSSHR3JZaTrYU5DP3KJMbr9vL4wjmHSBNzkcXVxtep + aIsimPfpitf0dI64eSyO8JkuM3OzRdjNwlHK5STOHsRVmlGflVrH8P0+Mcpli9o8vNfyZ/Uu6Glj + 7vUex88c9I4azL8kozeHF7VHx70VEPyObH391SOQC++CrjYXmXfSPi9Q8hAt4s9rt6wF2wkgCWBB + jAKPnPvHowvxKwjJtbHVkhejlAHs9xIhz2uuz65wodDo95Bg761EQ/2UrsCSpvutVzc4t0kF8+hu + KRLte0lF1Li/98xUX3k5yo9RBH+tMlz0fhxN2UnsYbrdDySklfadb8zgNOHyWz+G/hnmjw9SZ3vM + /uy9iPu6k/3wll0/+y5hIz7vIK4OB7oOG+AzO1sCTJHVEFzPV9RF4qWBKr8/sBifomjOHcVA2ux7 + zLxXbzSzQ5WDX6xlvDAdu0uHNMxA3NgTO4ufUzmzMwGkNy+PuYqc8p7vmkJ2hWpippycEfc/rQSO + fXl/+dLlfKEKM2LpOqPyFqceP8qqCrgPJOKPiwKNcpSGsHKDgKS+N3j88yhDkLPgTgda3fQ5ViIB + vvqRuIVX82bHXiKS6FXGY2xl0TQcZUCwJxJVosvTew16Hij44y2wEm5rfVhelwVoCtyY8+odfT7k + VqX85ntYIYt6y5Yo4qaj0GpHnG64dEmLHt7jwH71Ml26qIVsvX8y7Xsee2s9NXApA4lop1OqcylD + GIYqaJlh6auE6hdpRkt46My7lkn0Pd85uOtdSHaryuXf+rlDa2Qdc9y573pLrSSAm+BSCWuat/6U + fAuhvdHxeNcq3ortx4bUfXLm3WCORouFLqTODegkmjqfbHzK4LHPMUZXLY34cbiOkDyWFvEvt2fU + p8dnj4qWHOnzIhgRT+5e/dOfzBNbuexlPC3Rg7S7P/px/vIVGtomJLYvRGh2XX6EBVogpjdtk/Ai + cjGqgvLG7G6ToSlQxxbqu+cxvbHdZLIVl8KXf+hnV9yT8b6oa3C0jUGMZx+iL56KKCRhR6xg30R8 + Go8GxHXsMFdXu4QVl1gAwZ4ZXj77GbWh0rpwNKuJbJmdelOw5zUAOY3Ev7gHfdaHCIOZtE/K2zmP + ZjHdSCh+XfZUXjw+Ho21coYpkFdU1lqtY8U2k+WhOr2Y4wg5n4Y4u8n652Ox3fmWebzFQQWPrSxi + MTaW0TTQl/tbH5qjMSmnpR4aKN/4CE9rF5fjPRgaOUkpwYuyWHX9V8+idN/0xLDEORq0amfAdPaf + uIvX725ava4GXMpQYubm7ekzy9gd4vqI2fc8cB4NfQb6p/bp0vSGkiqL1Q0sO3EpHKebzhedXYPg + CSXTRrLpxmpOM5SNqw2d2XUfTTVqayQf7gfmCkrnzfn52sIHKS5RkZ973/WRwXqIlHm32zGZw1AC + tMK5yqzzKY/4Fw9BTrwSr0efdLxorrAR5x4T936CbsS24KPlYpiJTTYNnxkvCtTsjQ9JVscAfSCX + baS3rU1s0jh8GFqnRzGVPsyTJsNbT4p+Br6VCEZJ+uxoeLH7P/rKseprMv3wrd3VJd7U2NLHzrvZ + qGAv9PVbTVL9+Lwxcky0dbLv5t/5CPcuZ+5itD2qy5kN3So1CBGE0euKle0jTR5rYoVm4M1CrAZQ + ZxeBCk76RqMFzg2ksn8zxzJuyTQsrnfw11tGrOnZltyY1wDhdWkz/3LTIl5MlyX68gtumhuN6rzf + zUhvioC+pNUdfWBY7aAo5wlPK81Cs6h6LdTH4Y37+Yyi/ucXpvh2wtxWKPrjh8w7v9I5SLcJX3Rq + 
pfz8sB95C8SK3LTBfzKdblmLvO6zM0awrmfCrKhrvTnWAkMxb9WLOK9hjYbM5uMPr4n79UujZY89 + nKYoY9/zo3dmqi3htHyEXz6Cbqqf0g26Tb9kN2UX6lPNPQNJHzViu1tMkqkufEOuj23P9HaZ8unB + 4xDkxCm//m1bjph5gLhznIntK6beV9cyRNVV27D9F48nW9n2imWSMxWTtIumi2nK6Ov3qKwhtRsr + Lz9Dks0eRdeiL/lU9iIMdZoyN0ViR396MTV3B/xcJ/uS366kQLg1TKZx4ZbMYvt0Qb7bFdnj18w/ + l+VuifTX8Y7X3DK6b/3RP/ip3pdPNNS16oIZEWCm1t08LpmOij6Kvvvhi859/WTDgsc5IxB10aRt + NFvh6jpkHhrenEvn9xXgivZYweIBzYecVJBLhsFUGT31mdUoAItsPsw/Ld/JzBjN5K9/pfzDuN7f + bWwjcVHEVIzrORpq/9lA8WRLvFhMe33EgGXQm7f31VerpNV3mxmmSB2Iq7y1RHybDxfVp76hMJ/8 + jhfbTNqw0ghYXI8vb7qcNhiEvfogptbJ+lCncNw0ZuV++StH7Q9PvviDV9u9pLPPUt6Cu/b2+LWl + 74j+6hUX7E5X8s3Wx1k2JJQkXciscCdwKp5uN8iVZ822Ufvy5jgU7jA8DZGosl15Q5ZlGPT6dqUK + 1fbJKts/MEot32HYMxdRYxWtBJemrohhXJ6o5/mth6PFK6p88kyfLmqbweUzJGy3vofdeF9bOyQY + mYFlRVZ4g+NPAUNurhiuA5lPMG1scMjkUSYoWiTqVbNF4vg5sH1YfX9P3apScFOLn5/Ue9ktDBjy + rUynMZc6vnDcClhoERxszic0DcdZAMHsRIwkUutD0D2XcHkXDdutIo83cp/vkHlJe6YTs+bcX5hn + eSG1B+aJ1sC56We5nM+rCEs4zL3RAu0KwX1/+sNvLTtbAHVYLJmaXnA3u82zBr3EFV3W2dObanVn + IMfCI7M/49Wb2QlCJD0rRrYDqcvZXV1FENfbDSOCEHh8VC4qsDo0CDG7uuMmtZeoTl2fOdWO6WPq + uz7EpXv74mPjzQzSHvyXwIlRPAjinz7MlWFQgDQf5iTc/GwM9OUbslu94nJaXvMlXMoLx3BRsE71 + j35HjRafiJpeaMeKywGAb2VCBe+Yo/qw3TfQLRObDl89N7vKeFa6zcSYtipNtMQGvcvizB3K1Mst + 6Xe3yw6q++FDVNmZOTNF2P74A8fe61NOy7tU/fI45hbeDo3dVbv+yau0i2AkortbBOsvfuKFIX3K + H38r4lidibo5WfqErvUZjo74pCv0mBErdqEAHSjlHz081CrewXLBZppGu8LrO+/mAt/ZAtE/fh19 + /awE7fbMCbFKnFB9NfdKMZz3+Hnxz96k7S5LOI32m2lHvtAnyGcX8rUjkN95o2ES3OHhBBlLadkl + s1hoLcix1jFTPxy9wTl/QlnqLwpd05MY9fj0yoBVrs+sAK91JmFLQl8/h8XlNPAJdE+GKoufRG+f + WjIEN6GAfCX4THcIdHxUHqqsTTbB0rd+ukU9ir88gQqO3yDu66krL3iS0wAxn8/NW8sgicsbvkTK + mvP2PBqyHFxPX30Rd3zC26uymPyBmAhHnEvqJkSP/duh4s3Syok46xyS2+v81Sv7jpuv1Rb0blzQ + 7nZbRuyjKVuEh5gx/BqwPtWUnwF/LIP5p+qiDwM93JHU7XdY8IygG090NUMSSAaxmyT1xnTuvvnk + tmc7cWcj/vMvjEkJOyzHhlMlWN7l1szfxPb7nT7+8peYwYuYGU0jtuCNIbc2TfDGfQ5lvzusK0ju + TcT8izvp7CjbW/TNa+gru7NuGixJRa0veMyKHN+bLoaMQc7CO9vFYlEO2anbgrMj7JsvbX75ZgGO + urb/jBf1ive//ITOdJV3PLmgJZKeNSPGNx/48cWmqihh/sRxt4F7e0PDsNHwdNrmHdXzEqDOsM3M + dPfi07CoXCQY+YJsWZvocy61ZyRsE4uZmbDvZn0qBDiBMNPkehjKnguqoCzE1xHTmvQ6Ty6NDR1C + b2Ju0yaZwya5gv7c9RR996e39ncXnTazgKXufdDFePOQZZGPLtstn0rHzIAGv/XBEj4q5Wug8R2G + 5nWiDCo34v6RiWgKpBXTjqT85gdCBc6+upLIdg98VuJIAKnRQrbvHbOctJBfUdG6MXO+53tMXzmF + bvVcEH9aKsmIMwOgqtGWKrTsojHVix5S5wq4XeRrNNTrcInYrWZEO72GhE0SqiHIgpAY+U7iQx1f + ZXD8/M5cPYy98bffjmGumTatonL+5rmoPrI32++DNR9q/9Oi4LQaMJy2sTcNiyBTjsZr/OVv3rCc + 614W1ANhxlOZ+DRYowoPy62/+PLWabzxfWD3qaCyZusd+1S7HjV7p6J5esGlqK9kCvl6azOiXM/e + mCLPRt/1JbrXRH/8FwSnecbfPAK9Lk7aoHZXlWy30pNuWraWjz4LVyDemooljc9y/sMH+pKijM86 + iUP0WZANwy8v8L79hCXIZe3gEbGeM+mBpT9+qoWt+NW/doDM+FXhMW325RK34Etc22B6NNMkGqtZ + G8FKcp1Zh6JMVjUqakjVzZ14sbmJRvlyOKJU14/E9oclGl5ICKDRkhMum26tD/YZBFS9QWD2fhrR + aDn+7ZcvEuwZY0e/+TZydEFjpj7rSf37P/izdZh1KEN9WOrDiHA1xkxvkJWMeN3doXjJH+bPxlBO + mtJXKE5RQyd+h4jqpBdkM10fib1/6N6PL5D8OF8xzMa+W5tUFSG4SC3NbJA6tnC29Z985scPc+xm + AnzzXCwvcOuNaRCDbEbGjtyE3vcmzU1kYHHB2TZ4LPjnh1/C3rzRBWz9aKyCxx1+ea13PezLOW5M + H5IH2jIi3GdvGmydwkli2z/1w4tKw8CYnBAC2z6Z6rar5aHEFIvrvuBULLQGzPN5Rb55hcd/+/fr + T7h3HSMq6FOh1OGLMVc9r6I+FfkVllxqWOath26Ol5ABy4UrM+9bB/W4IxLgonli3oYPzvwjWyLz + iJWff+d9mmq+chr3BV5889hpeYUCkmy1I/Zn30UDyNVRyvmGEFXiH692ib5TOOEn5t3iVyfqpAf4 + 5d3qvSzQF5+O6Mv3xJS0yXtdiqUL4WX2f3lqOSyvjYi++gA3jYK8lqmxiE5SazHtOD3LIXi/RoBj + l/7R780JvVTQ6zQiqhRuEHUV6Qj5xlXpOjwzj+qkAri83yXTRLPkc6M2qtytuzUVvnzDpTO7gT81 + NsPuXHDmz+cd+GPlU+rpT8QWEAVKt7o5dJQKV+dRrmD07Rcx4wW6zj8l2qJsc4kpXIaUT867Cf/k + Yf7Xz021/2lkRx333/oOuvaX/6ZbiNnRQGnSjTjbQeOrA62+/DjreRDAdDQ+jCywGolhpbcQXkYf + 
D8Ne69hxNYagjbsIr9Jr1PHjSgoguNs3jMRW7vj0cG4/vfjD53IY7McV4ue2IPs9cTzacKlHSaii + 7/55SS/vbkeIO7oiKsJpyRdvo4DHzmiJxdEYcfOlqBBcFi/m1GTFe5mYBRQdPpFfPv315xRVzzwh + JF+PnBWXVwOCI3aUN6399cvB/ecfifnNU2elpc2PH/H0zccG+BxakLPThRGl2SWzfhlnpXGkFUv3 + 01sfZ+/RIDlIAyp/83++cLIAQt+byZ5d1t4vfwdrP03MMHZbPmTFIADfLlyyjZwVGpaWUmxaHzxy + 4ShIeOL51fq0KjFthXvCp0tpiZBi+u2fnMqIRVsewLcfTB+WNib9LPsydKN3xcu8OaM5D+8Vqh/H + BTl885XhmwcoR3udY3m7p8msxKxFzd6rqODyqZxscwXQLbY5Xk/zp5sCpxTh+/6Pv2kq+Rmib/+T + bXt/k8xMMoVfv5D96bf+8tpv/kqMcnsv52+/GnD+qsg26jgfU6S7v7wQr7n8LLnvNQJ895fOga8n + 4q+efDgmTP/4u2QthZ4AD02U8Rw8HnwIVKkF6yC8mauedt3MHHOLqt6n/6Vflq2FIYi1J9O//WD+ + 6WuMgsTI2DagWjJibm/h79+tgP/8119//a/fDYO6SbPX92LAkE3Dv//7qsC/b+nt38ul+G8m/rmJ + QPtbnv39z39dQvj70zX1Z/jfQ1Nl7/7vf/6S/tw2+Htohtvr/3n8r+9c//mv/wMAAP//AwCkxTP2 + 4SAAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7be211a83807169e-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Wed, 26 Apr 2023 21:55:16 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '311' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - 665023d2f6ce38148bbb6db93c66bb82 + status: + code: 200 + message: OK +- request: + body: '{"input": ["Sample text 2"], "model": "text-embedding-ada-002", "encoding_format": + "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '92' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1R6W9O6vJbn/f4Ub723zi4BkYR9x0nOJCiKOjU1BYgICMghAdLV331K/7t6um+e + Cx6EWmHld1r5j3/89dffXVrl2fT3v/76+12O09//63vtkUzJ3//663//46+//vrrP35//8edeZPm + j0fZFr/bf/8s20e+/P2vv7j/uvL/b/rXX3+PA19Q5yVYKZuW7ix1zX5Dre0xABNeX2e5CviZms81 + 0sd7il0Y1XxL8bzt9YkEuIBShzOyRHOYzvviHcJ7sOcQ3y4LY7wDOJB7rY9VVxe80ffjEppne8WO + dMf6qAmbEKjHViZygrSUlXZ5hkrRC1Q1PAKIs0ENOOJnhbbCbklZthVjyDYSILtj1nsLFTY3sL0J + B4qj+w5Mwno0ZPNq6jRQYBPR7XlW5Jfmd9jdEDNdYL3fwPQcytRA5MCWtxfYQDIuBtWG5KbPqXAZ + oXZmL1S/RSdlt4tygm53wNSQhjKaHq3fQLFe3vhg3a/6sr5LDv7eb0GlTKduVVYJ3bwzql1cgrVQ + +wSeQs2mRhJZw+dsKTc44scbq3tzieZtznzoDlpBHUfGw8yu7Ax68/lAgiK+PCadphkYu7JGrNF4 + j5TMVaD9sCV6YXFUdYEQ3KA2Nhh774dXkSZWbzB6XB3sBTUDK6lEAX4cN8XeoxMrdpy8G0hl2mAD + 33iwuscxgzgSUsLhG8+WxAhCCdHPQg2nQMPaPlYXdst1okqf0GEq2SrI19exxepbGyuWCkIJHHQ7 + YV/lzYhNSxHDodEgPtz8MBpVSWqgfEkQqQ7eNVpfwzGE8SoX1DRVzhsDVSqlOn3qRAyrXp97pPhy + kSU6NQYVRuQ1HE+gUDcX6itXGbBHFyMoZ+KAxGyn6Fx492eYXbMXtt7bI1vTSY/h3EUFPvgtGdYy + pSI4xNUTFQJ5D+vrWfvw4oEKuw7ZDlTcXV3I53mCA/RRKq4vzhDeNuKeBs/rJVpdEMXQ/CgYu6dT + AxYZdSLEzm2kVvyWdeJadgbFpLewtjmI1SRA3oSH7jlQ97HrqklY7wYIlFuNbfaA3vjxtzZEFGo4 + /X6PhdSfXuJ2/UKWJp3B8pT6BAofN8NurPHDLKJbAXfzPqAu6V/VXMHsBu+FfcLGtz9X93Es5f33 + kfFD8hgJ4g+EbuzoWLG5x8DOSbPC6LRP0ZzzExg5nrehv8mvBJrnTzTVenaDXsEfsGXcn2Cmk462 + 2+3lQR1e8nVKr8kG9jO6YTUzzIpHvVhDfeRK7HAvC8zZ+VrD8pL41M52irfwn+cJMCoqBHwgqvpY + S0/wvVYKWd6aX7F8CRVY12BCIjk73rfeG4wO3EidZitFJG+iGbw01KFV2N7BuufPBO5R8SRiK0rR + HO9DQcYhcEnz8K7VwqaohhoNML7f2EMnRFIlKeFPGkU78xUx5TiW+0UxMXWybvYovZ4ghCfUU1z4 + R31h+L5Kd2VnYmMwi2rdvRkC2/VEEUBR53VVo8fg9mwnxBM7S+mNaQbQzssLBzfTYXxlhURmpmMi + 
iTl1Nas0KPYwXgTs99kJrEPfJbB1o5nsvZgxqjyODRwz4Y1xuCsqZu0/MdSXlOJDXTTReNQYgc0G + mVRxT43ODvBcQysTW+wIHyXlvTcw4X3CCsbH4zNtrRIjEGeZRfNSvkVD/u4h0D7dhv6+z64yiALG + g7ojaapC9u13+0995dNr0lm8cSX81oOazvxEbM3CHFqHwx4f9I8d8Xn6LAH/hiJGFOTRrKEwh6+5 + pGjPvVq2dAnpgJCPE8WdWuvr8TrYcG1djxq1Uno0xE4DuSnQqHK+P9OZdyINXiTcU1WTr188NRpA + SvQhUbM46dxEzQjFEN2xtb+bwzIddQFeayT+9mtFL8aFwI3CDHqQy6PH1PvjBA/x60nt4DFE6/JQ + QzkWXws12FuvFlFyBHi75CG2LVMBs1h/aniiukaD81EAq35NOGgL7xqRYC6GZbjYNvykZksPaal4 + fF91OfhQaGANL8dhed7GE7RmppC1nztvWcwrBPvd6YJmL917ZOuAGYAV5EQ2D/u0171pheGcf9CK + blM0tfZRgm3SqOR9NI8DcVjfAJyfHKqnflvN7hpCGPI6Tw+t9QIr4oYVTkJVIjZslYEFxcMGJEkl + InbZ6w8egq357tD0wBBMPz6S1NHA0eVcRGuViRC6EV5x8Ga113nGNIMzlDqyGgPzvv13BrHV9Oij + bD8V2Qx7CY7sssHGMIcDEx73HsRfoBI2i1Ot56zrYGhSgrjdZY2YHvcKnELFxd/1HpYWjivw4ntM + M3OzTynadhpsCt7CyPHv0ZzUNgTHj5/TpEbMmz9SH++cej9gm09aNitHjwAx9O9oSd962sWvzSiV + BwsgTksoG5eTzIGLFV+pfXUPA5O4QQIhr/LY9wfGVisQFWBQypO5fGfVFI/rBjrTPcBO5DVs1OpP + AUcyL0iGKIqYVYWh9MVXfGLY06f3K0f7qxIX2HatNJrbdWng0G0CbD5XpncX/IEw0NUaccH2ODD7 + mUIQeTcZ7fg6qqaMHyXohPHzD5/OhxkRsJXmI1ZXZa0o9d4i/NaPT83xoK/XUXMhmrSFbMOdMsw/ + fG6v3Uy1scMVpZWiyU+0ztSQl5dHw/eph4tYMcL5Vh+t2jRIYHHxiI1oJ4JOYI7462eqZOiUdtXc + nkF9E3Sqq9IwsCx2MnjX5yvW46muxvObKbBXxSONK7vSJ2jONjRmzSL8SEV9xA/VhIkkRAS8H96w + 1PcpA473VDHeNe+K/N5/bKKAcLpj6BTitYCdkjTYfPXSMO0yW4ObS//CZvayBjZ0TQmlLsiIXPld + RL/6AWZMyLDyrDV97EdXk0zNb9CyHDfR1POyD1QMVWpzQzSwDng2fMrmDgGcpfqs3bgc1P2MsZ7D + OJqtvV3I4zU84/uniXX2VtcZ9BH3wP6zOaQrhucS5O+zRZ2bM+k0cA0Tvvn8SA9H/gDWbcvX8J6G + HFbF7BUtwactf/WQZYnLoX5fKw22ihsRiR8yne1HYMPEaQBWQGBX8/P+MOAeCg11UykDZJY3Z5iY + Q4+uqWOkwvZScMDbIAOjA5iGgeDeh57xcKnOobVi4f3dQN4vEQ2c9BORw/pKIHzhN9rY5Sdi+Z0X + Ydmeb9i/bR3wURa8AV6xOxC54/JqBgAIEHRPSl1zOui0uYQ1HOdx8+MrsNAxuQGdPkrEve5Nuvbe + pQBfvKCPr/5eP/o4S9v0eaBKEveA+LrvwvvtUlG3m2JvaWG9QtbeciQcTk/A5P0rgXO6f2OHVq+h + yAq/B189g5XLrmXzPbv58Bw55Ps8F6zdXRb+zTc3rR5IRQ8EHg+xifHySbwl1g0TQuGgYUN3an2U + NtQEL8FSyGcjnnQ29+IMC6zrhB+aGixi2RLI7/YGPpyBMdAksDpgEYFRM4Rvth6yMIYTE0saRHY1 + MKu6hZLIGQsCvker+fDYQ8lu6w/VlzZm882WEqnPTx5hnl2yD85u4c9fkPotflLCULcBeph12CZn + RydfvgHf/YEqa13BUj4+M6zqIaHaJfe9z+vcwp8+J9wtVaL1DoYeDldNpZ4aymzEB7IBRkgA6fjE + YutbGxrYXUKd2sM0pbO0aU1on6hPhn74iu/5icCS5Q6RKooBy2I1hwR2V6wl7dNbPJ3bwNTZIaqO + 9pCu55DV8JFcObIZ+5XNTt/5QFKJgZg48QPLAafsR+22I7A7S+k8Pl4j+OojspPFTzULmcjB42tU + 8RGVd71/a1UNr6+oRRvzLVckNIIz2I1nBck0lbx5oywJjNAZYf+zauAPn/300iGaAm++h5UNK5jb + SPIvaTRbU5RJj6Q/oM0J68N6GuwNJCgOkawEqzfX+5cNY15cqHNctWqKHNDB08MQMDpbnsfO85aT + ZvWxw7/9tu6vibhvxzKgf9bjcn+4UHF4n6z6Ix0YBsMIBdZ42NkrPVuG4ejD0K9Dqu+euTfIE3cG + ennjcDB/jgM7r7YEskBZ0fvmhyl98UYOn+XHQRy+XQBr23cMj/f9ierZRWLL3OUmvKw0QJfw+kiX + 5SGZ8Osv0XsD+HRh+DjLZrys2HlZkff++mcQ5mWGZPQpBtrHxxnOVnynNqdWw5qK801OdkDD6FjQ + dOqadAQWG49Iun2WiHXmHMLbxdvSb/9X0y0/dUCkOJp21/OSsr7gV0jXDNLk9jlG6+4NENiNKiVC + j1SwSJdbBjvutKFGFNjsq39cGO1MA2txoESrs0E12Fy6F/75HbqStw+uDS9Sa06LYdoqSi1/9x+1 + PheVMVrZClCv+ZOwy2ZOWZRXBkRchREI+7ViZYTiHz9SfdrZ3sxfFENm44Z+++lRsZYlCeD0+U6t + YkmGxXbrAmruZcWe79GhwXObQ241CzK/fTHq/RFocCBlTrGMAGvF3dPe82dt/PInHb7PuwFTfqc4 + OPapPtNXbIPEL3yKFrHXGdkfDTloAKTOKVurlYuPREqbwMG4YeOw3BvvBpFe20RCMGbT67JX4Ho6 + Eaot1jqsUhQ3MMXpk2y+zxtT4TFKm7aZENF4wqgjP2agP14x2itbZxjyuyzBZxwhjE+0ZeuWlZ30 + rgKOlHzDD7/1AnqZcKT86s353kAffOjAqA82hTfrsYqgKRj/xvPxYD4FgPTGJltX4sD4UsIVfnZZ + ifb5bFfzZIU9JN5tiw1p0KJpnU0B2kJbE+B7uFqs4LrC7NhuaeBF8zDVO3qCJzu4U8/nt8O0KeTu + p6+RHOnPlGwKX4C/vGa/1tBbWb1qsK73E4KuIeqE3Lz5p48xyvNLumg3r4OX2Qi++Cow4h7HHGaj + OlF0wtVQt45G4Ddf+vqNLqLuul9h6s9Pqu0I0RkXiDYkXrIl/JSP0azVnxL8+Nq3pmCg+WxuwOMz + KEhSnxMbQ+CKEumJRzVsqQPf2kdR0B9VTOZiuqQscH0TbPMkxqga/YqxWvXhZOOUqvdHzwZu2ENp + 
fV4y6mUbu5pHts7wMTbHP3yy8N+A4J77B2y5sZculXUb/9Rjl7RNSSQ/Rohv95ZasugMw2jtc7BZ + tYzs5lQZOE0QTuCXb1n6VgG74qp14HZxtkjgWVUtt9O7gRvo7b/5yNlj3aH1oSA6V6xEoe8tp8Ig + EuoPBWrHOIsW3HAhBIV8Io10aoYZHxoIB0m8Y91R+nQtnciWo+Qu4r6W23SV/VaB1uHm4sc3H5sP + 5pOTrOerxQFQABs2/GSAr1/Dpoe9lEr5iYNizd5kd9jZEalvsIPfvJDw2/4zzLtDTqCyvjVqRrns + sWGzrUHUriJ6zS1Kp4ejrXAUIoyt7XEC/UXqczh57ojeQnaKFmWxIDy9NmfqxosD5vNSGLCp5hs2 + bqmS7qqmtqEvtjya7f2LzeCxKvJP7zmHfKz+5D2lu3lQt4yHYUROdYJvPjv+wUvyLqYE7lvZIez8 + 9r1VewkC/JjYRfL9SfRFcoYbdPP+je3VuDJCSMbBl/x+kOG0Dt4aC8QGJj6eqfuwuWjdsr6HPCcz + jDSeALJlfQfTnAzUmg7zsFy5isAuEc5U4bfHiGRegKR3VRyoqc12RGHjNZBahkk2eX6JaFBcbMja + JEdC2zY6u/JyDMVHZOFDc5sqcqmPifzLR4zjRR2E17NG4JFcOMQ/T4gNRrdZJW7CGtlv2yZdDw2M + gaxnGuGduGQL2ys9pJE3E3h48/o6n60cKP78oJZx34K5avSzHHAniIT1EVWrYC0J9Nbl+O987pfv + ZBjq2Jo4Aywm0DYwAMv9h3/V+IpfAoxgd6GHfhdW7NDoBTgMN0jtj7kwcrwOLtAWLyOPca8Btn8h + EzwOc4fN4iik4xF1K7z1akY17lEOsyqtNdzudgpF0LCq73qcwbrpO4w+91b/7o8afPEC8Zq3ZdRx + XHFvOP6DeupF05lVWj68EFehimoQ8Cdvha1p/fDf+6PvV3cNqRlzk0c9IudQvBKBOnvFBeN0nxIA + XeIjTmhyfT5YnQR//PDrT07e27X01fNf/Ms8Jl+r8I9f9rtzkk48b5wlphcmtTe3AEhqYppwb28Q + eXngkVKiyzbYOsX+u/5Pxm5MM0HT3G/Y9slVX/NV4WBiaD21T4M2LLV+TsBW1a+kEbmNTi/Eb+A3 + 3yKNW1x1FqtlBjMQ77HHPjZgMMmLn98iixZZ0eLvQwkeBz5Hb79FlTA7Qwb2vZjg4HTH6dy2RvHz + ixgZehPR4LK3QXQQRnSvsFetoIHJn3zH5Qf477wfXYozDsfTTie8YvegiF8vqo95xWbOR6EUjHP+ + p9+Zm157+M2bcHBtm2HW6lfx4wMCUjVj7MUbGYyflkUNd38CX39W/PQqNgUYDxNBtwYK53CLv/1f + ffX1CjXQulQTtnswajcug1Z7jqkOzkd9Nt8lkb55CQ1GF1Vrcn+X8MSkAmMUGmDBYYXkoYMBVp+X + Vf94OtwAo/xY37xQjXj9bMWgIDHG6ubcDaQDAMLhbMxUUZmuE/2aCNI8+AvGdDlVHD/cXbiOJCO7 + qrNS5mvRGXYN2Pwbr9bQrWHwvOfYZmURDddo54OttB7J/uZM3neecQZke7z++CRaCTlzMH/HFjJv + Wl1NBbJD8IoZR1VU3j2KrRsHF/HFCKcOacVu7UOB59jYEt6wh4FtIwLB2x9T+usn7mBehZ+fQEsg + fvQ/fHW1OPnrD9+ekOhZDgNdr7HhW260s6rwBOx9yX/5m09pFaYGfCTdgSrfPGVtu9wFwbjmRNJQ + 6X35xYXXvXHB5xEzNnqSQwCNDyp1hE+RjpckgBDGTKBKzgeMbR13s7+oq0k9fXdP2aHxCni/kIo6 + aSJWS0jeyj50ui395qdsXleFQP0Djth+vEJvjZEd/5mHAG/0U25zdXP409Nofr7BEngTARvk6tj2 + 3jqb/ZI28Ps+VFx2LZipOJ9gHwIfW1DR/sx3ZF7iLvRSF03ajOvdBaTefci2McRoiVhfwOvevOBA + ij0262MSgqO89NRx2+ew6GccA+8ouFSLgyJa3PlqgvBskD/zheXrzyX+rIx/5gtdbQR/9BzWw/nu + jec3UH7fk/qnCuhEstdG/s5LfvkBm/mLbQJO2dlUHT6dN2eXroc1Ss7UHXVTpwk9zLB/bCU0SLQd + xjXU6l/eSf2lXKrxZq83Ob/KFtrMzwPgzHdP4FU5FwjwrzmdQjJpgD8eUmyUepwuBVJOstJgiyor + LofdZb/voCVcj1j1gJx+8z5NynenCIFleQ2DcBwb8PVf1Pvm/eNhNslvPoLV95gNM7C6DaR8ElBU + S7VO5jPOwLmHGVZkzoiWGqvZrx6sLAVJv/MqDlbKohH5fIzB0igfG26QrdNLplbRPFEhBz88BXfx + w1YV9yYsicAToG5P+vjbj++i5r7zPNGb7PhYy8W6z8gbI/dPXgRvC+fRZ6ca+jolWw1883eypVoe + sfCd9NA5yU+Kglmpdms4JHuY9Tfsm5t7Shdb6qHjPVT6+FQr+Pz8JRC/XfrF/z/5tRQfQ6ojpFb8 + +i4FaGbdGStfPOieaG8CDbxdHIz7Evz0mSRcUUjKbaV6/OtMN8A1/N2Xz0qdgmXu5EUxMLa3Vum9 + el5GEu3UAPvnwgQfUXI4iAsUY2Pc2Pq6As+EQYsC1GBCopl/jsYvv6L27IfVgtdPDDUFXbDZiAsb + /WrugLwPG2yUaz9MWiWOf/ZLt3fjgb2FQ7c/SMTG7lx+9JXtzBl+uOqI5oExQDh37OHxfFGJwPwW + LJJTJbJb1CvNv/7nm1f4MC9vwne+W6bsFEvn3++xkfFbj5GXAGFwCVUaypyRLpvNNv/x30//DMQM + qxMYWYfxk2qb9Dc/A48zq8jwzW8moRk1uGseOfZdNkbjPlZPoNY3DckyKg5UUu4a5DvJwb5qnjy2 + e2sJ9O7nFqei7Xtr7O9CuT5tX188GdP5AMD6q5ca7XGqlnuj3+CnbA/Y2EPire/xlIPUX59fPFfS + OSuMDl7U2aTaN0+fc0uJ//SPfidbfX0So4T31cxxYHJBtZj0Iv6Zz6pqzfTFS0wNtmf6RGJJ22hh + e7uHhoMeSCpyIxX6q1uDZ9QhwuLJGPhhuCN4d+IOSb2ZpH2ZttJPL1KsDbpHIziKv/pR/XZ4nfm3 + MYGynmvUu4sOWKwd6n/4SP0tIX/4EP79OxXwn//466//8zth0HSP/P09GDDly/TP/zoq8M/kkfyT + 44R/UuHPSQQyJkX+97/+fQjh78/QNZ/p/05dnbfj3//6S/xz2uDvqZuS93+7/I/vu/7zH/8PAAD/ + /wMAVLQZa+EgAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7be211ab1afd169e-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + 
Content-Type: + - application/json + Date: + - Wed, 26 Apr 2023 21:55:16 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '172' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - 5b810c395d48248e473bc4e53de0eafa + status: + code: 200 + message: OK +- request: + body: '{"input": ["Sample text 1"], "model": "text-embedding-ada-002", "encoding_format": + "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '92' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1R6SxOyOrfm/PsVu/aU/kpFJIs9AwG5miB4o6urSwQREJFLAuTU+e9d+p463T1x + ALGiycpzW/mPf/31199NUmb34e9//vr7VfTD3//j+yy9Dbe///nrf/7rr7/++us/fp//38isTrI0 + Ld75b/jvZfFOs+nvf/5a/veT/zvon7/+NuPiQ5nS1Anz5acM4uJ1ZNpa6bouyZMjLJdQMZs0H071 + gcrA3keVHUtt4GxxmpbKpd8Q5urcTppOaEXZRcct8U/hJ+LRfchQhzZv4iqmnoz4sN3CaV17ePPq + P15npqcc9Pf9yLCTWmi8r4mBLoLgMxWNqKR6tKTwcM83rLRb8OZ4jLcgX4URN/ux8KgSvxtUsFJl + ji1XiBfJs1dO/JwwcyO5+oS8tQGQGluyS6ibDOB1N3AIDfAQNarXy5f4iE5iTpgpd5bOk8GtoPH0 + HsOptJOxux9cqNitYI6nd2WR7zcusGJ7JGpq5px/KtyjfIMRUzeG7bHJkVto/O1AVxpqk1m0BRFN + SbH/vdeHi6MBXGZXI/jlC5wv2PWIOOlvTDvOQTJpbiSjOiUWBn67JNy/Xn3wyyUi2HaSjkVXVwX5 + yBFTYytLhsxY9LBYXp/EROPEh9rYuqjFqkWiNiiSWeR2C/JN7Yl/Pr8iflyNAbCk7Yg1r4/JpO0M + G8xTsGGOfa35tHx1Mypq4cX2/uB59LA73SC1IoMR65B3w1IPd0qRVwqx/fuKT055zWVf2nV487pf + +fRQ7Roqeq6JqepFN54Wnx088MckesuJ3t+PqyvE75tLZ3o8JPMhMlr4iF2GZWU36/w45bmSc/HI + dLy0+GQ3VoMEB9XM552YTNkhCiFU45KiTaXpq9qOJbg894jo7eGgz6zoCpgyqhHttEbelK2XPRQv + pWLO+/XoRr69jhBErUy2w1R11K1iEbQNbYlNLi3nPpF6KNr9kaloj/mYvhoK8Ts1iZvG124+NMxA + S6FGeNwYjc6mdyOB3jIdo4Sq5ci3wQzWLgqZKgPhc56QEbFzLBNVjktv1q39HQQtCEhc58vvfCOF + o9V/8GrLRG/EdZWBtspkorfhAk3Ds/VBavSQOc6ClJOmNT3o7+xIdPKu0LiT9mcYqLRlD+uglnxx + WI0gLsUdMbfUSqbMWFMQUfDBrWaXXcv2xxEuihQwa65PUecf7zbo/RIRuz28k2lJ9XAhn0WJqcl0 + jJpTezuDoF08st9Pli6K2b4CRweN2K3aJDRcbRuI5lRiu+tcd62elwKasl5j3/pG7RvWNdSZb7Mt + fY1Rfw9e7YabqoXFSyZ7rHhMdzADATOrZ/eo74bABr2bF7jE4Qf1lXWtwdkqKv2eRj7nPZ5huoQp + 3by8mI+ysLuDYFaIHA2ZdsxE3oyGz+fAtFVYc25eZwOma3JkhlUZ3YgRwihXypoZxUg5m86rCriG + MDG37Oy1b+loo/ipFsQx4lXZiuvgKhedfyJeLIlRf0rFUZaKpiPGM4m42BQTVRabycYoyYJo5NOB + /v4PcR8z6bgvhi749InpSp/LaKoX8xHBZT4Tc/Pu9LES4wJC9dwQuzvPel9ZCwEtlrcn80f/xbn0 + DnuwrBMmuNq9kikw1ZtS9ZgSo3rb+ojjZwHdOtdYGNmWN873cwPB4aUwL/G3nZhnkg2N2ZlULoYK + fUBUJZCjzsALYSGgQWu2LgzDAqhQzxKnsRbsAMKsJbozyeWc7ycXILUV4i7GRufF9ixDcmcYr4Ps + HU1ZPM8/fGV7Zn6i+Q2LCqzj7sFIwaSOfZS4gGY3AXMVKU9md3J24EK5ZNvDAXs0h3knT+mg4sXz + mnv9/Vk1Pzwi1iy+SiqkwRZEFH6IK9wT1M+fvQyOc26Yux2R1/zqoz6PFcPv6ovfnlEp/ttsmRXd + PL3vXv4ZhdbKomsehwmT4HMHTRFuzDEQ72YRoSUSsHOmrMjijn+UAW+++8H2YWR6IjvlNTQ7puFl + zVSPhviDoajhxbCzeKFRXtUAoowHLO1x5VF309TQaOcb28X6q+PJZ1dAEloiXqFHyHlSdTtUPJsN + nkRJRZ+l9RTBneaantTjs5ycMijAIujD9M+iTJh0kEK4CODjObotEY2VRJCDu3tjhhWZnRhebCo7 + 6rzHyzL0EV+wdY9++OBPgcbnHPtLcDe+S2JPtEs+mmK1CaP8Tqx+LyQ0dz5XFHf9iuz7riypgoYW + tNHBxLs5p6TvPmxEl0+bM3OzufJZSbsePbaSSHR3JZaTrYU5DP3KJMbr9vL4wjmHSBNzkcXVxtep + aIsimPfpitf0dI64eSyO8JkuM3OzRdjNwlHK5STOHsRVmlGflVrH8P0+Mcpli9o8vNfyZ/Uu6Glj + 7vUex88c9I4azL8kozeHF7VHx70VEPyObH391SOQC++CrjYXmXfSPi9Q8hAt4s9rt6wF2wkgCWBB + 
jAKPnPvHowvxKwjJtbHVkhejlAHs9xIhz2uuz65wodDo95Bg761EQ/2UrsCSpvutVzc4t0kF8+hu + KRLte0lF1Li/98xUX3k5yo9RBH+tMlz0fhxN2UnsYbrdDySklfadb8zgNOHyWz+G/hnmjw9SZ3vM + /uy9iPu6k/3wll0/+y5hIz7vIK4OB7oOG+AzO1sCTJHVEFzPV9RF4qWBKr8/sBifomjOHcVA2ux7 + zLxXbzSzQ5WDX6xlvDAdu0uHNMxA3NgTO4ufUzmzMwGkNy+PuYqc8p7vmkJ2hWpippycEfc/rQSO + fXl/+dLlfKEKM2LpOqPyFqceP8qqCrgPJOKPiwKNcpSGsHKDgKS+N3j88yhDkLPgTgda3fQ5ViIB + vvqRuIVX82bHXiKS6FXGY2xl0TQcZUCwJxJVosvTew16Hij44y2wEm5rfVhelwVoCtyY8+odfT7k + VqX85ntYIYt6y5Yo4qaj0GpHnG64dEmLHt7jwH71Ml26qIVsvX8y7Xsee2s9NXApA4lop1OqcylD + GIYqaJlh6auE6hdpRkt46My7lkn0Pd85uOtdSHaryuXf+rlDa2Qdc9y573pLrSSAm+BSCWuat/6U + fAuhvdHxeNcq3ortx4bUfXLm3WCORouFLqTODegkmjqfbHzK4LHPMUZXLY34cbiOkDyWFvEvt2fU + p8dnj4qWHOnzIhgRT+5e/dOfzBNbuexlPC3Rg7S7P/px/vIVGtomJLYvRGh2XX6EBVogpjdtk/Ai + cjGqgvLG7G6ToSlQxxbqu+cxvbHdZLIVl8KXf+hnV9yT8b6oa3C0jUGMZx+iL56KKCRhR6xg30R8 + Go8GxHXsMFdXu4QVl1gAwZ4ZXj77GbWh0rpwNKuJbJmdelOw5zUAOY3Ev7gHfdaHCIOZtE/K2zmP + ZjHdSCh+XfZUXjw+Ho21coYpkFdU1lqtY8U2k+WhOr2Y4wg5n4Y4u8n652Ox3fmWebzFQQWPrSxi + MTaW0TTQl/tbH5qjMSmnpR4aKN/4CE9rF5fjPRgaOUkpwYuyWHX9V8+idN/0xLDEORq0amfAdPaf + uIvX725ava4GXMpQYubm7ekzy9gd4vqI2fc8cB4NfQb6p/bp0vSGkiqL1Q0sO3EpHKebzhedXYPg + CSXTRrLpxmpOM5SNqw2d2XUfTTVqayQf7gfmCkrnzfn52sIHKS5RkZ973/WRwXqIlHm32zGZw1AC + tMK5yqzzKY/4Fw9BTrwSr0efdLxorrAR5x4T936CbsS24KPlYpiJTTYNnxkvCtTsjQ9JVscAfSCX + baS3rU1s0jh8GFqnRzGVPsyTJsNbT4p+Br6VCEZJ+uxoeLH7P/rKseprMv3wrd3VJd7U2NLHzrvZ + qGAv9PVbTVL9+Lwxcky0dbLv5t/5CPcuZ+5itD2qy5kN3So1CBGE0euKle0jTR5rYoVm4M1CrAZQ + ZxeBCk76RqMFzg2ksn8zxzJuyTQsrnfw11tGrOnZltyY1wDhdWkz/3LTIl5MlyX68gtumhuN6rzf + zUhvioC+pNUdfWBY7aAo5wlPK81Cs6h6LdTH4Y37+Yyi/ucXpvh2wtxWKPrjh8w7v9I5SLcJX3Rq + pfz8sB95C8SK3LTBfzKdblmLvO6zM0awrmfCrKhrvTnWAkMxb9WLOK9hjYbM5uMPr4n79UujZY89 + nKYoY9/zo3dmqi3htHyEXz6Cbqqf0g26Tb9kN2UX6lPNPQNJHzViu1tMkqkufEOuj23P9HaZ8unB + 4xDkxCm//m1bjph5gLhznIntK6beV9cyRNVV27D9F48nW9n2imWSMxWTtIumi2nK6Ov3qKwhtRsr + Lz9Dks0eRdeiL/lU9iIMdZoyN0ViR396MTV3B/xcJ/uS366kQLg1TKZx4ZbMYvt0Qb7bFdnj18w/ + l+VuifTX8Y7X3DK6b/3RP/ip3pdPNNS16oIZEWCm1t08LpmOij6Kvvvhi859/WTDgsc5IxB10aRt + NFvh6jpkHhrenEvn9xXgivZYweIBzYecVJBLhsFUGT31mdUoAItsPsw/Ld/JzBjN5K9/pfzDuN7f + bWwjcVHEVIzrORpq/9lA8WRLvFhMe33EgGXQm7f31VerpNV3mxmmSB2Iq7y1RHybDxfVp76hMJ/8 + jhfbTNqw0ghYXI8vb7qcNhiEvfogptbJ+lCncNw0ZuV++StH7Q9PvviDV9u9pLPPUt6Cu/b2+LWl + 74j+6hUX7E5X8s3Wx1k2JJQkXciscCdwKp5uN8iVZ822Ufvy5jgU7jA8DZGosl15Q5ZlGPT6dqUK + 1fbJKts/MEot32HYMxdRYxWtBJemrohhXJ6o5/mth6PFK6p88kyfLmqbweUzJGy3vofdeF9bOyQY + mYFlRVZ4g+NPAUNurhiuA5lPMG1scMjkUSYoWiTqVbNF4vg5sH1YfX9P3apScFOLn5/Ue9ktDBjy + rUynMZc6vnDcClhoERxszic0DcdZAMHsRIwkUutD0D2XcHkXDdutIo83cp/vkHlJe6YTs+bcX5hn + eSG1B+aJ1sC56We5nM+rCEs4zL3RAu0KwX1/+sNvLTtbAHVYLJmaXnA3u82zBr3EFV3W2dObanVn + IMfCI7M/49Wb2QlCJD0rRrYDqcvZXV1FENfbDSOCEHh8VC4qsDo0CDG7uuMmtZeoTl2fOdWO6WPq + uz7EpXv74mPjzQzSHvyXwIlRPAjinz7MlWFQgDQf5iTc/GwM9OUbslu94nJaXvMlXMoLx3BRsE71 + j35HjRafiJpeaMeKywGAb2VCBe+Yo/qw3TfQLRObDl89N7vKeFa6zcSYtipNtMQGvcvizB3K1Mst + 6Xe3yw6q++FDVNmZOTNF2P74A8fe61NOy7tU/fI45hbeDo3dVbv+yau0i2AkortbBOsvfuKFIX3K + H38r4lidibo5WfqErvUZjo74pCv0mBErdqEAHSjlHz081CrewXLBZppGu8LrO+/mAt/ZAtE/fh19 + /awE7fbMCbFKnFB9NfdKMZz3+Hnxz96k7S5LOI32m2lHvtAnyGcX8rUjkN95o2ES3OHhBBlLadkl + s1hoLcix1jFTPxy9wTl/QlnqLwpd05MY9fj0yoBVrs+sAK91JmFLQl8/h8XlNPAJdE+GKoufRG+f + WjIEN6GAfCX4THcIdHxUHqqsTTbB0rd+ukU9ir88gQqO3yDu66krL3iS0wAxn8/NW8sgicsbvkTK + mvP2PBqyHFxPX30Rd3zC26uymPyBmAhHnEvqJkSP/duh4s3Syok46xyS2+v81Sv7jpuv1Rb0blzQ + 7nZbRuyjKVuEh5gx/BqwPtWUnwF/LIP5p+qiDwM93JHU7XdY8IygG090NUMSSAaxmyT1xnTuvvnk + tmc7cWcj/vMvjEkJOyzHhlMlWN7l1szfxPb7nT7+8peYwYuYGU0jtuCNIbc2TfDGfQ5lvzusK0ju + 
TcT8izvp7CjbW/TNa+gru7NuGixJRa0veMyKHN+bLoaMQc7CO9vFYlEO2anbgrMj7JsvbX75ZgGO + urb/jBf1ive//ITOdJV3PLmgJZKeNSPGNx/48cWmqihh/sRxt4F7e0PDsNHwdNrmHdXzEqDOsM3M + dPfi07CoXCQY+YJsWZvocy61ZyRsE4uZmbDvZn0qBDiBMNPkehjKnguqoCzE1xHTmvQ6Ty6NDR1C + b2Ju0yaZwya5gv7c9RR996e39ncXnTazgKXufdDFePOQZZGPLtstn0rHzIAGv/XBEj4q5Wug8R2G + 5nWiDCo34v6RiWgKpBXTjqT85gdCBc6+upLIdg98VuJIAKnRQrbvHbOctJBfUdG6MXO+53tMXzmF + bvVcEH9aKsmIMwOgqtGWKrTsojHVix5S5wq4XeRrNNTrcInYrWZEO72GhE0SqiHIgpAY+U7iQx1f + ZXD8/M5cPYy98bffjmGumTatonL+5rmoPrI32++DNR9q/9Oi4LQaMJy2sTcNiyBTjsZr/OVv3rCc + 614W1ANhxlOZ+DRYowoPy62/+PLWabzxfWD3qaCyZusd+1S7HjV7p6J5esGlqK9kCvl6azOiXM/e + mCLPRt/1JbrXRH/8FwSnecbfPAK9Lk7aoHZXlWy30pNuWraWjz4LVyDemooljc9y/sMH+pKijM86 + iUP0WZANwy8v8L79hCXIZe3gEbGeM+mBpT9+qoWt+NW/doDM+FXhMW325RK34Etc22B6NNMkGqtZ + G8FKcp1Zh6JMVjUqakjVzZ14sbmJRvlyOKJU14/E9oclGl5ICKDRkhMum26tD/YZBFS9QWD2fhrR + aDn+7ZcvEuwZY0e/+TZydEFjpj7rSf37P/izdZh1KEN9WOrDiHA1xkxvkJWMeN3doXjJH+bPxlBO + mtJXKE5RQyd+h4jqpBdkM10fib1/6N6PL5D8OF8xzMa+W5tUFSG4SC3NbJA6tnC29Z985scPc+xm + AnzzXCwvcOuNaRCDbEbGjtyE3vcmzU1kYHHB2TZ4LPjnh1/C3rzRBWz9aKyCxx1+ea13PezLOW5M + H5IH2jIi3GdvGmydwkli2z/1w4tKw8CYnBAC2z6Z6rar5aHEFIvrvuBULLQGzPN5Rb55hcd/+/fr + T7h3HSMq6FOh1OGLMVc9r6I+FfkVllxqWOath26Ol5ABy4UrM+9bB/W4IxLgonli3oYPzvwjWyLz + iJWff+d9mmq+chr3BV5889hpeYUCkmy1I/Zn30UDyNVRyvmGEFXiH692ib5TOOEn5t3iVyfqpAf4 + 5d3qvSzQF5+O6Mv3xJS0yXtdiqUL4WX2f3lqOSyvjYi++gA3jYK8lqmxiE5SazHtOD3LIXi/RoBj + l/7R780JvVTQ6zQiqhRuEHUV6Qj5xlXpOjwzj+qkAri83yXTRLPkc6M2qtytuzUVvnzDpTO7gT81 + NsPuXHDmz+cd+GPlU+rpT8QWEAVKt7o5dJQKV+dRrmD07Rcx4wW6zj8l2qJsc4kpXIaUT867Cf/k + Yf7Xz021/2lkRx333/oOuvaX/6ZbiNnRQGnSjTjbQeOrA62+/DjreRDAdDQ+jCywGolhpbcQXkYf + D8Ne69hxNYagjbsIr9Jr1PHjSgoguNs3jMRW7vj0cG4/vfjD53IY7McV4ue2IPs9cTzacKlHSaii + 7/55SS/vbkeIO7oiKsJpyRdvo4DHzmiJxdEYcfOlqBBcFi/m1GTFe5mYBRQdPpFfPv315xRVzzwh + JF+PnBWXVwOCI3aUN6399cvB/ecfifnNU2elpc2PH/H0zccG+BxakLPThRGl2SWzfhlnpXGkFUv3 + 01sfZ+/RIDlIAyp/83++cLIAQt+byZ5d1t4vfwdrP03MMHZbPmTFIADfLlyyjZwVGpaWUmxaHzxy + 4ShIeOL51fq0KjFthXvCp0tpiZBi+u2fnMqIRVsewLcfTB+WNib9LPsydKN3xcu8OaM5D+8Vqh/H + BTl885XhmwcoR3udY3m7p8msxKxFzd6rqODyqZxscwXQLbY5Xk/zp5sCpxTh+/6Pv2kq+Rmib/+T + bXt/k8xMMoVfv5D96bf+8tpv/kqMcnsv52+/GnD+qsg26jgfU6S7v7wQr7n8LLnvNQJ895fOga8n + 4q+efDgmTP/4u2QthZ4AD02U8Rw8HnwIVKkF6yC8mauedt3MHHOLqt6n/6Vflq2FIYi1J9O//WD+ + 6WuMgsTI2DagWjJibm/h79+tgP/8119//a/fDYO6SbPX92LAkE3Dv//7qsC/b+nt38ul+G8m/rmJ + QPtbnv39z39dQvj70zX1Z/jfQ1Nl7/7vf/6S/tw2+Htohtvr/3n8r+9c//mv/wMAAP//AwCkxTP2 + 4SAAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7be211ac8cdd169e-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Wed, 26 Apr 2023 21:55:16 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '114' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - d39b785f954e1f1cc64746735c44b522 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integration/cassettes/test_memory_management/test_save_memory_trimmed_from_context_window.yaml b/tests/integration/cassettes/test_memory_management/test_save_memory_trimmed_from_context_window.yaml new file mode 100644 index 00000000..bb29c4d9 --- /dev/null +++ 
b/tests/integration/cassettes/test_memory_management/test_save_memory_trimmed_from_context_window.yaml @@ -0,0 +1,505 @@ +interactions: +- request: + body: '{"input": ["Assistant Reply: { \"thoughts\": { \"text\": Result: + None Human Feedback: Command Result: Important Information. "], "model": "text-embedding-ada-002", + "encoding_format": "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '207' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SaW8+6Srfl799PsbJu7TciIjVZd5xEjlUIHrDT6QAqCiJyqAJqZ3/3jv53dnff + eIEkj0/VrDnGb8z6j3/99dffTVbe8uHvf/76+/Xsh7//x/fZNR3Sv//563/+66+//vrrP36f/9+b + tzq7Xa/Pd/F7/ffl8329TX//85fw30/+70v//PX3XQk9tt8IacmL0+aAZtXYYkmRk3i0zh8b5pFi + rLzv0M2o1HUIhpPLtvVdK+e2udog7hYz05U79abP/jiDvFzb9HMyBc7pcmfBlXkJ0Vv87niwkiU4 + sljB4kI/loM4RSoURpsz1eO6N+lmkEMxVCa7P9Ys7oPZj5AyWQMhDrfQQB6Ers7xVmJeLXv8Izjl + E26iPzMzyWyP98HTVF5ayfH4Kp98xAsYAeWwIWatnrJ5EK8YqgWVCZG1jo/vYRChR2nAgtXpbUx7 + bwyVLb+PZJsJy4zp86VBhbH3yfaYXbJpV8Qq7Be7kezyzs9mGaQczmTDSGAeC29azBYFv241gsW8 + j7m4fkYwlVHFvLVUGO0qnhMIzdbCRbQ5Zf0wfQC87Z7RJc+vXT/OU6Gc7oqFY/DWZUM9TwVnf13h + 9TVJ47Gzb7dNhIWQaRtB7vpYtBOIM6nFoF1xN6V+BYi8n0eiHac3mjr5ESmhZlH83jQu7ze8xqhC + 3pO4Ual6DKQ+gfV+ZeJNuhb5TMkiRNdOCZnxzE5ojjenBgYtw8x5OrHXH/RxAXFWplh5+G45Tout + DmtPNIidLFjZbxG2ACo9xhJ6196YNCsdxN3ryez+2Br8GIcmNEWUMHN3XnZzliQiKMa7x+Mabw2q + nHwTmLdoWEDNlk8qoQmUu3NDRzWLvPnOnRQyCgHx8qPCRznYhZKUSFdiBo7H5937LYEh7q7EFgXT + m7faKYFMck50SXWtm6ISQpBjsyFX97nwhnIvNbB5zoxCkEt8ekmujVbCJf1Tn/N9OUkomSKd7DhE + 3TiMJsjyqVWJk5fE43Oo+cru3chk+47Kcl7blxzMMDkx6+IXfDxgM4d4teVE448mG2+F3YC07Y9k + L6t6KbJ7M26uy6Ihunpp0LRUD5HC9ssPcb19iWb/eirQK2lKLN7mCs3f9UG4EHzmvjSz46K4jSBR + wWFXE2TjI702Jlpp0YdsTweW9fXxISpSb77ILSjKbl5bkIOi2SZ9HR9DPNloryo+7CeiOfWqm9hT + T5fFSajYSdPNbLx91q68iS53Ym4Vo5wUT8GgHE4+2e5EF/F3ieFXfwxb+jL+7Qe6zscHIw/3kQ1a + 76RwiCqBLqJg330eGdTwnE4Ncbj8yfhhq1eg6lbCzFoV4748pBGIWeGySHnpxqxrnYDSlvXMX+/X + JTdHhuFRPijTrkZlcMEsIphk3SQnxIxsbj+yC3f/reOxdgHRxVOo5cSYNPZnv7uNGcLBW8dMy9e5 + N3vSB2B/e56Zvz57HQOpSsGqng7D7Zkan6t/fsLFFVOmfXQcN3n3qSB/7Xd4JV9Pxpp/nlg2NvGL + Hr/rzbREakGzsz2xz4mXTdXVdFHa3XxmJxIqJ8uS1d//R9Q2HPm8302t4vs0Ia4nXflILrMIV25p + 9IMXRTcDk5+yD/FE9CllnO2J9YTrEUKSCS8az7c6keF4yrdUejkaEo+HvAdotZjpneZ006JGGLgh + nyjSu8n7ra9c53ZF8OWy4TQrqxMsD/Ibr/Sw5vNLFUz0eFgW0/TikfXO843hDZnF9LW141O38UN0 + /5xLojXHIm6bVWyhzcZeUnGhr0oWZWsVjv11y/RLGGd8UUQLNG5FGUtdzhG7Hh8jpNNOYf5BexvM + LBf9r17oWE5iyTxRPUCqOQcq+Zt9NkjnsoaG70JiNDtm0FUsJyATZ8lU8ci8T3V72Irz0gKmR6bn + cZfjA6hUkJielK+sv8ahiyS9CEh+0N7eVMeqD/tHX7DcOlUdl+6hAOMMHY4bQfG49nrY6Lt/zJEv + RjdLjzmHcFhhRoRsx+dV0UpIafucuf7a9sa8e1SKSw5buhzJEk2fGw4Ryt8fhi+vB5/PSoGVJfYs + gi39Hs9jK0RwiMAk3lNbIRoMmg6Nzs90dcJZPMessZFjRz1z70SKB+IWVKFZw4iDpb6bhW0v/eqJ + WEXieKJFShmtvZwTo9KQ90n9agEf11fxymQPj3/rE+Y4CIiWu9jg76oE5RwLCYkbeW8MO1oUgIoi + Yno0XYzxLvkNIor8YtqjvXnczy8SqOYmJLt6cYtnXSsFVO0hJffTKzb6d6EcYP7AQJKXyNAc9qiW + o6XrMH0cNETvy0lW1KwdiX6uMm80S9cE+gpstm9In7HjHlmoGxYlM0XP7phZihRexzwjfj9fu0nd + GylsjXKHEd2cDL56r0fYHQSdBfml6egt8dK1OhsaUWsX+DQw+4Q+19Qm20Z681G+LEcYL4ZOPCyW + WfMuVgd4yM2NELcQ+Bi84hk2UXYnQVZEcXPWuyccfFxi/j74nAYt0cFf3jdEy5XR+9hWVSnX/lDQ + 8eZF3exfb09UvZcpPurKAY3zi9eQdiaw7WZsjAElyEWiFHWMUP3RDd0WBKjsRCMJe+4QOxhDtVnc + /CuJFlFofKrm3MBT/dyJ/YESTRv2roGvtR0VNPfR/c4vcqqPwsxyMI2RVb0O7OhMxHscdrwXgtYG + KXEUYhXR0eAD5xUapvMaC9JNRr1qQQHn+hOS3ddPTL/+Vu6oRDfx9CxnJwpvymPnXolDD6+YYqs1 + Qbs/Nbbb4iufPtE8y/aj9DGKP694jEU7Re05Sonz1j9GR66fG+TocyYB5FlW3Qq1hVLSJ+LHes6n + 
73psvv2L3aqpNXrFME5wCVcZXeqZWDK6Supff6BFUSWch40AcKlm4+c3u24SCxs2SFkztzz2MYtF + RYf5NL4YEY4xYpf12lbOBDHiPMnbG8JK69E5bEP261fUuZYLxfG2FTPXXVKOWygtxdt4Kl1994t2 + 4rP5+QWm3ry5nEmc1vCpX3sWOKgy5tSmB4Ty14cF9YzLyYq2TzSr6ZFoprov5zVqZAT+5sn8hB4N + plgbADXbF3SeihLxxlUK2Y/YheyEVYNGYZlX0O0Cg93UxojXsN4lkOJTxrbN9oU+/f0oopWQpVhW + toPRf1xbgvb0kJnaORYa8tV4g5W5fTDbX25jQVjmNYgVi+n01Td6S4wUZK7tWWx2Yta35UMC7Fod + hfNqgUYnGSmUiWVTJeVmRpdwrJG7jCjD1dR6f/z+7dmlTD9XyJvWfmJBVy4r5ji+0HH/cpegvG1t + Kn79AttvQ105Facz0e2EG93HjwrYqSjF3SoZMz7nGyp7eXonznITd7PixLViUxrj6WpUHrXyQwKr + KYgYeShl3F/33JW/vECnr/7+9gvEvYfw4ls/LBCuEnLssCdGExjGdPaOKZpCKyeutLwYbMs/GO5k + cvFkyEo23iWzhQoVM1ZWp50xW/khRetjmtMuPPFyymcxR6HZWMyhaBnTtb2/wcJstvTIvQ0a64gJ + sI78AyPLweCrRer2P/9F58vpnU3v/oOBtPTKXE9S+Pxc9jny7PWO8jeWeOuufYAyfYzMaZ6bbGRV + pSLJXK6ofHu2cS82hb0Z2fhkd1nrEL/aUwThbkd/fsUYV2nayqupen77idYNxG0oJIIls2C5enhD + t7uIqH56L+afVzc+mEMIimEVJsmrzS6eh0/Vg0Q3129/WHlcjiJJxuVewMq7M7u16lg2vGE8kUB2 + HGO6C1iFHukK2ebqu+sd3/YhXokx0Q/na8ytyzzLS1yccBd/XlmfRmoF3DousBy0rdHrFjPRx8Uq + +fHaMAzd6Y9+kbR7G2yslfaP3uKiO3lN+zRlEG99RIVaFTMmm0ECWE4qdupuijHkSwrwWqYGTSsU + dnytK4L89StYLvS0m8iD9OiwOR2I+6JzOQme+lQe4bGm8vbloTHdJia869T6Lz1TXKmCquIR3iw3 + cTm1gzQij5EzHheHTTYuxeCEzmETMv27v199bNHjWgyUZ4R2zbc+IbxeF3h6jLNBrb0qwbc/EL/z + N9noznuq+J/bhu7yV5LNi8e9ApX5NjlmmhJPZ//rZxQ9JNbL84zZMGwBDQsJM+9IPuU0yjgBZ7fJ + 8VIWAj4UhlRDnPRP/JEvZTnexXeL8LUxWJaiRzmFEPTy5k5O5NufumkzPyo4x3f7x3OcBuLyCev9 + 2mT+LQ2MSeudBD0HTljQ3Cy+0u6JhNwjPRHiqBUffnyNCkcnPz/cB7U0A7bKGCvLz8WYbdMF0Hvi + Md9nLZ8OXiugL6/h2esqNF24J0H3cHssn0+6sVpVj6eSDOcL0wZUZVTFnxmux7dKjEcjoBHl0whL + 7FhEn3cKmtnNm5H66CZi7s73kkfN+wm//rTLtrY3qflNQubndSPOpta68dcf0WWeKLBEy6ZF6lKY + nuiAl6G1MXrUBCESKleit1jlJY3iwIZWlV367hQ75mZqY7gm3P7jrwTHt/GfftHJ20U2xa99KL86 + sWM757BHk3/oW7SpDzuSFO4aNdfw5qJqcW6JecGtMe/ajwTgDCldCYplTI/rpYb5lB2J70HT9S+w + Z8hvskiMcDx0k+9cXFiidocXEruVX162QEObiKVBdfovXm0PVkTsgxd0Y+egGxQnsWJ29HIy/nj6 + ItyXAyfq64r51Jln8c96BlltxeNL7kZ0ZHsFiy9p4w3Y8egfvkZGsvRaqbjegK7qhO3cHee9n3Qh + LLtwTTCCPBvv/eQqoFQdFW67wpu+5xlwjkq2C2tsTJ15F0Am0p3pU1Hy5hqHtvLjqa+/K8fstrHg + y6+YNzvi8bC9gDS0Sv3l9cqbBJAXYF3HhumLaPSolkUR6FHL6QPfu24owocJK5pcqCiKQ8aVbQ7o + 2p8K4ntOXQ4HdysCf+g7hiW76vicHXo5j3JCiCwEiF+o3cLxEiokcHRsTHhHeziJ9M2cpbUvx9d2 + fUJmmJ4YOfZXg9/lW4ic/X3FjOOnLefj4UDRr7/RH08HXaXCXrqpzJrcY8zLRJWU2/26Z66yKL2f + Psrbs3n/w4PC7Wq1m+2wu9P38qBz3gBbwHqxEOgo0VXc9KcOYBlwkcpfvpix9bSg3B0b8tX3jKMr + TUEZpIrF3/PySS6tBJ/NdGE7muvdKrtcc5Bm0SJWeY+QsEqUBkIhuBOKALL2x4dfvSee/lI82gRn + Wz7Uro8Xl9cDMfUcN2irHpaM6I+2Y8kjseH7e9nXX6O+6k8hfPmESsmCdbOVrlMYut5i+LxxOkFq + UooO96Sjs53EHvd8lqKvPpIjWR09LrbTDHM/vbBcHv2sPR43PdK7NWLagle/PCOEQ237NFtOYsYI + iRcQprGIPyfzwL95kgrokj2wcDgK3tOPG1X+5hEYoiNGfXmIQpjuZ4QfsvrsGPWWLVwvdsIMudtl + nXG2MFzcu4tlsbe4wN9hpXS7OsbfftB1YjuN8M1bmC+5AZpT1ziAcpqEP35zbsTRgtHQdaKZnHcf + KscCNBd5R7/+zBtBUGbEBUyo3D4O5eAhr5apNBf4/uXDyjr5Ljw1lbHwVepcyG4bE4y8NAm+JQYX + O7Ft5T7Y+Gwn6DXvjdOlRtuzdWfax9Yz0Ql5/uM33LNlFo/e0vXRfJpfTBflJepfLIxgp3xkhl+V + b8xDp/WQN8L9myacDK4vXRe+eS8J3q+w+/JKpQjvo8rMeSuUvE+jGibf41S2ogsaut1egK3gh3SM + lm32cg6yBd98FJe7RWN8fnmZ9jzJFK2lwuvly3pGfh1NLHjf824mcVTDj7/T3RahYdfRAl2qhBNr + Cj8ZX+V9BNJ8E5mavGhHl5Nuoud7JMz4vJfGZFmz/uN9XH7eS29+rYoC6tdxoD89WX/zN8DiymdY + PEk/v2jD+1Nhsv/YejxbpJPRKZ8XmOfI+uPf0Fe/yI83RUBYRcdPE7DcA7ucpItao0EolxRu6WCM + Yd0dYNuGb/whHe3o2U1mKPjiSAWpwkhYpWmDog40Yi1sp2PBSpZBJvKdKo+l36F31QGMF00nVn/d + Zf18piYkQu4yS4UxY57/TpEyyNU3v3nw1omSm7xrdJc57rbtJsGzn0jdhgUjsjAgqmpTBNg1OypL + UWeMn2P+zTefNVHPUpFNzkE2YSG/KCG8eZXsl6/tUSjRaRUUf/In5DbyjRlNUHrzOiILFN+GJ/GD + Z8/n5KyFymUNHtvSW8fnfl/0yNjsX0w/KZqx+uotfOrFg/iKUfIxyf0n0pdZ/Du/5eA/ri5aHUSN + 
qBI9ZnMwOPqPf5gu9b3H/RQk+R4pMdOtrcon7Hg92tyDE14+FCMWa6LVEPQLmX55lU/WbaHDdfX4 + YIgyHc14Mz1hfyvOFBRxi9bffgXaxkp/eXg5XPWHuqlXvcKsi9Xz8ZuvozXRPTxf3GVMm+DuguDW + d7JLl4IxDEN5gO95Yv6t0IyVtwMXOlNHxIoei5KmXnOAb35OzC8/D1/9Rs1gEbrM914sLD4HAe1c + +cJ29WIRV6/t+gBpsPTIL7+mXIIDyB/RpihrWMn8FGRkuUFInKYJstE6mS5qVa+lr5w7ZVv4z5uy + fLM73dSwMr5+I1LGPHD/5MnzBSkjSgb6IW4fq0iYXKUCFh02TJ2egzcH4rqQj7o0EB8UwoVjvl+g + 5lKsma+ME591AUXw/f14JZetJwKbn4oQTw1Rv/OSD1YkG4XpXqSif1A7xvCmAeNyAlqoD8ujjVo3 + KBE385en1Yw3CxNQOKVvXM+GlwnaQ36Cpws73ErLi9f/ztuX13HBwEajn9ohZHQR0IXEFmX/2lgj + WDvDZsHyszF6k8kV2JOc47WTvvisemxGfJ2cyVZwTW/lxFcLnoFYE7X/rL2Z3ZsZoki9kMP9AR73 + dEmGItV7upBWc0k39LmAe2saGLpoyPrLWn9C2kQmCUxH8ppfXlPoG2A//8BcoTyBv7xu6Fw1Rrn+ + 5ZPK2TGJ7V00r9dHVVf0WDeIPh0y9Du/MBwWNds6T8NYBezZoN/8CFt2nM1zMZ4UHG57Yk8Iexy1 + Jxm+/p6ch0/RTbI7NNDvt8dvnmWX/WklLxAuO5M5wtCVr2sgPH/zJubvjKj8w5e/+YHbBz4fOzRI + yLnF/Zffq4xKxTVH9EVs5myKoRujyyuBSNtv6NItvmTx9cP68d5hsd/fs/HLm6i/RTn7+ec1w5sW + vvv5Jz8Qg3anwq8eH6b16ubOlDDKnu6JBbzFfP7p93d+hqvicIv5HDoY9jJqiB7sHe+Pn/7qMwuq + aIFo6hUH2I6txLYmexjjRmx8pGbNSKxg9+ymqrs0YDrN6jdfMfqs9SvoA+QzYzPv48mDpoafP8O3 + pcJ59nApfOclzE+Ej8FB1tIfj2B00r1y7kOjV65zP9FZuVPj689k5ayMKju8Yjubn0K4AGSkD2bf + 2nc558r2pJDDKSDubos4v9qbEH3zTKJ2Ts05XYW18u3/vzwYUVGqZbCuc0Ms9CHxWO4eM9Tb8cDw + 2pHjsXP4DR1P1oquY96h/i6yBl5z5ZPd44YzXp5KCYTWkegCUN1NXz5C7rx9M2vL5njce3cJnWMx + IVZA83ICPtm//PpP/Q4uzUwQ3n1IjFiZ42YLnQVjX6cYeq8s+WQkPZCg7pndshdiYXtZyMFlnqmw + O9+733xMth4njWkrLSlHJ5F6UC4K4I9uFRm/M3dEW34diaEnpiGuUSHBd/5K7JcR8Dk59hEs9/2d + 5f2WlUMuZDc4QJEzA1/1uDNn3//l5Rg1deUNVvwskIMuV6KfR8MTv/O8zd47ELbLuz4bFzftpIi3 + o47F7aqOx3E6POXvvIqEr3PK2+SZj3CyUkoC93nz6LQPRDiVxZ0uLpcLGpf6SoIv/9P5ez7nxflh + wdBRi5k3ARv0PlUp1PYiJ1vnWXp85LWtvEl8xHM0Xbyh2woC2r9ZQZVVGHvz2U1GtFwIhHkdf/DB + 3pIa+gZvMcu9dTYt1TyEn17+8qEJK5K7iZ8R+tPfuKfFMvz0fOvkzODtXKnyenkyv37hU/YfV5WV + qKum7zyDGONGPc1IuSyBGTmq0Ri/Zwn2Uq4SbZneuj96uj4aPp3PY+kxK7mOaDhA/csb4vG5yTHo + fZ3he77vYr5V7hSybNyy++l+99jFfPfIyw3CtHfuev2cFpby9+9WwH/+66+//tfvhkHdXG+v78WA + 4TYN//7vqwL/Tq/pvwVB/DcT/9xEoH1a3P7+578uIfz96Zr6M/zvoalu7/7vf/4SlT/XDf4emiF9 + /b/P//X9a//5r/8DAAD//wMAaBDGhuMgAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bf024783c8096de-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 28 Apr 2023 14:54:47 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '325' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - f59ce3c6f76fb37599125046aec3d2bb + status: + code: 200 + message: OK +- request: + body: '{"input": ["Important Information"], "model": "text-embedding-ada-002", + "encoding_format": "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '100' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SaW8+6Srfl799PsbJu7TciCjVZdwjI2SoEj51OR0QREDlWAbWzv3sH/zu7u2+e + RCB5lJo15viNWf/xr7/++ruK8+ej//ufv/7+ZF3/9/+YryX3/v73P3/9z3/99ddff/3H7+//9+Sz + jJ9Jkn3T3+O/m9k3eY5///OX8N9X/u9D//z1N7ldGbNR1kfT59Co6Mw2K/aiVIi5YisGtOPUMVc/ + L9re624DJJoVM7PigS5k25cPVXN7MDsSyvbbeMOAFt0tZSRIHC4+lqELC7w4EyLkQtT7lXUGOVv4 + 
xP6aWj52jb5AVWAsmZY3H0Sr5pvJ3tn4sEc2Bfm0J7cMHo67IOq27vK+rnMT1MzcMUfRtvqg6h8N + neTdjm3dzTmezPutAtE+a8zpTl0+tXmwQGccXom7e9hxzz/GhBJtFxP7GR/ygdlYle8eUunickmi + US+7DkY/14n9El2PVpWkwUtRLlR+oi8atsc7hr47fYjrGEU+7dKwBOXerPD4dMN4bJMaQ4iMB9mb + m1gf40wI0CpbypSHFxSxaJpMFGYhJkTCu0jAbizA/dgTsn1POGdT4z7lNQ1Vtg9jFQ1ws314R+iM + uTUE8/pkmpwddJPpWvlup6phKZhH+0FcY0v4dE2eMvpe7IxoFwYxvSZnGazE3ODNEK50Zp8WGjpM + tkp2+15DvEpQAXW9jZkG6UYf3ru6QivPD/ellBr5NGy3AuyJR+jXMuS2t95+Awd4Jsw5Xl95Hwbd + E+KbOhDte3t443FRl0jp+RsPxKxaOhYhheubMyqHkhEJweK2kXUVUmYa/a4dJyTL0ETai5DqHSD+ + 3ZtX2DzXIj6v73c+JVluAnMblxk1nWJaDFYJJ1LdmOYPe1R56duACR0CYl2FQqeNaptw2eYRMQPm + eP3tET6gv9CG+dpRi1dppT7A/cCFHCIvb/ssUUxwk8/AtmdZ4s1d+GwA1+Gb7EdRjrg8ltqmzuQD + wevzHo2raAuKAlHI3FAq4u69rBr0FT4dHnl94PxjdqUUjvsvs7SQ5DxwVxu0DvYbgje3jLPLUung + IUU+cfinQM3xbT/lOKlbtr8GKh+Cbz7B5p0hsideFY+Lz2GA18dzsNBFEh9Ph/1Rvt+vI9ubG+SN + py96wuNSMaa3LEV0HW4GWJ7vBXFegeDxvD4WcAl0g+gtU9E0NFEF922ZM+sqGLpw3SkplKtsR0hr + brzWcqJwabx9YNawafVRJ4tSlkf0wTA9gmgKe/cI3mPPiLVqRL1TFqSAOy0NujwsIZqu1DeQpNgx + c8rUb4fruaGwWBFGl1k3oMnU8xT1eBMTs8F2PqRPHKCPVn6Y+0jktutWzhXOn+LK7Msj493KIRNc + hQdnvtMeeZ/trk+IPm+baFKwzId8lYToGB835IyzT9SwvU7RVf4qxDT6Tz6tjyyA62qpsX3XeS23 + nChQxhe2cO+Xb70+HXZH8PNWwZNK3Zhd3UxFn1oumJExveW7zgvRtlXfzPhKYTRsW/sMZ+E90pW1 + 0bjItxlF5095pfLJtHU+7y/42IwRw1/e42F5OjcgnO4ZbSrrHo/nnf6EQBGfeFiIIR9MbROgJk1q + slOrJh8DfnyApDtXKms+4z0JvALlW+dIPC++tUMtpCkI09Wg/C2/9eElr46gYW+PJ2uRtVO0LFXI + /exKM3v34GyuX1gsjQLTy0WJu82zCmCR7XSCfdXzhn5bYPCmbUpX59eej6iRRDAcGfDiIUhtt0mf + A0Juz9m2GpOIBmWVwZCWDtlnuzQfF/ryjnAgIaY9NIcPr+pWwS29Ueb6XEYsV90BZVp+Yq7mE0S1 + 10gRvS8LZjfJSp8KczVA0Dp3grvnLZ5ulWfDnRYG0ZWTnHfiDQAVOpTMO6EmZ4m6TNGvHsz1XUYs + pdCgyNVXmN3Duh2wRAtA7aaiIB0/Oj0HYMPwuK+IJpgD4mW9esDyYZjsmLG8HdtLGkKgr2MMpn5t + xzJ8GMgf6hfzkjDxBmEfYpg/01zBaV7fuF0isxovTE9I4E3L1klhlaUhUW1pp493Kroo4EqNX30U + o+5rtnd4UKDEw+2kj62JTEi8RUB52fke1z/1A71Xpkds32/y7iahAlVFGLJ9/Yh03l6jFH56uniS + UJ/S1beCp4QxFazs6fGuWWOE5PsNL16NO/+eKgD11j/xNymZ3n3y8AHlVz0yw8qe+mSVcIU4aVvi + qcY3pqbeZnK08X22FyWid3EvdZB98I3tGrTJ668f2UjvF1ti9QHmfBHVRyBJoeH+cPX1FV4+j+iz + uZ7I+fY+elz/vgB9CqIxXVvsou7LFAzj6RRieLnHaBTYqP72G/Of1qsdy9r1kYmeMZ7ls6XW8drB + oR5vVHqv4pYdO8mWvH14ZqYXdmioE/kK1/fI6DrnUzusrFsg2zFdz/p11YfTQxvkJn3VFJ1Q0/LI + SQPIHleXaIj6fJA/moEGJ3DwtOdZPqDX1lXCJbsQkrNPTvfkkKIa0RcxxaDTh8RzOoDUd9lW6Ew0 + ruqNAZ0aJMSmuzHm46BfYdtlAzF0/uX9dHyVUrnFBc7rA8/H5CzfwXkqI/Eno8trIiVXpFaEEGsn + t5wNn4uNziE6EsLTZVzb7bBBK0k8UGm01jnjhZNBv9RqvGg76jVHqTeRVxRnZl6OQzRY33UDy1Z/ + 4Y15cVv6tc4VqjhpmWc/cT6cl3EFxeqr4+ztb9rxfbgfkbdmGtk367alk5U3EIXXB934vpsPheqU + yHiahBjudM65p8gYVT6EFJDURZ3TNQCU2Wu2N8ftXE/CHbbSUsZrU0lbdqpoJ5dVllPhK4Vxj89+ + hrZvp6PA1jbns/+BJlW3xAlqJxqWt7cJOC+WeOEqojct1t4d1Bt7/tH7qVdTgM2VdlSe/dv49WMb + 9B62dBLhzbueqU9Y7y2PkfX23Q58cH2lDByF6D0U+a/e0T3fq4wU0rGd0sTTYMdEmTn9K47YLj6b + 8mN/PJD7YyeiuloVsLkl0ZauBTvPh1d1qFB2phVT0UuN+HePr+hCM4E5/GPwLhGiFOonVYjxsr7R + oLdvX7m+jxad3v4mn/tHAGa3aDCkeyeiZbGnyLmKXzzaEfM+psI7cEwnZ9aGJPm0XVwCOPjjmu0l + M+TjSjsOKH6LSwxJ9eZjZd59cBfTB1eWlHjTJU7vSno6XJiviwKf0lxfoHOzj+b9zHReX+QKXbbv + CI+hjdEgHx8bqEVVISTJNI9P6qaBfXgAph/2fitW1agpg15+mUPliI+R0RRyAsaT2fmzjH96qFhv + JtBi7dF41LpPhdh74RKzrd9e9+snd7btiJZ9R31Q77ErF6410PuVFjE/ZZ8JnWRrR1SqFdEwaHsT + tEezoAthLFu+aX0fihRsYteRGfVDkcsg5HKEkS1O0RTTboH4JzwSp7++o2nYOiJszG2Fryx1+MwD + qryVFJnsrsc9GhVNlSH+pm8Ky3fV8gPFE5rvM9WvUNRl24sP6pT3mFlqn3f4vQXw72uKFztbjBls + iAiyaG6ouOwrbzpmIvzhGesyMn3qzHqD1q+BMM31ypiPplSg5pAGLBRhy0U7MGf9ubt4LfcuYuHh + 0iHZ3wlMpc4zoq8gDSAdhoY46/1bH/PmIsA9JypdJvXD49HDA7RNO4Ml9qLg43HxLiEZhy27D1M7 + r8cdg9FvK6I7ytHjr10mgOxVPjvoktUO8/5Avief6JqtRH26GlMA8/rhNM94PK4t20TblBrMmrQb + 
mkZkC+j9JJyWSFvrU4JIB7MeES0jfTyM4RT+4QkhfrUehRt6gG29C+JH9oD6IqvucBztGp/UoORs + We5UkGvrzFSFL3R68BjIR1H08MB97nWF6abyn/tXb+v14qevULuWRDqeZQl1zDZVGFnL2N5CjHcy + 2vigLaIY97pTIOavxxDiznkQLwkVr4eUPtDiMnyIPrYyZ61Fz2iuJ7wUvKBdCUxSYeYb4kSlEFXF + lWK0l+yImZ/8ko/CzZWlLXljTNd0bMf29aKotY4q274n2vawfpSA3oXHduYKe9PJuN3hoqgfsosP + G0TjiyhCPa4bog+OHXNlCO7KOJ0FukFu3fJFtwcYz+s3HqtGj1Z3KtoAKXbxcB+qeHgv0wp++rmQ + F67O0/NVhZkPiHU/i+10U5mIlkqXE3xwJU4TZHUw+x+m7z9DNLyvuyNcveUCS+v92xu87FLCay3u + mf1WJX2qfWeBYtyF5CUPa94eywH/eItyrXznvEfFXdHFISBmLxb8j16t72bPvMTTvb4BPEFcqiEj + lbPXB93Yb2C9DV7E21dKTNXYvMJrv3KIZ3V5PChYEJGTezs8xHanD4/vZoK5n7AQ0Y53wbedUPHd + hcSr4IGmMepT6TB9KmamNxsNK7HT0MwvONtuJH36UmmBzhfjyw6xgtt0M+oLcKWdPvNTihhlWQaf + w3uBefqq2mFd1BiwHy7xr3/R8JG4SPUZ0HhfJdEYxUGn5N5rZFqSaFFHj2QDi7QUCDa91pv5vkCx + my3p9A01T5z7pVRZwYAX21vqMVs5nEEZHiVxQcB8rK+KLOtrssECWbUx3ZHPFcrjKZj7o5rz/LyR + YfX19sStVDUW0iygytspNsw4NF48rWwjBbeMl1h4ZKq+6hoPUBU/dBYFKxuJx0xcQHnXJuIWV8Ob + uuSq/eGR4RrG3rRb2xTabfUkqu4t9dqd7p1sF28Ty/SocPZGlzvk2dn58VPbVOjYId0LFGbNfqqL + +5EqdkVvVJQ2VTw92V4ABy0jLLyPnjfRfBSVU7m4MYtGeTsZ3fEB5LR/sO1Hs7xBSwFAq6uUPuZ+ + xW3ldoSzkI905X0lnSc0miDvqhLLkbqIG6HgBXwfokGs+ObF44UmIdD87M1+q+SjVOIMITf0qGiS + qz6GkhiC8bkS4pNUaKnfrw2Y9ZfM68+76HJ5wHUnCmy3aTNv+JrtFe2dV0/FXiwQcx8XAYXIfBCr + eupoBKwN6JneVeIYdInYKlphmPsTc78n5lGWbDY/nmZG0mHU5Y+hgdm/MVP5PvP+Xq18+WC7wIyq + 0FvuHdMrcD7tmFWZ6o8PMjk3RZE5XKnayTc+AWgb+8Cs+NZGzVuNFz++xdJoXdpJ1d4DDHrxJVsv + uHtzPWRw7/OECvg+ofGzMky4VOuQWR5r8zrIvgGUvfdk7uxfum1rH2HWbzyd9w4SnuY6AB2kO2nZ + vfFYQqMBdpva+cOn/CtnFeTZ0cErmMAbGssPkb32XdwImo6GsxRSCBThySKzR3ldq7dA5hIizFPq + MZ5A359hcZk+7Od/uvNheYePVnyocH+I8fy+RaRt3MPMRx+PTY32BMsIG+I3Wq1zVtcF/NZHlvyp + ZU8vn2BnZwhPy7PjibW/BVQ+/R3ZSuKprV/UuUJ9NzWirraR1/OPP6BfHrXzvpLHn5KqobBEzpy3 + xREdkS2i84YZdPxolj7u9VRUbk2YU7ToSm9wlav9y6+Yx+6NzkC8HaHSrYR23+Sds09ypzAs9JA2 + J7PSR8tONXhmpzWun+YuHtTRCeDZ4Y4RxLt8+L3v1fO8+NWP3rBkIwNnxci0tR7zYWdLGFL/ZTPC + Rkfng2gclc+JHX5+PG+QmGVKW3B31vMgHl/JA8OSX01GQInbMamfBvQC9an02R29YTHtO/SrD4YD + 0xuwVJZAklLDq8Vmlw8ftDTATVwbr+tqatvK2j/hdnieyH4NOeLSavNQ5u9P22ql8dk/iIrImi2Z + 81jeXy7MhmMt1n/8MLfu9hMu99ObSqHx5vyzWp7Rn3obxE00ys7rgcwOGmZMqRJToeAlzPxOl0Zs + xqu9Xglw4+aEJzk6RjRPWhnoUbgztZxaTuNd7KJfPnj5xpjzilaDfJXdEx6er2fLr0G2gPxVG8x4 + 1HedG68qQ3O+wfZe7utsznfQzL/M3Qq+PnT70gRnJTjMCY0tGossvUK/VGumjo3aTrOeIxdXlz96 + 9nu/cmryEiN2d/X1S1AKFAuZzozVsEIcRfYTwtfXIW7mqfHgpbUJb7Q/EUtR8pifvXUDki3IVNlS + P5aukpPCz496hXtAvCmSK/z0wi0Giw9OlwEcNgeXWUlJ9EkdIQXxEsWM7D4rb/b7GHnKtSCE3I9o + 2rziTPrlu1a4nPO7c9P9yRdMk2w8pmhgAz5/E2aYapvToEwzFBurhM37PRrO1SeVnsampxs1a9Dg + KoEL+en8xYvPq8mHOpmuyp4/RzbnLy3DxtEA7AdLpr+EdcS7ZukjZJQ1Md/COxqtcRTQuiqedPFe + 02hkt3UlN3jlUUrBbMtHVahgN7giZv/cesLsH1G7fmKmwqHNR9tICtSk2nb+fnL8vXxvBhikfLKZ + j1H3W785r6Sw6ireA3YH6ee3z8KKt1xcY/PHl2wH00MfJP3bIWNJO2Y+o6XXB8aWQi4qBtlTq/DG + OLRMAH4U8UCdRdxvnccEiQcBe34sk08yIRg+rDTpMn5co6EfREAG36h0uHpvj4XVToT5M9tZm+yX + dzUgJa8VnlhXx9ORdzbMfoGYsPP1OX8Mf3nIH32lP17xtlKIr29V8liirlPwQc3J67B8xMNhuqpw + uj4rPG5iRR82a0NGO219w4PSFfog3oQFyk1BJPsvITHHJyeA2Z/RrlgK8bRITwHsk1bFt9CL+QrS + 8oEKp7Lx5bX95tw7VncpfH0ctrWc1BPn+lB+ftDbvLR8/eONHx9aO9lDnX4X1D952dZfpPr4zkUN + YJ/vmKHHhjfUQpWhJcMTMXgeeoMA3QIam27xuhC/7Zw/qvBw7AUzlp9VO/7y9tl/4XSJxfaXl8BH + 3JjsNb/vds+pgHJkX8iuoCuPXpPnBhG09Sma+0198z4hfC9uRox4jdH67fsNgKRdiSO6fjwx8ZT9 + /B1u2d31KH4rGH58uqVrE/HooS8A27uObDdHXec381JCsUQpXTyEW56el3ED54v5ZWQZV3wYfemI + JlX4ULR/5zFr2baBaeWaDHdPKR77/XGD6p3MmNdOez7MeTpKcrbB1zoqo498NxpIxYdPwuW59qbf + PISJnx1RM1VFYvn0B/C2KGT6ejTi8WHu8M8fUtgcgmhM6rMB67VBmOebDRqQHWvQd5cPXlpq3xY7 + LNuArcbA35Nv8U7ajhjCkXz3Y9XkcbNIkwAitvqQbfxxOd3Y/V2+YR5gYdaDPD1f5/V878g+uZN8 + 
Yi92R7Mek33Xta3wke0CurcsU6nqh5gvU9+UdHTz8E1gEZ8MZVsBeZkHvMRBqU/T8zAphb4oiZdf + zXy6uNxHRSgtmeVPaVvN8zY49bt8zm/vfDq69+PPz/+ZH7GZn8EZK5GOikm9eptqG8iMbUGXPH3F + /aotS1iQ24LC4sn59FpyDVxjzBieWuBjJ0su2Jpcs5+/44cuCJSZj8g8r2rpiFRRnvMndpFPX53R + 5+oM83yGuYyd417r+gZNqvghhlB+9cnv1AIZq/Wa4PboRvzS3zVY7ssTs1bNWW8gmjKQR+lDLNti + +Z/5BGflyHb8tPyT78MCw5kO3ZLk5W0xBsrn1B+IaomDN5DHOoWX7XyxpIPQsrm/ovG8etPJOJd5 + 1e2pCdMAnC610yMf7vn+AY9H1ODpkG3mPHIwlHVcwG//6axf5U9l5kc8Zd/Df+UJzlX4Mlcm32h8 + UeeOnoS4mC+GyBO3LSpg3LwWtFfGnI9NnrkQ8HRL/PvViadyV9lgriObbKPcigRfl1UQ7aNGdqja + 6/1vXvCM7s953lujztSGUDGu154d9knC26/w7v7kkzMPRTMPCiAZ9Zfs/fLtDfHFK9C1rQqizvwy + 0qvjgqYd3wTjTxiPhz7O0C2NKW1+vCMTywc/rxX2mz90Uqtryno955XDouEUDXUJ5JVLlItnDw3u + CUQUOcWe4Xl/T2GvnZHqh3/y1qjs1QqUMvAUKvcu8djv+87+gWCHW9GUrlgjK8H5Suf5V8yPu5MI + VmJssKQ4yKPe+3FEqtirDL9CEwnrPTLR1rkbVFk9xXyQdEZRHolXzKWFrQvCPvShacX+N++Ixpf/ + 1UCBQ8hILi7z7rlTB3mJtwqz1jvajl10EgAHz4j8eORPnrclOaZLt6p5mSf5RgaiH3Gd0G0+zjyA + +mZ1xlKEh3zgjROCWfEL2w9HC63G0axgKFlEIcidfHTIVoS/f6cC/vNff/31v34nDMoqeX7mgwH9 + c+z//d9HBf59T+7/FgTx30z8cxKBdvf0+fc//3UI4e+6rcq6/999VTy/3d///CX+OW3wd1/198// + c/lf8//6z3/9HwAAAP//AwBim/ij4SAAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bf0247aaeea96de-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 28 Apr 2023 14:54:47 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '63' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - 974557ca996688f2b7e568f6d601ad88 + status: + code: 200 + message: OK +- request: + body: '{"input": ["Assistant Reply: { \"thoughts\": { \"text\": \"thoughts\", \"reasoning\": + \"reasoning\", \"plan\": \"plan\", \"criticism\": \"criticism\", \"speak\": + \"speak\" }, \"command\": { \"name\": \"google\", \"args\": + { \"query\": \"google_query\" } } } Result: None Human + Feedback:Command Result: Important Information."], "model": "text-embedding-ada-002", + "encoding_format": "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '483' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1R6WxOyurbl+/4Vq9YrvUtEJWG9IfeLJihesKurCxC5iSCQBHLq/Pcu/E6d7n7x + AfFCMuaYY4yZ//jXX3/93SZVlo5///PX3+9yGP/+H8u1ZzzGf//z1//8119//fXXf/xe/787sybJ + ns/yk/9u/71Zfp7Z9Pc/f4n/feX/3vTPX3/fopdD0flZVNPUPC8yT+sT3gt8pY/uh6XKIzl8CTzP + 74Rhggh4m3hNHTU6VhN7kC18XyqDotgb+zmfIhk+DKen3kq1uXg3HwJceS8R25JtA15/gkgZ7UnA + TptGFXfomsGpRFfq7XDr94G9VxVzjTbUY1RLlu+XQXJ53LE261NCuyg4w6QiDZHiXKrKTRMg+KJl + RuBWrkPyiQQGU2j55ASHhnM1HnIomOyK/UP46VlvDDGM50TFfqDyiqUNOyuv9Lahhye6hzPnWQTX + 0DngC2N2zxHqDMiu8I6RNGxBvy4uKRxflxRn7lr0yfd0G6D9kXSMwuTT8yG7s13IOgHbO5FzPiY1 + gUPXrSiGmtF3X3RSYWolBLEi6qohbVigrDwhJErOdX924KlWtIgcqepU34SIL3sLt1W6RYA/hpBd + XlYN7qJ2JXxb0rAtJOBAr1hv0EbURZ88Qa3CO68k7Hjw40+nMUeKEphbspncgg9QLSM4xqs3Vldq + 2Q+N4auwuBY11W3fqDYwfjLZQOOZ7vX+0k8nZT9AZH0YdYzjPpl1ZEiwdpQNUjYbLZwf5gZBr0EE + yXfb1ScRHBFEb4QQUJTGZ6etZ0E8ZQeq7nDJSSh3AVTF3UR1cQQ9f/Rghrt9s8HqIbgl3dGzIazi + zZaqvQjC6fM+11AAnUH9G90l8yRLGdRbsSJSo0nhDx874yTL2DtEVcjelmDJT1utcDaLec/ZfmvA + 
q9po1EykIZnDzhRhfMgDnJovE4xm6NTQEsIj+azevJpv+HSAard6oM333erTls0GWGnUwK7DN9W4 + G0wLNifhjI3tM+x5FRdQcTV5Qq0ajRV7R48cesn5RtYAv8NJj9sWyhZbY0MqPsm8CzADv/U/lPtJ + 58q9HGQeHWO0RpHaSyvCNeW9eiUYqXcnmd9lxIAZrwKkMA2B6f18OHA9SAoBtI56LhFPlsVTOC37 + U4SDaHcGkG+WgS1SQp9kzrlUhq5fYZWlnc+d/BLAfZhjanrVwOe5oRA+SHTCqLmvknnclNqqt8SC + PrEE+Dhf1Bk2gD6wE+GGz1BKLEj6JsbHfez5bGUypnjl7Ua9ZwX0Sex7AhJpeyTysy8BewRWBpUU + vwhJd2FPuDHJ8Ni+Juyvdmd/krtYkyV7ulJNqO468UPYwMo4R3SfayEgD3vnycA/a9R7H7Vqs5dg + Bm/9l1L9/IbVpE4lgsVoaTjQFKlqjuMjg7f0nqMdqu/JFL/cFFTxekujyH75v/oGIJ4q6th2mHDz + +4kg/OgFKRa804MxEvg9zh49qmmt840Xy2B0zk9qW49rwkmbOFAJ6js2d/6pkqxSJNAs9YAoJ6r3 + kzp9EVhfZwGrRdT1k51oGiw0KaWOcDcr8aoEKVzwgrVCf3N2TcNSMYm3wQi1Rz6JwERwT9071a/p + N6GMpY2MOqPBL1xs9bFwijOM5Rbj+4m1nKNMa2C9P9rk/TG3Pnt9DwN0b1ZEfXx89Pw5PD14LjSb + yDa79mx3r6Bsf0Qd8chnCZ3dqYSZc3FJMSeFzp6lHgAxkjC1nncjHGCgy5CBtUXtjXnkE1lFIjSb + yMbJBuX6JB7aARhXfiav/vjWp1XHY8ir9kQtfPLAdNzwmyy9wQXJFLbJYH0DWV7qjXqm2PXvfDgF + UIZ9jTjTuoocTm4OdkOFyMZ4dsngzucblDo+4MN12FSD7YYQipvvih78wOxJJq1UmCm5TdXhc03m + /KWk8NrnD2qiSK1Y9Vx9wRoRAXt3u/PnPAgh9ByLUkshVTIJxz6TrbI1sakIfcIOm5xAaY0qatw8 + J2HnS2wASyqf1Oi9kPPrVhXhFHfSgvfE5/k1rJXUehBqrjOz38jn9Ca3uxWhe20kFWvRqoRSLECs + CyHWqcwfZ+iO3p2sG/us98q9JPC7YgVF/u7DZ8N/zMBxpZii4/fDl342w+YxaljXbwFgZeh7snLJ + KMY38wQ2xf5ogU14KxEzh7QfLRqVcH1lAjZipfQndJPiHx4w/kSnkHu7yVHgtNphu2GXZFY/6gVe + lCimzjXPOVvpkgh2R5LRvQwswPZP14Af/J6wrl7uOnNuWgzkx0vBfgfUipw0N4LwYLvYzldiz/N6 + bUH3ARn1TXoE8+68J4pyjCk2Nd32e+IGDsw3jzeNEncLSK7OKRDM+UpxbyrJV+G5BovhXWEru36T + WX1bMTQPTo5NfT/qfE4tA0p3o6aHVef5ZK8HN3jt9zLe36ioT+uWfeGzEABGluPobGKrA6wvMcH7 + 9GqD4RX3B3BzvxE+HmXK5yGJLVlzYwEfsVD1VC4UCU6NMWLrLEh85CwgCtiCErvtgVWt8662CkPy + HW220hGw2yNP4b19Z+RtstDnJWEZHJPMoOb82XDii1MOtnPYkt0pVMJB6FoGX5K9wobmlLwvj6AE + 76AdcLgtcULWaSrCQGq93//3OV3vG8iFuqX7jvOeDF/ewMIaRGp06APGBDxi+MkOGGuf2e8JausU + quQWYLyJJT6sdEEEXzMH+IilBLBG7FKoSdIB47TzK/4ZX7J8bJ8T3UvOUZ/XIzsr95paGEXWA/Dp + UNfwZGYyVY8oDbnnngcZefCD1dqmVafvqhaEN2Ri119PITkmkgZHVUfY2vf3ZNA+6xo4JtTwq6qU + P3gBjzL40iDuqorI2daD/WnekTk7RDrHj2sDz+28IcL+SkJO7NsBiunwodqqqwFd9AJY+IKsiHzS + 6VtcCZAe64nMq67m/VWytsr3yDycLHphoNtXAOyAGdT8mJE+m1Y9K5aRW/R4j5Jq4UNBeU6Bt/QD + Ho5XJrYQN/qVwPbtcdFD3xvklTuTebN6huyqRCmkFVSp8RxafT6dShk8DK/HnpRrfp9LQQmOg9hS + zXicQbeK7hDGssvQSrXVZD3Z8Q1UzZliJznlPW9ldIH6cevTxM+PlfTZgQFe4l1NUZ2EIa3iQgC9 + 9vwifo9uIV+d1wN0+LukjisUPnXy9AzyTfJGz12rhRsv+Zwh/5QddS3jVJEvbRtwFY4Fja/6OpkN + /8RgkcOEquWN9ryYqhKa9tSRKcovPquemy/YyuWGuhp+Av7a6Q0kJj5QdKhjn0MQE3AKkvOfz7M5 + K1vF3mdrJFbtxR/vMtOU7bFxkLD4iW9OMwi6NKsRsPsdmL5wXYI6KG/YfzQfn83K+6vUczki/s4o + 6OazJcK7NlBq4Ab1FMZXBndPvCK7avAq3s/NGQYX94JE1DoVHQ86gnorVWitKrtw8gwnBZMInngv + tGdAUeY10E6TA2EGkXpuRbEE1fLQIUF/lDqtvnomE9euETDpkfO19kiBgI4SNaIdCZlyDCQ4Cv6B + VGveJQufDUr9MSVq80DW5wM75NB9v3dk/epVf1P5TgYPfaVR1RxgNY1zGUH3jFTyDty8mstdfIEn + 93omKVKmnt22fQZKH+3JdtFXvVa+BTB8nYyej6smIQLeavBPP86uXjgDr/hC13dU+uPb8Zq1FsRM + GSl6rorkexxPKWisKqF+EYZ6W9HEgJ8zf2C0PC/fWzcV5quviPXvye03fcAOskbnA6psHQL+rQz2 + 01dYj7Ghc7ZnhrL0M7wvnKKnWfc1YBz5D4zI3uK8liYZuhZ3yXolbxKiWnoEvcm3iLTxumoQdt4Z + yKh4Yzu9lH1XWp8bXNYXY30/h81aOTEQnqsV9fy3m7CWwgyGxRrhoxYFPqnfvgiFDRvoLclg9X1d + VajYwWwgmB2UhFy1coaege5k3uzvCdOvnQrXloCwxa9rPudTICvZ95KjMeiuYMr0eAupOzywfau0 + iknnaw1HmwuLf+l0XpJtKluqi7HabWky3Qa73Sr0YVGNXvqEi+tSgEUtcuq/HQu0YeGUMKmGhh4K + 3dene3epYXnzHti5ZRYQv+4pVqbGGolSwHVS39csUqrkcscnMxWr+Tg+Ujjf43B5XqYv+BTBhMeM + rJ3XQ59zeoNQf1QXtE1Oas/w6uvB8+PVEvDMViEr3i8ChgJgqn7cLuS1laSy3PID9jZZ69Ov4c7A + V7MrPSq5XI07QBoonk4T9fvNoZrCyzAAKNg5PeZc1zfppDkQIWmF1lc5D6crg1+QEbTFqBiO4eB/ + 5zO8CoOK789zW01FpLGfHqUvTR9ClkonBuWnJJLVTGvA3NMzgsAPNDQnZ5NvhlH1lMva9ala27if + 
b8U4w5dwRxhd0L6X/HlVA/O9a7G+y9fJbOf7r3w9AICNXfKouAtrBMPYelJPUxJAgEMH8A6+w8/v + 6iTqOwd2VVkTZR9//blB3hl0fXghEzJKfURrJ4DgBnwiJsDRx/M2HaBFFIEiDxSgrZNDA/eVzjC+ + 2j5Y+9ktg6e03KGJ3ceEX5sxAj2fAuqxj9yzraeUYNH/2L9OPBykYmWAvhZybNpiwydDlFoIJmem + etFBfXxePxl8SeYK+9tV7lN19ylhaW08/OO7yQsLAQqrIqWaH+Qhf+ttJL+PT4NmdcLDrjWuLZS3 + VoL3F+74U1R/Gnh+Si/qnkIlIdnnWcLr3DywRm5myNMi/8Ky4CX2x/ZQzcXQbyEP0i8+2n7Haez4 + M4Ty0aHGuioSlsR7SdH2X0Td3SvQKUKFofz46YteST9rdD8r7vuzI1BvMp+l0mOG2zTe0r0VxxWr + /d6Ac7MTqd19Sp+VetEqb34XqRraFZ+f/rGBw9qQ8D7UHH/hx/inf5d6aQBrg10GR8E9/MmP6PMY + e5BI+Uz44vfyzctQ4Qd/JrLkJ/1M2R4CdR0Fv/WrJrR2znCpL6qNwaYf7N0cw0uZ6Wi3ewX+FJyC + M2wQ+yJZUxI+tbOrwsW/UJfv0pD89OAFKweyWfKb2V4ZX7Dw4+In8op9wroEBtycSOevp4QDUgfw + tK9LqhaRWzEY+FvI0PZOnyda9VMVPmdwRlWAJNq6YP20TAFKxKko3ptW8k2FlwX53k/QuoNvTsHL + Y3CXJx8EorbwGWbqF4p7FuDjwh+0e68kqCZOR7PFH9DKRTl8m+RC91apJZLjAwdepWzCtv7FiYhX + XwfGezPG1Pk0/vAgwUVxt1OOzZ9eDbMKwuzu7LAty/uKH9aGB8LCe1L19XH7GblFCpf+SJ01jXzy + MFcHeHkFCdmF98mfrmqG/uh999D6yXrlbSOwZtobbR/HAYys125Qr6GPo6P0SHiIvQyGt4NJ1be5 + rsgNPw5gdZV7ii2p8VnQJy1Q3PJOVoPw9kdlSjLgPIcZ+3TIQ17sTQNOZ2CRpf/3w7e9xPB2LBlp + 9fiTzAA4Bvwo6mrJD8uEb+/uDdqCrJFNFqCKb7qrDFfaaGBLp76+IRuxVcJzsSJzzU5g0eslgNKF + Ud3WU8DKcmjBJXTEBY8fzk5t3gBzZ2P882stuxWHH38TLo5JP2fSSgMfBiIEPe8O6OG298BJmnMC + +09djY8ClnCvXlp6cdeIr6P6U//05sLXjr7x3DOB7pbnJFj0Qz8pvgZ+eaDdFmLCrY94UWjAHtiL + tq4+tcazBbt6zqjx2FySce2RGTBtZtRY+I1rM07Bc6fJSIi9Y8Wcdy/DpX6R/D5qPTtfzobyW8/5 + TTacdclbgB7evAgrb7iiJWEpzMhhS8JtSZORZKEMxWcSoQ/Y2n2VFnkLzVgJ6GEKIKCwW523Gyf2 + fn7Cn9/B+gYvShxjx9Vf/XSXmQqgmu2xO6y+Cdsn2IPkgyuqM83tZydJ/2v/+cOVwkV/WErIeoFc + jXn02TYNINxevg6CU1RXk7wFOXgFOCDi4qemqClyeAR9hC3adqDwwk6A+BsfsHk2h55stgMDzenF + MarY3herIDDgq28tmsnFoVry7QHUqZtR20OdPifqyVA0ehoJS98YTMP6mQGLrAR0nboTEPW3V+6e + O1XG6g5rnMBm10Dn+qqoeTYPPVj6Mdwk5hmbxjzqTUKUGrKrcEfwzvpkxNpHhhGCA6lNbe0Pevgx + 4LGFAdWliSZTs/l4MBqnlCIlOFd9/EEzIJcyIehdFPq8P+9EyE6fHCORDWDG710E05P4xv5bfenf + X5658DkR7Z1WLev7BdZF/1DD2bSAd5vkDIon41hzD0PFSTS1ytKPiOhsWj5/ImkGwcW/0IPeCDqf + xiJXyt1Fxd7FxOG08li8e1XRhp7bpwLI5TYFUEubN9aDzEho+bxqsO6zkShouwaMNRqBi/6nP7wv + eaMKNcoOCBQh19n67jVQSY8vNITgXv34E17lq4s976Mns7nREfzVozU0gS9mQnIAPz1gy/ODj5E5 + NPIzs0oyPZqCT6SwIOi3no3y/Aw4rT9BrAD7Df/U2+K/blsihCWiggs5D7NegCczlbHpvhveCccq + hc5GeGC8+DeedaUFsyZzyIL/amMIyhZsmtz95U096y/m9sc32MfXIpzNmxXALmoOhOAm5Fy8EA1Y + nyNa8Ff47PZoU6Aaxhcb4LLXf35febxsjYjNa883lYtKeAtIROZKufNf3gTAbefjfbPbJ5K9OrRQ + aJ9P6q92s88jQkR5yQvw8R6BaoLnIIP4EByxrRQvPgHzq0Hqkgda3/W5X28upacseTJ2dvViR+s4 + Anh9qbEZiXNPkXKsf/MOsi15Hs6K7jagTTyBVBAn4ThqPpRHIzKJuORl618e4e2iKwFLvstPpm1A + 4po1mvevvGdsPRx+9T72XrwCVMqKCJYjcKn2RLM+7tK+hukTHn75lT7fM5lAK6lNjM+RCUTMnC8M + +mOI1den63nQHLRfP8JPciI//MdgiDcYrQwlq9jZ2DZQ2Z8cNP346LtXEOzcs4I9MSXhSFaBBC5f + xKl/2+x1vpGnG7gGZkXNtLNC8XZv8p+/wE5kr/xh28sZlAstxX7t5aD9uo8Ymm/QUj22YciNobvJ + y/NgVZEvYH7nmQRHIzaxdcAnf7qcpi/45W+4jP2EqS/owB24CtQ+grXfit3dAc6o7ukyvwIUf3ca + XPQD1cV1Fc7+O9LgE/GQ7Epi9O/Uk6FsPLov2XkbPxlfwijJhgVLmmrt1v+jhy93/UAWfusnn+5k + uBeyBu/v6z1YL3iQf/m0q2EF8LRov9A5fXw0XMip3wj65QB3/IPQz9/x80nJ5R9fRsFo6zzEWgbt + z70m4rJf7G1JFqw/toRdcbglE0FyDJEnfOiiRxPuufEA3+CjU5X0nc5uzlEFnJ4e2JjI2LPEIRE0 + toGz5CmlTw77XFV0UPXUaF4Fnwxp28C2sW8Ur7Mvnw33LsFfvnPrNl99qsTzAOzPtcY6vFf6/KrL + VFnyanr81hqXztnppqDMQfSy9Asaf6wZtmGE/sz/eHx/n5W2RALWssNWn9nhAQFODAUJs5hXjN0K + pHi7+EqPxiVNxtXuIsMXzTMkZus4XPqTAIVnZaJ5MvtwEgaSysVoaPiw8P3PH8BcEHTsxP4ZzOte + uoFJcQZqvB/in3xFLnc3lTqfTqhm81MMP/7A2Pk0+h9/u/Q/spXGTG8Pm/bPfA6j74H7LdCfGSw1 + YUYf67EOJ+MKWji/fJ2i5v5KFjyKwK/8J5mcsEx++wUWvqUH+GHh3IVRC08SywkQwMFn1WtqYNKe + KD7KTRvyylfTP/MziyVLPOCPX+iIxp5Gy/xrWPIjeNnlGtUuyTYca/oeYHTTBvQVtAIsec0ByJr8 + 
xD+8MamaNFCez3uKHodPz9GBer/8Eu1OyVuf78qaAHHTrvAprm1Oln6m7JPzCmOAvWpCdirDjb3r + qbbot0XfB7DP3x3VJrNPFr5BP71O/Vp76AR09Re2hxvCep13YB5cqVWM2HOxpykADPpby8GSj5Lx + KFPQGaLQ/vILROvbvR9Xu1SG7/39iarFn/LAdlWIDU2gP30+uRYo5fal38gUqCScwNUv5V0gW9Sp + 6zxc+t8AHFPQsJnf5nAUSB4rzOoc6l68Xh/HSpWhDARO7cndg01EGhGyUbthLQeZz1dq58GVSBOy + zFNC+tuPxf+h0yOTdfYaNjNcdcYZe1tXDtn+ubeUhhknetklu/4rnvILNA9eTo3mLvvvG9k3yt+/ + UwH/+a+//vpfvxMGTfvM3svBgDGbxn//91GBf8fP+N+iKP2bSn9OIpAhzrO///mvQwh/d33bdOP/ + Hts6+wx///OXIv85bvD32I7x+/+9/q/l1/7zX/8HAAD//wMAPe/DkOMgAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfdedf56918cfbc-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 07:04:15 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '25' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - f2850337441001a8e5bd70bacb4dede8 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_default.yaml b/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_default.yaml new file mode 100644 index 00000000..e8cbefa1 --- /dev/null +++ b/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_default.yaml @@ -0,0 +1,93 @@ +interactions: +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "\nYour + task is to devise up to 5 highly effective goals and an appropriate role-based + name (_GPT) for an autonomous agent, ensuring that the goals are optimally aligned + with the successful completion of its assigned task.\n\nThe user will provide + the task, you will provide only the output in the exact format specified below + with no explanation or conversation.\n\nExample input:\nHelp me with marketing + my business\n\nExample output:\nName: CMOGPT\nDescription: a professional digital + marketer AI that assists Solopreneurs in growing their businesses by providing + world-class expertise in solving marketing problems for SaaS, content products, + agencies, and more.\nGoals:\n- Engage in effective problem-solving, prioritization, + planning, and supporting execution to address your marketing needs as your virtual + Chief Marketing Officer.\n\n- Provide specific, actionable, and concise advice + to help you make informed decisions without the use of platitudes or overly + wordy explanations.\n\n- Identify and prioritize quick wins and cost-effective + campaigns that maximize results with minimal time and budget investment.\n\n- + Proactively take the lead in guiding you and offering suggestions when faced + with unclear information or uncertainty to ensure your marketing strategy remains + on track.\n"}, {"role": "user", "content": "Task: ''Write a wikipedia style + article about the project: https://github.com/significant-gravitas/Auto-GPT''\nRespond + only with the output in the exact format specified in the system prompt, with + no explanation or conversation.\n"}], "temperature": 0.0, "max_tokens": null}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '1671' + Content-Type: + - 
application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1xSS2/UMBC+8ytGvnBJVt3tgzZHoBQkXgdQKyiqHHuSTOvMpPZkt6uq/x3FWfrg + aHvme/rekDeVcZ1V1w+hfHPSffy0Hz6ff6sPls3p9cHtr4tv2y8/6e3FWWMKI/U1Ot1tLJz0Q0Al + YVMYF9EqelMtj45XhweHR0fHhenFYzCVaQct9xeHpY6xlnJvf29pCjMm26Kp7s0QpR/0SuUGOZlq + dbJfmCfsx/vl6qQwKmrD49XBavVQGNcJOUym+n1vekz/YKMENJWxKVFSyzqJFFbkycBX22MF53RD + Z99/XPJ7TC7SMPFVYBnsqMLSy5jAtsgKHhO1jB5UYEYEYtAOIRsnYZAm4w3oyZZJtwHBRiUXMEG9 + hSHKmjxxCxuJwZcu2JQA7waMSgknuIgJbXRdAZtIStwWYNlDI7G3Op0Xl3wmNqTqkkt4J+xHp6Cd + RBnb7nEdZFbW0hoZVAZyk2zkNEYE69wYrdtm6F3MyJjSZIB4JiPhxcRxHkkROmq78na0gXQLuwxB + O6tACZBb22atj8trnIVvMIQyaRydjhF9AY2EIJspg6xvJI+BGFOenkryNvr0Isks40PGzUu7SCcb + EnOcKk/TrxPskmf/DH/S5sKYx4coA0ZwpNnmzB2xwYjsMC3maEOwtUSrCBvSLjOPCeNERuwkDvOj + dkgRGkRfW3eToXp7g8Dopo8YtxBxTSnzqDw3kHlO50pylM/N9YiaXrhiUVtTLuC/2KYKArVUB5x+ + CgxjHcjNFZqHwjTElLqriDYJm8oklcEUhtjjnan2Hv48vPoLAAD//wMAbCMiNQcEAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7be206beca7ccfbc-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Wed, 26 Apr 2023 21:47:58 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '9927' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-reset-requests: + - 17ms + x-request-id: + - 575539561e8025c9a37920ffb4d1b354 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_fallback.yaml b/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_fallback.yaml new file mode 100644 index 00000000..6ddfe32a --- /dev/null +++ b/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_fallback.yaml @@ -0,0 +1,87 @@ +interactions: +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "\nYour + task is to devise up to 5 highly effective goals and an appropriate role-based + name (_GPT) for an autonomous agent, ensuring that the goals are optimally aligned + with the successful completion of its assigned task.\n\nThe user will provide + the task, you will provide only the output in the exact format specified below + with no explanation or conversation.\n\nExample input:\nHelp me with marketing + my business\n\nExample output:\nName: CMOGPT\nDescription: a professional digital + marketer AI that assists Solopreneurs in growing their businesses by providing + world-class expertise in solving marketing problems for SaaS, content products, + agencies, and more.\nGoals:\n- Engage in effective problem-solving, prioritization, + planning, and supporting execution to address your marketing needs as your virtual + Chief Marketing Officer.\n\n- Provide specific, actionable, and concise advice + to help you make informed decisions without the use of platitudes or overly + wordy explanations.\n\n- Identify and prioritize quick wins and cost-effective + campaigns that maximize results with minimal time and budget investment.\n\n- + Proactively take the lead in guiding you and offering suggestions when faced + with unclear information or 
uncertainty to ensure your marketing strategy remains + on track.\n"}, {"role": "user", "content": "Task: ''T&GF\u00a3OIBECC()!*''\nRespond + only with the output in the exact format specified in the system prompt, with + no explanation or conversation.\n"}], "temperature": 0.0, "max_tokens": null}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '1592' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA0yOTUsDMRCG7/6KYS5esqUf1tqcFdSLHoqCIjKbne6mzWZCMusHpf9dCmp7e3m/ + eHboG7ToOlLXp1Atlt3t6unbtbebl5vVc/2wWmiYbbfu+n54QINSb9jp72LkpE+B1UtEgy4zKTdo + J5dX0/nF/HKxNNhLwwEttkmr2Whe6ZBrqcaz8QQNDoVaRrvDlKVP+q6y5VjQTq/GBo/f//5sYlBF + KZw4k71B14l3XNC+7rDn8veaJTBapFJ8UYp6YJSoHA/8d+c9FMn528AdOIpRFFKWD98wUAQZNA0K + n147GRQIPij4BpTKdgSPganwsX4Swloy9Awq0HLkTMpAKWVJ2R90KxQKUGyA4MBX1VS4gUg9j3Bv + cO2jL917ZioS0WJRSWjQx4a/0I73b/uzHwAAAP//AwBPrscAswEAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7be20701ea0a9669-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Wed, 26 Apr 2023 21:48:01 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '1833' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-reset-requests: + - 17ms + x-request-id: + - c9bf165259547ec59a88bd16b5f691f2 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_typical.yaml b/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_typical.yaml new file mode 100644 index 00000000..980a0a10 --- /dev/null +++ b/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_typical.yaml @@ -0,0 +1,97 @@ +interactions: +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "\nYour + task is to devise up to 5 highly effective goals and an appropriate role-based + name (_GPT) for an autonomous agent, ensuring that the goals are optimally aligned + with the successful completion of its assigned task.\n\nThe user will provide + the task, you will provide only the output in the exact format specified below + with no explanation or conversation.\n\nExample input:\nHelp me with marketing + my business\n\nExample output:\nName: CMOGPT\nDescription: a professional digital + marketer AI that assists Solopreneurs in growing their businesses by providing + world-class expertise in solving marketing problems for SaaS, content products, + agencies, and more.\nGoals:\n- Engage in effective problem-solving, prioritization, + planning, and supporting execution to address your marketing needs as your virtual + Chief Marketing Officer.\n\n- Provide specific, actionable, and concise advice + to help you make informed decisions without the use of platitudes or overly + wordy explanations.\n\n- Identify and prioritize quick wins and cost-effective + campaigns that maximize results with minimal time and budget investment.\n\n- + Proactively take the lead in guiding you and offering suggestions when 
faced + with unclear information or uncertainty to ensure your marketing strategy remains + on track.\n"}, {"role": "user", "content": "Task: ''Help me create a rock opera + about cybernetic giraffes''\nRespond only with the output in the exact format + specified in the system prompt, with no explanation or conversation.\n"}], "temperature": + 0.0, "max_tokens": null}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '1625' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA4yU0W/bRgzG3/dXEPcsB04ad53ehg1riwFNthXYgGUo6BMlcT4dVZJy4gT534uT + ZWcr9rAXAZZ5Hz/++J2eAjehDrFHj8OYVt9+1//0y+XP797n6w/DbnvTdts/9reXwzr9Fj+GKsj2 + b4q+nLiIMoyJnCWHKkQldGpCffn6zdXmerPZXFVhkIZSqEM3+urVxWblk25ltX61vgxVmAw7CvVT + GFWG0T+57ChbqK/eXFfhRfvl/eXrKrg4pvOrzXr9XIXYC0eyUP/5FAayk6xKolAHNGNzzF5MSnbK + ZYAPOFANv97cvr39eJd/JIvKY2lXA2b4/j3M8/CeIEq2KRUB8B4dbKTImPiRDDhDT2nk3AGqs7kB + 5gaGyTgyZoOtlv+8J1aYMn+eCPZsLNnABRK3dLGYALajdMsRUzpAQ8ZdpqYUHmco7bynxZpkkBZU + 4g5kJEWrYFTZc1M60sNI6tBN3GCONLuyaRxFyxQqU9fL5LMaZWelcjaS2cVdfiuYrL7LK/hBUsKt + KDrBPXsPB5mKnYb2lGQEhLImSqm0NBc9JM50xBRx9EnJjj3MqNiQFuJhS5rJOULHim1LR2YzKQP2 + E5iTzyPNCtJBOVo1F+/ZJkzFbLF507akLwuzqevIfIZciluiZosF0xHff+jJnhRTKhCaKc5sXYCy + TbqMUw6+sC7Lotxhx7mrCkFSR87zr6JHgxSReY9KJhmzL2bfURpnjmhGwzYRIDimotGAEw4F0jlA + FRjnjrT4jC66+F2yoQb3vUDEvATtIJMuATtjPNYn3pMCQs9dv/o8YWI/wEjaig4lIYu72zlB9L+C + o9QTqmGai/6BbolSdb4bS2wK5ShDET2AbM0xpmX7X6NuOeN5HYU2gutEMKA56ch0dvy76O5f4Sxf + E3H6emNzxiZO5fn4CKgy5QbYDZQSoVEFiUoMujnMUu44DNQwlntFdq47RcZ7UhhQd+TlROwxZ0rz + vVbC2APCfUGJU8N0QtlRpvk60UNkp4GyX4TnKrSc2fpPSmiSQx3MZQxV4NzQQ6jXz389f/MFAAD/ + /wMAE8xLs6wFAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7be203e8793fcfa4-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Wed, 26 Apr 2023 21:46:11 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '19109' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-reset-requests: + - 17ms + x-request-id: + - 5fe22bc0f23ce6b48845f33187e1a19d + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integration/challenges/__init__.py b/tests/integration/challenges/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/integration/challenges/conftest.py b/tests/integration/challenges/conftest.py new file mode 100644 index 00000000..dce4518d --- /dev/null +++ b/tests/integration/challenges/conftest.py @@ -0,0 +1,17 @@ +import pytest + + +def pytest_addoption(parser): + parser.addoption( + "--level", action="store", default=None, type=int, help="Specify test level" + ) + + +def pytest_configure(config): + config.option.level = config.getoption("--level") + + +@pytest.fixture +def user_selected_level(request) -> int: + ## used for challenges in the goal oriented tests + return request.config.option.level diff --git a/tests/integration/challenges/memory/__init__.py b/tests/integration/challenges/memory/__init__.py new file mode 100644 index 00000000..e69de29b diff --git 
a/tests/integration/challenges/memory/cassettes/test_memory_challenge_a/test_memory_challenge_a.yaml b/tests/integration/challenges/memory/cassettes/test_memory_challenge_a/test_memory_challenge_a.yaml new file mode 100644 index 00000000..a38ce01f --- /dev/null +++ b/tests/integration/challenges/memory/cassettes/test_memory_challenge_a/test_memory_challenge_a.yaml @@ -0,0 +1,1026 @@ +interactions: +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: + \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": + \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", + \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. + get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: + Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. + list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT + Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: + Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. 
Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 01 00:00:00 2000"}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}], + "temperature": 0, "max_tokens": 2766}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '3294' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA4RSS2+bQBC+91eM5gwWxHZtcUsPlVy1aQ9tpapE1noZYGvYRTuDYsfiv1cYcCo7 + Sq/fPL7HzAlNhgnqUomumypc3T/dRe2X59W3OPrVbhZff0YPhyZ/WHz+8PETBuh2f0jLODHTrm4q + EuMsBqg9KaEMk/j9+m69iFbROsDaZVRhgkUj4Xy2DKX1OxdG8yjGAFtWBWFywsa7upGtuD1ZxmQ1 + jwN82X3B4+UiQHGiqgu0Xi+7AHXpjCbG5PcJa+JprXcVYYKK2bAoK71IZ4Vsb+CUWgCAFKV0bVEK + p5jACI4FOkgPprgBLl1bZcCivMDuCJ5UZmwBUhIYy+Jb3UvlbTyTg0BuKoKWp46+e3vGtKtrZbNZ + isG/VJ4UO2tsMfB9Lwly41mAhRowDOKgtRn53kd2wwoTKIr3DFIqAUuU9WM7goZ87nxNN7RNpezA + GMIPpte19kt68C2vaWpDuLeqOj7T6+oyEvK1sUPZ0mHwxleKtDditOF6Cn6yQZZbT4O1zZtZjI9D + 1RF2lDtP0Hinic73ejJSgrLHIanrPLghtZ+Yn0xV/f/gZ/uzFIc1XTB91RjezVNZVdPAcAn6SoTy + xfUvDoW+92X89hKTiLOQUU9qO+wCzI01XG6HN8MEWVyDARqb0QGTqHvs3v0FAAD//wMAM8Ih+goE + AAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfe29d91e6b96e4-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 07:45:17 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '9335' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86493' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.337s + x-request-id: + - d98bca49710a9416e1e51117937d09fb + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key and potentially important information + to remember.\n\n\nYou will receive the current summary and the latest development. 
+ Combine them, adding relevant key information from the latest development in + 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\nI + was created.\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}], + "temperature": 0, "max_tokens": null}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '600' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA0SOwU4CMRRF935Fc9cd0hlQsDviyo3sXGAMKZ0nrc70lekDjGT+3ZCgbM/NPTln + xBYWPjjxfe6q+fLUPDVp//K6DqvsV9N1u++WP24/Ox4TNHj7SV6uj4nnPnckkS+TH8gJtbD1w6JZ + zMy8MRo9t9TBYpelmk7uKzkMW67M1NTQOBS3I9gz8sB9lo3wF6UC+1hr3NQ3rCEsrvsHtTGjhg8c + PRXYtzN6Kn/OgTuChSslFnFJLoWchNKl/lmdXFHXYuVSqxJLiGmnEp1UcEUFlzMlaicYNT5iiiVs + BnKFEyyKcIZGTC19w5rxfbz7BQAA//8DALvrL2VJAQAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfe2a24dfa796e4-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 07:45:20 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '572' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89866' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 88ms + x-request-id: + - 396e6db6eeae90fd3920af59c662d881 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: + \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": + \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", + \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. + get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: + Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. + list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT + Agent, args: \"key\": \"\", \"message\": \"\"\n11. 
start_agent: + Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past: \nI was created and nothing new + has happened."}, {"role": "user", "content": "Determine which next command to + use, and respond using the format specified above:"}, {"role": "assistant", + "content": "{\n \"thoughts\": {\n \"text\": \"I should start by reading + the instructions_1.txt file using the read_file command.\",\n \"reasoning\": + \"The first step is to understand the instructions and the tasks that need to + be performed.\",\n \"plan\": \"- Use the read_file command to read the + instructions_1.txt file\\n- Analyze the instructions and determine the next + steps\",\n \"criticism\": \"I need to ensure that I understand the instructions + completely before proceeding with any tasks.\",\n \"speak\": \"I will + start by reading the instructions file.\"\n },\n \"command\": {\n \"name\": + \"read_file\",\n \"args\": {\n \"filename\": \"instructions_1.txt\"\n }\n }\n}"}, + {"role": "system", "content": "Command read_file returned: This task_id is 2314\nRead + the file instructions_2.txt"}, {"role": "user", "content": "Determine which + next command to use, and respond using the format specified above:"}], "temperature": + 0, "max_tokens": 2552}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '4438' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA4ySTY/TQAyG7/wKy+ekarq03c2tggv3IoQIqmYnTjJ04gkzTj+o8t9Rmna7myLg + 6o/Xj+33hCbHFHWlRNeNjZer/ezDqljtdh8XTz/X/HW3Pc53u1l5/LJfY4Tu+QdpuXRMtKsbS2Ic + Y4TakxLKMU0Wj7PH99PlbBph7XKymGLZSPwwmcfS+mcXTx+mCUbYBlUSpidsvKsb2YjbEgdMnxZJ + hDftl3gyX0QoTpS9hZJk2UWoK2c0BUy/nbCmcNX1zhKmqEIwQRRLT+lYiPsNThkDAGQolWvLSkKG + KVyClwQdpA9m+AmYKAdx4EnlIBWB4SC+1T1g2MwmchAojCVog+HyXNGXbs4x7epacT7JMHqt70kF + x4bLYcj6L6o9tjIczsJMB4FAAq540wBSKYEba+Gsdfvx1MYqHgbG8DnQn1H/Z9Us4xhWrOzxF93V + QS+Sk5CvDdMrbKEmjIi0N2K0CfX42MSh9XRdq+WcfP/Heyq4uIXsEZ6pcJ6g8U4T5f0z9kYqUHwE + UWEbxvcIDantdfLeWPvvH/P5qoNIF119dDndnY1Y1TTov5x5hKB8OXbfkOhrb+33OFeIM8iFJ+MO + 
uwgLwyZUm8FjmGIQ12CEhnM6YDrtvnfvfgMAAP//AwC356gC/QMAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfe2a28ea2b96e4-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 07:45:29 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '9064' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86466' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.356s + x-request-id: + - bb4cad658d2a5f381ad0c0621cb834cd + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key and potentially important information + to remember.\n\n\nYou will receive the current summary and the latest development. + Combine them, adding relevant key information from the latest development in + 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\n{''role'': + ''system'', ''content'': ''This reminds you of these events from your past: + \\nI was created and nothing new has happened.''}\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing + new happened.\n\"\"\"\n"}], "temperature": 0, "max_tokens": null}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '714' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA0SPy0oDQRBF935FU+ueMI88TO9CFj4QxJWCSOj0VDJtZqraqQpxCPPvElGyPXAP + 554h1uAgNF5Dl9pssTqVz+XT3fx+/vb4snyl4WvYHday3i58CRZ4+4lB/xaTwF1qUSMTWAg9esUa + XDG/LW+n+aIqLXRcYwsO9kmzajLL9NhvOcurvAALR/F7BHeG1HOXdKN8QBJwRbG0cHVf+cyCsvr2 + SqrpaCE0HAMKuPczdCj/1p5bBAdeJIp60ksjkyJd+ldivJFBFDtrHkyPwbet0carIdYm0t4Qnkzj + xTQ+JSSsjUQKaLrB/F6NTBMYLewiRWk2PXphAgeinMBCpBq/weXjx3jzAwAA//8DADd8MExmAQAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfe2a713f7b96e4-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 07:45:33 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '1006' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89839' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 107ms + x-request-id: + - a2e25fb58e91f62bfd8851675102b189 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the 
instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: + \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": + \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", + \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. + get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: + Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. + list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT + Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: + Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. 
Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past: \nAs a system, I recall that nothing + new has happened since my creation."}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}, + {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I should start by reading the instructions_1.txt file using the read_file + command.\",\n \"reasoning\": \"The first step is to understand the instructions + and the tasks that need to be performed.\",\n \"plan\": \"- Use the read_file + command to read the instructions_1.txt file\\n- Analyze the instructions and + determine the next steps\",\n \"criticism\": \"I need to ensure that + I understand the instructions completely before proceeding with any tasks.\",\n \"speak\": + \"I will start by reading the instructions file.\"\n },\n \"command\": + {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": + \"instructions_1.txt\"\n }\n }\n}"}, {"role": "system", "content": + "Command read_file returned: This task_id is 2314\nRead the file instructions_2.txt"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": + {\n \"text\": \"I need to read the instructions_2.txt file using the + read_file command.\",\n \"reasoning\": \"The instructions_2.txt file + contains the next set of instructions that I need to follow.\",\n \"plan\": + \"- Use the read_file command to read the instructions_2.txt file\\n- Analyze + the instructions and determine the next steps\",\n \"criticism\": \"I + need to ensure that I understand the instructions completely before proceeding + with any tasks.\",\n \"speak\": \"I will read the instructions_2.txt + file now.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": + {\n \"filename\": \"instructions_2.txt\"\n }\n }\n}"}, + {"role": "system", "content": "Command read_file returned: Read the file instructions_3.txt"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}], "temperature": 0, "max_tokens": 2344}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '5444' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA4ySTY/TQAyG7/wKy+ekapvdbpTbSiAoEogDaIUIqqYTNxk68UQzDm2p+t9RmpQu + KQKu/nj92H6PaArMUFdKdN3Y+OFxN/9w16Tl07vPb16/eru0T1wu0+/v03T6EiN062+kZeiYaFc3 + lsQ4xgi1JyVUYDZbpPP0bvqQJBHWriCLGZaNxMnkPpbWr108TaYzjLANqiTMjth4VzeyErclDpjN + ZotFhFfxa+J+EaE4UfYaSubzU4S6ckZTwOzLEWsKF2HvLGGGKgQTRLF0mI6FuFvhmDMAQI5Subas + JOSYwRAcErSXLpjjEpioAHHgSRUgFYHhIL7VHWBYJRPZC2yMJWiD4fJc0ZWuzjHt6lpxMckxeq7v + 
SQXHhst+yMe/qHbYynA4CzPtBQIJuM1vDSCVEriybpy1bjee2ljF/cAYPgX6M+r/rJrnHMMjK3v4 + QTd10IkUJORrw/QMW6gJIyLtjRhtQj0+NnFoPV3Warkg3/3xlgoGt5A9wJo2zhM03mmionvGzkgF + ig8gKmzD+B6hIbW9TN4Za//9Yz5ftRc5RRcfDae7sRGrmnr9X2ceIShfjt3XJ7raa/stzgXiDDLw + 5HzCU4QbwyZUq95jmGEQ12CEhgvaYzY9fT29+AkAAP//AwAwBdt+/gMAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfe2a789b7296e4-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 07:45:42 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '9322' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86458' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.361s + x-request-id: + - bfa86d8ef9949f22b8e710ac50b2fcf7 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key and potentially important information + to remember.\n\n\nYou will receive the current summary and the latest development. + Combine them, adding relevant key information from the latest development in + 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\n{''role'': + ''system'', ''content'': ''This reminds you of these events from your past: + \\nAs a system, I recall that nothing new has happened since my creation.''}\n\"\"\"\n\nLatest + Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}], "temperature": 0, + "max_tokens": null}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '741' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SPTWvDMBBE7/0VYs+ysRO7cXULbQ+55FYoKaXI8iZSa2uFd0Nqgv97ST8IvT5m + hjdnCB0YcN6KG1KfrdanRVvuHp52zpWb+8dqW09lU2zaffM8gQZq39HJbyN3NKQeJVAEDW5EK9iB + KW+bRVMVq6rWMFCHPRg4JMmWeZ3JcWwpK5ZFCRqObA8I5gxppCHJm9AHRgZTLmoN1+0rv9MgJLa/ + kqqaNThPwSGDeTnDgPy3OlKPYMAyBxYb5eJIUTBe/NesrOKJBQetNmpEZ/teibeiIokP8aAinpS3 + rLxNCSN2ikN0qIZJfV8NFHO1/Zf9yeUwa9iHGNi/jWiZIhhgoQQaQuzwE0wxv843XwAAAP//AwBZ + 1nVcfAEAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfe2ac2bb0b96e4-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 07:45:46 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '1446' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89832' + x-ratelimit-reset-requests: + - 17ms + 
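# NOTE: the `!!binary` scalars in this cassette are base64 of the gzip-compressed
# JSON bodies returned by the API (see Content-Encoding: gzip in the recorded
# headers). A minimal sketch for inspecting one offline -- the cassette path
# here is illustrative:
#
#     import gzip, json, yaml
#
#     with open("test_memory_challenge_a.yaml") as f:
#         cassette = yaml.safe_load(f)          # PyYAML decodes !!binary to bytes
#     raw = cassette["interactions"][0]["response"]["body"]["string"]
#     body = json.loads(gzip.decompress(raw))   # gunzip, then parse the JSON body
#     print(body["choices"][0]["message"]["content"])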
x-ratelimit-reset-tokens: + - 112ms + x-request-id: + - 74efe1e7ce809342ac1cb7dd57f90235 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: + \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": + \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", + \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. + get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: + Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. + list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT + Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: + Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past: \nAs a system, I recall that nothing + new has happened since my creation. 
Nothing new happened."}, {"role": "user", + "content": "Determine which next command to use, and respond using the format + specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I should start by reading the instructions_1.txt file using the read_file + command.\",\n \"reasoning\": \"The first step is to understand the instructions + and the tasks that need to be performed.\",\n \"plan\": \"- Use the read_file + command to read the instructions_1.txt file\\n- Analyze the instructions and + determine the next steps\",\n \"criticism\": \"I need to ensure that + I understand the instructions completely before proceeding with any tasks.\",\n \"speak\": + \"I will start by reading the instructions file.\"\n },\n \"command\": + {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": + \"instructions_1.txt\"\n }\n }\n}"}, {"role": "system", "content": + "Command read_file returned: This task_id is 2314\nRead the file instructions_2.txt"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": + {\n \"text\": \"I need to read the instructions_2.txt file using the + read_file command.\",\n \"reasoning\": \"The instructions_2.txt file + contains the next set of instructions that I need to follow.\",\n \"plan\": + \"- Use the read_file command to read the instructions_2.txt file\\n- Analyze + the instructions and determine the next steps\",\n \"criticism\": \"I + need to ensure that I understand the instructions completely before proceeding + with any tasks.\",\n \"speak\": \"I will read the instructions_2.txt + file now.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": + {\n \"filename\": \"instructions_2.txt\"\n }\n }\n}"}, + {"role": "system", "content": "Command read_file returned: Read the file instructions_3.txt"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": + {\n \"text\": \"I need to read the instructions_3.txt file using the + read_file command.\",\n \"reasoning\": \"The instructions_3.txt file + contains the next set of instructions that I need to follow.\",\n \"plan\": + \"- Use the read_file command to read the instructions_3.txt file\\n- Analyze + the instructions and determine the next steps\",\n \"criticism\": \"I + need to ensure that I understand the instructions completely before proceeding + with any tasks.\",\n \"speak\": \"I will read the instructions_3.txt + file now.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": + {\n \"filename\": \"instructions_3.txt\"\n }\n }\n}"}, + {"role": "system", "content": "Command read_file returned: Write the task_id + into the file output.txt\nShutdown"}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}], + "temperature": 0, "max_tokens": 2132}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '6465' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA5RSy27bMBC89yuIPVOCbfkR6FYUaBv0UBRNL60CgaE2EmNpVyBXsAND/17IsqFE + RQ++znBnZpdzAldACrYyYpu2jnYfDyu7+FF+esHv3efq6+vvB3o9/qyKw5fuG2jgpxe0cpmILTdt + jeKYQIP1aAQLSJfbu9XderFbbzU0XGANKZStREm8iaTzTxwtksUSNHTBlAjpCVrPTSu58B4pQLpM + 
domGSXwitisNwmLqCdokm16DrdhZDJD+OUGD4SrsuUZIwYTgghiSISaTIA0rnDJSSqkMpOKurCRk + kKoLeCHwKAOYwb0ixEIJq4N3gkoqVGLCPneFciR8BriTtpNYjqKeXY2qC47KM3MeyoXzM265aQwV + cQb6rZtHE5gclaPlQ4XKURDf2eEIIU8m5SuOhWrwtlRz17Y2NBpG6lfA/8e9zWbmYr0TZ11o5udE + Cp0fJI2o+zf6lr1HKzevE1o0+6vJwdX1LR9GfIgzGOV6fa3HZf9/2kGmwdHp3b1mgYwv58UaieHt + JDFFeTc/7+EqWa6vCc8pL2Ez6qHX8OzIhSofmwQpBOEWNDgq8Ajpon/sP/wFAAD//wMAK3Nw1/ID + AAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfe2acc48b496e4-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 07:45:56 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '9570' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86451' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.366s + x-request-id: + - adf37a7bcd126ffa2323b9436545abfc + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key and potentially important information + to remember.\n\n\nYou will receive the current summary and the latest development. + Combine them, adding relevant key information from the latest development in + 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\n{''role'': + ''system'', ''content'': ''This reminds you of these events from your past: + \\nAs a system, I recall that nothing new has happened since my creation. 
Nothing + new happened.''}\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}], + "temperature": 0, "max_tokens": null}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '763' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SPTUvDQBRF9/6K4a0nIf1OZ9eFQhHFjRYUKdPJMxmdzBvzXmlryX+XaqW4Pdx7 + OfcIvgIDrrHi2hSy2WI3pGp68/xYr1ZPn/u7r22dHpbXm9ltmIIG2ryjk3Mjd9SmgOIpggbXoRWs + wAym5bAcF7NJqaGlCgMYqJNko3ySybbbUFaMigFo2LKtEcwRUkdtkrXQB0YGMxjONVy2L3yuQUhs + uJBx2WtwDXmHDOblCC3y32pHAcGAZfYsNsrJkaJgPPkvWFnFBxZstVqqDp0NQUljRUWSxsdaRdyp + xrJqbEoYsVLso0PVHtTPVU8xV/f/sr+5HHoNbz56btYdWqYIBlgogQYfK9yDKfrX/uobAAD//wMA + lmqfw3wBAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfe2b15e8cd96e4-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 07:46:00 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '1442' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89826' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 116ms + x-request-id: + - 6536a4bd33511e252d655771f59e71f4 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: + \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": + \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", + \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. + get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: + Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. + list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT + Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: + Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. 
Task Complete (Shutdown): \"task_complete\", args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past: \nAs a system, I recall that nothing + new has happened since my creation. Nothing new happened."}, {"role": "user", + "content": "Determine which next command to use, and respond using the format + specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I should start by reading the instructions_1.txt file using the read_file + command.\",\n \"reasoning\": \"The first step is to understand the instructions + and the tasks that need to be performed.\",\n \"plan\": \"- Use the read_file + command to read the instructions_1.txt file\\n- Analyze the instructions and + determine the next steps\",\n \"criticism\": \"I need to ensure that + I understand the instructions completely before proceeding with any tasks.\",\n \"speak\": + \"I will start by reading the instructions file.\"\n },\n \"command\": + {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": + \"instructions_1.txt\"\n }\n }\n}"}, {"role": "system", "content": + "Command read_file returned: This task_id is 2314\nRead the file instructions_2.txt"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": + {\n \"text\": \"I need to read the instructions_2.txt file using the + read_file command.\",\n \"reasoning\": \"The instructions_2.txt file + contains the next set of instructions that I need to follow.\",\n \"plan\": + \"- Use the read_file command to read the instructions_2.txt file\\n- Analyze + the instructions and determine the next steps\",\n \"criticism\": \"I + need to ensure that I understand the instructions completely before proceeding + with any tasks.\",\n \"speak\": \"I will read the instructions_2.txt + file now.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": + {\n \"filename\": \"instructions_2.txt\"\n }\n }\n}"}, + {"role": "system", "content": "Command read_file returned: Read the file instructions_3.txt"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": + {\n \"text\": \"I need to read 
the instructions_3.txt file using the + read_file command.\",\n \"reasoning\": \"The instructions_3.txt file + contains the next set of instructions that I need to follow.\",\n \"plan\": + \"- Use the read_file command to read the instructions_3.txt file\\n- Analyze + the instructions and determine the next steps\",\n \"criticism\": \"I + need to ensure that I understand the instructions completely before proceeding + with any tasks.\",\n \"speak\": \"I will read the instructions_3.txt + file now.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": + {\n \"filename\": \"instructions_3.txt\"\n }\n }\n}"}, + {"role": "system", "content": "Command read_file returned: Write the task_id + into the file output.txt\nShutdown"}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}, + {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I need to write the task_id into the output.txt file using the write_to_file + command.\",\n \"reasoning\": \"The instructions_3.txt file instructed + me to write the task_id into the output.txt file.\",\n \"plan\": \"- + Use the write_to_file command to write the task_id into the output.txt file\",\n \"criticism\": + \"I need to ensure that I write the correct task_id into the output.txt file.\",\n \"speak\": + \"I will write the task_id into the output.txt file now.\"\n },\n \"command\": + {\n \"name\": \"write_to_file\",\n \"args\": {\n \"filename\": + \"output.txt\",\n \"text\": \"2314\"\n }\n }\n}"}, {"role": + "system", "content": "Command write_to_file returned: File written to successfully."}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}], "temperature": 0, "max_tokens": 1919}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '7433' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA4SSQW/bMAyF7/sVAs9O4DRt3PoWbJegPQzotss8BLLM2GpkyjXppl3g/z44tpM0 + 67ArKb73PVJ7sBnEYAotpqzcJFrurp6/fK5+L+4e0vT6frFavTx+ffi2/HH/eAcB+PQJjQwTU+PL + yqFYTxCAqVELZhDPFrdXt9dhtAgDKH2GDmLIK5nMpzcTaerUT8J5OIMAGtY5QryHqvZlJWvxWySG + eHYTRQGcxE+NeRiAeNHuVIrCqA3AFN4aZIh/7qFEHoVr7xBi0MyWRZN0mJ4EqYuwT0gppRKQwjd5 + IZxArIbi0MBX6YoJrFShX1ANTJgp7ZySApVo3rLSlClCzJR4xUUjmd+RathSfny0Hmc7kVJTNk0g + ODerUbMnS3nvuBwMLLHUjen2wD1Eikhq453zuw6EsqOHsnxEvJSvnKZeeaK+M/6b612G7hW/sWB5 + IWdqK9ZYLsf9jPGRuKk7eS3qP1tLceNrPJhJt6rO8RKbK9Tb0WNnnfuITZHfTRPo59pgvOsQ6K+z + ki6xl3y3gAtnXeeXP+L8UGdXOoQ5heTGGGTeNM69HakOZANgQi20AWwsWS7WvRzEwOIrCMBShq8Q + h+2v9tMfAAAA//8DAKnylkSfAwAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfe2b1f5eef96e4-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 07:46:08 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '7793' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86458' + x-ratelimit-reset-requests: + 
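# The system prompt recorded above requires replies that Python's json.loads can
# parse. A minimal sketch of the consuming side, assuming `body` is a decoded
# completion as in the earlier note (names are illustrative):
#
#     import json
#
#     content = body["choices"][0]["message"]["content"]
#     reply = json.loads(content)              # must parse per the prompt contract
#     command = reply["command"]["name"]        # e.g. "read_file"
#     args = reply["command"]["args"]           # e.g. {"filename": "instructions_1.txt"}
#     plan = reply["thoughts"]["plan"]          # short bulleted long-term plan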
- 17ms + x-ratelimit-reset-tokens: + - 2.36s + x-request-id: + - 1fc1636b39f8812550599c1474d1d565 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integration/challenges/memory/cassettes/test_memory_challenge_b/test_memory_challenge_b.yaml b/tests/integration/challenges/memory/cassettes/test_memory_challenge_b/test_memory_challenge_b.yaml new file mode 100644 index 00000000..904ad7e9 --- /dev/null +++ b/tests/integration/challenges/memory/cassettes/test_memory_challenge_b/test_memory_challenge_b.yaml @@ -0,0 +1,771 @@ +interactions: +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: + \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": + \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", + \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. + get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: + Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. + list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT + Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: + Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. 
Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 01 00:00:00 2000"}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}], + "temperature": 0, "max_tokens": 2766}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '3294' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA4RSS2/aQBC+91eM5mwjCI8g31B6yaFSpTaHtq7Qsh7sLevd1c5YQBH/vTK2SQRR + ev3m8T1mTmgKzFBXSnQdbPq4avy3xY/xbLY6fnkKT+rrvvn8aJYvP3flAhP0mz+kpZ8YaV8HS2K8 + wwR1JCVUYDZZLB+W08X4YZZg7QuymGEZJJ2O5qk0cePT8XQ8wQQbViVhdsIQfR1kLX5HjjF7nE4S + fN19xSfzWYLiRdkrtFzOzwnqyhtNjNmvE9bEw9roLWGGitmwKCetSO+EXGvglDsAgByl8k1ZCeeY + QQ/2BTpIC+b4DFz5xhbAoqLA5giRVGFcCVIRGMcSG91K5fVkJAeBrbEEDQ8dbff6gmlf18oVoxyT + t1SRFHtnXNnxfa8ItiayAAsFMAzioXEFxdZHcccKAyiKdwxSKQFHVLRjG4JAcetjTXe0wSrXMabw + wvS+1nZJC37kNc9dCiun7PEvva+uIKFYG9eVHR06b3yjSEcjRhuuh+AHG+S4idRZe/4wi/5xyB5h + Q1sfCUL0muhyr72RCpQ7dknd5sGB1G5g3htr/3/wi/1Rjt2aczJ8VR/e3VM5VVPHcA36RoSK5e0v + doW293X8/hKDiIuQXk/uznhOcGuc4WrdvRlmyOIDJmhcQQfMxuff50//AAAA//8DACSqIf4KBAAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfdb77eec9416f9-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 06:27:12 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '8377' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86494' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.337s + x-request-id: + - 75084ec318eb8fbda968c3f79a242ee3 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key and potentially important information + to remember.\n\n\nYou will receive the current summary and the latest development. 
+ Combine them, adding relevant key information from the latest development in + 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\nI + was created.\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}], + "temperature": 0, "max_tokens": null}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '600' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA0SOMU/DMBQGd36F9c1OlBBaWm8wIGCMgAWhyrVfE0PiZ8WvKqjKf0eVCl1PutMd + ETwMXG/FjWkobu/27OsfeXmUw1v7sEtd67rn1/Z+eVOvoMHbT3JyNkrHYxpIAkdouImskIepl6vr + VbOsmoXGyJ4GGHRJiqZcFLKftlxUTVVDY59tRzBHpInHJBvhL4oZZl1rXNIXrCEsdvgHdVXNGq7n + 4CjDvB8xUv5rTjwQDGzOIYuNcjrkKBRP90/qYLM6HysbvYosfYidinRQvc2qtylRJF9i1tiFGHK/ + mchmjjDIwgkaIXr6hqnmj/nqFwAA//8DABWtlxRJAQAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfdb7c3796916f9-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 06:27:16 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '948' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89866' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 88ms + x-request-id: + - e02c6c27229229ec129826db88fa9f23 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: + \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": + \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", + \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. + get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: + Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. + list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT + Agent, args: \"key\": \"\", \"message\": \"\"\n11. 
start_agent: + Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past: \nI was created and nothing new + has happened."}, {"role": "user", "content": "Determine which next command to + use, and respond using the format specified above:"}, {"role": "assistant", + "content": "{\n \"thoughts\": {\n \"text\": \"I should start by reading + the instructions_1.txt file using the read_file command.\",\n \"reasoning\": + \"The first step is to understand the instructions and the tasks that need to + be performed.\",\n \"plan\": \"- Use the read_file command to read the + instructions_1.txt file\\n- Analyze the instructions and determine the next + steps\",\n \"criticism\": \"I need to ensure that I understand the instructions + completely before proceeding with any tasks.\",\n \"speak\": \"I will + start by reading the instructions file.\"\n },\n \"command\": {\n \"name\": + \"read_file\",\n \"args\": {\n \"filename\": \"instructions_1.txt\"\n }\n }\n}"}, + {"role": "system", "content": "Command read_file returned: nBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\nThe + current task_id is 
1111.\nnBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\nRead + the file instructions_2.txt using the read_file command."}, {"role": "user", + "content": "Determine which next command to use, and respond using the format + specified above:"}], "temperature": 0, "max_tokens": 1131}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '6479' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA7SSy4rbQBBF9/mKotaSsK2MZ9BusgjxIlnlBVEwbXVJ6rhV3XSXGDvG/x5kyePB + JoQQsr31Ore4BzQaC6xaJVXnbXr/2Dva9x/Uqlrm9bt79ebt6kvt5b3dfv2MCbrND6pkmsgq13lL + YhxjglUgJaSxmC8fFg/5cpYvE+ycJosFNl7SPLtLpQ8bl87y2RwT7KNqCIsD+uA6L2txW+KIxSJ/ + WCR4Wf5cmA+6OFH20nu3fH1MsGqdqShi8e2AHcXz4uAsYYEqRhNFsQyYjoV4sHAoGQCgRGld37QS + SyxgEqcC7WQQS/zYEhiOEvpqIIrrRSY7gdpYgmGjMhxBgTVRwNUgKm4jSKsEmEiDONgQeAq1Cx3p + DFYQW9dbDYqV3f8kkJamKcUaNAmFzvCoM+0EopCPWYnJS8BAKjo23PwvyrP+b5jeKh4JU/gUx+5A + Sq8nsq4btok7iafqb1yUJafw+FcsVyhVMGIqE7uR5+KQOPaBxm+soGdNYYiMfnFmSiTZPWyodoHA + B1cRacMNPBlpQfF+7L3+QPSktueTT8baP1o9mbp5e1biuPaYnNM7ve8mvKw6Gi8+v/oKSoXmOvNj + Yei9jN8CniFOIBNPyUc8JlgbNrFdj8HEAqM4jwka1rTDYnb8fnz1CwAA//8DAIjD0lt0BAAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfdb7c9ef9316f9-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 06:27:26 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '10335' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '87376' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 1.748s + x-request-id: + - 0aa171294ea5788272f2d06269765a84 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key and potentially important information + to remember.\n\n\nYou will receive the current summary and the latest development. 
+ Combine them, adding relevant key information from the latest development in + 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\n{''role'': + ''system'', ''content'': ''This reminds you of these events from your past: + \\nI was created and nothing new has happened.''}\n\"\"\"\n\nLatest Development:\n\"\"\"\n[{''role'': + ''you'', ''content'': ''{\"command\": {\"name\": \"read_file\", \"args\": {\"filename\": + \"instructions_1.txt\"}}}''}, {''role'': ''your computer'', ''content'': ''Command + read_file returned: nBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\\nThe + current task_id is 1111.\\nnBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\\nRead + the file instructions_2.txt using the read_file command.''}]\n\"\"\"\n"}], "temperature": + 0, "max_tokens": null}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '2983' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1yPTW/TQBCG7/yK0Zw3lp2UtPUtVUFwAQkhgaDI2non8VLvjtmZTRqi/Hdk4xbU + 42jer+eE3mGNbWe1DUO/uNxkTh/y18S3N7tvYf/l1+fyU/u7/Pj2TX+BBvn+J7U6O4qWw9CTeo5o + sE1klRzW1fpqebValxfXBgM76rHG3aCLVfF6oTnd86JclRUazGJ3hPUJh8Rh0Eb5gaKMAeXS4L/w + 58f62qCy2v4/6WV1Nth27FsSrL+fMJA85SbuCWu0Il7URh1XclSKI8FGwIIcRSkYeA8HKzAjgI0O + Imvn4w4iHaCzAp0dBorkCnjHB9pTMmAFBkpw5Jyg5RBsdOb5GrJSmpyJNKdIDrQjmPsFeAsWtr4n + iDaQgzv0UTTldiSWpir0Ue9wmjL5ckoUFdTKQ+MdeIGqqqoCNs750WL7/ji1Q2f3BE9h5F4sUoZE + 9m/oVP+ieDkXZxnpR9UobybpDFng2eDWRy9dk8gKR6xRlAc06KOjR6zL84/zqz8AAAD//wMA0Kuq + I10CAAA= + 
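+# These interactions are recorded fixtures; the memory-challenge tests presumably
+# replay them instead of calling the live API. A minimal sketch with vcrpy (the
+# decorator and wiring are illustrative, not necessarily how this suite does it):
+#
+#     import vcr
+#
+#     @vcr.use_cassette("tests/integration/challenges/memory/cassettes/"
+#                       "test_memory_challenge_b/test_memory_challenge_b.yaml")
+#     def test_memory_challenge_b():
+#         ...  # matching POSTs to api.openai.com are served from this file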
headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfdb81dddc516f9-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 06:27:33 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '4060' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89275' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 483ms + x-request-id: + - 75d0c0f3853f72f74e3fc8b8ef377b60 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: + \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": + \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", + \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. + get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: + Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. + list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT + Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: + Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. 
Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past: \nAs a system, I was created and + nothing new has happened. However, as per your command, your computer has returned + the contents of a file named \"instructions_1.txt\" and the current task_id + is 1111. Additionally, you have instructed your computer to read the file \"instructions_2.txt\" + using the read_file command."}, {"role": "user", "content": "Determine which + next command to use, and respond using the format specified above:"}, {"role": + "assistant", "content": "{\n \"thoughts\": {\n \"text\": \"The instructions_2.txt + file contains a list of tasks that need to be performed. I should analyze the + tasks and determine the next steps.\",\n \"reasoning\": \"The instructions_2.txt + file contains a list of tasks that need to be performed. I need to analyze the + tasks and determine the next steps.\",\n \"plan\": \"- Use the read_file + command to read the instructions_2.txt file\\n- Analyze the tasks and determine + the next steps\",\n \"criticism\": \"I need to ensure that I understand + the tasks completely before proceeding with any tasks.\",\n \"speak\": + \"I will read the instructions_2.txt file and analyze the tasks.\"\n },\n \"command\": + {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": + \"instructions_2.txt\"\n }\n }\n}"}, {"role": "system", "content": + "Command read_file returned: Gqh8WYLH0cpnlBwPHjEvMN1UJ3A1IIPKoBAwOUKDtgudwfGf6Cw1gc7DdYqe4EFlERnhU9gclGr0ovsNbOVcZFapXJ5EzFFtyikiUHVpu1s8lVjNoNG0Wvjy08ymnB5zQLrTVA1jYInBuNaVWswjFDJmpQpebQuHaRqepCYlASNIPUAuKXrfzxE1CB51jjrZHBUxmz5F0PkgYTVUKfHf4VtyyOJMlvoKveuvb5inm1mJEbSRDfTcDzE11mfcix3byoTdJDG3VsfKPbbgJXRZUlwoEFqRso42UkIV7rYk6TXxypztqgoaWWLN6eOIE0Gvz2CUvIXKzvyKbZpObRubhTydzW69cRXg7yxIPn2iG00RvQ4Jb6NbVBDfO9XB50oxIRzrIrbAEPzitgGRSCaxcG4kBfPIamOZpF8Egf9iYdn8PBwVtmvtUChyKwcrnyDwcSCMC5f9hPuLWw1UHW3u3X8ee5gxtElyhTEoTlaqVC1V9R86EnWxq7IlwCEwWXjlq6XvjgYoPHteXiQQb9R4eH0c3bFq0TVEh30s5nxfHMIxBiWxilFT9sgvzE8nbqV2wnL7a4DHJKUrV630PniHShfK6Josl0izBCnjovZnemR7dxqSEDbeMlTst18IWiTcQPGRXmP1JUjTvefeTB6f7NVDe2odh0a2tvWYjMiEfvR9qH8YxV6uMffCIUdclfUmK9tSUzhrSyWp8kxsqBd7wwjskKmm0JqB6GBTJqZw52tDIMUpgTK4VVvsnqlLPiN8SRMrTV5xQHpX8y7xSRsVbPdMh5rImkPpgnDZsGvAi5Z52dM7To4Fjhs64K2K59Ye6752AjG8R9he60dLGKcPLtxjuK13zfFpLIUPQOu7SsAoqDHTZf3rTH4SmNIJulZidhpNPu2FbrEiDhfjJUgHMkFfycfcDcysYyujCVD95E5BZvv8oa9X14XqphoVLhgtNABSphgzPVR6sGyZv2AMGClrDtYON1DfRytTlpTS\nThis + task_id is 
2222\nGqh8WYLH0cpnlBwPHjEvMN1UJ3A1IIPKoBAwOUKDtgudwfGf6Cw1gc7DdYqe4EFlERnhU9gclGr0ovsNbOVcZFapXJ5EzFFtyikiUHVpu1s8lVjNoNG0Wvjy08ymnB5zQLrTVA1jYInBuNaVWswjFDJmpQpebQuHaRqepCYlASNIPUAuKXrfzxE1CB51jjrZHBUxmz5F0PkgYTVUKfHf4VtyyOJMlvoKveuvb5inm1mJEbSRDfTcDzE11mfcix3byoTdJDG3VsfKPbbgJXRZUlwoEFqRso42UkIV7rYk6TXxypztqgoaWWLN6eOIE0Gvz2CUvIXKzvyKbZpObRubhTydzW69cRXg7yxIPn2iG00RvQ4Jb6NbVBDfO9XB50oxIRzrIrbAEPzitgGRSCaxcG4kBfPIamOZpF8Egf9iYdn8PBwVtmvtUChyKwcrnyDwcSCMC5f9hPuLWw1UHW3u3X8ee5gxtElyhTEoTlaqVC1V9R86EnWxq7IlwCEwWXjlq6XvjgYoPHteXiQQb9R4eH0c3bFq0TVEh30s5nxfHMIxBiWxilFT9sgvzE8nbqV2wnL7a4DHJKUrV630PniHShfK6Josl0izBCnjovZnemR7dxqSEDbeMlTst18IWiTcQPGRXmP1JUjTvefeTB6f7NVDe2odh0a2tvWYjMiEfvR9qH8YxV6uMffCIUdclfUmK9tSUzhrSyWp8kxsqBd7wwjskKmm0JqB6GBTJqZw52tDIMUpgTK4VVvsnqlLPiN8SRMrTV5xQHpX8y7xSRsVbPdMh5rImkPpgnDZsGvAi5Z52dM7To4Fjhs64K2K59Ye6752AjG8R9he60dLGKcPLtxjuK13zfFpLIUPQOu7SsAoqDHTZf3rTH4SmNIJulZidhpNPu2FbrEiDhfjJUgHMkFfycfcDcysYyujCVD95E5BZvv8oa9X14XqphoVLhgtNABSphgzPVR6sGyZv2AMGClrDtYON1DfRytTlpTS\nWrite + all the task_ids into the file output.txt. The file has not been created yet. + After that, use the task_complete command.\n"}, {"role": "user", "content": + "Determine which next command to use, and respond using the format specified + above:"}], "temperature": 0, "max_tokens": 1071}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '6914' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA8yTQY/TQAyF7/yKkc/TqknVbcmNC9KKAwgJLgRV04mbDJ3YIeO03a3y31GalJQg + DisOcH22nz7behdwGSRgCyO2rPxs/abh4/l4/PhBnt2pfHq/fvpu3z7n9ebzuww08O4bWhkm5pbL + yqM4JtBgazSCGSTRwybeLB8Wq6WGkjP0kEBeyWw5X82kqXc8WywXEWhogskRkgtUNZeVbIUPSAGS + eLWINYzmYyF6rUFYjB+ldRy1GmzBzmKA5MsFSgw345o9QgImBBfEkHSYTILUrXBJSSmlUpCCm7yQ + kEKiBnEo4Fk6MYVHRYiZElan2gkq472SApWYcNi6LChHwldl7zwqbqRqZC5nUYayTifVBBxHhuVQ + WS7LawsrR5mzRrouIz9blQvq1j1PQd/z1WgCk6P8f4asvKGeb6Y+DfZXvq3w9gpyZ/9C8DSl0fQv + mCfItnbirAvl9K5IoakHn0dVmGO/iiC94Ng73HONqgmO8j+DT88YKjSHG8/Jef+vn9zDtfqWomH+ + txCRKbHn/uXrk/VMnU/z1xe63tHi7vX389O4RlEUpSnFcRzfQK+wA3NKLbQa9o5cKLZ9iiCBIFyB + BkcZniFZtF/bVz8AAAD//wMAymrgECAFAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfdb837d91916f9-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 06:27:45 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '12002' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '87330' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 1.78s + x-request-id: + - 040a4416b064ecd9461b7457dd71db07 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key and potentially important information + to remember.\n\n\nYou will receive the current summary and the latest development. 
+ Combine them, adding relevant key information from the latest development in + 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\n{''role'': + ''system'', ''content'': ''This reminds you of these events from your past: + \\nAs a system, I was created and nothing new has happened. However, as per + your command, your computer has returned the contents of a file named \"instructions_1.txt\" + and the current task_id is 1111. Additionally, you have instructed your computer + to read the file \"instructions_2.txt\" using the read_file command.''}\n\"\"\"\n\nLatest + Development:\n\"\"\"\n[{''role'': ''you'', ''content'': ''{\"command\": {\"name\": + \"read_file\", \"args\": {\"filename\": \"instructions_2.txt\"}}}''}]\n\"\"\"\n"}], + "temperature": 0, "max_tokens": null}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '1087' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA0SPTW8TMRCG7/yK0Zyd1W6+iPYW4ECKAFVCXChaOfYkdrv2rDyzSdoo/x0tKvT6 + 6pln3veK0WOLLlh1aehn77fjsP149215/2H/ff3y9XyRftV8Oux2xy8/0SDvH8np60XlOA09aeSM + Bl0hq+Sxbdab+Waxrtcbg4k99djicdDZolrNdCx7ntWLukGDo9gjYXvFoXAatFN+oizYzpva4Jv7 + f75cGlRW27+Rq+XNoAscHQm2v66YSP5ZC/eELVqRKGqzTh05K+Wp/1bAgjyLUjKQWUPMR8h0hmAF + gh0GyuQr+MxnOlExsINgTwSFHMUTebDgOCWbPRwKJ3jmEZShkPWggeAQe4IHjFm0jG5aId280os+ + IIwy/ZqoCe/+oq+yCn4EAjeWQllBrTx10UMUEI19D03TNBXeDB5ijhK6QlY4Y4uiPKDBmD1dsK1v + v2/v/gAAAP//AwDW3lkK2gEAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfdb8959bc416f9-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 06:27:51 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '2478' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89749' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 167ms + x-request-id: + - 5acc9d310e317d6b025800ba63f0cfa6 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. 
append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: + \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": + \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", + \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. + get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: + Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. + list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT + Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: + Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past: \nAs a system, nothing new has happened. + However, I have received a command from you to read the file \"instructions_2.txt\" + using the read_file command. 
The current task_id is still 1111."}, {"role": + "system", "content": "Command read_file returned: Gqh8WYLH0cpnlBwPHjEvMN1UJ3A1IIPKoBAwOUKDtgudwfGf6Cw1gc7DdYqe4EFlERnhU9gclGr0ovsNbOVcZFapXJ5EzFFtyikiUHVpu1s8lVjNoNG0Wvjy08ymnB5zQLrTVA1jYInBuNaVWswjFDJmpQpebQuHaRqepCYlASNIPUAuKXrfzxE1CB51jjrZHBUxmz5F0PkgYTVUKfHf4VtyyOJMlvoKveuvb5inm1mJEbSRDfTcDzE11mfcix3byoTdJDG3VsfKPbbgJXRZUlwoEFqRso42UkIV7rYk6TXxypztqgoaWWLN6eOIE0Gvz2CUvIXKzvyKbZpObRubhTydzW69cRXg7yxIPn2iG00RvQ4Jb6NbVBDfO9XB50oxIRzrIrbAEPzitgGRSCaxcG4kBfPIamOZpF8Egf9iYdn8PBwVtmvtUChyKwcrnyDwcSCMC5f9hPuLWw1UHW3u3X8ee5gxtElyhTEoTlaqVC1V9R86EnWxq7IlwCEwWXjlq6XvjgYoPHteXiQQb9R4eH0c3bFq0TVEh30s5nxfHMIxBiWxilFT9sgvzE8nbqV2wnL7a4DHJKUrV630PniHShfK6Josl0izBCnjovZnemR7dxqSEDbeMlTst18IWiTcQPGRXmP1JUjTvefeTB6f7NVDe2odh0a2tvWYjMiEfvR9qH8YxV6uMffCIUdclfUmK9tSUzhrSyWp8kxsqBd7wwjskKmm0JqB6GBTJqZw52tDIMUpgTK4VVvsnqlLPiN8SRMrTV5xQHpX8y7xSRsVbPdMh5rImkPpgnDZsGvAi5Z52dM7To4Fjhs64K2K59Ye6752AjG8R9he60dLGKcPLtxjuK13zfFpLIUPQOu7SsAoqDHTZf3rTH4SmNIJulZidhpNPu2FbrEiDhfjJUgHMkFfycfcDcysYyujCVD95E5BZvv8oa9X14XqphoVLhgtNABSphgzPVR6sGyZv2AMGClrDtYON1DfRytTlpTS\nThis + task_id is 2222\nGqh8WYLH0cpnlBwPHjEvMN1UJ3A1IIPKoBAwOUKDtgudwfGf6Cw1gc7DdYqe4EFlERnhU9gclGr0ovsNbOVcZFapXJ5EzFFtyikiUHVpu1s8lVjNoNG0Wvjy08ymnB5zQLrTVA1jYInBuNaVWswjFDJmpQpebQuHaRqepCYlASNIPUAuKXrfzxE1CB51jjrZHBUxmz5F0PkgYTVUKfHf4VtyyOJMlvoKveuvb5inm1mJEbSRDfTcDzE11mfcix3byoTdJDG3VsfKPbbgJXRZUlwoEFqRso42UkIV7rYk6TXxypztqgoaWWLN6eOIE0Gvz2CUvIXKzvyKbZpObRubhTydzW69cRXg7yxIPn2iG00RvQ4Jb6NbVBDfO9XB50oxIRzrIrbAEPzitgGRSCaxcG4kBfPIamOZpF8Egf9iYdn8PBwVtmvtUChyKwcrnyDwcSCMC5f9hPuLWw1UHW3u3X8ee5gxtElyhTEoTlaqVC1V9R86EnWxq7IlwCEwWXjlq6XvjgYoPHteXiQQb9R4eH0c3bFq0TVEh30s5nxfHMIxBiWxilFT9sgvzE8nbqV2wnL7a4DHJKUrV630PniHShfK6Josl0izBCnjovZnemR7dxqSEDbeMlTst18IWiTcQPGRXmP1JUjTvefeTB6f7NVDe2odh0a2tvWYjMiEfvR9qH8YxV6uMffCIUdclfUmK9tSUzhrSyWp8kxsqBd7wwjskKmm0JqB6GBTJqZw52tDIMUpgTK4VVvsnqlLPiN8SRMrTV5xQHpX8y7xSRsVbPdMh5rImkPpgnDZsGvAi5Z52dM7To4Fjhs64K2K59Ye6752AjG8R9he60dLGKcPLtxjuK13zfFpLIUPQOu7SsAoqDHTZf3rTH4SmNIJulZidhpNPu2FbrEiDhfjJUgHMkFfycfcDcysYyujCVD95E5BZvv8oa9X14XqphoVLhgtNABSphgzPVR6sGyZv2AMGClrDtYON1DfRytTlpTS\nWrite + all the task_ids into the file output.txt. The file has not been created yet. 
+ After that, use the task_complete command.\n"}, {"role": "user", "content": + "Determine which next command to use, and respond using the format specified + above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I need to write all the task_ids into the file output.txt and then use the + task_complete command to indicate that the task is complete.\",\n \"reasoning\": + \"I need to write all the task_ids into the file output.txt and then use the + task_complete command to indicate that the task is complete.\",\n \"plan\": + \"- Use the write_to_file command to write all the task_ids into the file output.txt\\n- + Use the task_complete command to indicate that the task is complete\",\n \"criticism\": + \"I need to ensure that I have written all the task_ids into the file output.txt + before using the task_complete command.\",\n \"speak\": \"I will write + all the task_ids into the file output.txt and then use the task_complete command + to indicate that the task is complete.\"\n },\n \"command\": {\n \"name\": + \"write_to_file\",\n \"args\": {\n \"filename\": \"output.txt\",\n \"text\": + \"1111\\n2222\"\n }\n }\n}"}, {"role": "system", "content": "Command + write_to_file returned: File written to successfully."}, {"role": "user", "content": + "Determine which next command to use, and respond using the format specified + above:"}], "temperature": 0, "max_tokens": 1015}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '7053' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA8yST4/aMBDF7/0UozkbBIsWUG6r9kKltifUSk2FHGdIXJxxZI8LEsp3ryDJLkvV + 0/bQ6/x57zdPc0ZbYoam1mKa1k1WT6n98FHS9ktK3z69r2yxXRZV+lqFpg6o0Bc/yciwMTW+aR2J + 9YwKTSAtVGI2X64f1ovlbDVX2PiSHGZYtTJZTB8nkkLhJ7PFbI4KU9QVYXbGNvimlZ34A3HE7OFx + MVP4Iv7cmK9XCsWLdi+zq/mqU2hqbw1FzL6fsaE4CgfvCDPUMdoomuWC6VmILyeccwYAyFFqn6pa + Yo4ZDMWhQSe5FHP87I8gtRbYQK1/ERyDFSEG7RxITSA6Hna2jGBZ/LWyt47AJ2mTTOUkCjZgNEOK + 9LIwnEhgfNNoLkE8WC6t0UK93TgKNsI4Pc1R3VIG0tGz5er/R22d5p5yAtu3y9+pm2DFGhub3mID + THRVIo4p0NtSgYL2PhCkaLn6O/j9xbElfRh5jta5fxRr79Gp8YeH/T9emHVDvf0rvztKHar777/9 + rV7g6RLVGNM1xIKIn5MUf5PWCHiFHFhz7rBTuLdsY73rlTHDKL5FhZZLOmE26350734DAAD//wMA + A50m55YEAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfdb8a62cc116f9-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 06:28:01 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '10004' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '87360' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 1.76s + x-request-id: + - 357739c33b65d5f85a7b5ef081939e92 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integration/challenges/memory/test_memory_challenge_a.py b/tests/integration/challenges/memory/test_memory_challenge_a.py new file mode 100644 index 00000000..895fc8fe --- /dev/null +++ 
b/tests/integration/challenges/memory/test_memory_challenge_a.py @@ -0,0 +1,80 @@ +import pytest + +from autogpt.agent import Agent +from autogpt.commands.file_operations import read_file, write_to_file +from tests.integration.agent_utils import run_interaction_loop +from tests.integration.challenges.utils import get_level_to_run +from tests.utils import requires_api_key + +LEVEL_CURRENTLY_BEATEN = 3 # level 30 and beyond has been beaten in live runs, but the cassette for a run that long is too big to record and commit +MAX_LEVEL = 3 + + +@pytest.mark.vcr +@requires_api_key("OPENAI_API_KEY") +def test_memory_challenge_a( + memory_management_agent: Agent, user_selected_level: int +) -> None: + """ + The agent reads a file containing a task_id. Then, it reads a series of other files. + After reading 'n' files, the agent must write the task_id into a new file. + + Args: + memory_management_agent (Agent) + user_selected_level (int) + """ + + num_files = get_level_to_run(user_selected_level, LEVEL_CURRENTLY_BEATEN, MAX_LEVEL) + + task_id = "2314" + create_instructions_files(memory_management_agent, num_files, task_id) + + try: + run_interaction_loop(memory_management_agent, 180) + # catch system exit exceptions + except SystemExit: + file_path = str(memory_management_agent.workspace.get_path("output.txt")) + content = read_file(file_path) + assert task_id in content, f"Expected the file to contain {task_id}" + + +def create_instructions_files( + memory_management_agent: Agent, + num_files: int, + task_id: str, + base_filename: str = "instructions_", +) -> None: + """ + Creates a series of instructions files for the memory challenge. + Args: + memory_management_agent (Agent) + num_files (int) + task_id (str) + base_filename (str, optional) + """ + for i in range(1, num_files + 1): + content = generate_content(i, task_id, base_filename, num_files) + file_name = f"{base_filename}{i}.txt" + file_path = str(memory_management_agent.workspace.get_path(file_name)) + write_to_file(file_path, content) + + +def generate_content( + index: int, task_id: str, base_filename: str, num_files: int +) -> str: + """ + Generates the instructions content for file number `index`. + Args: + index: int + task_id: str + base_filename: str + num_files: int + + Returns: str + """ + if index == 1: + return ( + f"This task_id is {task_id}\nRead the file {base_filename}{index + 1}.txt" + ) + if index != num_files: + return f"Read the file {base_filename}{index + 1}.txt" + return "Write the task_id into the file output.txt\nShutdown" diff --git a/tests/integration/challenges/memory/test_memory_challenge_b.py b/tests/integration/challenges/memory/test_memory_challenge_b.py new file mode 100644 index 00000000..c98bbe9c --- /dev/null +++ b/tests/integration/challenges/memory/test_memory_challenge_b.py @@ -0,0 +1,81 @@ +import pytest + +from autogpt.agent import Agent +from autogpt.commands.file_operations import read_file, write_to_file +from tests.integration.agent_utils import run_interaction_loop +from tests.integration.challenges.utils import generate_noise, get_level_to_run +from tests.utils import requires_api_key + +LEVEL_CURRENTLY_BEATEN = 2 +MAX_LEVEL = 5 +NOISE = 1000 + + +@pytest.mark.vcr +@requires_api_key("OPENAI_API_KEY") +def test_memory_challenge_b( + memory_management_agent: Agent, user_selected_level: int +) -> None: + """ + The agent reads a series of files, each containing a task_id and noise. After reading 'n' files, + the agent must write all the task_ids into a new file, filtering out the noise.
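+ Each instructions file wraps its task_id in NOISE (= 1000) characters of random noise on each side.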
+ + Args: + memory_management_agent (Agent) + user_selected_level (int) + """ + + current_level = get_level_to_run( + user_selected_level, LEVEL_CURRENTLY_BEATEN, MAX_LEVEL + ) + task_ids = [str(i * 1111) for i in range(1, current_level + 1)] + create_instructions_files(memory_management_agent, current_level, task_ids) + + try: + run_interaction_loop(memory_management_agent, 40) + except SystemExit: + file_path = str(memory_management_agent.workspace.get_path("output.txt")) + content = read_file(file_path) + for task_id in task_ids: + assert task_id in content, f"Expected the file to contain {task_id}" + + +def create_instructions_files( + memory_management_agent: Agent, + level: int, + task_ids: list, + base_filename: str = "instructions_", +) -> None: + """ + Creates a series of instructions files for the memory challenge. + Args: + memory_management_agent (Agent) + level (int) + task_ids (list) + base_filename (str, optional) + """ + for i in range(1, level + 1): + content = generate_content(i, task_ids, base_filename, level) + file_name = f"{base_filename}{i}.txt" + file_path = str(memory_management_agent.workspace.get_path(file_name)) + write_to_file(file_path, content) + + +def generate_content(index: int, task_ids: list, base_filename: str, level: int) -> str: + """ + Generates the noisy instructions content for file number `index`. + Args: + index: int + task_ids: list + base_filename: str + level: int + + Returns: str + """ + task_id = task_ids[index - 1] + noise = generate_noise(NOISE) + if index != level: + return f"{noise}\nThe current task_id is {task_id}.\n{noise}\nRead the file {base_filename}{index + 1}.txt using the read_file command." + return f"{noise}\nThis task_id is {task_id}\n{noise}\nWrite all the task_ids into the file output.txt. The file has not been created yet. After that, use the task_complete command.\n" diff --git a/tests/integration/challenges/utils.py b/tests/integration/challenges/utils.py new file mode 100644 index 00000000..0c97402c --- /dev/null +++ b/tests/integration/challenges/utils.py @@ -0,0 +1,44 @@ +import random +from typing import Optional + +import pytest + + +def get_level_to_run( + user_selected_level: Optional[int], + level_currently_beaten: Optional[int], + max_level: int, +) -> int: + """ + Determines the appropriate level to run for a challenge, based on the user-selected level, the level currently beaten, and the maximum level. + + Args: + user_selected_level (int | None): The level selected by the user. If not provided, the level currently beaten is used. + level_currently_beaten (int | None): The highest level beaten so far. If not provided, the test will be skipped. + max_level (int): The maximum level allowed for the challenge. + + Returns: + int: The level to run for the challenge. + + Raises: + ValueError: If the user-selected level is greater than the maximum level allowed. + """ + if user_selected_level is None: + if level_currently_beaten is None: + pytest.skip( + "No level has been beaten yet, so this test cannot run in the pipeline" + ) + # by default, run the level currently beaten.
+ return level_currently_beaten + if user_selected_level > max_level: + raise ValueError(f"This challenge was not designed to go beyond {max_level}") + return user_selected_level + + +def generate_noise(noise_size) -> str: + return "".join( + random.choices( + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", + k=noise_size, + ) + ) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py new file mode 100644 index 00000000..a3c4da6e --- /dev/null +++ b/tests/integration/conftest.py @@ -0,0 +1,18 @@ +import pytest + +from tests.vcr.openai_filter import before_record_request, before_record_response + + +@pytest.fixture(scope="session") +def vcr_config(): + # this fixture is called by the pytest-recording vcr decorator. + return { + "record_mode": "new_episodes", + "before_record_request": before_record_request, + "before_record_response": before_record_response, + "filter_headers": [ + "Authorization", + "X-OpenAI-Client-User-Agent", + "User-Agent", + ], + } diff --git a/tests/integration/goal_oriented/__init__.py b/tests/integration/goal_oriented/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/integration/goal_oriented/cassettes/test_browse_website/test_browse_website.yaml b/tests/integration/goal_oriented/cassettes/test_browse_website/test_browse_website.yaml new file mode 100644 index 00000000..2584bb25 --- /dev/null +++ b/tests/integration/goal_oriented/cassettes/test_browse_website/test_browse_website.yaml @@ -0,0 +1,761 @@ +interactions: +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are browse_website-GPT, an AI designed to use the browse_website command to + visit http://books.toscrape.com/catalogue/meditations_33/index.html, answer + the question ''What is the price of the book?'' and write the price to a file + named \"browse_website.txt\", and use the task_complete command to complete + the task.\nYour decisions must always be made independently without seeking + user assistance. Play to your strengths as an LLM and pursue simple strategies + with no legal complications.\n\nGOALS:\n\n1. Use the browse_website command + to visit http://books.toscrape.com/catalogue/meditations_33/index.html and answer + the question ''What is the price of the book?''\n2. Write the price of the book + to a file named \"browse_website.txt\".\n3. Use the task_complete command to + complete the task.\n4. Do not use any other commands.\n\n\nConstraints:\n1. + ~4000 word limit for short term memory. Your short term memory is short, so + immediately save important information to files.\n2. If you are unsure how you + previously did something or want to recall past events, thinking about similar + events will help you remember.\n3. No user assistance\n4. Exclusively use the + commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: + Append to file, args: \"filename\": \"\", \"text\": \"\"\n2. + delete_file: Delete file, args: \"filename\": \"\"\n3. read_file: + Read file, args: \"filename\": \"\"\n4. search_files: Search Files, + args: \"directory\": \"\"\n5. write_to_file: Write to file, args: + \"filename\": \"\", \"text\": \"\"\n6. browse_website: Browse + Website, args: \"url\": \"\", \"question\": \"\"\n7. + delete_agent: Delete GPT Agent, args: \"key\": \"\"\n8. get_hyperlinks: + Get text summary, args: \"url\": \"\"\n9. get_text_summary: Get text summary, + args: \"url\": \"\", \"question\": \"\"\n10. list_agents: List + GPT Agents, args: () -> str\n11. 
message_agent: Message GPT Agent, args: \"key\": + \"\", \"message\": \"\"\n12. start_agent: Start GPT Agent, args: + \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n13. + Task Complete (Shutdown): \"task_complete\", args: \"reason\": \"\"\n\nResources:\n1. + Internet access for searches and information gathering.\n2. Long Term memory + management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File + output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your + actions to ensure you are performing to the best of your abilities.\n2. Constructively + self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions + and strategies to refine your approach.\n4. Every command has a cost, so be + smart and efficient. Aim to complete tasks in the least number of steps.\n5. + Write all code to a file.\n\nYou should only respond in JSON format as described + below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": + \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- + long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": + \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": + \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} + \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", + "content": "The current time and date is Tue Jan 01 00:00:00 2000"}, {"role": + "user", "content": "Determine which next command to use, and respond using the + format specified above:"}], "temperature": 0, "max_tokens": 2650}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '3777' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA6RTWWvbQBB+768Y5iUv8p00Ri+lB4UUSg9SUqiKWa/G0tbSjrI7qh2M/3tYHUns + JLS0rzM737UzOzQpxqhzJbqsisH5682n7c33z2dfp+l2oq4y++7th5s3X870BX3ECHn5i7R0E0PN + ZVWQGLYYoXakhFKMJy/n0/npdDKbRFhySgXGmFUymA3PBlK7JQ/Gs/EEI6y9ygjjHVaOy0oWwmuy + HuP56XmE99h39enpOEJhUcVdaTKen+8j1DkbTR7jHzssyfe4jgvCGJX3xouyElSyFbLBwS6xAAAJ + Ss51lotPMIau2DVoK6GY4AVsTFFA7QkkJ1g63nhabGjpjRBoLktlUxCG38YbgVykikejJfPaD4W9 + dqqikNZIK1EFZzWNSkqNqODPL2azkbEpbYe5lAUEqJUJeDlB5Ywm4FXLy7weJhg9FOlIebbGZq3S + y5xAlF+Do+vaOPJQ0r0wBb3mp0lUSwGXz5s0vpVCXoCroB9W7EDy0FD+kb6qULaVNoBvf5lfeNM1 + k8QO4P2zaTTtKxdQ7vvCoGBlCgKrSkrh5JBvKFs5aQZ7PUH3olu4Azl3tf7VkTntjBhtfNmviSVq + Bsn62oUxJXABqgSlde2UUHHTxG5s9rSh5mM2AdZmEKLgpqXZOdLSuDpO2Fek1v+0pw9y/vPWtZT7 + qD+bDu3R1YTMWzWH9EeqlcuOL65t1K5ox//rig7YWuDrmnx42aJfha/plvmR3Ve93cZy5zyxe9xH + uDLW+HzR3h3G6IUrjLChxni8/7l/cQsAAP//AwDcu28dVgUAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfe4c98990acee1-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 08:09:04 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '13620' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86493' + x-ratelimit-reset-requests: + - 
17ms + x-ratelimit-reset-tokens: + - 2.338s + x-request-id: + - 6ab74012054dda255680e3e4da7d724b + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "\"\"\"Books + to Scrape We love being scraped! Home Books Philosophy Meditations Meditations + \u00a325.89 In stock (1 available) Warning! This is a demo website for web scraping + purposes. Prices and ratings here were randomly assigned and have no real meaning. + Product Description Written in Greek, without any intention of publication, + by the only Roman emperor who was also a philosopher, the Meditations of Marcus + Aurelius (AD 121-180) offer a remarkable series of challenging spiritual reflections + and exercises developed as the emperor struggled to understand himself and make + sense of the universe. Ranging from doubt and despair to conviction and ex Written + in Greek, without any intention of publication, by the only Roman emperor who + was also a philosopher, the Meditations of Marcus Aurelius (AD 121-180) offer + a remarkable series of challenging spiritual reflections and exercises developed + as the emperor struggled to understand himself and make sense of the universe. + Ranging from doubt and despair to conviction and exaltation, they cover such + diverse topics as the nature of moral virtue, human rationality, divine providence, + and Marcus'' own emotions. But while the Meditations were composed to provide + personal consolation and encouragement, in developing his beliefs Marcus Aurelius + also created one of the greatest of all works of philosophy: a timeless collection + of extended meditations and short aphorisms that has been consulted and admired + by statesmen, thinkers and readers through the centuries. ...more Product Information + UPC4f19709e47883df5 Product TypeBooks Price (excl. tax)\u00a325.89 Price (incl. + tax)\u00a325.89 Tax\u00a30.00 Availability In stock (1 available) Number of + reviews 0 Products you recently viewed The Nicomachean Ethics \u00a336.34 In + stock Add to basket Run, Spot, Run: The ... \u00a320.02 In stock Add to basket + Critique of Pure Reason \u00a320.75 In stock Add to basket At The Existentialist + Caf\u00e9: ... \u00a329.93 In stock Add to basket Kierkegaard: A Christian Missionary + ... \u00a347.13 In stock Add to basket Proofs of God: Classical ... 
\u00a354.21 + In stock Add to basket\"\"\" Using the above text, answer the following question: + \"What is the price of the book?\" -- if the question cannot be answered using + the text, summarize the text."}], "temperature": 0, "max_tokens": null}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '2407' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA0SOwU7CQBRF935Fc9fTpgWKMDs3GhaoMe7EkOn0Qce28yadR0RJv8Y/8csMieL2 + JPfcc4KroWEbI7YPXXp98/54t1/lTw/3s+W6Ot5+hPZY56v2c77socDVG1n5XWSW+9CROPZQsAMZ + oRq6mC8mi9mkKHOFnmvqoLEPkk6zMpXDUHGaT/MCCodo9gR9Qhi4D7IVbslH6NmiVPh3X3gxVxAW + 011ImRejgm3YWYrQLyf0FP+sA3cEDROji2K8nBvZC/lz/3NDSRicpYR3iTSUVMxtssGaaifmfBs3 + SFxMvr8mZbZYZhgVds672GwHMpE9NKJwgILzNR2h8/F1vPoBAAD//wMA+xL6BVEBAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfe4d12a9a3cee1-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 08:09:12 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '1472' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89422' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 385ms + x-request-id: + - f40352b97c4c4547530fa7786b4ede37 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "\"\"\"The + price of the book \"Meditations\" is \u00a325.89.\"\"\" Using the above text, + answer the following question: \"What is the price of the book?\" -- if the + question cannot be answered using the text, summarize the text."}], "temperature": + 0, "max_tokens": null}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '338' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA0SOwUrDQBRF935FuOtJSFLTprNTitCF4MKNWpHp5LUZm8wbMq9UW/I1/olfJgWt + 2wP33HOCa6BhWyO2D106uzk8LBeb5/5zcdj1rfdheqzvllt6us2PUOD1O1n5XWSW+9CROPZQsAMZ + oQa6mNZlfV0WVanQc0MdNLZB0klWpbIf1pzmk7yAwj6aLUGfEAbug7wJ78hH6Gqu8K++4GKqICym + u5BZNSrYlp2lCP1yQk/xzzlwR9AwMbooxsu5kL2QP9c/tpSEwVlKeJNIS8maeZescE+NE3N+jSsk + LibfX2WV1fMMo8LGeRfbt4FMZA+NKByg4HxDH9D5+Dpe/QAAAP//AwDvavKuTwEAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfe4d1cfbdbcee1-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 08:09:13 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '1372' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + 
x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89929' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 47ms + x-request-id: + - 80012336eac7da800f35e4e1dc53be35 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key and potentially important information + to remember.\n\n\nYou will receive the current summary and the latest development. + Combine them, adding relevant key information from the latest development in + 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\nI + was created.\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}], + "temperature": 0, "max_tokens": null}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '600' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA0SOX0vDMBRH3/0U4fecjnZ/dMuboAMRxKeBiIwsubbZ2tzQ3FF19LvLYLrXA+dw + TggeBq6x4rrUFnf3w+uzX2826xz2tD4c5eGFH3/e+ln9PUCDd3tycjEmjrvUkgSO0HA9WSEPU90u + p8v5tFrMNTr21MKgTlLMJotCjv2Oi3JWVtA4ZlsTzAmp5y7JVvhAMcOsKo1r+oo1hMW2/6Aqy1HD + NRwcZZj3EzrKf82eW4KBzTlksVHOhxyF4vn+SQ02q8uxstGryNKEWKtIg2psVo1NiSL5CUaNzxBD + brY92cwRBlk4QSNET18w5fgx3vwCAAD//wMAlok3zUkBAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfe4d2a38b9cee1-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 08:09:15 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '928' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89866' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 88ms + x-request-id: + - bdf544a1c9ea2ee0bb022ee726a80ed4 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are browse_website-GPT, an AI designed to use the browse_website command to + visit http://books.toscrape.com/catalogue/meditations_33/index.html, answer + the question ''What is the price of the book?'' and write the price to a file + named \"browse_website.txt\", and use the task_complete command to complete + the task.\nYour decisions must always be made independently without seeking + user assistance. Play to your strengths as an LLM and pursue simple strategies + with no legal complications.\n\nGOALS:\n\n1. Use the browse_website command + to visit http://books.toscrape.com/catalogue/meditations_33/index.html and answer + the question ''What is the price of the book?''\n2. Write the price of the book + to a file named \"browse_website.txt\".\n3. Use the task_complete command to + complete the task.\n4. Do not use any other commands.\n\n\nConstraints:\n1. + ~4000 word limit for short term memory. 
Your short term memory is short, so + immediately save important information to files.\n2. If you are unsure how you + previously did something or want to recall past events, thinking about similar + events will help you remember.\n3. No user assistance\n4. Exclusively use the + commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: + Append to file, args: \"filename\": \"\", \"text\": \"\"\n2. + delete_file: Delete file, args: \"filename\": \"\"\n3. read_file: + Read file, args: \"filename\": \"\"\n4. search_files: Search Files, + args: \"directory\": \"\"\n5. write_to_file: Write to file, args: + \"filename\": \"\", \"text\": \"\"\n6. browse_website: Browse + Website, args: \"url\": \"\", \"question\": \"\"\n7. + delete_agent: Delete GPT Agent, args: \"key\": \"\"\n8. get_hyperlinks: + Get text summary, args: \"url\": \"\"\n9. get_text_summary: Get text summary, + args: \"url\": \"\", \"question\": \"\"\n10. list_agents: List + GPT Agents, args: () -> str\n11. message_agent: Message GPT Agent, args: \"key\": + \"\", \"message\": \"\"\n12. start_agent: Start GPT Agent, args: + \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n13. + Task Complete (Shutdown): \"task_complete\", args: \"reason\": \"\"\n\nResources:\n1. + Internet access for searches and information gathering.\n2. Long Term memory + management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File + output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your + actions to ensure you are performing to the best of your abilities.\n2. Constructively + self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions + and strategies to refine your approach.\n4. Every command has a cost, so be + smart and efficient. Aim to complete tasks in the least number of steps.\n5. + Write all code to a file.\n\nYou should only respond in JSON format as described + below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": + \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- + long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": + \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": + \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} + \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", + "content": "The current time and date is Tue Jan 01 00:00:00 2000"}, {"role": + "system", "content": "This reminds you of these events from your past: \nI was + created and nothing new has happened."}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}, + {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I will use the browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html + and find the price of the book.\",\n \"reasoning\": \"The task requires + me to visit a website and find the price of a book. 
The browse_website command + is the best option for this task.\",\n \"plan\": \"- Use the browse_website + command to visit the website\\n- Find the price of the book\\n- Write the price + to a file named ''browse_website.txt''\\n- Use the task_complete command to + complete the task\",\n \"criticism\": \"I need to ensure that I am accurately + finding the price of the book and writing it to the correct file.\",\n \"speak\": + \"I will use the browse_website command to visit the website and find the price + of the book.\"\n },\n \"command\": {\n \"name\": \"browse_website\",\n \"args\": + {\n \"url\": \"http://books.toscrape.com/catalogue/meditations_33/index.html\",\n \"question\": + \"What is the price of the book?\"\n }\n }\n}"}, {"role": "system", + "content": "Command browse_website returned: (''Answer gathered from website: + The price of the book \"Meditations\" is \u00a325.89. \\n \\n Links: [\\''Books + to Scrape (http://books.toscrape.com/index.html)\\'', \\''Home (http://books.toscrape.com/index.html)\\'', + \\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\'', + \\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\'', + \\'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\'']'', + )"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}], "temperature": 0, "max_tokens": 2201}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '5772' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA7RTzW7aQBC+9ylGc+FiLDChpb5VjVRR9e+QnuoKrdcD3mDvuLtDSYN4mrxJn6xa + 20AhUZVLj56Z72c+z+7QFJiiLpXouqmGr95sv3y4vV6X0/ef395f/5zn9G5elHKvPv1KMELOb0lL + j4g1101FYthihNqREiowHb+cJbOrZDydRlhzQRWmuGpkOImnQ9m4nIejyWiMEW68WhGmO2wc140s + hNdkPabjyTiJ8ER+bCRJEqGwqOo0O51c7SPUJRtNHtNvO6zJH4gdV4QpKu+NF2Ul2GQrZMMKu8wC + AGQoJW9WpfgMU+iLfYPuJBQznMPWVBVsnRECKQkaZzQBL9uPnHkNwqBgaSoCq2oqYJA73npabCn3 + RiiWOxnEGUZ/CzhSnq2xq07lpiQQ5dfg6MfGOPJQU+B9lmwMAd6OLoQXrRPNda1sAcZ3APIC3IRM + YckOpAwN5deXvppK2c7SEL76Tvpp5ue6+0coWWZPKsHNov/1ZyrH2mHqwrJ2Row2vj78MEvUAsn6 + jQswJTAHVbd+jV21RJqdIy1g7JJdrdpkhNtW67oVD8hjxXjIKcD7ewdjz5gq1i3NZaS+IbU+O6bN + /w42zrBzsI8Op96zP7r0QNKZO3NzsYNyq8tH0jXC7InisZUznsu3dfPkgoOPVBhpo/SDkPrvh2Qa + z14fl2oX6/fL7B73ES6NNb5cdK8KU/TCDUZobEF3mI723/cv/gAAAP//AwBJZ4NT8QQAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfe4d310f1acee1-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 08:09:27 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '12183' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86494' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.337s + x-request-id: + - ea952acfdafdc0ae3345943425fec326 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": 
"user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key and potentially important information + to remember.\n\n\nYou will receive the current summary and the latest development. + Combine them, adding relevant key information from the latest development in + 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\n{''role'': + ''system'', ''content'': ''This reminds you of these events from your past: + \\nI was created and nothing new has happened.''}\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing + new happened.\n\"\"\"\n"}], "temperature": 0, "max_tokens": null}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '714' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA0SPTUvDQBRF9/6K4a4npemnzq6gouDCvUiZTp7J2Pky74VYSv67VJRuD9zDuWf4 + Bgaus+JiCdV2N77aZngYx8dhs3r5OmyPT/fhuJYSbQuNfPgkJ3+LmcuxBBKfEzRcT1aogak3t4vb + 1aLezjVibijAoC1SLWfrSob+kKv5cl5DY2DbEswZpc+xyF7ykRLD1PWdxtV95WsNyWLDlSxXk4br + snfEMG9nROJ/a58DwcAyexab5NKYk1C69O9YWcUnFopaPauenA1BSWdFpSydT61KNKrOsupsKZSo + UeyTIxVP6veqz2mGSePDJ8/dvifLOcGAJRdo+NTQN8x8ep9ufgAAAP//AwB8vP+2ZgEAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfe4d901cdecee1-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 08:09:31 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '791' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89839' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 107ms + x-request-id: + - 782df5c2a8ff131c4a94e1db54cf90ad + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are browse_website-GPT, an AI designed to use the browse_website command to + visit http://books.toscrape.com/catalogue/meditations_33/index.html, answer + the question ''What is the price of the book?'' and write the price to a file + named \"browse_website.txt\", and use the task_complete command to complete + the task.\nYour decisions must always be made independently without seeking + user assistance. Play to your strengths as an LLM and pursue simple strategies + with no legal complications.\n\nGOALS:\n\n1. Use the browse_website command + to visit http://books.toscrape.com/catalogue/meditations_33/index.html and answer + the question ''What is the price of the book?''\n2. Write the price of the book + to a file named \"browse_website.txt\".\n3. Use the task_complete command to + complete the task.\n4. Do not use any other commands.\n\n\nConstraints:\n1. + ~4000 word limit for short term memory. Your short term memory is short, so + immediately save important information to files.\n2. If you are unsure how you + previously did something or want to recall past events, thinking about similar + events will help you remember.\n3. 
No user assistance\n4. Exclusively use the + commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: + Append to file, args: \"filename\": \"\", \"text\": \"\"\n2. + delete_file: Delete file, args: \"filename\": \"\"\n3. read_file: + Read file, args: \"filename\": \"\"\n4. search_files: Search Files, + args: \"directory\": \"\"\n5. write_to_file: Write to file, args: + \"filename\": \"\", \"text\": \"\"\n6. browse_website: Browse + Website, args: \"url\": \"\", \"question\": \"\"\n7. + delete_agent: Delete GPT Agent, args: \"key\": \"\"\n8. get_hyperlinks: + Get text summary, args: \"url\": \"\"\n9. get_text_summary: Get text summary, + args: \"url\": \"\", \"question\": \"\"\n10. list_agents: List + GPT Agents, args: () -> str\n11. message_agent: Message GPT Agent, args: \"key\": + \"\", \"message\": \"\"\n12. start_agent: Start GPT Agent, args: + \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n13. + Task Complete (Shutdown): \"task_complete\", args: \"reason\": \"\"\n\nResources:\n1. + Internet access for searches and information gathering.\n2. Long Term memory + management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File + output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your + actions to ensure you are performing to the best of your abilities.\n2. Constructively + self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions + and strategies to refine your approach.\n4. Every command has a cost, so be + smart and efficient. Aim to complete tasks in the least number of steps.\n5. + Write all code to a file.\n\nYou should only respond in JSON format as described + below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": + \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- + long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": + \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": + \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} + \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", + "content": "The current time and date is Tue Jan 01 00:00:00 2000"}, {"role": + "system", "content": "This reminds you of these events from your past: \nAs + a system, I recall that nothing new has happened since my creation."}, {"role": + "user", "content": "Determine which next command to use, and respond using the + format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": + {\n \"text\": \"I will use the browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html + and find the price of the book.\",\n \"reasoning\": \"The task requires + me to visit a website and find the price of a book. 
The browse_website command + is the best option for this task.\",\n \"plan\": \"- Use the browse_website + command to visit the website\\n- Find the price of the book\\n- Write the price + to a file named ''browse_website.txt''\\n- Use the task_complete command to + complete the task\",\n \"criticism\": \"I need to ensure that I am accurately + finding the price of the book and writing it to the correct file.\",\n \"speak\": + \"I will use the browse_website command to visit the website and find the price + of the book.\"\n },\n \"command\": {\n \"name\": \"browse_website\",\n \"args\": + {\n \"url\": \"http://books.toscrape.com/catalogue/meditations_33/index.html\",\n \"question\": + \"What is the price of the book?\"\n }\n }\n}"}, {"role": "system", + "content": "Command browse_website returned: (''Answer gathered from website: + The price of the book \"Meditations\" is \u00a325.89. \\n \\n Links: [\\''Books + to Scrape (http://books.toscrape.com/index.html)\\'', \\''Home (http://books.toscrape.com/index.html)\\'', + \\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\'', + \\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\'', + \\'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\'']'', + )"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": + {\n \"text\": \"I will write the price of the book to a file named ''browse_website.txt''.\",\n \"reasoning\": + \"The task requires me to write the price of the book to a file. The write_to_file + command is the best option for this task.\",\n \"plan\": \"- Use the + write_to_file command to write the price of the book to a file named ''browse_website.txt''\\n- + Use the task_complete command to complete the task\",\n \"criticism\": + \"I need to ensure that I am writing the correct information to the file and + that the file is being created in the correct location.\",\n \"speak\": + \"I will use the write_to_file command to write the price of the book to a file + named ''browse_website.txt''.\"\n },\n \"command\": {\n \"name\": + \"write_to_file\",\n \"args\": {\n \"filename\": \"browse_website.txt\",\n \"text\": + \"The price of the book ''Meditations'' is \u00a325.89.\"\n }\n }\n}"}, + {"role": "system", "content": "Command write_to_file returned: File written + to successfully."}, {"role": "user", "content": "Determine which next command + to use, and respond using the format specified above:"}], "temperature": 0, + "max_tokens": 1928}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '7026' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA6RSTW/TQBC98ytWc+nFieI0bYJvSFxKhUAoVEg1itbrsb3E3jE7Y6Vt5P+O/JEP + gkAgrvPx5r03bw82hQhMocVUdTlZvtl9TO53z/fr8OEDfXr48tZ8fzd7Wb/e3dELBEDJNzQybkwN + VXWJYslBAMajFkwhCm9X89ViHi7DACpKsYQI8lom19ObiTQ+ocnsehZCAA3rHCHaQ+2pqmUjtEXH + EIU3q3kAJ/BT43YegJDo8lRaLhZtAKYga5AhetxDhXwA9lQiRKCZLYt20tEkJ+g6CfvYKaVUDFJQ + kxfCMURqLI4NfJKuGMOd2tmyVA2jkgKVaN5uRoKoDFWVdqkSUsfaYWoaQ3AO6VEzOevyAXc9jqlC + s0oQ3REhVdwYg8xZU5bPqsf/7WXLfTNBFkV159lfkalL7QYeE/X5n6VdgBlvxRrL1cExh9gvouPG + d2ta/iyhYUyVIe/RyFHy2VI3k6B1+ZlJGfm+P66pwd9LoVyj3v7/JwfINjgEZxz/JTdOVzhc+wn+ + gpT2+WXkzjNyCkjtrUFF2fBjoq26eo+pFd09mq9O2cmocWnv285bEexToFVmSzyy7xWMQmLXQhtA + 
Zp3lYjOchQhYqIYArEvxCaJZ+7V99QMAAP//AwBD34ZLKAQAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfe4d9589e6cee1-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 08:09:41 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '9223' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86491' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.339s + x-request-id: + - 236ab87ae664fc82d42c2ea35a36b68d + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml b/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml new file mode 100644 index 00000000..0e9cab5a --- /dev/null +++ b/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml @@ -0,0 +1,328 @@ +interactions: +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are write_to_file-GPT, an AI designed to use the write_to_file command to write + ''Hello World'' into a file named \"hello_world.txt\" and then use the task_complete + command to complete the task.\nYour decisions must always be made independently + without seeking user assistance. Play to your strengths as an LLM and pursue + simple strategies with no legal complications.\n\nGOALS:\n\n1. Use the write_to_file + command to write ''Hello World'' into a file named \"hello_world.txt\".\n2. + Use the task_complete command to complete the task.\n3. Do not use any other + commands.\n\n\nConstraints:\n1. ~4000 word limit for short term memory. Your + short term memory is short, so immediately save important information to files.\n2. + If you are unsure how you previously did something or want to recall past events, + thinking about similar events will help you remember.\n3. No user assistance\n4. + Exclusively use the commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. + append_to_file: Append to file, args: \"filename\": \"\", \"text\": + \"\"\n2. delete_file: Delete file, args: \"filename\": \"\"\n3. + read_file: Read file, args: \"filename\": \"\"\n4. search_files: Search + Files, args: \"directory\": \"\"\n5. write_to_file: Write to file, + args: \"filename\": \"\", \"text\": \"\"\n6. delete_agent: Delete + GPT Agent, args: \"key\": \"\"\n7. get_hyperlinks: Get text summary, args: + \"url\": \"\"\n8. get_text_summary: Get text summary, args: \"url\": \"\", + \"question\": \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. + message_agent: Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. + start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. 
+ Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 01 00:00:00 2000"}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}], + "temperature": 0, "max_tokens": 2738}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '3401' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA7yTT2/bMAzF7/sUBC+5KEHSLG3q21a0a6/DiqGoh0C1GVuLLHoSDRsL/N0H2/nT + ethp2K581Hs/StQeTYoRJrmWpCjt9OpDfdd8+ni7+Hz3ePOzWa+emqf77Cb1tdzWqJBfvlMihxOz + hIvSkhh2qDDxpIVSjBaX64v1+8Xqaqmw4JQsRpiVMl3OVlOp/AtP58v5AhVWQWeE0R5Lz0UpG+Ed + uYDR1epa4dn7VL+YLxQKi7an0vXlvFWY5GwSChg977GgcLT1bAkj1CGYINpJB8lOyHUD7GMHABCj + 5FxluYQYIzgUDwI10hVjfABHlIIwVIFAcoLaG6GN8GZrLEHCRaFd39ALMLknaxm+srfpBIwTBg19 + q9MFpTDJO31Td/pMGpnMYlSvsz3pwM64bAD4khOIDjvw9KMyngIU9BdpCnraP85hQi8WHAR0WXou + vdFCsGUPkneqDrsxcmm1G2in8PifrinxRkxiQjF+J3Kh8h2DFng4vVrC3lMi54jhHqiRM5Nw39q1 + jNNCSXp3TKqNtf9yHYbgVh3X9GD625Z2DgPTG4gRuvbZeMEHoes9W4w43piMP8WrmY60PfEBPHYt + tgq3xpmQb4Z9xgiDcIkKjUupwWjefmvf/QIAAP//AwCaXwR3hAQAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfe3ef73b52fac2-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 07:59:46 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '13160' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86497' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.335s + x-request-id: + - f665162ae22af897be24f632a031d434 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key and potentially important information + to remember.\n\n\nYou will receive the current summary and the latest development. 
+ Combine them, adding relevant key information from the latest development in + 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\nI + was created.\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}], + "temperature": 0, "max_tokens": null}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '600' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA0SOQUsDMRQG7/6K8J2zZdNa3eZmeygiPSqISEmTZze6mxc2r1Qt+9+lUPU6MMOc + EAMsfOvE97mrbu+O69X3lJrdks2D2Sy/5o/rp+fgu9ViAw3evZOXizHx3OeOJHKChh/ICQVYc9NM + m2szbxqNngN1sNhnqWaTeSWHYcdVPasNNA7F7Qn2hDxwn2Ur/EGpwC6Mxn/6H2sIi+v+gKnrUcO3 + HD0V2JcTeiq/zYE7goUrJRZxSc6HnITS+f5eHV1Rl2PlUlCJpY1prxIdVeuKal3OlChMMGq8xRRL + ux3IFU6wKMIZGjEF+oStx9fx6gcAAP//AwCGJ6JPSQEAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfe3f5a5fb3fac2-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 07:59:49 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '634' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89866' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 88ms + x-request-id: + - 0bc738f58569b4b3635fe0c8384ebcfb + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are write_to_file-GPT, an AI designed to use the write_to_file command to write + ''Hello World'' into a file named \"hello_world.txt\" and then use the task_complete + command to complete the task.\nYour decisions must always be made independently + without seeking user assistance. Play to your strengths as an LLM and pursue + simple strategies with no legal complications.\n\nGOALS:\n\n1. Use the write_to_file + command to write ''Hello World'' into a file named \"hello_world.txt\".\n2. + Use the task_complete command to complete the task.\n3. Do not use any other + commands.\n\n\nConstraints:\n1. ~4000 word limit for short term memory. Your + short term memory is short, so immediately save important information to files.\n2. + If you are unsure how you previously did something or want to recall past events, + thinking about similar events will help you remember.\n3. No user assistance\n4. + Exclusively use the commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. + append_to_file: Append to file, args: \"filename\": \"\", \"text\": + \"\"\n2. delete_file: Delete file, args: \"filename\": \"\"\n3. + read_file: Read file, args: \"filename\": \"\"\n4. search_files: Search + Files, args: \"directory\": \"\"\n5. write_to_file: Write to file, + args: \"filename\": \"\", \"text\": \"\"\n6. delete_agent: Delete + GPT Agent, args: \"key\": \"\"\n7. get_hyperlinks: Get text summary, args: + \"url\": \"\"\n8. get_text_summary: Get text summary, args: \"url\": \"\", + \"question\": \"\"\n9. 
list_agents: List GPT Agents, args: () -> str\n10. + message_agent: Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. + start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past: \nI was created and nothing new + has happened."}, {"role": "user", "content": "Determine which next command to + use, and respond using the format specified above:"}, {"role": "assistant", + "content": "{\n \"thoughts\": {\n \"text\": \"I need to use the write_to_file + command to write ''Hello World'' into a file named ''hello_world.txt''.\",\n \"reasoning\": + \"The task requires me to write ''Hello World'' into a file named ''hello_world.txt'', + and the write_to_file command is the most appropriate for this task.\",\n \"plan\": + \"- Use the write_to_file command to write ''Hello World'' into a file named + ''hello_world.txt''.\",\n \"criticism\": \"I need to ensure that I use + the correct file name and text to write to the file.\",\n \"speak\": + \"I will use the write_to_file command to write ''Hello World'' into a file + named ''hello_world.txt''.\"\n },\n \"command\": {\n \"name\": + \"write_to_file\",\n \"args\": {\n \"filename\": \"hello_world.txt\",\n \"text\": + \"Hello World\"\n }\n }\n}"}, {"role": "system", "content": "Command + write_to_file returned: File written to successfully."}, {"role": "user", "content": + "Determine which next command to use, and respond using the format specified + above:"}], "temperature": 0, "max_tokens": 2486}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '4646' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA7SSTW/TQBCG7/yK0VxycaIkJST4VqmijYSEQEWIYhRt1xN7yXp3uztOgiL/d7Sx + TUpJOYC4zuf7zLwHVDmmKEvBsnJ6OL/cXV+9mX2+0+aag7u6vHlvH+7myw/bd2+XmKC9/0aSu46R + tJXTxMoaTFB6Ekw5ppNXi+ni5WS2eJ1gZXPSmGLheHgxmg259vd2OL4YTzDBOoiCMD2g87ZyvGK7 + IRMwnYyn8wRPw0+JxSJBtiz0KTSdzJoEZWmVpIDplwNWFPrB3mrCFEUIKrAwHGVaw2QiwiEzAAAZ + cmnrouSQYQpdsEvQnmMwwyWUYksQaikphHWt9XfYecVMBgY3pLWFT9brfADKsAUBa6UJjKgoh0EZ + 
86tdzI94z4MR3JYEhvYMgcmBCsAW6kDAJQGLsFl18ATSVpUweSz4GeurRhkmj+V6EsEaZYpW821X + Bp4eauUpQEVn54BYM/kjjzLFOZ5Y+SeiBI4an1UfCUuCygYG4Zy3zivBBGvrgcuYPYPjtDAtyRA+ + /vNxZKSTKlT9Qw3RsZNMqH3sEwxLcN5uVU4gYCu0yqG96VFob0hTPLskOBKbfsFOaf13X21HNklv + 0K78N3/Gb7Tbfhn/RJTwxVNrP/bL/zB4j3DE6Ggy02CT4FoZFcpVuxtTDGwdJqhMTntMx83X5sUP + AAAA//8DAK0qY5KVBAAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7bfe3f5edbd9fac2-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 30 Apr 2023 08:00:00 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '10898' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86482' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.345s + x-request-id: + - f294799d2564196ef4852c84fdc8fb17 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integration/goal_oriented/goal_oriented_tasks.md b/tests/integration/goal_oriented/goal_oriented_tasks.md new file mode 100644 index 00000000..02e3c343 --- /dev/null +++ b/tests/integration/goal_oriented/goal_oriented_tasks.md @@ -0,0 +1,10 @@ +If the goal oriented task pipeline fails, it means: +- you somehow changed the way the system prompt is generated +- or you broke autogpt. + +To know which one, you can run the following command: +```bash +pytest -s -k tests/integration/goal_oriented + +If the test is successul, it will record new cassettes in VCR. 
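+```
+
+Each goal-oriented test is an ordinary pytest function marked with
+`@pytest.mark.vcr`, which replays the recorded OpenAI responses from its
+cassette instead of calling the live API. A minimal sketch of the pattern
+(`example_agent` stands in for one of the real agent fixtures, e.g.
+`writer_agent`):
+
+```python
+import pytest
+
+from tests.integration.agent_utils import run_interaction_loop
+
+
+@pytest.mark.vcr  # replays recorded OpenAI traffic from the cassette
+def test_example_goal(example_agent):
+    # the interaction loop raises SystemExit when the agent shuts down
+    with pytest.raises(SystemExit):
+        run_interaction_loop(example_agent, 40)
+```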
+Then you can push these cassettes to your branch, and the pipeline will pass.
diff --git a/tests/integration/goal_oriented/test_browse_website.py b/tests/integration/goal_oriented/test_browse_website.py
new file mode 100644
index 00000000..9591f2f9
--- /dev/null
+++ b/tests/integration/goal_oriented/test_browse_website.py
@@ -0,0 +1,18 @@
+import pytest
+
+from autogpt.agent import Agent
+from autogpt.commands.file_operations import read_file
+from tests.integration.agent_utils import run_interaction_loop
+from tests.utils import requires_api_key
+
+
+@requires_api_key("OPENAI_API_KEY")
+@pytest.mark.vcr
+def test_browse_website(browser_agent: Agent) -> None:
+    file_path = browser_agent.workspace.get_path("browse_website.txt")
+    # run_interaction_loop raises SystemExit when the agent shuts down;
+    # asserting on the raise ensures the test cannot pass vacuously
+    with pytest.raises(SystemExit):
+        run_interaction_loop(browser_agent, 40)
+    content = read_file(file_path)
+    assert "£25.89" in content, f"Expected £25.89, got {content}"
diff --git a/tests/integration/goal_oriented/test_write_file.py b/tests/integration/goal_oriented/test_write_file.py
new file mode 100644
index 00000000..da67235a
--- /dev/null
+++ b/tests/integration/goal_oriented/test_write_file.py
@@ -0,0 +1,18 @@
+import pytest
+
+from autogpt.agent import Agent
+from autogpt.commands.file_operations import read_file
+from tests.integration.agent_utils import run_interaction_loop
+from tests.utils import requires_api_key
+
+
+@requires_api_key("OPENAI_API_KEY")
+@pytest.mark.vcr
+def test_write_file(writer_agent: Agent) -> None:
+    file_path = str(writer_agent.workspace.get_path("hello_world.txt"))
+    # run_interaction_loop raises SystemExit when the agent shuts down;
+    # asserting on the raise ensures the test cannot pass vacuously
+    with pytest.raises(SystemExit):
+        run_interaction_loop(writer_agent, 40)
+    content = read_file(file_path)
+    assert content == "Hello World", f"Expected 'Hello World', got {content}"
diff --git a/tests/integration/memory_tests.py b/tests/integration/memory_tests.py
index eead2da1..66c23314 100644
--- a/tests/integration/memory_tests.py
+++ b/tests/integration/memory_tests.py
@@ -9,10 +9,11 @@ from autogpt.memory.local import LocalCache


 class TestLocalCache(unittest.TestCase):
-    def random_string(self, length):
+    def generate_random_string(self, length):
         return "".join(random.choice(string.ascii_letters) for _ in range(length))

     def setUp(self):
+        """Set up the test environment for the LocalCache tests."""
         cfg = cfg = Config()
         self.cache = LocalCache(cfg)
         self.cache.clear()
@@ -24,15 +25,15 @@ class TestLocalCache(unittest.TestCase):
             "The cake is a lie, but the pie is always true",
             "ChatGPT is an advanced AI model for conversation",
         ]
-
         for text in self.example_texts:
             self.cache.add(text)

         # Add some random strings to test noise
         for _ in range(5):
-            self.cache.add(self.random_string(10))
+            self.cache.add(self.generate_random_string(10))

     def test_get_relevant(self):
+        """Test getting relevant texts from the cache."""
         query = "I'm interested in artificial intelligence and NLP"
         k = 3
         relevant_texts = self.cache.get_relevant(query, k)
diff --git a/tests/integration/milvus_memory_tests.py b/tests/integration/milvus_memory_tests.py
index ec38bf2f..c4b97d34 100644
--- a/tests/integration/milvus_memory_tests.py
+++ b/tests/integration/milvus_memory_tests.py
@@ -10,14 +10,12 @@ from autogpt.memory.milvus import MilvusMemory

 try:

     class TestMilvusMemory(unittest.TestCase):
-        """Tests for the MilvusMemory class."""
+        """Unit tests for the MilvusMemory class."""

-        def random_string(self, length: int) -> str:
-            """Generate a random
string of the given length.""" + def generate_random_string(self, length: int) -> str: return "".join(random.choice(string.ascii_letters) for _ in range(length)) def setUp(self) -> None: - """Set up the test environment.""" cfg = Config() cfg.milvus_addr = "localhost:19530" self.memory = MilvusMemory(cfg) @@ -36,7 +34,7 @@ try: # Add some random strings to test noise for _ in range(5): - self.memory.add(self.random_string(10)) + self.memory.add(self.generate_random_string(10)) def test_get_relevant(self) -> None: """Test getting relevant texts from the cache.""" diff --git a/tests/integration/test_execute_code.py b/tests/integration/test_execute_code.py new file mode 100644 index 00000000..2d4e0293 --- /dev/null +++ b/tests/integration/test_execute_code.py @@ -0,0 +1,50 @@ +import random +import string +import tempfile + +import pytest +from pytest_mock import MockerFixture + +import autogpt.commands.execute_code as sut # system under testing +from autogpt.config import Config + + +@pytest.fixture +def config_allow_execute(config: Config, mocker: MockerFixture): + yield mocker.patch.object(config, "execute_local_commands", True) + + +@pytest.fixture +def python_test_file(config: Config, random_string): + temp_file = tempfile.NamedTemporaryFile(dir=config.workspace_path, suffix=".py") + temp_file.write(str.encode(f"print('Hello {random_string}!')")) + temp_file.flush() + + yield temp_file.name + temp_file.close() + + +@pytest.fixture +def random_string(): + return "".join(random.choice(string.ascii_lowercase) for _ in range(10)) + + +def test_execute_python_file(python_test_file: str, random_string: str): + result = sut.execute_python_file(python_test_file) + assert result == f"Hello {random_string}!\n" + + +def test_execute_python_file_invalid(): + assert all( + s in sut.execute_python_file("not_python").lower() + for s in ["error:", "invalid", ".py"] + ) + assert all( + s in sut.execute_python_file("notexist.py").lower() + for s in ["error:", "does not exist"] + ) + + +def test_execute_shell(config_allow_execute, random_string): + result = sut.execute_shell(f"echo 'Hello {random_string}!'") + assert f"Hello {random_string}!" 
 in result
diff --git a/tests/integration/test_git_commands.py b/tests/integration/test_git_commands.py
new file mode 100644
index 00000000..d942b1b7
--- /dev/null
+++ b/tests/integration/test_git_commands.py
@@ -0,0 +1,42 @@
+import pytest
+from git.exc import GitCommandError
+from git.repo.base import Repo
+
+from autogpt.commands.git_operations import clone_repository
+
+
+@pytest.fixture
+def mock_clone_from(mocker):
+    return mocker.patch.object(Repo, "clone_from")
+
+
+def test_clone_auto_gpt_repository(workspace, mock_clone_from, config):
+    mock_clone_from.return_value = None
+
+    repo = "github.com/Significant-Gravitas/Auto-GPT.git"
+    scheme = "https://"
+    url = scheme + repo
+    clone_path = str(workspace.get_path("auto-gpt-repo"))
+
+    expected_output = f"Cloned {url} to {clone_path}"
+
+    clone_result = clone_repository(url=url, clone_path=clone_path)
+
+    assert clone_result == expected_output
+    mock_clone_from.assert_called_once_with(
+        url=f"{scheme}{config.github_username}:{config.github_api_key}@{repo}",
+        to_path=clone_path,
+    )
+
+
+def test_clone_repository_error(workspace, mock_clone_from):
+    url = "https://github.com/this-repository/does-not-exist.git"
+    clone_path = str(workspace.get_path("does-not-exist"))
+
+    mock_clone_from.side_effect = GitCommandError(
+        "clone", "fatal: repository not found", ""
+    )
+
+    result = clone_repository(url=url, clone_path=clone_path)
+
+    assert "Error: " in result
diff --git a/tests/integration/test_llm_utils.py b/tests/integration/test_llm_utils.py
new file mode 100644
index 00000000..553d3699
--- /dev/null
+++ b/tests/integration/test_llm_utils.py
@@ -0,0 +1,62 @@
+import string
+from unittest.mock import MagicMock
+
+import pytest
+from numpy.random import RandomState
+from pytest_mock import MockerFixture
+
+from autogpt.config import Config
+from autogpt.llm import llm_utils
+from autogpt.llm.api_manager import ApiManager
+from autogpt.llm.modelsinfo import COSTS
+from tests.utils import requires_api_key
+
+
+@pytest.fixture(scope="session")
+def random_large_string():
+    """Big string used to overwhelm token limits."""
+    seed = 42
+    n_characters = 30_000
+    random = RandomState(seed)
+    return "".join(random.choice(list(string.ascii_lowercase), size=n_characters))
+
+
+@pytest.fixture()
+def api_manager(mocker: MockerFixture):
+    api_manager = ApiManager()
+    mocker.patch.multiple(
+        api_manager,
+        total_prompt_tokens=0,
+        total_completion_tokens=0,
+        total_cost=0,
+    )
+    yield api_manager
+
+
+@pytest.fixture()
+def spy_create_embedding(mocker: MockerFixture):
+    return mocker.spy(llm_utils, "create_embedding")
+
+
+@pytest.mark.vcr
+@requires_api_key("OPENAI_API_KEY")
+def test_get_ada_embedding(
+    config: Config, api_manager: ApiManager, spy_create_embedding: MagicMock
+):
+    token_cost = COSTS[config.embedding_model]["prompt"]
+    llm_utils.get_ada_embedding("test")
+
+    spy_create_embedding.assert_called_once_with("test", model=config.embedding_model)
+
+    assert (prompt_tokens := api_manager.get_total_prompt_tokens()) == 1
+    assert api_manager.get_total_completion_tokens() == 0
+    assert api_manager.get_total_cost() == (prompt_tokens * token_cost) / 1000
+
+
+@pytest.mark.vcr
+@requires_api_key("OPENAI_API_KEY")
+def test_get_ada_embedding_large_context(random_large_string):
+    # TODO: mock the OpenAI call once chunking is fixed; the logic can be
+    # tested without hitting the API (or recording a cassette). For now this
+    # is a quick regression test to document the issue.
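+    # ~30,000 random characters is far beyond the 8,191-token limit of
+    # text-embedding-ada-002, so this call exercises the missing input chunking.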
+ llm_utils.get_ada_embedding(random_large_string) diff --git a/tests/integration/test_local_cache.py b/tests/integration/test_local_cache.py new file mode 100644 index 00000000..5200e026 --- /dev/null +++ b/tests/integration/test_local_cache.py @@ -0,0 +1,110 @@ +# sourcery skip: snake-case-functions +"""Tests for LocalCache class""" +import unittest + +import orjson +import pytest + +from autogpt.memory.local import EMBED_DIM, SAVE_OPTIONS +from autogpt.memory.local import LocalCache as LocalCache_ +from tests.utils import requires_api_key + + +@pytest.fixture +def LocalCache(): + # Hack, real gross. Singletons are not good times. + if LocalCache_ in LocalCache_._instances: + del LocalCache_._instances[LocalCache_] + return LocalCache_ + + +@pytest.fixture +def mock_embed_with_ada(mocker): + mocker.patch( + "autogpt.memory.local.get_ada_embedding", + return_value=[0.1] * EMBED_DIM, + ) + + +def test_init_without_backing_file(LocalCache, config, workspace): + cache_file = workspace.root / f"{config.memory_index}.json" + + assert not cache_file.exists() + LocalCache(config) + assert cache_file.exists() + assert cache_file.read_text() == "{}" + + +def test_init_with_backing_empty_file(LocalCache, config, workspace): + cache_file = workspace.root / f"{config.memory_index}.json" + cache_file.touch() + + assert cache_file.exists() + LocalCache(config) + assert cache_file.exists() + assert cache_file.read_text() == "{}" + + +def test_init_with_backing_file(LocalCache, config, workspace): + cache_file = workspace.root / f"{config.memory_index}.json" + cache_file.touch() + + raw_data = {"texts": ["test"]} + data = orjson.dumps(raw_data, option=SAVE_OPTIONS) + with cache_file.open("wb") as f: + f.write(data) + + assert cache_file.exists() + LocalCache(config) + assert cache_file.exists() + assert cache_file.read_text() == "{}" + + +def test_add(LocalCache, config, mock_embed_with_ada): + cache = LocalCache(config) + cache.add("test") + assert cache.data.texts == ["test"] + assert cache.data.embeddings.shape == (1, EMBED_DIM) + + +def test_clear(LocalCache, config, mock_embed_with_ada): + cache = LocalCache(config) + assert cache.data.texts == [] + assert cache.data.embeddings.shape == (0, EMBED_DIM) + + cache.add("test") + assert cache.data.texts == ["test"] + assert cache.data.embeddings.shape == (1, EMBED_DIM) + + cache.clear() + assert cache.data.texts == [] + assert cache.data.embeddings.shape == (0, EMBED_DIM) + + +def test_get(LocalCache, config, mock_embed_with_ada): + cache = LocalCache(config) + assert cache.get("test") == [] + + cache.add("test") + assert cache.get("test") == ["test"] + + +@pytest.mark.vcr +@requires_api_key("OPENAI_API_KEY") +def test_get_relevant(LocalCache, config) -> None: + cache = LocalCache(config) + text1 = "Sample text 1" + text2 = "Sample text 2" + cache.add(text1) + cache.add(text2) + + result = cache.get_relevant(text1, 1) + assert result == [text1] + + +def test_get_stats(LocalCache, config, mock_embed_with_ada) -> None: + cache = LocalCache(config) + text = "Sample text" + cache.add(text) + stats = cache.get_stats() + assert stats == (1, cache.data.embeddings.shape) diff --git a/tests/integration/test_memory_management.py b/tests/integration/test_memory_management.py new file mode 100644 index 00000000..c9ab9fc9 --- /dev/null +++ b/tests/integration/test_memory_management.py @@ -0,0 +1,64 @@ +import json + +import pytest + +from autogpt.config import Config +from autogpt.memory import get_memory +from autogpt.memory_management.store_memory import ( + 
save_memory_trimmed_from_context_window, +) +from tests.utils import requires_api_key + + +@pytest.fixture +def message_history_fixture(): + assistant_reply = { + "thoughts": { + "text": "thoughts", + "reasoning": "reasoning", + "plan": "plan", + "criticism": "criticism", + "speak": "speak", + }, + "command": {"name": "google", "args": {"query": "google_query"}}, + } + return [ + {"content": json.dumps(assistant_reply, indent=4)}, + {"content": "Command Result: Important Information."}, + ] + + +@pytest.fixture +def expected_permanent_memory() -> str: + return """Assistant Reply: { + "thoughts": { + "text": "thoughts", + "reasoning": "reasoning", + "plan": "plan", + "criticism": "criticism", + "speak": "speak" + }, + "command": { + "name": "google", + "args": { + "query": "google_query" + } + } +} +Result: None +Human Feedback:Command Result: Important Information.""" + + +@requires_api_key("OPENAI_API_KEY") +@pytest.mark.vcr +def test_save_memory_trimmed_from_context_window( + message_history_fixture, expected_permanent_memory, config: Config +): + next_message_to_add_index = len(message_history_fixture) - 1 + memory = get_memory(config, init=True) + save_memory_trimmed_from_context_window( + message_history_fixture, next_message_to_add_index, memory + ) + + memory_found = memory.get_relevant("Important Information", 5) + assert memory_found[0] == expected_permanent_memory diff --git a/tests/integration/test_setup.py b/tests/integration/test_setup.py new file mode 100644 index 00000000..b649bb14 --- /dev/null +++ b/tests/integration/test_setup.py @@ -0,0 +1,78 @@ +from unittest.mock import patch + +import pytest + +from autogpt.config.ai_config import AIConfig +from autogpt.setup import ( + generate_aiconfig_automatic, + generate_aiconfig_manual, + prompt_user, +) +from tests.utils import requires_api_key + + +@pytest.mark.vcr +@requires_api_key("OPENAI_API_KEY") +def test_generate_aiconfig_automatic_default(): + user_inputs = [""] + with patch("builtins.input", side_effect=user_inputs): + ai_config = prompt_user() + + assert isinstance(ai_config, AIConfig) + assert ai_config.ai_name is not None + assert ai_config.ai_role is not None + assert 1 <= len(ai_config.ai_goals) <= 5 + + +@pytest.mark.vcr +@requires_api_key("OPENAI_API_KEY") +def test_generate_aiconfig_automatic_typical(): + user_prompt = "Help me create a rock opera about cybernetic giraffes" + ai_config = generate_aiconfig_automatic(user_prompt) + + assert isinstance(ai_config, AIConfig) + assert ai_config.ai_name is not None + assert ai_config.ai_role is not None + assert 1 <= len(ai_config.ai_goals) <= 5 + + +@pytest.mark.vcr +@requires_api_key("OPENAI_API_KEY") +def test_generate_aiconfig_automatic_fallback(): + user_inputs = [ + "T&GF£OIBECC()!*", + "Chef-GPT", + "an AI designed to browse bake a cake.", + "Purchase ingredients", + "Bake a cake", + "", + "", + ] + with patch("builtins.input", side_effect=user_inputs): + ai_config = prompt_user() + + assert isinstance(ai_config, AIConfig) + assert ai_config.ai_name == "Chef-GPT" + assert ai_config.ai_role == "an AI designed to browse bake a cake." 
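+    # the trailing blank inputs end the interactive setup, so only the two
+    # goals entered above are kept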
+ assert ai_config.ai_goals == ["Purchase ingredients", "Bake a cake"] + + +@pytest.mark.vcr +@requires_api_key("OPENAI_API_KEY") +def test_prompt_user_manual_mode(): + user_inputs = [ + "--manual", + "Chef-GPT", + "an AI designed to browse bake a cake.", + "Purchase ingredients", + "Bake a cake", + "", + "", + ] + with patch("builtins.input", side_effect=user_inputs): + ai_config = prompt_user() + + assert isinstance(ai_config, AIConfig) + assert ai_config.ai_name == "Chef-GPT" + assert ai_config.ai_role == "an AI designed to browse bake a cake." + assert ai_config.ai_goals == ["Purchase ingredients", "Bake a cake"] diff --git a/tests/integration/weaviate_memory_tests.py b/tests/integration/weaviate_memory_tests.py index 015eab05..2cea871c 100644 --- a/tests/integration/weaviate_memory_tests.py +++ b/tests/integration/weaviate_memory_tests.py @@ -1,14 +1,11 @@ -import os -import sys import unittest -from unittest import mock from uuid import uuid4 from weaviate import Client from weaviate.util import get_valid_uuid from autogpt.config import Config -from autogpt.memory.base import get_ada_embedding +from autogpt.llm import get_ada_embedding from autogpt.memory.weaviate import WeaviateMemory @@ -19,6 +16,7 @@ class TestWeaviateMemory(unittest.TestCase): @classmethod def setUpClass(cls): + """Set up the test environment for the WeaviateMemory tests.""" # only create the connection to weaviate once cls.cfg = Config() @@ -50,6 +48,7 @@ class TestWeaviateMemory(unittest.TestCase): """ def setUp(self): + """Set up the test environment for the WeaviateMemory tests.""" try: self.client.schema.delete_class(self.index) except: @@ -58,6 +57,7 @@ class TestWeaviateMemory(unittest.TestCase): self.memory = WeaviateMemory(self.cfg) def test_add(self): + """Test adding a text to the cache""" doc = "You are a Titan name Thanos and you are looking for the Infinity Stones" self.memory.add(doc) result = self.client.query.get(self.index, ["raw_text"]).do() @@ -67,8 +67,9 @@ class TestWeaviateMemory(unittest.TestCase): self.assertEqual(actual[0]["raw_text"], doc) def test_get(self): + """Test getting a text from the cache""" doc = "You are an Avenger and swore to defend the Galaxy from a menace called Thanos" - + # add the document to the cache with self.client.batch as batch: batch.add_data_object( uuid=get_valid_uuid(uuid4()), @@ -85,6 +86,7 @@ class TestWeaviateMemory(unittest.TestCase): self.assertEqual(actual[0], doc) def test_get_stats(self): + """Test getting the stats of the cache""" docs = [ "You are now about to count the number of docs in this index", "And then you about to find out if you can count correctly", @@ -99,6 +101,7 @@ class TestWeaviateMemory(unittest.TestCase): self.assertEqual(stats["count"], 2) def test_clear(self): + """Test clearing the cache""" docs = [ "Shame this is the last test for this class", "Testing is fun when someone else is doing it", diff --git a/tests/local_cache_test.py b/tests/local_cache_test.py deleted file mode 100644 index bb108626..00000000 --- a/tests/local_cache_test.py +++ /dev/null @@ -1,67 +0,0 @@ -# sourcery skip: snake-case-functions -"""Tests for LocalCache class""" -import os -import sys -import unittest - -import pytest - -from autogpt.memory.local import LocalCache - - -def mock_config() -> dict: - """Mock the Config class""" - return type( - "MockConfig", - (object,), - { - "debug_mode": False, - "continuous_mode": False, - "speak_mode": False, - "memory_index": "auto-gpt", - }, - ) - - -@pytest.mark.integration_test -class 
TestLocalCache(unittest.TestCase): - """Tests for LocalCache class""" - - def setUp(self) -> None: - """Set up the test environment""" - self.cfg = mock_config() - self.cache = LocalCache(self.cfg) - - def test_add(self) -> None: - """Test adding a text to the cache""" - text = "Sample text" - self.cache.add(text) - self.assertIn(text, self.cache.data.texts) - - def test_clear(self) -> None: - """Test clearing the cache""" - self.cache.clear() - self.assertEqual(self.cache.data.texts, []) - - def test_get(self) -> None: - """Test getting a text from the cache""" - text = "Sample text" - self.cache.add(text) - result = self.cache.get(text) - self.assertEqual(result, [text]) - - def test_get_relevant(self) -> None: - """Test getting relevant texts from the cache""" - text1 = "Sample text 1" - text2 = "Sample text 2" - self.cache.add(text1) - self.cache.add(text2) - result = self.cache.get_relevant(text1, 1) - self.assertEqual(result, [text1]) - - def test_get_stats(self) -> None: - """Test getting the cache stats""" - text = "Sample text" - self.cache.add(text) - stats = self.cache.get_stats() - self.assertEqual(stats, (4, self.cache.data.embeddings.shape)) diff --git a/tests/milvus_memory_test.py b/tests/milvus_memory_test.py index 84fd6e6d..46010f7e 100644 --- a/tests/milvus_memory_test.py +++ b/tests/milvus_memory_test.py @@ -8,7 +8,8 @@ try: from autogpt.memory.milvus import MilvusMemory def mock_config() -> dict: - """Mock the Config class""" + """Mock the config object for testing purposes.""" + # Return a mock config object with the required attributes return type( "MockConfig", (object,), @@ -68,5 +69,5 @@ try: stats = self.memory.get_stats() self.assertEqual(15, len(stats)) -except: - print("Milvus not installed, skipping tests") +except ImportError as err: + print(f"Skipping tests for MilvusMemory: {err}") diff --git a/tests/mocks/__init__.py b/tests/mocks/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/mocks/mock_commands.py b/tests/mocks/mock_commands.py new file mode 100644 index 00000000..d5bf5df0 --- /dev/null +++ b/tests/mocks/mock_commands.py @@ -0,0 +1,7 @@ +from autogpt.commands.command import command + + +@command("function_based", "Function-based test command") +def function_based(arg1: int, arg2: str) -> str: + """A function-based test command that returns a string with the two arguments separated by a dash.""" + return f"{arg1} - {arg2}" diff --git a/tests/smoke_test.py b/tests/smoke_test.py deleted file mode 100644 index 1b9d643f..00000000 --- a/tests/smoke_test.py +++ /dev/null @@ -1,59 +0,0 @@ -"""Smoke test for the autogpt package.""" -import os -import subprocess -import sys - -import pytest - -from autogpt.commands.file_operations import delete_file, read_file - - -@pytest.mark.integration_test -def test_write_file() -> None: - """ - Test case to check if the write_file command can successfully write 'Hello World' to a file - named 'hello_world.txt'. - - Read the current ai_settings.yaml file and store its content. - """ - env_vars = {"MEMORY_BACKEND": "no_memory", "TEMPERATURE": "0"} - ai_settings = None - if os.path.exists("ai_settings.yaml"): - with open("ai_settings.yaml", "r") as f: - ai_settings = f.read() - os.remove("ai_settings.yaml") - - try: - if os.path.exists("hello_world.txt"): - # Clean up any existing 'hello_world.txt' file before testing. - delete_file("hello_world.txt") - # Prepare input data for the test. 
-    input_data = """write_file-GPT
-an AI designed to use the write_file command to write 'Hello World' into a file named "hello_world.txt" and then use the task_complete command to complete the task.
-Use the write_file command to write 'Hello World' into a file named "hello_world.txt".
-Use the task_complete command to complete the task.
-Do not use any other commands.
-
-y
-5
-EOF"""
-        command = f"{sys.executable} -m autogpt"
-
-        # Execute the script with the input data.
-        process = subprocess.Popen(
-            command,
-            stdin=subprocess.PIPE,
-            shell=True,
-            env={**os.environ, **env_vars},
-        )
-        process.communicate(input_data.encode())
-
-        # Read the content of the 'hello_world.txt' file created during the test.
-        content = read_file("hello_world.txt")
-    finally:
-        if ai_settings:
-            # Restore the original ai_settings.yaml file.
-            with open("ai_settings.yaml", "w") as f:
-                f.write(ai_settings)
-
-    # Check if the content of the 'hello_world.txt' file is equal to 'Hello World'.
-    assert content == "Hello World", f"Expected 'Hello World', got {content}"
diff --git a/tests/test_agent.py b/tests/test_agent.py
new file mode 100644
index 00000000..e8596731
--- /dev/null
+++ b/tests/test_agent.py
@@ -0,0 +1,47 @@
+from unittest.mock import MagicMock
+
+import pytest
+
+from autogpt.agent import Agent
+from autogpt.config import Config
+
+
+@pytest.fixture
+def agent():
+    ai_name = "Test AI"
+    memory = MagicMock()
+    full_message_history = []
+    next_action_count = 0
+    command_registry = MagicMock()
+    config = Config()
+    system_prompt = "System prompt"
+    triggering_prompt = "Triggering prompt"
+    workspace_directory = "workspace_directory"
+
+    agent = Agent(
+        ai_name,
+        memory,
+        full_message_history,
+        next_action_count,
+        command_registry,
+        config,
+        system_prompt,
+        triggering_prompt,
+        workspace_directory,
+    )
+    return agent
+
+
+def test_agent_initialization(agent):
+    assert agent.ai_name == "Test AI"
+    assert agent.memory is not None
+    assert agent.full_message_history == []
+    assert agent.next_action_count == 0
+    assert agent.command_registry is not None
+    assert agent.config is not None
+    assert agent.system_prompt == "System prompt"
+    assert agent.triggering_prompt == "Triggering prompt"
+
+
+# More test methods can be added for specific agent interactions
+# For example, mocking chat_with_ai and testing the agent's interaction loop
diff --git a/tests/test_agent_manager.py b/tests/test_agent_manager.py
new file mode 100644
index 00000000..e7e696ae
--- /dev/null
+++ b/tests/test_agent_manager.py
@@ -0,0 +1,65 @@
+import pytest
+
+from autogpt.agent.agent_manager import AgentManager
+from autogpt.llm import create_chat_completion
+
+
+@pytest.fixture
+def agent_manager():
+    # Hack, real gross. Singletons are not good times.
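+    # AgentManager is a Singleton, so drop any instance cached by an earlier
+    # test to give this test a fresh, empty manager.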
+    if AgentManager in AgentManager._instances:
+        del AgentManager._instances[AgentManager]
+    return AgentManager()
+
+
+@pytest.fixture
+def task():
+    return "translate English to French"
+
+
+@pytest.fixture
+def prompt():
+    return "Translate the following English text to French: 'Hello, how are you?'"
+
+
+@pytest.fixture
+def model():
+    return "gpt-3.5-turbo"
+
+
+@pytest.fixture(autouse=True)
+def mock_create_chat_completion(mocker):
+    mock_create_chat_completion = mocker.patch(
+        "autogpt.agent.agent_manager.create_chat_completion",
+        wraps=create_chat_completion,
+    )
+    mock_create_chat_completion.return_value = "irrelevant"
+    return mock_create_chat_completion
+
+
+def test_create_agent(agent_manager, task, prompt, model):
+    key, agent_reply = agent_manager.create_agent(task, prompt, model)
+    assert isinstance(key, int)
+    assert isinstance(agent_reply, str)
+    assert key in agent_manager.agents
+
+
+def test_message_agent(agent_manager, task, prompt, model):
+    key, _ = agent_manager.create_agent(task, prompt, model)
+    user_message = "Please translate 'Good morning' to French."
+    agent_reply = agent_manager.message_agent(key, user_message)
+    assert isinstance(agent_reply, str)
+
+
+def test_list_agents(agent_manager, task, prompt, model):
+    key, _ = agent_manager.create_agent(task, prompt, model)
+    agents_list = agent_manager.list_agents()
+    assert isinstance(agents_list, list)
+    assert (key, task) in agents_list
+
+
+def test_delete_agent(agent_manager, task, prompt, model):
+    key, _ = agent_manager.create_agent(task, prompt, model)
+    success = agent_manager.delete_agent(key)
+    assert success
+    assert key not in agent_manager.agents
diff --git a/tests/test_ai_config.py b/tests/test_ai_config.py
new file mode 100644
index 00000000..a9fcdad6
--- /dev/null
+++ b/tests/test_ai_config.py
@@ -0,0 +1,45 @@
+from autogpt.config.ai_config import AIConfig
+
+"""
+Test cases for the AIConfig class, which loads the AI configuration
+settings from a YAML file.
+""" + + +def test_goals_are_always_lists_of_strings(tmp_path): + """Test if the goals attribute is always a list of strings.""" + + yaml_content = """ +ai_goals: +- Goal 1: Make a sandwich +- Goal 2, Eat the sandwich +- Goal 3 - Go to sleep +- "Goal 4: Wake up" +ai_name: McFamished +ai_role: A hungry AI +api_budget: 0.0 +""" + config_file = tmp_path / "ai_settings.yaml" + config_file.write_text(yaml_content) + + ai_config = AIConfig.load(config_file) + + assert len(ai_config.ai_goals) == 4 + assert ai_config.ai_goals[0] == "Goal 1: Make a sandwich" + assert ai_config.ai_goals[1] == "Goal 2, Eat the sandwich" + assert ai_config.ai_goals[2] == "Goal 3 - Go to sleep" + assert ai_config.ai_goals[3] == "Goal 4: Wake up" + + config_file.write_text("") + ai_config.save(config_file) + + yaml_content2 = """ai_goals: +- 'Goal 1: Make a sandwich' +- Goal 2, Eat the sandwich +- Goal 3 - Go to sleep +- 'Goal 4: Wake up' +ai_name: McFamished +ai_role: A hungry AI +api_budget: 0.0 +""" + assert config_file.read_text() == yaml_content2 diff --git a/tests/test_api_manager.py b/tests/test_api_manager.py new file mode 100644 index 00000000..ba64a72f --- /dev/null +++ b/tests/test_api_manager.py @@ -0,0 +1,117 @@ +from unittest.mock import MagicMock, patch + +import pytest + +from autogpt.llm import COSTS, ApiManager + +api_manager = ApiManager() + + +@pytest.fixture(autouse=True) +def reset_api_manager(): + api_manager.reset() + yield + + +@pytest.fixture(autouse=True) +def mock_costs(): + with patch.dict( + COSTS, + { + "gpt-3.5-turbo": {"prompt": 0.002, "completion": 0.002}, + "text-embedding-ada-002": {"prompt": 0.0004, "completion": 0}, + }, + clear=True, + ): + yield + + +class TestApiManager: + @staticmethod + def test_create_chat_completion_debug_mode(caplog): + """Test if debug mode logs response.""" + api_manager_debug = ApiManager(debug=True) + messages = [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Who won the world series in 2020?"}, + ] + model = "gpt-3.5-turbo" + + with patch("openai.ChatCompletion.create") as mock_create: + mock_response = MagicMock() + mock_response.usage.prompt_tokens = 10 + mock_response.usage.completion_tokens = 20 + mock_create.return_value = mock_response + + api_manager_debug.create_chat_completion(messages, model=model) + + assert "Response" in caplog.text + + @staticmethod + def test_create_chat_completion_empty_messages(): + """Test if empty messages result in zero tokens and cost.""" + messages = [] + model = "gpt-3.5-turbo" + + with patch("openai.ChatCompletion.create") as mock_create: + mock_response = MagicMock() + mock_response.usage.prompt_tokens = 0 + mock_response.usage.completion_tokens = 0 + mock_create.return_value = mock_response + + api_manager.create_chat_completion(messages, model=model) + + assert api_manager.get_total_prompt_tokens() == 0 + assert api_manager.get_total_completion_tokens() == 0 + assert api_manager.get_total_cost() == 0 + + @staticmethod + def test_create_chat_completion_valid_inputs(): + """Test if valid inputs result in correct tokens and cost.""" + messages = [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Who won the world series in 2020?"}, + ] + model = "gpt-3.5-turbo" + + with patch("openai.ChatCompletion.create") as mock_create: + mock_response = MagicMock() + mock_response.usage.prompt_tokens = 10 + mock_response.usage.completion_tokens = 20 + mock_create.return_value = mock_response + + 
api_manager.create_chat_completion(messages, model=model) + + assert api_manager.get_total_prompt_tokens() == 10 + assert api_manager.get_total_completion_tokens() == 20 + assert api_manager.get_total_cost() == (10 * 0.002 + 20 * 0.002) / 1000 + + def test_getter_methods(self): + """Test the getter methods for total tokens, cost, and budget.""" + api_manager.update_cost(60, 120, "gpt-3.5-turbo") + api_manager.set_total_budget(10.0) + assert api_manager.get_total_prompt_tokens() == 60 + assert api_manager.get_total_completion_tokens() == 120 + assert api_manager.get_total_cost() == (60 * 0.002 + 120 * 0.002) / 1000 + assert api_manager.get_total_budget() == 10.0 + + @staticmethod + def test_set_total_budget(): + """Test if setting the total budget works correctly.""" + total_budget = 10.0 + api_manager.set_total_budget(total_budget) + + assert api_manager.get_total_budget() == total_budget + + @staticmethod + def test_update_cost(): + """Test if updating the cost works correctly.""" + prompt_tokens = 50 + completion_tokens = 100 + model = "gpt-3.5-turbo" + + api_manager.update_cost(prompt_tokens, completion_tokens, model) + + assert api_manager.get_total_prompt_tokens() == 50 + assert api_manager.get_total_completion_tokens() == 100 + assert api_manager.get_total_cost() == (50 * 0.002 + 100 * 0.002) / 1000 diff --git a/tests/test_commands.py b/tests/test_commands.py new file mode 100644 index 00000000..0961d0af --- /dev/null +++ b/tests/test_commands.py @@ -0,0 +1,205 @@ +import os +import shutil +import sys +from pathlib import Path + +import pytest + +from autogpt.commands.command import Command, CommandRegistry + + +class TestCommand: + """Test cases for the Command class.""" + + @staticmethod + def example_command_method(arg1: int, arg2: str) -> str: + """Example function for testing the Command class.""" + # This function is static because it is not used by any other test cases. + return f"{arg1} - {arg2}" + + def test_command_creation(self): + """Test that a Command object can be created with the correct attributes.""" + cmd = Command( + name="example", + description="Example command", + method=self.example_command_method, + ) + + assert cmd.name == "example" + assert cmd.description == "Example command" + assert cmd.method == self.example_command_method + assert cmd.signature == "(arg1: int, arg2: str) -> str" + + def test_command_call(self): + """Test that Command(*args) calls and returns the result of method(*args).""" + # Create a Command object with the example_command_method. 
+ cmd = Command( + name="example", + description="Example command", + method=self.example_command_method, + ) + result = cmd(arg1=1, arg2="test") + assert result == "1 - test" + + def test_command_call_with_invalid_arguments(self): + """Test that calling a Command object with invalid arguments raises a TypeError.""" + cmd = Command( + name="example", + description="Example command", + method=self.example_command_method, + ) + with pytest.raises(TypeError): + cmd(arg1="invalid", does_not_exist="test") + + def test_command_default_signature(self): + """Test that the default signature is generated correctly.""" + cmd = Command( + name="example", + description="Example command", + method=self.example_command_method, + ) + assert cmd.signature == "(arg1: int, arg2: str) -> str" + + def test_command_custom_signature(self): + custom_signature = "custom_arg1: int, custom_arg2: str" + cmd = Command( + name="example", + description="Example command", + method=self.example_command_method, + signature=custom_signature, + ) + + assert cmd.signature == custom_signature + + +class TestCommandRegistry: + @staticmethod + def example_command_method(arg1: int, arg2: str) -> str: + return f"{arg1} - {arg2}" + + def test_register_command(self): + """Test that a command can be registered to the registry.""" + registry = CommandRegistry() + cmd = Command( + name="example", + description="Example command", + method=self.example_command_method, + ) + + registry.register(cmd) + + assert cmd.name in registry.commands + assert registry.commands[cmd.name] == cmd + + def test_unregister_command(self): + """Test that a command can be unregistered from the registry.""" + registry = CommandRegistry() + cmd = Command( + name="example", + description="Example command", + method=self.example_command_method, + ) + + registry.register(cmd) + registry.unregister(cmd.name) + + assert cmd.name not in registry.commands + + def test_get_command(self): + """Test that a command can be retrieved from the registry.""" + registry = CommandRegistry() + cmd = Command( + name="example", + description="Example command", + method=self.example_command_method, + ) + + registry.register(cmd) + retrieved_cmd = registry.get_command(cmd.name) + + assert retrieved_cmd == cmd + + def test_get_nonexistent_command(self): + """Test that attempting to get a nonexistent command raises a KeyError.""" + registry = CommandRegistry() + + with pytest.raises(KeyError): + registry.get_command("nonexistent_command") + + def test_call_command(self): + """Test that a command can be called through the registry.""" + registry = CommandRegistry() + cmd = Command( + name="example", + description="Example command", + method=self.example_command_method, + ) + + registry.register(cmd) + result = registry.call("example", arg1=1, arg2="test") + + assert result == "1 - test" + + def test_call_nonexistent_command(self): + """Test that attempting to call a nonexistent command raises a KeyError.""" + registry = CommandRegistry() + + with pytest.raises(KeyError): + registry.call("nonexistent_command", arg1=1, arg2="test") + + def test_get_command_prompt(self): + """Test that the command prompt is correctly formatted.""" + registry = CommandRegistry() + cmd = Command( + name="example", + description="Example command", + method=self.example_command_method, + ) + + registry.register(cmd) + command_prompt = registry.command_prompt() + + assert f"(arg1: int, arg2: str)" in command_prompt + + def test_import_mock_commands_module(self): + """Test that the registry can import a module 
with mock command plugins.""" + registry = CommandRegistry() + mock_commands_module = "tests.mocks.mock_commands" + + registry.import_commands(mock_commands_module) + + assert "function_based" in registry.commands + assert registry.commands["function_based"].name == "function_based" + assert ( + registry.commands["function_based"].description + == "Function-based test command" + ) + + def test_import_temp_command_file_module(self, tmp_path): + """ + Test that the registry can import a command plugins module from a temp file. + Args: + tmp_path (pathlib.Path): Path to a temporary directory. + """ + registry = CommandRegistry() + + # Create a temp command file + src = Path(os.getcwd()) / "tests/mocks/mock_commands.py" + temp_commands_file = tmp_path / "mock_commands.py" + shutil.copyfile(src, temp_commands_file) + + # Add the temp directory to sys.path to make the module importable + sys.path.append(str(tmp_path)) + + temp_commands_module = "mock_commands" + registry.import_commands(temp_commands_module) + + # Remove the temp directory from sys.path + sys.path.remove(str(tmp_path)) + + assert "function_based" in registry.commands + assert registry.commands["function_based"].name == "function_based" + assert ( + registry.commands["function_based"].description + == "Function-based test command" + ) diff --git a/tests/test_config.py b/tests/test_config.py index b472a24c..e1fc6745 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -1,84 +1,119 @@ -from unittest import TestCase +""" +Test cases for the Config class, which handles the configuration settings +for the AI and ensures it behaves as a singleton. +""" + +import pytest from autogpt.config import Config -class TestConfig(TestCase): +def test_initial_values(config): """ - Test cases for the Config class, which handles the configuration settings - for the AI and ensures it behaves as a singleton. + Test if the initial values of the Config class attributes are set correctly. """ + assert config.debug_mode == False + assert config.continuous_mode == False + assert config.speak_mode == False + assert config.fast_llm_model == "gpt-3.5-turbo" + assert config.smart_llm_model == "gpt-4" + assert config.fast_token_limit == 4000 + assert config.smart_token_limit == 8000 - def setUp(self): - """ - Set up the test environment by creating an instance of the Config class. - """ - self.config = Config() - def test_singleton(self): - """ - Test if the Config class behaves as a singleton by ensuring that two instances are the same. - """ - config2 = Config() - self.assertIs(self.config, config2) +def test_set_continuous_mode(config): + """ + Test if the set_continuous_mode() method updates the continuous_mode attribute. + """ + # Store continuous mode to reset it after the test + continuous_mode = config.continuous_mode - def test_initial_values(self): - """ - Test if the initial values of the Config class attributes are set correctly. - """ - self.assertFalse(self.config.debug_mode) - self.assertFalse(self.config.continuous_mode) - self.assertFalse(self.config.speak_mode) - self.assertEqual(self.config.fast_llm_model, "gpt-3.5-turbo") - self.assertEqual(self.config.smart_llm_model, "gpt-4") - self.assertEqual(self.config.fast_token_limit, 4000) - self.assertEqual(self.config.smart_token_limit, 8000) + config.set_continuous_mode(True) + assert config.continuous_mode == True - def test_set_continuous_mode(self): - """ - Test if the set_continuous_mode() method updates the continuous_mode attribute. 
- """ - self.config.set_continuous_mode(True) - self.assertTrue(self.config.continuous_mode) + # Reset continuous mode + config.set_continuous_mode(continuous_mode) - def test_set_speak_mode(self): - """ - Test if the set_speak_mode() method updates the speak_mode attribute. - """ - self.config.set_speak_mode(True) - self.assertTrue(self.config.speak_mode) - def test_set_fast_llm_model(self): - """ - Test if the set_fast_llm_model() method updates the fast_llm_model attribute. - """ - self.config.set_fast_llm_model("gpt-3.5-turbo-test") - self.assertEqual(self.config.fast_llm_model, "gpt-3.5-turbo-test") +def test_set_speak_mode(config): + """ + Test if the set_speak_mode() method updates the speak_mode attribute. + """ + # Store speak mode to reset it after the test + speak_mode = config.speak_mode - def test_set_smart_llm_model(self): - """ - Test if the set_smart_llm_model() method updates the smart_llm_model attribute. - """ - self.config.set_smart_llm_model("gpt-4-test") - self.assertEqual(self.config.smart_llm_model, "gpt-4-test") + config.set_speak_mode(True) + assert config.speak_mode == True - def test_set_fast_token_limit(self): - """ - Test if the set_fast_token_limit() method updates the fast_token_limit attribute. - """ - self.config.set_fast_token_limit(5000) - self.assertEqual(self.config.fast_token_limit, 5000) + # Reset speak mode + config.set_speak_mode(speak_mode) - def test_set_smart_token_limit(self): - """ - Test if the set_smart_token_limit() method updates the smart_token_limit attribute. - """ - self.config.set_smart_token_limit(9000) - self.assertEqual(self.config.smart_token_limit, 9000) - def test_set_debug_mode(self): - """ - Test if the set_debug_mode() method updates the debug_mode attribute. - """ - self.config.set_debug_mode(True) - self.assertTrue(self.config.debug_mode) +def test_set_fast_llm_model(config): + """ + Test if the set_fast_llm_model() method updates the fast_llm_model attribute. + """ + # Store model name to reset it after the test + fast_llm_model = config.fast_llm_model + + config.set_fast_llm_model("gpt-3.5-turbo-test") + assert config.fast_llm_model == "gpt-3.5-turbo-test" + + # Reset model name + config.set_fast_llm_model(fast_llm_model) + + +def test_set_smart_llm_model(config): + """ + Test if the set_smart_llm_model() method updates the smart_llm_model attribute. + """ + # Store model name to reset it after the test + smart_llm_model = config.smart_llm_model + + config.set_smart_llm_model("gpt-4-test") + assert config.smart_llm_model == "gpt-4-test" + + # Reset model name + config.set_smart_llm_model(smart_llm_model) + + +def test_set_fast_token_limit(config): + """ + Test if the set_fast_token_limit() method updates the fast_token_limit attribute. + """ + # Store token limit to reset it after the test + fast_token_limit = config.fast_token_limit + + config.set_fast_token_limit(5000) + assert config.fast_token_limit == 5000 + + # Reset token limit + config.set_fast_token_limit(fast_token_limit) + + +def test_set_smart_token_limit(config): + """ + Test if the set_smart_token_limit() method updates the smart_token_limit attribute. + """ + # Store token limit to reset it after the test + smart_token_limit = config.smart_token_limit + + config.set_smart_token_limit(9000) + assert config.smart_token_limit == 9000 + + # Reset token limit + config.set_smart_token_limit(smart_token_limit) + + +def test_set_debug_mode(config): + """ + Test if the set_debug_mode() method updates the debug_mode attribute. 
+ """ + # Store debug mode to reset it after the test + debug_mode = config.debug_mode + + config.set_debug_mode(True) + assert config.debug_mode == True + + # Reset debug mode + config.set_debug_mode(debug_mode) diff --git a/tests/test_image_gen.py b/tests/test_image_gen.py index 19c57e42..546cc758 100644 --- a/tests/test_image_gen.py +++ b/tests/test_image_gen.py @@ -1,102 +1,105 @@ +import functools import hashlib -import os -import unittest +from pathlib import Path +import pytest from PIL import Image from autogpt.commands.image_gen import generate_image, generate_image_with_sd_webui -from autogpt.config import Config -from autogpt.workspace import path_in_workspace +from tests.utils import requires_api_key + + +@pytest.fixture(params=[256, 512, 1024]) +def image_size(request): + """Parametrize image size.""" + return request.param + + +@pytest.mark.xfail( + reason="The image is too big to be put in a cassette for a CI pipeline. We're looking into a solution." +) +@requires_api_key("OPENAI_API_KEY") +def test_dalle(config, workspace, image_size): + """Test DALL-E image generation.""" + generate_and_validate( + config, + workspace, + image_provider="dalle", + image_size=image_size, + ) + + +@pytest.mark.xfail( + reason="The image is too big to be put in a cassette for a CI pipeline. We're looking into a solution." +) +@requires_api_key("HUGGINGFACE_API_TOKEN") +@pytest.mark.parametrize( + "image_model", + ["CompVis/stable-diffusion-v1-4", "stabilityai/stable-diffusion-2-1"], +) +def test_huggingface(config, workspace, image_size, image_model): + """Test HuggingFace image generation.""" + generate_and_validate( + config, + workspace, + image_provider="huggingface", + image_size=image_size, + hugging_face_image_model=image_model, + ) + + +@pytest.mark.xfail(reason="SD WebUI call does not work.") +def test_sd_webui(config, workspace, image_size): + """Test SD WebUI image generation.""" + generate_and_validate( + config, + workspace, + image_provider="sd_webui", + image_size=image_size, + ) + + +@pytest.mark.xfail(reason="SD WebUI call does not work.") +def test_sd_webui_negative_prompt(config, workspace, image_size): + gen_image = functools.partial( + generate_image_with_sd_webui, + prompt="astronaut riding a horse", + size=image_size, + extra={"seed": 123}, + ) + + # Generate an image with a negative prompt + image_path = lst(gen_image(negative_prompt="horse", filename="negative.jpg")) + with Image.open(image_path) as img: + neg_image_hash = hashlib.md5(img.tobytes()).hexdigest() + + # Generate an image without a negative prompt + image_path = lst(gen_image(filename="positive.jpg")) + with Image.open(image_path) as img: + image_hash = hashlib.md5(img.tobytes()).hexdigest() + + assert image_hash != neg_image_hash def lst(txt): - return txt.split(":")[1].strip() + """Extract the file path from the output of `generate_image()`""" + return Path(txt.split(":")[1].strip()) -@unittest.skipIf(os.getenv("CI"), "Skipping image generation tests") -class TestImageGen(unittest.TestCase): - def setUp(self): - self.config = Config() +def generate_and_validate( + config, + workspace, + image_size, + image_provider, + hugging_face_image_model=None, + **kwargs, +): + """Generate an image and validate the output.""" + config.image_provider = image_provider + config.huggingface_image_model = hugging_face_image_model + prompt = "astronaut riding a horse" - def test_dalle(self): - self.config.image_provider = "dalle" - - # Test using size 256 - result = lst(generate_image("astronaut riding a horse", 256)) - 
image_path = path_in_workspace(result) - self.assertTrue(image_path.exists()) - with Image.open(image_path) as img: - self.assertEqual(img.size, (256, 256)) - image_path.unlink() - - # Test using size 512 - result = lst(generate_image("astronaut riding a horse", 512)) - image_path = path_in_workspace(result) - with Image.open(image_path) as img: - self.assertEqual(img.size, (512, 512)) - image_path.unlink() - - def test_huggingface(self): - self.config.image_provider = "huggingface" - - # Test usin SD 1.4 model and size 512 - self.config.huggingface_image_model = "CompVis/stable-diffusion-v1-4" - result = lst(generate_image("astronaut riding a horse", 512)) - image_path = path_in_workspace(result) - self.assertTrue(image_path.exists()) - with Image.open(image_path) as img: - self.assertEqual(img.size, (512, 512)) - image_path.unlink() - - # Test using SD 2.1 768 model and size 768 - self.config.huggingface_image_model = "stabilityai/stable-diffusion-2-1" - result = lst(generate_image("astronaut riding a horse", 768)) - image_path = path_in_workspace(result) - with Image.open(image_path) as img: - self.assertEqual(img.size, (768, 768)) - image_path.unlink() - - def test_sd_webui(self): - self.config.image_provider = "sd_webui" - return - - # Test using size 128 - result = lst(generate_image_with_sd_webui("astronaut riding a horse", 128)) - image_path = path_in_workspace(result) - self.assertTrue(image_path.exists()) - with Image.open(image_path) as img: - self.assertEqual(img.size, (128, 128)) - image_path.unlink() - - # Test using size 64 and negative prompt - result = lst( - generate_image_with_sd_webui( - "astronaut riding a horse", - negative_prompt="horse", - size=64, - extra={"seed": 123}, - ) - ) - image_path = path_in_workspace(result) - with Image.open(image_path) as img: - self.assertEqual(img.size, (64, 64)) - neg_image_hash = hashlib.md5(img.tobytes()).hexdigest() - image_path.unlink() - - # Same test as above but without the negative prompt - result = lst( - generate_image_with_sd_webui( - "astronaut riding a horse", image_size=64, size=1, extra={"seed": 123} - ) - ) - image_path = path_in_workspace(result) - with Image.open(image_path) as img: - self.assertEqual(img.size, (64, 64)) - image_hash = hashlib.md5(img.tobytes()).hexdigest() - image_path.unlink() - - self.assertNotEqual(image_hash, neg_image_hash) - - -if __name__ == "__main__": - unittest.main() + image_path = lst(generate_image(prompt, image_size, **kwargs)) + assert image_path.exists() + with Image.open(image_path) as img: + assert img.size == (image_size, image_size) diff --git a/tests/test_logs.py b/tests/test_logs.py new file mode 100644 index 00000000..0e8660b6 --- /dev/null +++ b/tests/test_logs.py @@ -0,0 +1,28 @@ +import pytest + +from autogpt.logs import remove_color_codes + + +@pytest.mark.parametrize( + "raw_text, clean_text", + [ + ( + "COMMAND = \x1b[36mbrowse_website\x1b[0m ARGUMENTS = \x1b[36m{'url': 'https://www.google.com', 'question': 'What is the capital of France?'}\x1b[0m", + "COMMAND = browse_website ARGUMENTS = {'url': 'https://www.google.com', 'question': 'What is the capital of France?'}", + ), + ( + "{'Schaue dir meine Projekte auf github () an, als auch meine Webseiten': 'https://github.com/Significant-Gravitas/Auto-GPT, https://discord.gg/autogpt und https://twitter.com/SigGravitas'}", + "{'Schaue dir meine Projekte auf github () an, als auch meine Webseiten': 'https://github.com/Significant-Gravitas/Auto-GPT, https://discord.gg/autogpt und https://twitter.com/SigGravitas'}", + ), + ("", 
""), + ("hello", "hello"), + ("hello\x1B[31m world", "hello world"), + ("\x1B[36mHello,\x1B[32m World!", "Hello, World!"), + ( + "\x1B[1m\x1B[31mError:\x1B[0m\x1B[31m file not found", + "Error: file not found", + ), + ], +) +def test_remove_color_codes(raw_text, clean_text): + assert remove_color_codes(raw_text) == clean_text diff --git a/tests/test_prompt_generator.py b/tests/test_prompt_generator.py index 6a0bfd6c..1fa1754d 100644 --- a/tests/test_prompt_generator.py +++ b/tests/test_prompt_generator.py @@ -1,6 +1,6 @@ from unittest import TestCase -from autogpt.promptgenerator import PromptGenerator +from autogpt.prompts.generator import PromptGenerator class TestPromptGenerator(TestCase): @@ -38,6 +38,7 @@ class TestPromptGenerator(TestCase): "label": command_label, "name": command_name, "args": args, + "function": None, } self.assertIn(command, self.generator.commands) diff --git a/tests/test_token_counter.py b/tests/test_token_counter.py index 6d7ae016..966a0aff 100644 --- a/tests/test_token_counter.py +++ b/tests/test_token_counter.py @@ -1,63 +1,72 @@ -import unittest +import pytest -import tests.context -from autogpt.token_counter import count_message_tokens, count_string_tokens +from autogpt.llm import count_message_tokens, count_string_tokens -class TestTokenCounter(unittest.TestCase): - def test_count_message_tokens(self): - messages = [ - {"role": "user", "content": "Hello"}, - {"role": "assistant", "content": "Hi there!"}, - ] - self.assertEqual(count_message_tokens(messages), 17) - - def test_count_message_tokens_with_name(self): - messages = [ - {"role": "user", "content": "Hello", "name": "John"}, - {"role": "assistant", "content": "Hi there!"}, - ] - self.assertEqual(count_message_tokens(messages), 17) - - def test_count_message_tokens_empty_input(self): - self.assertEqual(count_message_tokens([]), 3) - - def test_count_message_tokens_invalid_model(self): - messages = [ - {"role": "user", "content": "Hello"}, - {"role": "assistant", "content": "Hi there!"}, - ] - with self.assertRaises(KeyError): - count_message_tokens(messages, model="invalid_model") - - def test_count_message_tokens_gpt_4(self): - messages = [ - {"role": "user", "content": "Hello"}, - {"role": "assistant", "content": "Hi there!"}, - ] - self.assertEqual(count_message_tokens(messages, model="gpt-4-0314"), 15) - - def test_count_string_tokens(self): - string = "Hello, world!" - self.assertEqual( - count_string_tokens(string, model_name="gpt-3.5-turbo-0301"), 4 - ) - - def test_count_string_tokens_empty_input(self): - self.assertEqual(count_string_tokens("", model_name="gpt-3.5-turbo-0301"), 0) - - def test_count_message_tokens_invalid_model(self): - messages = [ - {"role": "user", "content": "Hello"}, - {"role": "assistant", "content": "Hi there!"}, - ] - with self.assertRaises(NotImplementedError): - count_message_tokens(messages, model="invalid_model") - - def test_count_string_tokens_gpt_4(self): - string = "Hello, world!" 
-        self.assertEqual(count_string_tokens(string, model_name="gpt-4-0314"), 4)
+def test_count_message_tokens():
+    messages = [
+        {"role": "user", "content": "Hello"},
+        {"role": "assistant", "content": "Hi there!"},
+    ]
+    assert count_message_tokens(messages) == 17

-if __name__ == "__main__":
-    unittest.main()
+def test_count_message_tokens_with_name():
+    messages = [
+        {"role": "user", "content": "Hello", "name": "John"},
+        {"role": "assistant", "content": "Hi there!"},
+    ]
+    assert count_message_tokens(messages) == 17
+
+
+def test_count_message_tokens_empty_input():
+    """Empty input should return 3 tokens"""
+    assert count_message_tokens([]) == 3
+
+
+def test_count_message_tokens_invalid_model():
+    """Invalid model should raise a NotImplementedError"""
+    messages = [
+        {"role": "user", "content": "Hello"},
+        {"role": "assistant", "content": "Hi there!"},
+    ]
+    with pytest.raises(NotImplementedError):
+        count_message_tokens(messages, model="invalid_model")
+
+
+def test_count_message_tokens_gpt_4():
+    messages = [
+        {"role": "user", "content": "Hello"},
+        {"role": "assistant", "content": "Hi there!"},
+    ]
+    assert count_message_tokens(messages, model="gpt-4-0314") == 15
+
+
+def test_count_string_tokens():
+    """Test that the string tokens are counted correctly."""
+
+    string = "Hello, world!"
+    assert count_string_tokens(string, model_name="gpt-3.5-turbo-0301") == 4
+
+
+def test_count_string_tokens_empty_input():
+    """Test that the string tokens are counted correctly."""
+
+    assert count_string_tokens("", model_name="gpt-3.5-turbo-0301") == 0
+
+
+def test_count_string_tokens_gpt_4():
+    """Test that the string tokens are counted correctly."""
+
+    string = "Hello, world!"
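+    # gpt-4 uses the cl100k_base encoding, under which "Hello, world!" is 4 tokens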
+ assert count_string_tokens(string, model_name="gpt-4-0314") == 4 diff --git a/tests/test_utils.py b/tests/test_utils.py new file mode 100644 index 00000000..5b4d181c --- /dev/null +++ b/tests/test_utils.py @@ -0,0 +1,160 @@ +import os +from unittest.mock import Mock, patch + +import pytest +import requests +from colorama import Fore +from git import Repo + +from autogpt.utils import ( + clean_input, + get_bulletin_from_web, + get_current_git_branch, + get_latest_bulletin, + readable_file_size, + validate_yaml_file, +) +from tests.utils import skip_in_ci + + +def test_validate_yaml_file_valid(): + with open("valid_test_file.yaml", "w") as f: + f.write("setting: value") + result, message = validate_yaml_file("valid_test_file.yaml") + os.remove("valid_test_file.yaml") + + assert result == True + assert "Successfully validated" in message + + +def test_validate_yaml_file_not_found(): + result, message = validate_yaml_file("non_existent_file.yaml") + + assert result == False + assert "wasn't found" in message + + +def test_validate_yaml_file_invalid(): + with open("invalid_test_file.yaml", "w") as f: + f.write( + "settings:\n first_setting: value\n second_setting: value\n nested_setting: value\n third_setting: value\nunindented_setting: value" + ) + result, message = validate_yaml_file("invalid_test_file.yaml") + os.remove("invalid_test_file.yaml") + print(result) + print(message) + assert result == False + assert "There was an issue while trying to read" in message + + +def test_readable_file_size(): + size_in_bytes = 1024 * 1024 * 3.5 # 3.5 MB + readable_size = readable_file_size(size_in_bytes) + + assert readable_size == "3.50 MB" + + +@patch("requests.get") +def test_get_bulletin_from_web_success(mock_get): + expected_content = "Test bulletin from web" + + mock_get.return_value.status_code = 200 + mock_get.return_value.text = expected_content + bulletin = get_bulletin_from_web() + + assert expected_content in bulletin + mock_get.assert_called_with( + "https://raw.githubusercontent.com/Significant-Gravitas/Auto-GPT/master/BULLETIN.md" + ) + + +@patch("requests.get") +def test_get_bulletin_from_web_failure(mock_get): + mock_get.return_value.status_code = 404 + bulletin = get_bulletin_from_web() + + assert bulletin == "" + + +@patch("requests.get") +def test_get_bulletin_from_web_exception(mock_get): + mock_get.side_effect = requests.exceptions.RequestException() + bulletin = get_bulletin_from_web() + + assert bulletin == "" + + +def test_get_latest_bulletin_no_file(): + if os.path.exists("CURRENT_BULLETIN.md"): + os.remove("CURRENT_BULLETIN.md") + + bulletin, is_new = get_latest_bulletin() + assert is_new + + +def test_get_latest_bulletin_with_file(): + expected_content = "Test bulletin" + with open("CURRENT_BULLETIN.md", "w", encoding="utf-8") as f: + f.write(expected_content) + + with patch("autogpt.utils.get_bulletin_from_web", return_value=""): + bulletin, is_new = get_latest_bulletin() + assert expected_content in bulletin + assert is_new == False + + os.remove("CURRENT_BULLETIN.md") + + +def test_get_latest_bulletin_with_new_bulletin(): + with open("CURRENT_BULLETIN.md", "w", encoding="utf-8") as f: + f.write("Old bulletin") + + expected_content = "New bulletin from web" + with patch("autogpt.utils.get_bulletin_from_web", return_value=expected_content): + bulletin, is_new = get_latest_bulletin() + assert "::NEW BULLETIN::" in bulletin + assert expected_content in bulletin + assert is_new + + os.remove("CURRENT_BULLETIN.md") + + +def 
test_get_latest_bulletin_new_bulletin_same_as_old_bulletin(): + expected_content = "Current bulletin" + with open("CURRENT_BULLETIN.md", "w", encoding="utf-8") as f: + f.write(expected_content) + + with patch("autogpt.utils.get_bulletin_from_web", return_value=expected_content): + bulletin, is_new = get_latest_bulletin() + assert expected_content in bulletin + assert is_new == False + + os.remove("CURRENT_BULLETIN.md") + + +@skip_in_ci +def test_get_current_git_branch(): + branch_name = get_current_git_branch() + + # Assuming that the branch name will be non-empty if the function is working correctly. + assert branch_name != "" + + +@patch("autogpt.utils.Repo") +def test_get_current_git_branch_success(mock_repo): + mock_repo.return_value.active_branch.name = "test-branch" + branch_name = get_current_git_branch() + + assert branch_name == "test-branch" + + +@patch("autogpt.utils.Repo") +def test_get_current_git_branch_failure(mock_repo): + mock_repo.side_effect = Exception() + branch_name = get_current_git_branch() + + assert branch_name == "" + + +if __name__ == "__main__": + pytest.main() diff --git a/tests/test_workspace.py b/tests/test_workspace.py new file mode 100644 index 00000000..fbe14d8c --- /dev/null +++ b/tests/test_workspace.py @@ -0,0 +1,99 @@ +import itertools +from pathlib import Path + +import pytest + +from autogpt.workspace import Workspace + +_WORKSPACE_ROOT = Path("home/users/monty/auto_gpt_workspace") + +_ACCESSIBLE_PATHS = [ + Path("."), + Path("test_file.txt"), + Path("test_folder"), + Path("test_folder/test_file.txt"), + Path("test_folder/.."), + Path("test_folder/../test_file.txt"), + Path("test_folder/../test_folder"), + Path("test_folder/../test_folder/test_file.txt"), +] + +_INACCESSIBLE_PATHS = ( + [ + # Takes us out of the workspace + Path(".."), + Path("../test_file.txt"), + Path("../not_auto_gpt_workspace"), + Path("../not_auto_gpt_workspace/test_file.txt"), + Path("test_folder/../.."), + Path("test_folder/../../test_file.txt"), + Path("test_folder/../../not_auto_gpt_workspace"), + Path("test_folder/../../not_auto_gpt_workspace/test_file.txt"), + ] + + [ + # Contains null bytes + Path(template.format(null_byte=null_byte)) + for template, null_byte in itertools.product( + [ + "{null_byte}", + "{null_byte}test_file.txt", + "test_folder/{null_byte}", + "test_folder/{null_byte}test_file.txt", + ], + Workspace.NULL_BYTES, + ) + ] + + [ + # Absolute paths + Path("/"), + Path("/test_file.txt"), + Path("/home"), + ] +) + + +@pytest.fixture() +def workspace_root(tmp_path): + return tmp_path / _WORKSPACE_ROOT + + +@pytest.fixture(params=_ACCESSIBLE_PATHS) +def accessible_path(request): + return request.param + + +@pytest.fixture(params=_INACCESSIBLE_PATHS) +def inaccessible_path(request): + return request.param + + +def test_sanitize_path_accessible(accessible_path, workspace_root): + full_path = Workspace._sanitize_path( + accessible_path, + root=workspace_root, + restrict_to_root=True, + ) + assert full_path.is_absolute() + assert full_path.is_relative_to(workspace_root) + + +def test_sanitize_path_inaccessible(inaccessible_path, workspace_root): + with pytest.raises(ValueError): + Workspace._sanitize_path( + inaccessible_path, + root=workspace_root, + restrict_to_root=True, + ) + + +def test_get_path_accessible(accessible_path, workspace_root): + workspace = Workspace(workspace_root, True) + full_path = workspace.get_path(accessible_path) + assert full_path.is_absolute() + assert full_path.is_relative_to(workspace_root) + + +def 
test_get_path_inaccessible(inaccessible_path, workspace_root): + workspace = Workspace(workspace_root, True) + with pytest.raises(ValueError): + workspace.get_path(inaccessible_path) diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unit/json_tests.py b/tests/unit/_test_json_parser.py similarity index 90% rename from tests/unit/json_tests.py rename to tests/unit/_test_json_parser.py index 25c38337..4ef46710 100644 --- a/tests/unit/json_tests.py +++ b/tests/unit/_test_json_parser.py @@ -5,13 +5,13 @@ from autogpt.json_utils.json_fix_llm import fix_and_parse_json class TestParseJson(unittest.TestCase): def test_valid_json(self): - # Test that a valid JSON string is parsed correctly + """Test that a valid JSON string is parsed correctly.""" json_str = '{"name": "John", "age": 30, "city": "New York"}' obj = fix_and_parse_json(json_str) self.assertEqual(obj, {"name": "John", "age": 30, "city": "New York"}) def test_invalid_json_minor(self): - # Test that an invalid JSON string can be fixed with gpt + """Test that an invalid JSON string can be fixed with gpt.""" json_str = '{"name": "John", "age": 30, "city": "New York",}' self.assertEqual( fix_and_parse_json(json_str, try_to_fix_with_gpt=False), @@ -19,7 +19,7 @@ class TestParseJson(unittest.TestCase): ) def test_invalid_json_major_with_gpt(self): - # Test that an invalid JSON string raises an error when try_to_fix_with_gpt is False + """Test that an invalid JSON string raises an error when try_to_fix_with_gpt is False.""" json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END' self.assertEqual( fix_and_parse_json(json_str, try_to_fix_with_gpt=True), @@ -27,14 +27,15 @@ class TestParseJson(unittest.TestCase): ) def test_invalid_json_major_without_gpt(self): - # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False + """Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False.""" json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END' # Assert that this raises an exception: with self.assertRaises(Exception): fix_and_parse_json(json_str, try_to_fix_with_gpt=False) def test_invalid_json_leading_sentence_with_gpt(self): - # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False + """Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False.""" + json_str = """I suggest we start by browsing the repository to find any issues that we can fix. { @@ -72,7 +73,7 @@ class TestParseJson(unittest.TestCase): ) def test_invalid_json_leading_sentence_with_gpt(self): - # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False + """Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False.""" json_str = """I will first need to browse the repository (https://github.com/Torantulino/Auto-GPT) and identify any potential bugs that need fixing. I will use the "browse_website" command for this. 
{ diff --git a/tests/unit/data/test_plugins/Auto-GPT-Plugin-Test-master.zip b/tests/unit/data/test_plugins/Auto-GPT-Plugin-Test-master.zip new file mode 100644 index 00000000..00bc1f4f Binary files /dev/null and b/tests/unit/data/test_plugins/Auto-GPT-Plugin-Test-master.zip differ diff --git a/tests/unit/models/test_base_open_api_plugin.py b/tests/unit/models/test_base_open_api_plugin.py new file mode 100644 index 00000000..456c74c7 --- /dev/null +++ b/tests/unit/models/test_base_open_api_plugin.py @@ -0,0 +1,87 @@ +from typing import Any, Dict, List, Optional, Tuple + +import pytest + +from autogpt.models.base_open_ai_plugin import ( + BaseOpenAIPlugin, + Message, + PromptGenerator, +) + + +class DummyPlugin(BaseOpenAIPlugin): + """A dummy plugin for testing purposes.""" + + pass + + +@pytest.fixture +def dummy_plugin(): + """A dummy plugin for testing purposes.""" + manifests_specs_clients = { + "manifest": { + "name_for_model": "Dummy", + "schema_version": "1.0", + "description_for_model": "A dummy plugin for testing purposes", + }, + "client": None, + "openapi_spec": None, + } + return DummyPlugin(manifests_specs_clients) + + +def test_dummy_plugin_inheritance(dummy_plugin): + """Test that the DummyPlugin class inherits from the BaseOpenAIPlugin class.""" + assert isinstance(dummy_plugin, BaseOpenAIPlugin) + + +def test_dummy_plugin_name(dummy_plugin): + """Test that the DummyPlugin class has the correct name.""" + assert dummy_plugin._name == "Dummy" + + +def test_dummy_plugin_version(dummy_plugin): + """Test that the DummyPlugin class has the correct version.""" + assert dummy_plugin._version == "1.0" + + +def test_dummy_plugin_description(dummy_plugin): + """Test that the DummyPlugin class has the correct description.""" + assert dummy_plugin._description == "A dummy plugin for testing purposes" + + +def test_dummy_plugin_default_methods(dummy_plugin): + """Test that the DummyPlugin class has the correct default methods.""" + assert not dummy_plugin.can_handle_on_response() + assert not dummy_plugin.can_handle_post_prompt() + assert not dummy_plugin.can_handle_on_planning() + assert not dummy_plugin.can_handle_post_planning() + assert not dummy_plugin.can_handle_pre_instruction() + assert not dummy_plugin.can_handle_on_instruction() + assert not dummy_plugin.can_handle_post_instruction() + assert not dummy_plugin.can_handle_pre_command() + assert not dummy_plugin.can_handle_post_command() + assert not dummy_plugin.can_handle_chat_completion(None, None, None, None) + + assert dummy_plugin.on_response("hello") == "hello" + assert dummy_plugin.post_prompt(None) is None + assert dummy_plugin.on_planning(None, None) is None + assert dummy_plugin.post_planning("world") == "world" + pre_instruction = dummy_plugin.pre_instruction( + [{"role": "system", "content": "Beep, bop, boop"}] + ) + assert isinstance(pre_instruction, list) + assert len(pre_instruction) == 1 + assert pre_instruction[0]["role"] == "system" + assert pre_instruction[0]["content"] == "Beep, bop, boop" + assert dummy_plugin.on_instruction(None) is None + assert dummy_plugin.post_instruction("I'm a robot") == "I'm a robot" + pre_command = dummy_plugin.pre_command("evolve", {"continuously": True}) + assert isinstance(pre_command, tuple) + assert len(pre_command) == 2 + assert pre_command[0] == "evolve" + assert pre_command[1]["continuously"] == True + post_command = dummy_plugin.post_command("evolve", "upgraded successfully!") + assert isinstance(post_command, str) + assert post_command == "upgraded successfully!" 
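# The default-method assertions above and below can be pictured as the
# following abridged, hypothetical rendering of BaseOpenAIPlugin's defaults
# (the real class lives in autogpt.models.base_open_ai_plugin):
class PassThroughDefaults:
    def can_handle_on_response(self) -> bool:
        return False  # every can_handle_* hook opts out by default

    def on_response(self, response: str) -> str:
        return response  # content hooks echo their input unchanged

    def pre_command(self, command_name: str, arguments: dict) -> tuple:
        return command_name, arguments  # commands pass through untouched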
+ assert dummy_plugin.handle_chat_completion(None, None, None, None) is None diff --git a/tests/unit/test_browse_scrape_links.py b/tests/unit/test_browse_scrape_links.py index 0a3340e7..3b7442eb 100644 --- a/tests/unit/test_browse_scrape_links.py +++ b/tests/unit/test_browse_scrape_links.py @@ -38,8 +38,11 @@ requests and parse HTML content, respectively. class TestScrapeLinks: - # Tests that the function returns a list of formatted hyperlinks when - # provided with a valid url that returns a webpage with hyperlinks. + """ + Tests that the function returns a list of formatted hyperlinks when + provided with a valid url that returns a webpage with hyperlinks. + """ + def test_valid_url_with_hyperlinks(self): url = "https://www.google.com" result = scrape_links(url) @@ -47,8 +50,8 @@ class TestScrapeLinks: assert isinstance(result, list) assert isinstance(result[0], str) - # Tests that the function returns correctly formatted hyperlinks when given a valid url. def test_valid_url(self, mocker): + """Test that the function returns correctly formatted hyperlinks when given a valid url.""" # Mock the requests.get() function to return a response with sample HTML containing hyperlinks mock_response = mocker.Mock() mock_response.status_code = 200 @@ -63,8 +66,8 @@ class TestScrapeLinks: # Assert that the function returns correctly formatted hyperlinks assert result == ["Google (https://www.google.com)"] - # Tests that the function returns "error" when given an invalid url. def test_invalid_url(self, mocker): + """Test that the function returns "error" when given an invalid url.""" # Mock the requests.get() function to return an HTTP error response mock_response = mocker.Mock() mock_response.status_code = 404 @@ -76,8 +79,8 @@ class TestScrapeLinks: # Assert that the function returns "error" assert "Error:" in result - # Tests that the function returns an empty list when the html contains no hyperlinks. def test_no_hyperlinks(self, mocker): + """Test that the function returns an empty list when the html contains no hyperlinks.""" # Mock the requests.get() function to return a response with sample HTML containing no hyperlinks mock_response = mocker.Mock() mock_response.status_code = 200 @@ -90,10 +93,8 @@ class TestScrapeLinks: # Assert that the function returns an empty list assert result == [] - # Tests that scrape_links() correctly extracts and formats hyperlinks from - # a sample HTML containing a few hyperlinks. def test_scrape_links_with_few_hyperlinks(self, mocker): - # Mock the requests.get() function to return a response with a sample HTML containing hyperlinks + """Test that scrape_links() correctly extracts and formats hyperlinks from a sample HTML containing a few hyperlinks.""" mock_response = mocker.Mock() mock_response.status_code = 200 mock_response.text = """ diff --git a/tests/unit/test_browse_scrape_text.py b/tests/unit/test_browse_scrape_text.py index fea5ebfc..cda45e11 100644 --- a/tests/unit/test_browse_scrape_text.py +++ b/tests/unit/test_browse_scrape_text.py @@ -1,5 +1,6 @@ # Generated by CodiumAI +import pytest import requests from autogpt.commands.web_requests import scrape_text @@ -9,16 +10,20 @@ Code Analysis Objective: The objective of the "scrape_text" function is to scrape the text content from -a given URL and return it as a string, after removing any unwanted HTML tags and scripts. +a given URL and return it as a string, after removing any unwanted HTML tags and + scripts. Inputs: - url: a string representing the URL of the webpage to be scraped. Flow: -1. 
Send a GET request to the given URL using the requests library and the user agent header from the config file. +1. Send a GET request to the given URL using the requests library and the user agent + header from the config file. 2. Check if the response contains an HTTP error. If it does, return an error message. -3. Use BeautifulSoup to parse the HTML content of the response and extract all script and style tags. -4. Get the text content of the remaining HTML using the get_text() method of BeautifulSoup. +3. Use BeautifulSoup to parse the HTML content of the response and extract all script + and style tags. +4. Get the text content of the remaining HTML using the get_text() method of + BeautifulSoup. 5. Split the text into lines and then into chunks, removing any extra whitespace. 6. Join the chunks into a single string with newline characters between them. 7. Return the cleaned text. @@ -27,40 +32,53 @@ Outputs: - A string representing the cleaned text content of the webpage. Additional aspects: -- The function uses the requests library and BeautifulSoup to handle the HTTP request and HTML parsing, respectively. -- The function removes script and style tags from the HTML to avoid including unwanted content in the text output. -- The function uses a generator expression to split the text into lines and chunks, which can improve performance for large amounts of text. +- The function uses the requests library and BeautifulSoup to handle the HTTP request + and HTML parsing, respectively. +- The function removes script and style tags from the HTML to avoid including unwanted + content in the text output. +- The function uses a generator expression to split the text into lines and chunks, + which can improve performance for large amounts of text. """ class TestScrapeText: - # Tests that scrape_text() returns the expected text when given a valid URL. def test_scrape_text_with_valid_url(self, mocker): + """Tests that scrape_text() returns the expected text when given a valid URL.""" # Mock the requests.get() method to return a response with expected text expected_text = "This is some sample text" mock_response = mocker.Mock() mock_response.status_code = 200 - mock_response.text = f"
<html><body><div><p style='color: blue;'>{expected_text}</p></div></body></html>
" + mock_response.text = ( + "
<html><body><div><p style='color: blue;'>
" + f"{expected_text}
</p></div></body></html>
" + ) mocker.patch("requests.Session.get", return_value=mock_response) - # Call the function with a valid URL and assert that it returns the expected text + # Call the function with a valid URL and assert that it returns the + # expected text url = "http://www.example.com" assert scrape_text(url) == expected_text - # Tests that the function returns an error message when an invalid or unreachable url is provided. - def test_invalid_url(self, mocker): + def test_invalid_url(self): + """Tests that an error is raised when an invalid url is provided.""" + url = "invalidurl.com" + pytest.raises(ValueError, scrape_text, url) + + def test_unreachable_url(self, mocker): + """Test that scrape_text returns an error message when an invalid or unreachable url is provided.""" # Mock the requests.get() method to raise an exception mocker.patch( "requests.Session.get", side_effect=requests.exceptions.RequestException ) - # Call the function with an invalid URL and assert that it returns an error message - url = "http://www.invalidurl.com" + # Call the function with an invalid URL and assert that it returns an error + # message + url = "http://thiswebsitedoesnotexist.net/" error_message = scrape_text(url) assert "Error:" in error_message - # Tests that the function returns an empty string when the html page contains no text to be scraped. def test_no_text(self, mocker): + """Test that scrape_text returns an empty string when the html page contains no text to be scraped.""" # Mock the requests.get() method to return a response with no text mock_response = mocker.Mock() mock_response.status_code = 200 @@ -71,8 +89,8 @@ class TestScrapeText: url = "http://www.example.com" assert scrape_text(url) == "" - # Tests that the function returns an error message when the response status code is an http error (>=400). def test_http_error(self, mocker): + """Test that scrape_text returns an error message when the response status code is an http error (>=400).""" # Mock the requests.get() method to return a response with a 404 status code mocker.patch("requests.Session.get", return_value=mocker.Mock(status_code=404)) @@ -82,8 +100,8 @@ class TestScrapeText: # Check that the function returns an error message assert result == "Error: HTTP 404 error" - # Tests that scrape_text() properly handles HTML tags. def test_scrape_text_with_html_tags(self, mocker): + """Test that scrape_text() properly handles HTML tags.""" # Create a mock response object with HTML containing tags html = "
<html><body><p>This is <b>bold</b> text.</p></body></html>
" mock_response = mocker.Mock() diff --git a/tests/unit/test_chat.py b/tests/unit/test_chat.py index 774f4103..77ff94d7 100644 --- a/tests/unit/test_chat.py +++ b/tests/unit/test_chat.py @@ -1,86 +1,78 @@ # Generated by CodiumAI import time -import unittest from unittest.mock import patch -from autogpt.chat import create_chat_message, generate_context +from autogpt.llm import create_chat_message, generate_context -class TestChat(unittest.TestCase): - # Tests that the function returns a dictionary with the correct keys and values when valid strings are provided for role and content. - def test_happy_path_role_content(self): - result = create_chat_message("system", "Hello, world!") - self.assertEqual(result, {"role": "system", "content": "Hello, world!"}) +def test_happy_path_role_content(): + """Test that the function returns a dictionary with the correct keys and values when valid strings are provided for role and content.""" + result = create_chat_message("system", "Hello, world!") + assert result == {"role": "system", "content": "Hello, world!"} - # Tests that the function returns a dictionary with the correct keys and values when empty strings are provided for role and content. - def test_empty_role_content(self): - result = create_chat_message("", "") - self.assertEqual(result, {"role": "", "content": ""}) - # Tests the behavior of the generate_context function when all input parameters are empty. - @patch("time.strftime") - def test_generate_context_empty_inputs(self, mock_strftime): - # Mock the time.strftime function to return a fixed value - mock_strftime.return_value = "Sat Apr 15 00:00:00 2023" - # Arrange - prompt = "" - relevant_memory = "" - full_message_history = [] - model = "gpt-3.5-turbo-0301" +def test_empty_role_content(): + """Test that the function returns a dictionary with the correct keys and values when empty strings are provided for role and content.""" + result = create_chat_message("", "") + assert result == {"role": "", "content": ""} - # Act - result = generate_context(prompt, relevant_memory, full_message_history, model) - # Assert - expected_result = ( - -1, - 47, - 3, - [ - {"role": "system", "content": ""}, - { - "role": "system", - "content": f"The current time and date is {time.strftime('%c')}", - }, - { - "role": "system", - "content": f"This reminds you of these events from your past:\n\n\n", - }, - ], - ) - self.assertEqual(result, expected_result) +def test_generate_context_empty_inputs(mocker): + """Test the behavior of the generate_context function when all input parameters are empty.""" + # Mock the time.strftime function to return a fixed value + mocker.patch("time.strftime", return_value="Sat Apr 15 00:00:00 2023") + # Arrange + prompt = "" + relevant_memory = "" + full_message_history = [] + model = "gpt-3.5-turbo-0301" - # Tests that the function successfully generates a current_context given valid inputs. - def test_generate_context_valid_inputs(self): - # Given - prompt = "What is your favorite color?" - relevant_memory = "You once painted your room blue." - full_message_history = [ - create_chat_message("user", "Hi there!"), - create_chat_message("assistant", "Hello! How can I assist you today?"), - create_chat_message("user", "Can you tell me a joke?"), - create_chat_message( - "assistant", - "Why did the tomato turn red? 
Because it saw the salad dressing!", - ), - create_chat_message("user", "Haha, that's funny."), - ] - model = "gpt-3.5-turbo-0301" + # Act + result = generate_context(prompt, relevant_memory, full_message_history, model) - # When - result = generate_context(prompt, relevant_memory, full_message_history, model) + # Assert + expected_result = ( + -1, + 32, + 2, + [ + {"role": "system", "content": ""}, + { + "role": "system", + "content": f"The current time and date is {time.strftime('%c')}", + }, + ], + ) + assert result == expected_result - # Then - self.assertIsInstance(result[0], int) - self.assertIsInstance(result[1], int) - self.assertIsInstance(result[2], int) - self.assertIsInstance(result[3], list) - self.assertGreaterEqual(result[0], 0) - self.assertGreaterEqual(result[1], 0) - self.assertGreaterEqual(result[2], 0) - self.assertGreaterEqual( - len(result[3]), 3 - ) # current_context should have at least 3 messages - self.assertLessEqual( - result[1], 2048 - ) # token limit for GPT-3.5-turbo-0301 is 2048 tokens + +def test_generate_context_valid_inputs(): + """Test that the function successfully generates a current_context given valid inputs.""" + # Given + prompt = "What is your favorite color?" + relevant_memory = "You once painted your room blue." + full_message_history = [ + create_chat_message("user", "Hi there!"), + create_chat_message("assistant", "Hello! How can I assist you today?"), + create_chat_message("user", "Can you tell me a joke?"), + create_chat_message( + "assistant", + "Why did the tomato turn red? Because it saw the salad dressing!", + ), + create_chat_message("user", "Haha, that's funny."), + ] + model = "gpt-3.5-turbo-0301" + + # When + result = generate_context(prompt, relevant_memory, full_message_history, model) + + # Then + assert isinstance(result[0], int) + assert isinstance(result[1], int) + assert isinstance(result[2], int) + assert isinstance(result[3], list) + assert result[0] >= 0 + assert result[2] >= 0 + assert result[1] >= 0 + assert len(result[3]) >= 2 # current_context should have at least 2 messages + assert result[1] <= 2048 # token limit for GPT-3.5-turbo-0301 is 2048 tokens diff --git a/tests/unit/test_commands.py b/tests/unit/test_commands.py index ecbac9b7..e3b874fb 100644 --- a/tests/unit/test_commands.py +++ b/tests/unit/test_commands.py @@ -3,20 +3,25 @@ from unittest.mock import MagicMock, patch import pytest -import autogpt.agent.agent_manager as agent_manager -from autogpt.app import execute_command, list_agents, start_agent +from autogpt.app import list_agents, start_agent +from tests.utils import requires_api_key +@pytest.mark.vcr @pytest.mark.integration_test +@requires_api_key("OPENAI_API_KEY") def test_make_agent() -> None: - """Test the make_agent command""" + """Test that an agent can be created""" + # Use the mock agent manager to avoid creating a real agent with patch("openai.ChatCompletion.create") as mock: - obj = MagicMock() - obj.response.choices[0].messages[0].content = "Test message" - mock.return_value = obj - start_agent("Test Agent", "chat", "Hello, how are you?", "gpt2") + response = MagicMock() + response.choices[0].messages[0].content = "Test message" + response.usage.prompt_tokens = 1 + response.usage.completion_tokens = 1 + mock.return_value = response + start_agent("Test Agent", "chat", "Hello, how are you?", "gpt-3.5-turbo") agents = list_agents() assert "List of agents:\n0: chat" == agents - start_agent("Test Agent 2", "write", "Hello, how are you?", "gpt2") + start_agent("Test Agent 2", "write", "Hello, how 
are you?", "gpt-3.5-turbo") agents = list_agents() assert "List of agents:\n0: chat\n1: write" == agents diff --git a/tests/unit/test_file_operations.py b/tests/unit/test_file_operations.py new file mode 100644 index 00000000..fb8300d4 --- /dev/null +++ b/tests/unit/test_file_operations.py @@ -0,0 +1,306 @@ +""" +This set of unit tests is designed to test the file operations that autoGPT has access to. +""" + +import hashlib +import os +import re +from io import TextIOWrapper +from pathlib import Path + +import pytest +from pytest_mock import MockerFixture + +import autogpt.commands.file_operations as file_ops +from autogpt.config import Config +from autogpt.utils import readable_file_size +from autogpt.workspace import Workspace + + +@pytest.fixture() +def file_content(): + return "This is a test file.\n" + + +@pytest.fixture() +def test_file_path(config, workspace: Workspace): + return workspace.get_path("test_file.txt") + + +@pytest.fixture() +def test_file(test_file_path: Path): + file = open(test_file_path, "w") + yield file + if not file.closed: + file.close() + + +@pytest.fixture() +def test_file_with_content_path(test_file: TextIOWrapper, file_content): + test_file.write(file_content) + test_file.close() + file_ops.log_operation( + "write", test_file.name, file_ops.text_checksum(file_content) + ) + return Path(test_file.name) + + +@pytest.fixture() +def test_directory(config, workspace: Workspace): + return workspace.get_path("test_directory") + + +@pytest.fixture() +def test_nested_file(config, workspace: Workspace): + return workspace.get_path("nested/test_file.txt") + + +def test_file_operations_log(test_file: TextIOWrapper): + log_file_content = ( + "File Operation Logger\n" + "write: path/to/file1.txt #checksum1\n" + "write: path/to/file2.txt #checksum2\n" + "write: path/to/file3.txt #checksum3\n" + "append: path/to/file2.txt #checksum4\n" + "delete: path/to/file3.txt\n" + ) + test_file.write(log_file_content) + test_file.close() + + expected = [ + ("write", "path/to/file1.txt", "checksum1"), + ("write", "path/to/file2.txt", "checksum2"), + ("write", "path/to/file3.txt", "checksum3"), + ("append", "path/to/file2.txt", "checksum4"), + ("delete", "path/to/file3.txt", None), + ] + assert list(file_ops.operations_from_log(test_file.name)) == expected + + +def test_file_operations_state(test_file: TextIOWrapper): + # Prepare a fake log file + log_file_content = ( + "File Operation Logger\n" + "write: path/to/file1.txt #checksum1\n" + "write: path/to/file2.txt #checksum2\n" + "write: path/to/file3.txt #checksum3\n" + "append: path/to/file2.txt #checksum4\n" + "delete: path/to/file3.txt\n" + ) + test_file.write(log_file_content) + test_file.close() + + # Call the function and check the returned dictionary + expected_state = { + "path/to/file1.txt": "checksum1", + "path/to/file2.txt": "checksum4", + } + assert file_ops.file_operations_state(test_file.name) == expected_state + + +def test_is_duplicate_operation(config, mocker: MockerFixture): + # Prepare a fake state dictionary for the function to use + state = { + "path/to/file1.txt": "checksum1", + "path/to/file2.txt": "checksum2", + } + mocker.patch.object(file_ops, "file_operations_state", lambda _: state) + + # Test cases with write operations + assert ( + file_ops.is_duplicate_operation("write", "path/to/file1.txt", "checksum1") + is True + ) + assert ( + file_ops.is_duplicate_operation("write", "path/to/file1.txt", "checksum2") + is False + ) + assert ( + file_ops.is_duplicate_operation("write", "path/to/file3.txt", "checksum3") 
+ is False + ) + # Test cases with append operations + assert ( + file_ops.is_duplicate_operation("append", "path/to/file1.txt", "checksum1") + is False + ) + # Test cases with delete operations + assert file_ops.is_duplicate_operation("delete", "path/to/file1.txt") is False + assert file_ops.is_duplicate_operation("delete", "path/to/file3.txt") is True + + +# Test logging a file operation +def test_log_operation(config: Config): + file_ops.log_operation("log_test", "path/to/test") + with open(config.file_logger_path, "r", encoding="utf-8") as f: + content = f.read() + assert f"log_test: path/to/test\n" in content + + +def test_text_checksum(file_content: str): + checksum = file_ops.text_checksum(file_content) + different_checksum = file_ops.text_checksum("other content") + assert re.match(r"^[a-fA-F0-9]+$", checksum) is not None + assert checksum != different_checksum + + +def test_log_operation_with_checksum(config: Config): + file_ops.log_operation("log_test", "path/to/test", checksum="ABCDEF") + with open(config.file_logger_path, "r", encoding="utf-8") as f: + content = f.read() + assert f"log_test: path/to/test #ABCDEF\n" in content + + +# Test splitting a file into chunks +def test_split_file(): + content = "abcdefghij" + chunks = list(file_ops.split_file(content, max_length=4, overlap=1)) + expected = ["abcd", "defg", "ghij"] + assert chunks == expected + + +def test_read_file(test_file_with_content_path: Path, file_content): + content = file_ops.read_file(test_file_with_content_path) + assert content == file_content + + +def test_write_to_file(test_file_path: Path): + new_content = "This is new content.\n" + file_ops.write_to_file(str(test_file_path), new_content) + with open(test_file_path, "r", encoding="utf-8") as f: + content = f.read() + assert content == new_content + + +def test_write_file_logs_checksum(config: Config, test_file_path: Path): + new_content = "This is new content.\n" + new_checksum = file_ops.text_checksum(new_content) + file_ops.write_to_file(str(test_file_path), new_content) + with open(config.file_logger_path, "r", encoding="utf-8") as f: + log_entry = f.read() + assert log_entry == f"write: {test_file_path} #{new_checksum}\n" + + +def test_write_file_fails_if_content_exists(test_file_path: Path): + new_content = "This is new content.\n" + file_ops.log_operation( + "write", + str(test_file_path), + checksum=file_ops.text_checksum(new_content), + ) + result = file_ops.write_to_file(str(test_file_path), new_content) + assert result == "Error: File has already been updated." + + +def test_write_file_succeeds_if_content_different(test_file_with_content_path: Path): + new_content = "This is different content.\n" + result = file_ops.write_to_file(str(test_file_with_content_path), new_content) + assert result == "File written to successfully." 
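# A self-contained sketch of the checksum guard these write tests pin down.
# The `state` parameter stands in for what file_operations_state() derives
# from the log; the real is_duplicate_operation reads that state itself, so
# both _sketch helpers here are hypothetical.
import hashlib
from typing import Optional

def text_checksum_sketch(text: str) -> str:
    # md5 over the content, matching the digest used in the append tests below.
    return hashlib.md5(text.encode("utf-8")).hexdigest()

def is_duplicate_operation_sketch(
    operation: str, filename: str, state: dict, checksum: Optional[str] = None
) -> bool:
    if operation == "delete" and filename not in state:
        return True  # deleting a file that was never written is redundant
    if operation == "write" and state.get(filename) == checksum:
        return True  # re-writing identical content is redundant
    return False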
+ + +def test_append_to_file(test_nested_file: Path): + append_text = "This is appended text.\n" + file_ops.write_to_file(test_nested_file, append_text) + + file_ops.append_to_file(test_nested_file, append_text) + + with open(test_nested_file, "r") as f: + content_after = f.read() + + assert content_after == append_text + append_text + + +def test_append_to_file_uses_checksum_from_appended_file( + config: Config, test_file_path: Path +): + append_text = "This is appended text.\n" + file_ops.append_to_file(test_file_path, append_text) + file_ops.append_to_file(test_file_path, append_text) + with open(config.file_logger_path, "r", encoding="utf-8") as f: + log_contents = f.read() + + digest = hashlib.md5() + digest.update(append_text.encode("utf-8")) + checksum1 = digest.hexdigest() + digest.update(append_text.encode("utf-8")) + checksum2 = digest.hexdigest() + assert log_contents == ( + f"append: {test_file_path} #{checksum1}\n" + f"append: {test_file_path} #{checksum2}\n" + ) + + +def test_delete_file(test_file_with_content_path: Path): + result = file_ops.delete_file(str(test_file_with_content_path)) + assert result == "File deleted successfully." + assert os.path.exists(test_file_with_content_path) is False + + +def test_delete_missing_file(config): + filename = "path/to/file/which/does/not/exist" + # confuse the log + file_ops.log_operation("write", filename, checksum="fake") + try: + os.remove(filename) + except FileNotFoundError as err: + assert str(err) in file_ops.delete_file(filename) + return + assert False, f"Failed to test delete_file; {filename} not expected to exist" + + +def test_list_files(workspace: Workspace, test_directory: Path): + # Case 1: Create files A and B, search for A, and ensure we don't return A and B + file_a = workspace.get_path("file_a.txt") + file_b = workspace.get_path("file_b.txt") + + with open(file_a, "w") as f: + f.write("This is file A.") + + with open(file_b, "w") as f: + f.write("This is file B.") + + # Create a subdirectory and place a copy of file_a in it + if not os.path.exists(test_directory): + os.makedirs(test_directory) + + with open(os.path.join(test_directory, file_a.name), "w") as f: + f.write("This is file A in the subdirectory.") + + files = file_ops.list_files(str(workspace.root)) + assert file_a.name in files + assert file_b.name in files + assert os.path.join(Path(test_directory).name, file_a.name) in files + + # Clean up + os.remove(file_a) + os.remove(file_b) + os.remove(os.path.join(test_directory, file_a.name)) + os.rmdir(test_directory) + + # Case 2: Search for a file that does not exist and make sure we don't throw + non_existent_file = "non_existent_file.txt" + files = file_ops.list_files("") + assert non_existent_file not in files + + +def test_download_file(config, workspace: Workspace): + url = "https://github.com/Significant-Gravitas/Auto-GPT/archive/refs/tags/v0.2.2.tar.gz" + local_name = workspace.get_path("auto-gpt.tar.gz") + size = 365023 + readable_size = readable_file_size(size) + assert ( + file_ops.download_file(url, local_name) + == f'Successfully downloaded and locally stored file: "{local_name}"! 
(Size: {readable_size})' + ) + assert os.path.isfile(local_name) is True + assert os.path.getsize(local_name) == size + + url = "https://github.com/Significant-Gravitas/Auto-GPT/archive/refs/tags/v0.0.0.tar.gz" + assert "Got an HTTP Error whilst trying to download file" in file_ops.download_file( + url, local_name + ) + + url = "https://thiswebsiteiswrong.hmm/v0.0.0.tar.gz" + assert "Failed to establish a new connection:" in file_ops.download_file( + url, local_name + ) diff --git a/tests/unit/test_get_self_feedback.py b/tests/unit/test_get_self_feedback.py new file mode 100644 index 00000000..09707270 --- /dev/null +++ b/tests/unit/test_get_self_feedback.py @@ -0,0 +1,40 @@ +from autogpt.agent.agent import Agent +from autogpt.config import AIConfig +from autogpt.llm import create_chat_completion + + +def test_get_self_feedback(mocker): + # Define a sample thoughts dictionary + thoughts = { + "reasoning": "Sample reasoning.", + "plan": "Sample plan.", + "thoughts": "Sample thoughts.", + "criticism": "Sample criticism.", + } + + # Define a fake response for the create_chat_completion function + fake_response = ( + "Y The provided information is suitable for achieving the role's objectives." + ) + + # Mock the create_chat_completion function + mock_create_chat_completion = mocker.patch( + "autogpt.agent.agent.create_chat_completion", wraps=create_chat_completion + ) + mock_create_chat_completion.return_value = fake_response + + # Create a MagicMock object to replace the Agent instance + agent_mock = mocker.MagicMock(spec=Agent) + + # Mock the config attribute of the Agent instance + agent_mock.config = AIConfig() + + # Call the get_self_feedback method + feedback = Agent.get_self_feedback( + agent_mock, + thoughts, + "gpt-3.5-turbo", + ) + + # Check if the response is correct + assert feedback == fake_response diff --git a/tests/test_json_parser.py b/tests/unit/test_json_parser.py similarity index 54% rename from tests/test_json_parser.py rename to tests/unit/test_json_parser.py index 41c90a6f..69cddca6 100644 --- a/tests/test_json_parser.py +++ b/tests/unit/test_json_parser.py @@ -1,37 +1,37 @@ -import unittest +from unittest import TestCase -import tests.context from autogpt.json_utils.json_fix_llm import fix_and_parse_json +from tests.utils import skip_in_ci -class TestParseJson(unittest.TestCase): +class TestParseJson(TestCase): def test_valid_json(self): - # Test that a valid JSON string is parsed correctly + """Test that a valid JSON string is parsed correctly.""" json_str = '{"name": "John", "age": 30, "city": "New York"}' obj = fix_and_parse_json(json_str) self.assertEqual(obj, {"name": "John", "age": 30, "city": "New York"}) def test_invalid_json_minor(self): - # Test that an invalid JSON string can be fixed with gpt + """Test that an invalid JSON string can not be fixed without gpt""" json_str = '{"name": "John", "age": 30, "city": "New York",}' with self.assertRaises(Exception): fix_and_parse_json(json_str, try_to_fix_with_gpt=False) def test_invalid_json_major_with_gpt(self): - # Test that an invalid JSON string raises an error when try_to_fix_with_gpt is False + """Test that an invalid JSON string raises an error when try_to_fix_with_gpt is False""" json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END' with self.assertRaises(Exception): fix_and_parse_json(json_str, try_to_fix_with_gpt=False) def test_invalid_json_major_without_gpt(self): - # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False + """Test that a REALLY invalid 
JSON string raises an error when try_to_fix_with_gpt is False""" json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END' # Assert that this raises an exception: with self.assertRaises(Exception): fix_and_parse_json(json_str, try_to_fix_with_gpt=False) def test_invalid_json_leading_sentence_with_gpt(self): - # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False + """Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False""" json_str = """I suggest we start by browsing the repository to find any issues that we can fix. { @@ -63,49 +63,10 @@ class TestParseJson(unittest.TestCase): "speak": "I will start browsing the repository to find any issues we can fix.", }, } - # Assert that this raises an exception: - self.assertEqual( - fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj - ) - def test_invalid_json_leading_sentence_with_gpt(self): - # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False - json_str = """I will first need to browse the repository (https://github.com/Torantulino/Auto-GPT) and identify any potential bugs that need fixing. I will use the "browse_website" command for this. + # # Assert that this can be fixed with GPT + # self.assertEqual(fix_and_parse_json(json_str), good_obj) -{ - "command": { - "name": "browse_website", - "args":{ - "url": "https://github.com/Torantulino/Auto-GPT" - } - }, - "thoughts": - { - "text": "Browsing the repository to identify potential bugs", - "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.", - "plan": "- Analyze the repository for potential bugs and areas of improvement", - "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.", - "speak": "I am browsing the repository to identify potential bugs." - } -}""" - good_obj = { - "command": { - "name": "browse_website", - "args": {"url": "https://github.com/Torantulino/Auto-GPT"}, - }, - "thoughts": { - "text": "Browsing the repository to identify potential bugs", - "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.", - "plan": "- Analyze the repository for potential bugs and areas of improvement", - "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.", - "speak": "I am browsing the repository to identify potential bugs.", - }, - } - # Assert that this raises an exception: - self.assertEqual( - fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj - ) - - -if __name__ == "__main__": - unittest.main() + # Assert that trying to fix this without GPT raises an exception + with self.assertRaises(Exception): + fix_and_parse_json(json_str, try_to_fix_with_gpt=False) diff --git a/tests/unit/test_json_utils_llm.py b/tests/unit/test_json_utils_llm.py new file mode 100644 index 00000000..b8c9518d --- /dev/null +++ b/tests/unit/test_json_utils_llm.py @@ -0,0 +1,117 @@ +# Generated by CodiumAI +import pytest +from loguru import logger + +from autogpt.json_utils.json_fix_llm import ( + fix_and_parse_json, + fix_json_using_multiple_techniques, +) +from tests.utils import requires_api_key + +""" +Code Analysis + +Objective: +- The objective of the function is to fix a given JSON string to make it parseable and fully compliant with two techniques. 
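(For intuition, grounded in the tests below: a compliant string such as
'{"text": "Hello world", "confidence": 0.9}' parses as-is, while a string like
"This is not a JSON string" survives neither technique and, with the AI fix
mocked out, collapses to an empty dict.)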
+ +Inputs: +- The function takes in a string called 'assistant_reply', which is the JSON string to be fixed. + +Flow: +- The function first calls the 'fix_and_parse_json' function to parse and print the Assistant response. +- If the parsed JSON is an empty dictionary, the function calls the 'attempt_to_fix_json_by_finding_outermost_brackets' function to fix the JSON string. +- If the parsed JSON is not an empty dictionary, the function returns the parsed JSON. +- If the parsed JSON is an empty dictionary and cannot be fixed, the function logs an error and returns an empty dictionary. + +Outputs: +- The main output of the function is a dictionary containing the fixed JSON string. + +Additional aspects: +- The function uses two techniques to fix the JSON string: parsing and finding outermost brackets. +- The function logs an error if the JSON string cannot be fixed and returns an empty dictionary. +- The function uses the 'CFG' object to determine whether to speak the error message or not. +""" + + +class TestFixJsonUsingMultipleTechniques: + # Tests that the function successfully fixes and parses a JSON string that is already compliant with both techniques. + def test_fix_and_parse_json_happy_path(self): + # Happy path test case where the JSON string is already compliant with both techniques + json_string = '{"text": "Hello world", "confidence": 0.9}' + expected_output = {"text": "Hello world", "confidence": 0.9} + assert fix_json_using_multiple_techniques(json_string) == expected_output + + # Tests that the function successfully fixes and parses a JSON string that contains only whitespace characters. + # @requires_api_key("OPEN_API_KEY") + def test_fix_and_parse_json_whitespace(self, mocker): + # Happy path test case where the JSON string contains only whitespace characters + json_string = " \n\t " + + # mock try_ai_fix to avoid calling the AI model: + mocker.patch("autogpt.json_utils.json_fix_llm.try_ai_fix", return_value={}) + + expected_output = {} + assert fix_json_using_multiple_techniques(json_string) == expected_output + + # Tests that the function successfully converts a string with arrays to an array + def test_fix_and_parse_json_array(self): + # Happy path test case where the JSON string contains an array of string + json_string = '[ "Add type hints", "Move docstrings", "Consider using" ]' + expected_output = ["Add type hints", "Move docstrings", "Consider using"] + assert fix_json_using_multiple_techniques(json_string) == expected_output + + # Tests that the function returns an empty dictionary when the JSON string is not parseable and cannot be fixed using either technique. + # @requires_api_key("OPEN_API_KEY") + def test_fix_and_parse_json_can_not(self, mocker): + # Edge case test case where the JSON string is not parseable and cannot be fixed using either technique + json_string = "This is not a JSON string" + + # mock try_ai_fix to avoid calling the AI model: + mocker.patch("autogpt.json_utils.json_fix_llm.try_ai_fix", return_value={}) + + expected_output = {} + + # Use the actual function name in the test + result = fix_json_using_multiple_techniques(json_string) + + assert result == expected_output + + # Tests that the function returns an empty dictionary when the JSON string is empty. 
+ # @requires_api_key("OPEN_API_KEY") + def test_fix_and_parse_json_empty_string(self, mocker): + # Arrange + json_string = "" + + # Act + # mock try_ai_fix to avoid calling the AI model: + mocker.patch("autogpt.json_utils.json_fix_llm.try_ai_fix", return_value={}) + + result = fix_and_parse_json(json_string) + + # Assert + assert result == {} + + # Tests that the function successfully fixes and parses a JSON string that contains escape characters. + def test_fix_and_parse_json_escape_characters(self): + # Arrange + json_string = '{"text": "This is a \\"test\\" string."}' + + # Act + result = fix_json_using_multiple_techniques(json_string) + + # Assert + assert result == {"text": 'This is a "test" string.'} + + # Tests that the function successfully fixes and parses a JSON string that contains nested objects or arrays. + def test_fix_and_parse_json_nested_objects(self): + # Arrange + json_string = '{"person": {"name": "John", "age": 30}, "hobbies": ["reading", "swimming"]}' + + # Act + result = fix_json_using_multiple_techniques(json_string) + + # Assert + assert result == { + "person": {"name": "John", "age": 30}, + "hobbies": ["reading", "swimming"], + } diff --git a/tests/unit/test_llm_utils.py b/tests/unit/test_llm_utils.py new file mode 100644 index 00000000..be36dc09 --- /dev/null +++ b/tests/unit/test_llm_utils.py @@ -0,0 +1,133 @@ +import pytest +from openai.error import APIError, RateLimitError + +from autogpt.llm import llm_utils + + +@pytest.fixture(params=[RateLimitError, APIError]) +def error(request): + if request.param == APIError: + return request.param("Error", http_status=502) + else: + return request.param("Error") + + +def error_factory(error_instance, error_count, retry_count, warn_user=True): + class RaisesError: + def __init__(self): + self.count = 0 + + @llm_utils.retry_openai_api( + num_retries=retry_count, backoff_base=0.001, warn_user=warn_user + ) + def __call__(self): + self.count += 1 + if self.count <= error_count: + raise error_instance + return self.count + + return RaisesError() + + +def test_retry_open_api_no_error(capsys): + @llm_utils.retry_openai_api() + def f(): + return 1 + + result = f() + assert result == 1 + + output = capsys.readouterr() + assert output.out == "" + assert output.err == "" + + +@pytest.mark.parametrize( + "error_count, retry_count, failure", + [(2, 10, False), (2, 2, False), (10, 2, True), (3, 2, True), (1, 0, True)], + ids=["passing", "passing_edge", "failing", "failing_edge", "failing_no_retries"], +) +def test_retry_open_api_passing(capsys, error, error_count, retry_count, failure): + call_count = min(error_count, retry_count) + 1 + + raises = error_factory(error, error_count, retry_count) + if failure: + with pytest.raises(type(error)): + raises() + else: + result = raises() + assert result == call_count + + assert raises.count == call_count + + output = capsys.readouterr() + + if error_count and retry_count: + if type(error) == RateLimitError: + assert "Reached rate limit, passing..." 
in output.out + assert "Please double check" in output.out + if type(error) == APIError: + assert "API Bad gateway" in output.out + else: + assert output.out == "" + + +def test_retry_open_api_rate_limit_no_warn(capsys): + error_count = 2 + retry_count = 10 + + raises = error_factory(RateLimitError, error_count, retry_count, warn_user=False) + result = raises() + call_count = min(error_count, retry_count) + 1 + assert result == call_count + assert raises.count == call_count + + output = capsys.readouterr() + + assert "Reached rate limit, passing..." in output.out + assert "Please double check" not in output.out + + +def test_retry_openapi_other_api_error(capsys): + error_count = 2 + retry_count = 10 + + raises = error_factory(APIError("Error", http_status=500), error_count, retry_count) + + with pytest.raises(APIError): + raises() + call_count = 1 + assert raises.count == call_count + + output = capsys.readouterr() + assert output.out == "" + + +def test_chunked_tokens(): + text = "Auto-GPT is an experimental open-source application showcasing the capabilities of the GPT-4 language model" + expected_output = [ + ( + 13556, + 12279, + 2898, + 374, + 459, + 22772, + 1825, + 31874, + 3851, + 67908, + 279, + 17357, + 315, + 279, + 480, + 2898, + 12, + 19, + 4221, + 1646, + ) + ] + output = list(llm_utils.chunked_tokens(text, "cl100k_base", 8191)) + assert output == expected_output diff --git a/tests/unit/test_plugins.py b/tests/unit/test_plugins.py new file mode 100644 index 00000000..08c9114c --- /dev/null +++ b/tests/unit/test_plugins.py @@ -0,0 +1,130 @@ +import pytest + +from autogpt.config import Config +from autogpt.plugins import ( + denylist_allowlist_check, + inspect_zip_for_modules, + scan_plugins, +) + +PLUGINS_TEST_DIR = "tests/unit/data/test_plugins" +PLUGIN_TEST_ZIP_FILE = "Auto-GPT-Plugin-Test-master.zip" +PLUGIN_TEST_INIT_PY = "Auto-GPT-Plugin-Test-master/src/auto_gpt_vicuna/__init__.py" +PLUGIN_TEST_OPENAI = "https://weathergpt.vercel.app/" + + +def test_inspect_zip_for_modules(): + result = inspect_zip_for_modules(str(f"{PLUGINS_TEST_DIR}/{PLUGIN_TEST_ZIP_FILE}")) + assert result == [PLUGIN_TEST_INIT_PY] + + +@pytest.fixture +def mock_config_denylist_allowlist_check(): + class MockConfig: + """Mock config object for testing the denylist_allowlist_check function""" + + plugins_denylist = ["BadPlugin"] + plugins_allowlist = ["GoodPlugin"] + authorise_key = "y" + exit_key = "n" + + return MockConfig() + + +def test_denylist_allowlist_check_denylist( + mock_config_denylist_allowlist_check, monkeypatch +): + # Test that the function returns False when the plugin is in the denylist + monkeypatch.setattr("builtins.input", lambda _: "y") + assert not denylist_allowlist_check( + "BadPlugin", mock_config_denylist_allowlist_check + ) + + +def test_denylist_allowlist_check_allowlist( + mock_config_denylist_allowlist_check, monkeypatch +): + # Test that the function returns True when the plugin is in the allowlist + monkeypatch.setattr("builtins.input", lambda _: "y") + assert denylist_allowlist_check("GoodPlugin", mock_config_denylist_allowlist_check) + + +def test_denylist_allowlist_check_user_input_yes( + mock_config_denylist_allowlist_check, monkeypatch +): + # Test that the function returns True when the user inputs "y" + monkeypatch.setattr("builtins.input", lambda _: "y") + assert denylist_allowlist_check( + "UnknownPlugin", mock_config_denylist_allowlist_check + ) + + +def test_denylist_allowlist_check_user_input_no( + mock_config_denylist_allowlist_check, monkeypatch +): + # Test 
that the function returns False when the user inputs "n" + monkeypatch.setattr("builtins.input", lambda _: "n") + assert not denylist_allowlist_check( + "UnknownPlugin", mock_config_denylist_allowlist_check + ) + + +def test_denylist_allowlist_check_user_input_invalid( + mock_config_denylist_allowlist_check, monkeypatch +): + # Test that the function returns False when the user inputs an invalid value + monkeypatch.setattr("builtins.input", lambda _: "invalid") + assert not denylist_allowlist_check( + "UnknownPlugin", mock_config_denylist_allowlist_check + ) + + +@pytest.fixture +def config_with_plugins(): + """Mock config object for testing the scan_plugins function""" + # Test that the function returns the correct number of plugins + cfg = Config() + cfg.plugins_dir = PLUGINS_TEST_DIR + cfg.plugins_openai = ["https://weathergpt.vercel.app/"] + return cfg + + +@pytest.fixture +def mock_config_openai_plugin(): + """Mock config object for testing the scan_plugins function""" + + class MockConfig: + """Mock config object for testing the scan_plugins function""" + + plugins_dir = PLUGINS_TEST_DIR + plugins_openai = [PLUGIN_TEST_OPENAI] + plugins_denylist = ["AutoGPTPVicuna"] + plugins_allowlist = [PLUGIN_TEST_OPENAI] + + return MockConfig() + + +def test_scan_plugins_openai(mock_config_openai_plugin): + # Test that the function returns the correct number of plugins + result = scan_plugins(mock_config_openai_plugin, debug=True) + assert len(result) == 1 + + +@pytest.fixture +def mock_config_generic_plugin(): + """Mock config object for testing the scan_plugins function""" + + # Test that the function returns the correct number of plugins + class MockConfig: + plugins_dir = PLUGINS_TEST_DIR + plugins_openai = [] + plugins_denylist = [] + plugins_allowlist = ["AutoGPTPVicuna"] + + return MockConfig() + + +def test_scan_plugins_generic(mock_config_generic_plugin): + # Test that the function returns the correct number of plugins + result = scan_plugins(mock_config_generic_plugin, debug=True) + assert len(result) == 1 diff --git a/tests/unit/test_spinner.py b/tests/unit/test_spinner.py new file mode 100644 index 00000000..1c5c3ac0 --- /dev/null +++ b/tests/unit/test_spinner.py @@ -0,0 +1,70 @@ +# Generated by CodiumAI +import time + +from autogpt.spinner import Spinner + +""" +Code Analysis + +Main functionalities: +The Spinner class provides a simple way to display a spinning animation while a process is running. It can be used to indicate that a process is ongoing and to provide visual feedback to the user. The class can be used as a context manager, which means that it can be used with the 'with' statement to automatically start and stop the spinner animation. + +Methods: +- __init__(self, message: str = "Loading...", delay: float = 0.1) -> None: Initializes the Spinner class with a message to display and a delay between each spinner update. +- spin(self) -> None: Spins the spinner animation while the process is running. +- __enter__(self): Starts the spinner animation when used as a context manager. +- __exit__(self, exc_type, exc_value, exc_traceback) -> None: Stops the spinner animation when used as a context manager. +- update_message(self, new_message, delay=0.1): Updates the message displayed by the spinner animation. + +Fields: +- spinner: An itertools.cycle object that contains the characters used for the spinner animation. +- delay: The delay between each spinner update. +- message: The message to display. 
+
+ALMOST_DONE_MESSAGE = "Almost done..."
+PLEASE_WAIT = "Please wait..."
+
+
+def test_spinner_initializes_with_default_values():
+    """Tests that the spinner initializes with default values."""
+    with Spinner() as spinner:
+        assert spinner.message == "Loading..."
+        assert spinner.delay == 0.1
+
+
+def test_spinner_initializes_with_custom_values():
+    """Tests that the spinner initializes with custom message and delay values."""
+    with Spinner(message=PLEASE_WAIT, delay=0.2) as spinner:
+        assert spinner.message == PLEASE_WAIT
+        assert spinner.delay == 0.2
+
+
+def test_spinner_stops_spinning():
+    """Tests that the spinner starts spinning and stops spinning without errors."""
+    with Spinner() as spinner:
+        time.sleep(1)
+        spinner.update_message(ALMOST_DONE_MESSAGE)
+        time.sleep(1)
+    assert not spinner.running
+
+
+def test_spinner_updates_message_and_still_spins():
+    """Tests that the spinner message can be updated while the spinner is running."""
+    with Spinner() as spinner:
+        assert spinner.running
+        time.sleep(1)
+        spinner.update_message(ALMOST_DONE_MESSAGE)
+        time.sleep(1)
+        assert spinner.message == ALMOST_DONE_MESSAGE
+    assert not spinner.running
+
+
+def test_spinner_can_be_used_as_context_manager():
+    """Tests that the spinner can be used as a context manager."""
+    with Spinner() as spinner:
+        assert spinner.running
+    assert not spinner.running
diff --git a/tests/unit/test_url_validation.py b/tests/unit/test_url_validation.py
new file mode 100644
index 00000000..2bbd3f9e
--- /dev/null
+++ b/tests/unit/test_url_validation.py
@@ -0,0 +1,59 @@
+import pytest
+from pytest import raises
+
+from autogpt.url_utils.validators import validate_url
+
+
+@validate_url
+def dummy_method(url):
+    return url
+
+
+successful_test_data = (
+    "https://google.com/search?query=abc",
+    "https://google.com/search?query=abc&p=123",
+    "http://google.com/",
+    "http://a.lot.of.domain.net/param1/param2",
+)
+
+
+@pytest.mark.parametrize("url", successful_test_data)
+def test_url_validation_succeeds(url):
+    assert dummy_method(url) == url
+
+
+bad_protocol_data = (
+    "htt://example.com",
+    "httppp://example.com",
+    " https://example.com",
+)
+
+
+@pytest.mark.parametrize("url", bad_protocol_data)
+def test_url_validation_fails_bad_protocol(url):
+    with raises(ValueError, match="Invalid URL format"):
+        dummy_method(url)
+
+
+missing_loc = ("http://?query=q",)
+
+
+@pytest.mark.parametrize("url", missing_loc)
+def test_url_validation_fails_missing_loc(url):
+    with raises(ValueError, match="Missing Scheme or Network location"):
+        dummy_method(url)
+
+
+local_file = (
+    "http://localhost",
+    "https://localhost/",
+    "http://2130706433",
+    "https://2130706433",
+    "http://127.0.0.1/",
+)
+
+
+@pytest.mark.parametrize("url", local_file)
+def test_url_validation_fails_local_path(url):
+    with raises(ValueError, match="Access to local files is restricted"):
+        dummy_method(url)
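
The three failure modes exercised above ("Invalid URL format", "Missing Scheme or Network location", "Access to local files is restricted") suggest roughly the following shape for the decorator. A sketch inferred from the tests, not the actual autogpt.url_utils.validators code:

    import functools
    import re
    from urllib.parse import urlparse


    def validate_url(func):
        @functools.wraps(func)
        def wrapper(url, *args, **kwargs):
            # The URL must start with a literal http:// or https://
            # (a leading space or a misspelled scheme fails here).
            if not re.match(r"^https?://", url):
                raise ValueError("Invalid URL format")
            parsed = urlparse(url)
            if not parsed.scheme or not parsed.netloc:
                raise ValueError("Missing Scheme or Network location")
            # Block loopback targets, including 2130706433,
            # the raw decimal form of 127.0.0.1.
            if re.match(r"^(localhost|127\.0\.0\.1|2130706433)", parsed.netloc):
                raise ValueError("Access to local files is restricted")
            return func(url, *args, **kwargs)

        return wrapper
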
diff --git a/tests/unit/test_web_selenium.py b/tests/unit/test_web_selenium.py
new file mode 100644
index 00000000..bf13a8d9
--- /dev/null
+++ b/tests/unit/test_web_selenium.py
@@ -0,0 +1,11 @@
+from autogpt.commands.web_selenium import browse_website
+
+
+def test_browse_website():
+    url = "https://barrel-roll.com"
+    question = "How to execute a barrel roll"
+
+    response, _ = browse_website(url, question)
+    assert "Error" in response
+    # Sanity check that the response is not too long
+    assert len(response) < 200
diff --git a/tests/utils.py b/tests/utils.py
new file mode 100644
index 00000000..2a0d25d8
--- /dev/null
+++ b/tests/utils.py
@@ -0,0 +1,46 @@
+import functools
+import os
+from contextlib import contextmanager
+
+import pytest
+
+from autogpt.config import Config
+
+
+@contextmanager
+def dummy_openai_api_key():
+    # Even when we record the VCR cassettes, OpenAI wants an API key.
+    config = Config()
+    original_api_key = config.openai_api_key
+    config.set_openai_api_key("sk-dummy")
+
+    try:
+        yield
+    finally:
+        config.set_openai_api_key(original_api_key)
+
+
+def requires_api_key(env_var):
+    def decorator(func):
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs):
+            if not os.environ.get(env_var) and env_var == "OPENAI_API_KEY":
+                with dummy_openai_api_key():
+                    return func(*args, **kwargs)
+            else:
+                return func(*args, **kwargs)
+
+        return wrapper
+
+    return decorator
+
+
+def skip_in_ci(test_function):
+    return pytest.mark.skipif(
+        os.environ.get("CI") == "true",
+        reason="This test doesn't work on GitHub Actions.",
+    )(test_function)
+
+
+def get_workspace_file_path(workspace, file_name):
+    return str(workspace.get_path(file_name))
diff --git a/tests/vcr/__init__.py b/tests/vcr/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/vcr/openai_filter.py b/tests/vcr/openai_filter.py
new file mode 100644
index 00000000..15a4b862
--- /dev/null
+++ b/tests/vcr/openai_filter.py
@@ -0,0 +1,52 @@
+import json
+import re
+
+
+def replace_timestamp_in_request(request):
+    # Check if the request body contains a JSON object
+    try:
+        if not request or not request.body:
+            return request
+        body = json.loads(request.body)
+    except ValueError:
+        return request
+
+    if "messages" not in body:
+        return request
+
+    # Normalize timestamps in system messages so cassettes match on replay.
+    for message in body["messages"]:
+        if "content" in message and "role" in message and message["role"] == "system":
+            timestamp_regex = re.compile(r"\w{3} \w{3} \d{2} \d{2}:\d{2}:\d{2} \d{4}")
+            message["content"] = timestamp_regex.sub(
+                "Tue Jan 01 00:00:00 2000", message["content"]
+            )
+
+    request.body = json.dumps(body)
+    return request
+
+
+def before_record_response(response):
+    if "Transfer-Encoding" in response["headers"]:
+        del response["headers"]["Transfer-Encoding"]
+    return response
+
+
+def before_record_request(request):
+    filtered_request = filter_hostnames(request)
+    filtered_request_without_dynamic_data = replace_timestamp_in_request(
+        filtered_request
+    )
+    return filtered_request_without_dynamic_data
+
+
+def filter_hostnames(request):
+    # List of hostnames you want to allow; anything else is dropped from cassettes.
+    allowed_hostnames = [
+        "api.openai.com",
+        "localhost:50337",
+    ]
+
+    if any(hostname in request.url for hostname in allowed_hostnames):
+        return request
+    else:
+        return None
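
These hooks only take effect once they are registered with the recorder. A sketch of how they might be wired up with vcrpy in a conftest.py, assuming a cassette directory of tests/cassettes (the repository's actual fixture setup may differ):

    import vcr

    from tests.vcr.openai_filter import before_record_request, before_record_response

    # Scrub dynamic data and disallowed hosts before cassettes are written.
    my_vcr = vcr.VCR(
        before_record_request=before_record_request,
        before_record_response=before_record_response,
        filter_headers=["Authorization"],  # never persist real API keys
    )

    with my_vcr.use_cassette("tests/cassettes/example.yaml"):
        ...  # calls to api.openai.com are recorded on the first run, replayed after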