Release v0.4.0 (#4539)
.devcontainer/devcontainer.json
@@ -7,11 +7,12 @@
"ghcr.io/devcontainers/features/common-utils:2": {
"installZsh": "true",
"username": "vscode",
"userUid": "6942",
"userGid": "6942",
"userUid": "1000",
"userGid": "1000",
"upgradePackages": "true"
},
"ghcr.io/devcontainers/features/desktop-lite:1": {},
"ghcr.io/devcontainers/features/github-cli:1": {},
"ghcr.io/devcontainers/features/python:1": "none",
"ghcr.io/devcontainers/features/node:1": "none",
"ghcr.io/devcontainers/features/git:1": {
@@ -25,8 +26,20 @@
"vscode": {
// Set *default* container specific settings.json values on container create.
"settings": {
"python.defaultInterpreterPath": "/usr/local/bin/python"
}
"python.defaultInterpreterPath": "/usr/local/bin/python",
"python.testing.pytestEnabled": true,
"python.testing.unittestEnabled": false
},
"extensions": [
"ms-python.python",
"VisualStudioExptTeam.vscodeintellicode",
"ms-python.vscode-pylance",
"ms-python.black-formatter",
"ms-python.isort",
"GitHub.vscode-pull-request-github",
"GitHub.copilot",
"github.vscode-github-actions"
]
}
},
// Use 'forwardPorts' to make a list of ports inside the container available locally.
@@ -36,5 +49,8 @@
// "postCreateCommand": "pip3 install --user -r requirements.txt",

// Set `remoteUser` to `root` to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
"remoteUser": "vscode"
}
"remoteUser": "vscode",

// Add the freshly containerized repo to the list of safe repositories
"postCreateCommand": "git config --global --add safe.directory /workspace/Auto-GPT && pip3 install --user -r requirements.txt"
}
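The new postCreateCommand both marks the freshly mounted repo as a safe Git directory and installs the Python requirements. A minimal sketch for sanity-checking the result inside the container (not part of the commit; it assumes the /workspace/Auto-GPT mount shown above):

```shell
# Run inside the dev container after it has been created.
git config --global --get-all safe.directory   # output should include /workspace/Auto-GPT
pip3 list --user                               # packages from requirements.txt should appear here
```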
.devcontainer/docker-compose.yml
@@ -4,16 +4,9 @@ version: '3.9'

services:
auto-gpt:
depends_on:
- redis
build:
dockerfile: .devcontainer/Dockerfile
context: ../
tty: true
environment:
MEMORY_BACKEND: ${MEMORY_BACKEND:-redis}
REDIS_HOST: ${REDIS_HOST:-redis}
volumes:
- ../:/workspace/Auto-GPT
redis:
image: 'redis/redis-stack-server:latest'
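For working on this compose setup outside VS Code, the services above can also be driven directly with Docker Compose. A sketch (service names are taken from the file above; the flags are illustrative, not part of the commit):

```shell
# Build the dev image and start only the Redis dependency defined above.
docker compose -f .devcontainer/docker-compose.yml build auto-gpt
docker compose -f .devcontainer/docker-compose.yml up -d redis
```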
@@ -13,11 +13,18 @@
|
||||
## AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml)
|
||||
# AI_SETTINGS_FILE=ai_settings.yaml
|
||||
|
||||
## PROMPT_SETTINGS_FILE - Specifies which Prompt Settings file to use (defaults to prompt_settings.yaml)
|
||||
# PROMPT_SETTINGS_FILE=prompt_settings.yaml
|
||||
|
||||
## AUTHORISE COMMAND KEY - Key to authorise commands
|
||||
# AUTHORISE_COMMAND_KEY=y
|
||||
## EXIT_KEY - Key to exit AUTO-GPT
|
||||
# EXIT_KEY=n
|
||||
|
||||
## PLAIN_OUTPUT - Enabeling plain output will disable spinner (Default: False)
|
||||
## Note: Spinner is used to indicate that Auto-GPT is working on something in the background
|
||||
# PLAIN_OUTPUT=False
|
||||
|
||||
## DISABLED_COMMAND_CATEGORIES - The list of categories of commands that are disabled. Each of the below are an option:
|
||||
## autogpt.commands.analyze_code
|
||||
## autogpt.commands.audio_text
|
||||
@@ -27,7 +34,6 @@
|
||||
## autogpt.commands.google_search
|
||||
## autogpt.commands.image_gen
|
||||
## autogpt.commands.improve_code
|
||||
## autogpt.commands.twitter
|
||||
## autogpt.commands.web_selenium
|
||||
## autogpt.commands.write_tests
|
||||
## autogpt.app
|
||||
@@ -35,6 +41,15 @@
## For example, to disable coding related features, uncomment the next line
# DISABLED_COMMAND_CATEGORIES=autogpt.commands.analyze_code,autogpt.commands.execute_code,autogpt.commands.git_operations,autogpt.commands.improve_code,autogpt.commands.write_tests

## DENY_COMMANDS - The list of commands that are not allowed to be executed by Auto-GPT (Default: None)
# the following are examples:
# DENY_COMMANDS=cd,nano,vim,vi,emacs,rm,sudo,top,ping,ssh,scp

## ALLOW_COMMANDS - ONLY those commands will be allowed to be executed by Auto-GPT
# the following are examples:
# ALLOW_COMMANDS=ls,git,cat,grep,find,echo,ps,curl,wget

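Taken together, these options give three levels of control: whole command categories, a denylist of shell commands, and an allowlist of shell commands. A sketch of a locked-down .env using only values that appear in the template above (the allowlist already excludes anything not listed, so a denylist is typically redundant alongside it):

```shell
# .env sketch: disable code-execution categories and allow only a few read-only shell commands
DISABLED_COMMAND_CATEGORIES=autogpt.commands.execute_code,autogpt.commands.git_operations
ALLOW_COMMANDS=ls,cat,grep,find,echo
```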
################################################################################
|
||||
### LLM PROVIDER
|
||||
################################################################################
|
||||
@@ -56,6 +71,7 @@
|
||||
OPENAI_API_KEY=your-openai-api-key
|
||||
# TEMPERATURE=0
|
||||
# USE_AZURE=False
|
||||
# OPENAI_ORGANIZATION=your-openai-organization-key-if-applicable
|
||||
|
||||
### AZURE
|
||||
# moved to `azure.yaml.template`
|
||||
@@ -78,30 +94,18 @@ OPENAI_API_KEY=your-openai-api-key
|
||||
|
||||
### EMBEDDINGS
|
||||
## EMBEDDING_MODEL - Model to use for creating embeddings
|
||||
## EMBEDDING_TOKENIZER - Tokenizer to use for chunking large inputs
|
||||
## EMBEDDING_TOKEN_LIMIT - Chunk size limit for large inputs
|
||||
# EMBEDDING_MODEL=text-embedding-ada-002
|
||||
# EMBEDDING_TOKENIZER=cl100k_base
|
||||
# EMBEDDING_TOKEN_LIMIT=8191
|
||||
|
||||
################################################################################
|
||||
### MEMORY
|
||||
################################################################################
|
||||
|
||||
### MEMORY_BACKEND - Memory backend type
|
||||
## local - Default
|
||||
## pinecone - Pinecone (if configured)
|
||||
## json_file - Default
|
||||
## redis - Redis (if configured)
|
||||
## milvus - Milvus (if configured - also works with Zilliz)
|
||||
## MEMORY_INDEX - Name of index created in Memory backend (Default: auto-gpt)
|
||||
# MEMORY_BACKEND=local
|
||||
# MEMORY_INDEX=auto-gpt
|
||||
|
||||
### PINECONE
|
||||
## PINECONE_API_KEY - Pinecone API Key (Example: my-pinecone-api-key)
|
||||
## PINECONE_ENV - Pinecone environment (region) (Example: us-west-2)
|
||||
# PINECONE_API_KEY=your-pinecone-api-key
|
||||
# PINECONE_ENV=your-pinecone-region
|
||||
# MEMORY_BACKEND=json_file
|
||||
# MEMORY_INDEX=auto-gpt-memory
|
||||
|
||||
### REDIS
|
||||
## REDIS_HOST - Redis host (Default: localhost, use "redis" for docker-compose)
|
||||
@@ -113,46 +117,14 @@ OPENAI_API_KEY=your-openai-api-key
|
||||
# REDIS_PASSWORD=
|
||||
# WIPE_REDIS_ON_START=True
|
||||
|
||||
### WEAVIATE
|
||||
## MEMORY_BACKEND - Use 'weaviate' to use Weaviate vector storage
|
||||
## WEAVIATE_HOST - Weaviate host IP
|
||||
## WEAVIATE_PORT - Weaviate host port
|
||||
## WEAVIATE_PROTOCOL - Weaviate host protocol (e.g. 'http')
|
||||
## USE_WEAVIATE_EMBEDDED - Whether to use Embedded Weaviate
|
||||
## WEAVIATE_EMBEDDED_PATH - File system path were to persist data when running Embedded Weaviate
|
||||
## WEAVIATE_USERNAME - Weaviate username
|
||||
## WEAVIATE_PASSWORD - Weaviate password
|
||||
## WEAVIATE_API_KEY - Weaviate API key if using API-key-based authentication
|
||||
# WEAVIATE_HOST="127.0.0.1"
|
||||
# WEAVIATE_PORT=8080
|
||||
# WEAVIATE_PROTOCOL="http"
|
||||
# USE_WEAVIATE_EMBEDDED=False
|
||||
# WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate"
|
||||
# WEAVIATE_USERNAME=
|
||||
# WEAVIATE_PASSWORD=
|
||||
# WEAVIATE_API_KEY=
|
||||
|
||||
### MILVUS
|
||||
## MILVUS_ADDR - Milvus remote address (e.g. localhost:19530, https://xxx-xxxx.xxxx.xxxx.zillizcloud.com:443)
|
||||
## MILVUS_USERNAME - username for your Milvus database
|
||||
## MILVUS_PASSWORD - password for your Milvus database
|
||||
## MILVUS_SECURE - True to enable TLS. (Default: False)
|
||||
## Setting MILVUS_ADDR to a `https://` URL will override this setting.
|
||||
## MILVUS_COLLECTION - Milvus collection, change it if you want to start a new memory and retain the old memory.
|
||||
# MILVUS_ADDR=localhost:19530
|
||||
# MILVUS_USERNAME=
|
||||
# MILVUS_PASSWORD=
|
||||
# MILVUS_SECURE=
|
||||
# MILVUS_COLLECTION=autogpt
|
||||
|
||||
################################################################################
|
||||
### IMAGE GENERATION PROVIDER
|
||||
################################################################################
|
||||
|
||||
### OPEN AI
|
||||
## IMAGE_PROVIDER - Image provider (Example: dalle)
|
||||
### COMMON SETTINGS
|
||||
## IMAGE_PROVIDER - Image provider - dalle, huggingface, or sdwebui
|
||||
## IMAGE_SIZE - Image size (Example: 256)
|
||||
## DALLE: 256, 512, 1024
|
||||
## Image sizes for dalle: 256, 512, 1024
|
||||
# IMAGE_PROVIDER=dalle
|
||||
# IMAGE_SIZE=256
|
||||
|
||||
|
||||
.gitattributes (2 changes, vendored)
@@ -1,5 +1,5 @@
# Exclude VCR cassettes from stats
tests/**/cassettes/**.y*ml linguist-generated
tests/Auto-GPT-test-cassettes/**/**.y*ml linguist-generated

# Mark documentation as such
docs/**.md linguist-documentation
.github/ISSUE_TEMPLATE/1.bug.yml (149 changes, vendored)
@@ -8,16 +8,16 @@ body:
|
||||
### ⚠️ Before you continue
|
||||
* Check out our [backlog], [roadmap] and join our [discord] to discuss what's going on
|
||||
* If you need help, you can ask in the [discussions] section or in [#tech-support]
|
||||
* **Throughly search the [existing issues] before creating a new one**
|
||||
* **Thoroughly search the [existing issues] before creating a new one**
|
||||
* Read our [wiki page on Contributing]
|
||||
|
||||
[backlog]: https://github.com/orgs/Significant-Gravitas/projects/1
|
||||
[roadmap]: https://github.com/orgs/Significant-Gravitas/projects/2
|
||||
[discord]: https://discord.gg/autogpt
|
||||
[discussions]: https://github.com/Significant-Gravitas/Auto-GPT/discussions
|
||||
[#tech-support]: https://discord.com/channels/1092243196446249134/1092275629602394184
|
||||
[existing issues]: https://github.com/Significant-Gravitas/Auto-GPT/issues?q=is%3Aissue
|
||||
[wiki page on Contributing]: https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing
|
||||
[wiki page on Contributing]: https://github.com/Significant-Gravitas/Nexus/wiki/Contributing
|
||||
|
||||
- type: checkboxes
|
||||
attributes:
|
||||
label: ⚠️ Search for existing issues first ⚠️
|
||||
@@ -27,23 +27,29 @@ body:
|
||||
options:
|
||||
- label: I have searched the existing issues, and there is no existing issue for my problem
|
||||
required: true
|
||||
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Please provide a searchable summary of the issue in the title above ⬆️.
|
||||
|
||||
⚠️ SUPER-busy repo, please help the volunteer maintainers.
|
||||
The less time we spend here, the more time we spend building AutoGPT.
|
||||
Please confirm that the issue you have is described well and precise in the title above ⬆️.
|
||||
A good rule of thumb: What would you type if you were searching for the issue?
|
||||
|
||||
Please help us help you:
|
||||
- Does it work on `stable` branch (https://github.com/Torantulino/Auto-GPT/tree/stable)?
|
||||
- Does it work on current `master` (https://github.com/Torantulino/Auto-GPT/tree/master)?
|
||||
- Search for existing issues, "add comment" is tidier than "new issue"
|
||||
- Ask on our Discord (https://discord.gg/autogpt)
|
||||
For example:
|
||||
BAD - my auto-gpt keeps looping
|
||||
GOOD - After performing execute_python_file, auto-gpt goes into a loop where it keeps trying to execute the file.
|
||||
|
||||
⚠️ SUPER-busy repo, please help the volunteer maintainers.
|
||||
The less time we spend here, the more time we can spend building AutoGPT.
|
||||
|
||||
Please help us help you by following these steps:
|
||||
- Search for existing issues, adding a comment when you have the same or similar issue is tidier than "new issue" and
|
||||
newer issues will not be reviewed earlier, this is dependent on the current priorities set by our wonderful team
|
||||
- Ask on our Discord if your issue is known when you are unsure (https://discord.gg/autogpt)
|
||||
- Provide relevant info:
|
||||
- Provide commit-hash (`git rev-parse HEAD` gets it)
|
||||
- If it's a pip/packages issue, provide pip version, python version
|
||||
- If it's a crash, provide traceback.
|
||||
- Provide commit-hash (`git rev-parse HEAD` gets it) if possible
|
||||
- If it's a pip/packages issue, mention this in the title and provide pip version, python version
|
||||
- If it's a crash, provide traceback and describe the error you got as precise as possible in the title.
|
||||
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Which Operating System are you using?
|
||||
@@ -56,9 +62,15 @@ body:
|
||||
- Docker
|
||||
- Devcontainer / Codespace
|
||||
- Windows Subsystem for Linux (WSL)
|
||||
- Other (Please specify in your problem)
|
||||
- Other
|
||||
validations:
|
||||
required: true
|
||||
nested_fields:
|
||||
- type: text
|
||||
attributes:
|
||||
label: Specify the system
|
||||
description: Please specify the system you are working on.
|
||||
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Which version of Auto-GPT are you using?
|
||||
@@ -73,61 +85,80 @@ body:
|
||||
- Master (branch)
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: GPT-3 or GPT-4?
|
||||
label: Do you use OpenAI GPT-3 or GPT-4?
|
||||
description: >
|
||||
If you are using Auto-GPT with `--gpt3only`, your problems may be caused by
|
||||
the [limitations](https://github.com/Significant-Gravitas/Auto-GPT/issues?q=is%3Aissue+label%3A%22AI+model+limitation%22) of GPT-3.5.
|
||||
options:
|
||||
- GPT-3.5
|
||||
- GPT-4
|
||||
- GPT-4(32k)
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Steps to reproduce 🕹
|
||||
description: |
|
||||
**⚠️ Issues that we can't reproduce will be closed.**
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Current behavior 😯
|
||||
description: Describe what happens instead of the expected behavior.
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Expected behavior 🤔
|
||||
description: Describe what should happen.
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Your prompt 📝
|
||||
label: Which area covers your issue best?
|
||||
description: >
|
||||
If applicable please provide the prompt you are using. Your prompt is stored in your `ai_settings.yaml` file.
|
||||
value: |
|
||||
```yaml
|
||||
# Paste your prompt here
|
||||
```
|
||||
Select the area related to the issue you are reporting.
|
||||
options:
|
||||
- Installation and setup
|
||||
- Memory
|
||||
- Performance
|
||||
- Prompt
|
||||
- Commands
|
||||
- Plugins
|
||||
- AI Model Limitations
|
||||
- Challenges
|
||||
- Documentation
|
||||
- Logging
|
||||
- Agents
|
||||
- Other
|
||||
validations:
|
||||
required: true
|
||||
autolabels: true
|
||||
nested_fields:
|
||||
- type: text
|
||||
attributes:
|
||||
label: Specify the area
|
||||
description: Please specify the area you think is best related to the issue.
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Your Logs 📒
|
||||
description: |
|
||||
Please include the log showing your error and the command that caused it, if applicable.
|
||||
You can copy it from your terminal or from `logs/activity.log`.
|
||||
This will help us understand your issue better!
|
||||
|
||||
<details>
|
||||
<summary><i>Example</i></summary>
|
||||
```log
|
||||
INFO NEXT ACTION: COMMAND = execute_shell ARGUMENTS = {'command_line': 'some_command'}
|
||||
INFO -=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=
|
||||
Traceback (most recent call last):
|
||||
File "/home/anaconda3/lib/python3.9/site-packages/openai/api_requestor.py", line 619, in _interpret_response
|
||||
self._interpret_response_line(
|
||||
File "/home/anaconda3/lib/python3.9/site-packages/openai/api_requestor.py", line 682, in _interpret_response_line
|
||||
raise self.handle_error_response(
|
||||
openai.error.InvalidRequestError: This model's maximum context length is 8191 tokens, however you requested 10982 tokens (10982 in your prompt; 0 for the completion). Please reduce your prompt; or completion length.
|
||||
```
|
||||
</details>
|
||||
label: Describe your issue.
|
||||
description: Describe the problem you are experiencing. Try to describe only the issue and phrase it short but clear. ⚠️ Provide NO other data in this field
|
||||
validations:
|
||||
required: true
|
||||
|
||||
#Following are optional file content uploads
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
```log
|
||||
<insert your logs here>
|
||||
```
|
||||
⚠️The following is OPTIONAL, please keep in mind that the log files may contain personal information such as credentials.⚠️
|
||||
|
||||
"The log files are located in the folder 'logs' inside the main auto-gpt folder."
|
||||
|
||||
- type: input
|
||||
attributes:
|
||||
label: Upload Activity Log Content
|
||||
description: |
|
||||
Upload the activity log content, this can help us understand the issue better.
|
||||
To do this, go to the folder logs in your main auto-gpt folder, open activity.log and copy/paste the contents to this field.
|
||||
⚠️ The activity log may contain personal data given to auto-gpt by you in prompt or input as well as
|
||||
any personal information that auto-gpt collected out of files during last run. Do not add the activity log if you are not comfortable with sharing it. ⚠️
|
||||
validations:
|
||||
required: false
|
||||
|
||||
- type: input
|
||||
attributes:
|
||||
label: Upload Error Log Content
|
||||
description: |
|
||||
Upload the error log content, this will help us understand the issue better.
|
||||
To do this, go to the folder logs in your main auto-gpt folder, open error.log and copy/paste the contents to this field.
|
||||
⚠️ The error log may contain personal data given to auto-gpt by you in prompt or input as well as
|
||||
any personal information that auto-gpt collected out of files during last run. Do not add the activity log if you are not comfortable with sharing it. ⚠️
|
||||
validations:
|
||||
required: false
|
||||
|
||||
.github/ISSUE_TEMPLATE/2.feature.yml (2 changes, vendored)
@@ -5,7 +5,7 @@ body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
First, check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing)
|
||||
First, check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/Nexus/wiki/Contributing)
|
||||
Please provide a searchable summary of the issue in the title above ⬆️.
|
||||
- type: checkboxes
|
||||
attributes:
|
||||
|
||||
.github/PULL_REQUEST_TEMPLATE.md (11 changes, vendored)
@@ -14,7 +14,7 @@ Provide clear documentation and explanations of the changes made.
|
||||
Ensure diffs are limited to the intended lines — no applying preferred formatting styles or line endings (unless that's what the PR is about).
|
||||
For guidance on committing only the specific lines you have changed, refer to this helpful video: https://youtu.be/8-hSNHHbiZg
|
||||
|
||||
Check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing)
|
||||
Check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/Nexus/wiki/Contributing)
|
||||
|
||||
By following these guidelines, your PRs are more likely to be merged quickly after testing, as long as they align with the project's overall direction. -->
|
||||
|
||||
@@ -35,7 +35,14 @@ By following these guidelines, your PRs are more likely to be merged quickly aft
|
||||
- [ ] I have thoroughly tested my changes with multiple different prompts.
|
||||
- [ ] I have considered potential risks and mitigations for my changes.
|
||||
- [ ] I have documented my changes clearly and comprehensively.
|
||||
- [ ] I have not snuck in any "extra" small tweaks changes <!-- Submit these as separate Pull Requests, they are the easiest to merge! -->
|
||||
- [ ] I have not snuck in any "extra" small tweaks changes. <!-- Submit these as separate Pull Requests, they are the easiest to merge! -->
|
||||
- [ ] I have run the following commands against my code to ensure it passes our linters:
|
||||
```shell
|
||||
black .
|
||||
isort .
|
||||
mypy
|
||||
autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports autogpt tests --in-place
|
||||
```
|
||||
|
||||
<!-- If you haven't added tests, please explain why. If you have, check the appropriate box. If you've ensured your PR is atomic and well-documented, check the corresponding boxes. -->
|
||||
|
||||
|
||||
.github/workflows/add-cassettes.yml (49 changes, vendored)
@@ -1,49 +0,0 @@
|
||||
name: Merge and Commit Cassettes
|
||||
|
||||
on:
|
||||
pull_request_target:
|
||||
types:
|
||||
- closed
|
||||
|
||||
jobs:
|
||||
update-cassettes:
|
||||
if: github.event.pull_request.merged == true
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0 # This is necessary to fetch all branches and tags
|
||||
|
||||
- name: Fetch all branches
|
||||
run: git fetch --all
|
||||
|
||||
- name: Reset branch
|
||||
run: |
|
||||
git checkout ${{ github.event.pull_request.base.ref }}
|
||||
git reset --hard origin/cassette-diff-${{ github.event.pull_request.number }}
|
||||
|
||||
- name: Create PR
|
||||
id: create_pr
|
||||
uses: peter-evans/create-pull-request@v5
|
||||
with:
|
||||
commit-message: Update cassettes
|
||||
signoff: false
|
||||
branch: cassette-diff-${{ github.event.pull_request.number }}
|
||||
delete-branch: false
|
||||
title: "Update cassettes"
|
||||
body: "This PR updates the cassettes."
|
||||
draft: false
|
||||
|
||||
- name: Check PR
|
||||
run: |
|
||||
echo "Pull Request Number - ${{ steps.create_pr.outputs.pull-request-number }}"
|
||||
echo "Pull Request URL - ${{ steps.create_pr.outputs.pull-request-url }}"
|
||||
|
||||
- name: Comment PR URL in the current PR
|
||||
uses: thollander/actions-comment-pull-request@v2
|
||||
with:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
message: |
|
||||
New pull request created for cassettes: [HERE](${{ steps.create_pr.outputs.pull-request-url }}). Please merge it asap.
|
||||
.github/workflows/ci.yml (44 changes, vendored)
@@ -89,12 +89,11 @@ jobs:
|
||||
repository: ${{ github.event.pull_request.head.repo.full_name }}
|
||||
submodules: true
|
||||
|
||||
- name: Check out cassettes
|
||||
- id: checkout_cassettes
|
||||
name: Check out cassettes
|
||||
if: ${{ startsWith(github.event_name, 'pull_request') }}
|
||||
run: |
|
||||
cassette_branch="${{ github.event.pull_request.user.login }}-${{ github.event.pull_request.head.ref }}"
|
||||
git config --global user.name "Auto-GPT-Bot"
|
||||
git config --global user.email "github-bot@agpt.co"
|
||||
cd tests/Auto-GPT-test-cassettes
|
||||
|
||||
if git ls-remote --exit-code --heads origin $cassette_branch ; then
|
||||
@@ -109,10 +108,15 @@ jobs:
|
||||
echo "Could not merge upstream changes to cassettes. Using cassettes from ${{ github.event.pull_request.base.ref }}."
|
||||
git merge --abort
|
||||
git checkout ${{ github.event.pull_request.base.ref }}
|
||||
|
||||
# Delete branch to prevent conflict when re-creating it
|
||||
git branch -D $cassette_branch
|
||||
fi
|
||||
echo "cassette_branch=$(git branch --show-current)" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "Branch '$cassette_branch' does not exist in cassette submodule."\
|
||||
"Using cassettes from ${{ github.event.pull_request.base.ref }}."
|
||||
echo "cassette_branch=${{ github.event.pull_request.base.ref }}" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
@@ -141,9 +145,18 @@ jobs:
|
||||
- id: setup_git_auth
|
||||
name: Set up git token authentication
|
||||
run: |
|
||||
git config --global user.name "Auto-GPT-Bot"
|
||||
git config --global user.email "github-bot@agpt.co"
|
||||
|
||||
config_key="http.${{ github.server_url }}/.extraheader"
|
||||
base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0)
|
||||
|
||||
git config "$config_key" \
|
||||
"Authorization: Basic x-access-token:${{ secrets.PAT_REVIEW }}"
|
||||
"Authorization: Basic $base64_pat"
|
||||
|
||||
cd tests/Auto-GPT-test-cassettes
|
||||
git config "$config_key" \
|
||||
"Authorization: Basic $base64_pat"
|
||||
|
||||
echo "config_key=$config_key" >> $GITHUB_OUTPUT
|
||||
|
||||
@@ -163,9 +176,10 @@ jobs:
|
||||
- id: push_cassettes
|
||||
name: Push updated cassettes
|
||||
run: |
|
||||
if [[ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]]; then
|
||||
if [ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]; then
|
||||
is_pull_request=true
|
||||
cassette_branch="${{ github.event.pull_request.user.login }}-${{ github.event.pull_request.head.ref }}"
|
||||
cassette_source_branch="${{ steps.checkout_cassettes.outputs.cassette_branch }}"
|
||||
base_branch="${{ github.event.pull_request.base.ref }}"
|
||||
else
|
||||
current_branch=$(echo ${{ github.ref }} | sed -e "s/refs\/heads\///g")
|
||||
@@ -173,20 +187,26 @@ jobs:
|
||||
fi
|
||||
|
||||
cd tests/Auto-GPT-test-cassettes
|
||||
git fetch origin $cassette_branch
|
||||
git fetch origin $cassette_source_branch:$cassette_source_branch
|
||||
|
||||
# Commit & push changes to cassettes if any
|
||||
if ! git diff-index --quiet $cassette_branch; then
|
||||
if ! git diff --quiet $cassette_source_branch --; then
|
||||
if [ "$cassette_branch" != "$cassette_source_branch" ]; then
|
||||
git checkout -b $cassette_branch
|
||||
fi
|
||||
git add .
|
||||
git commit -m "Auto-update cassettes"
|
||||
git pull --rebase origin $cassette_branch
|
||||
|
||||
git push origin HEAD:$cassette_branch
|
||||
if [ $is_pull_request ]; then
|
||||
git push --force origin HEAD:$cassette_branch
|
||||
else
|
||||
git push origin HEAD:$cassette_branch
|
||||
fi
|
||||
|
||||
cd ../..
|
||||
if [ $is_pull_request ]; then
|
||||
git fetch origin $base_branch
|
||||
cassette_diff=$(git diff $cassette_branch origin/$base_branch)
|
||||
cassette_diff=$(git diff origin/$base_branch)
|
||||
else
|
||||
git add tests/Auto-GPT-test-cassettes
|
||||
git commit -m "Update cassette submodule"
|
||||
@@ -203,8 +223,10 @@ jobs:
|
||||
fi
|
||||
|
||||
- name: Post Set up git token auth
|
||||
if: steps.setup_git_auth.outcome == 'success'
|
||||
run: |
|
||||
git config --unset "${{ steps.setup_git_auth.outputs.config_key }}"
|
||||
git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
|
||||
git submodule foreach git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
|
||||
|
||||
- name: Apply or remove behaviour change label and comment on PR
|
||||
if: ${{ startsWith(github.event_name, 'pull_request') }}
|
||||
|
||||
.github/workflows/docker-ci.yml (77 changes, vendored)
@@ -3,6 +3,9 @@ name: Docker CI
|
||||
on:
|
||||
push:
|
||||
branches: [ master ]
|
||||
paths-ignore:
|
||||
- 'tests/Auto-GPT-test-cassettes'
|
||||
- 'tests/integration/challenges/current_score.json'
|
||||
pull_request:
|
||||
branches: [ master, stable ]
|
||||
|
||||
@@ -73,43 +76,47 @@ jobs:
|
||||
# Docker setup needs fixing before this is going to work: #1843
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
needs: build
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v3
|
||||
- name: Check out repository
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
submodules: true
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
|
||||
- id: build
|
||||
name: Build image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
build-args: BUILD_TYPE=dev # include pytest
|
||||
tags: ${{ env.IMAGE_NAME }}
|
||||
load: true # save to docker images
|
||||
# cache layers in GitHub Actions cache to speed up builds
|
||||
cache-from: type=gha,scope=docker-dev
|
||||
cache-to: type=gha,scope=docker-dev,mode=max
|
||||
- id: build
|
||||
name: Build image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
build-args: BUILD_TYPE=dev # include pytest
|
||||
tags: ${{ env.IMAGE_NAME }}
|
||||
load: true # save to docker images
|
||||
# cache layers in GitHub Actions cache to speed up builds
|
||||
cache-from: type=gha,scope=docker-dev
|
||||
cache-to: type=gha,scope=docker-dev,mode=max
|
||||
|
||||
- id: test
|
||||
name: Run tests
|
||||
env:
|
||||
CI: true
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
run: |
|
||||
set +e
|
||||
test_output=$(
|
||||
docker run --env CI --env OPENAI_API_KEY --entrypoint python ${{ env.IMAGE_NAME }} -m \
|
||||
pytest -n auto --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term 2>&1
|
||||
)
|
||||
test_failure=$?
|
||||
|
||||
echo "$test_output"
|
||||
|
||||
cat << $EOF >> $GITHUB_STEP_SUMMARY
|
||||
# Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌')
|
||||
\`\`\`
|
||||
$test_output
|
||||
\`\`\`
|
||||
$EOF
|
||||
- id: test
|
||||
name: Run tests
|
||||
env:
|
||||
CI: true
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
run: |
|
||||
set +e
|
||||
test_output=$(
|
||||
docker run --env CI --env OPENAI_API_KEY --entrypoint python ${{ env.IMAGE_NAME }} -m \
|
||||
pytest -n auto --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term 2>&1
|
||||
)
|
||||
test_failure=$?
|
||||
|
||||
echo "$test_output"
|
||||
|
||||
cat << $EOF >> $GITHUB_STEP_SUMMARY
|
||||
# Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌')
|
||||
\`\`\`
|
||||
$test_output
|
||||
\`\`\`
|
||||
$EOF
|
||||
|
||||
.github/workflows/pr-label.yml (3 changes, vendored)
@@ -4,6 +4,9 @@ on:
|
||||
# So that PRs touching the same files as the push are updated
|
||||
push:
|
||||
branches: [ master ]
|
||||
paths-ignore:
|
||||
- 'tests/Auto-GPT-test-cassettes'
|
||||
- 'tests/integration/challenges/current_score.json'
|
||||
# So that the `dirtyLabel` is removed if conflicts are resolve
|
||||
# We recommend `pull_request_target` so that github secrets are available.
|
||||
# In `pull_request` we wouldn't be able to change labels of fork PRs
|
||||
|
||||
.github/workflows/sponsors_readme.yml (28 changes, vendored)
@@ -1,28 +0,0 @@
|
||||
name: Generate Sponsors README
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
- cron: '0 */12 * * *'
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout 🛎️
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Generate Sponsors 💖
|
||||
uses: JamesIves/github-sponsors-readme-action@v1
|
||||
with:
|
||||
token: ${{ secrets.README_UPDATER_PAT }}
|
||||
file: 'README.md'
|
||||
minimum: 2500
|
||||
maximum: 99999
|
||||
|
||||
- name: Deploy to GitHub Pages 🚀
|
||||
uses: JamesIves/github-pages-deploy-action@v4
|
||||
with:
|
||||
branch: master
|
||||
folder: '.'
|
||||
token: ${{ secrets.README_UPDATER_PAT }}
|
||||
.gitignore (1 change, vendored)
@@ -21,6 +21,7 @@ logs
|
||||
*.log
|
||||
*.mp3
|
||||
mem.sqlite3
|
||||
venvAutoGPT
|
||||
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
|
||||
.gitmodules (4 changes, vendored, new file)
@@ -0,0 +1,4 @@
[submodule "tests/Auto-GPT-test-cassettes"]
path = tests/Auto-GPT-test-cassettes
url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
branch = master
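Because the VCR cassettes now live in a submodule, a plain clone no longer brings them along. A sketch of the usual commands for picking them up (standard Git usage, not specific to this commit):

```shell
# Either clone with submodules from the start...
git clone --recurse-submodules https://github.com/Significant-Gravitas/Auto-GPT.git
# ...or fetch them into an existing checkout.
git submodule update --init tests/Auto-GPT-test-cassettes
```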
@@ -22,8 +22,18 @@ repos:
|
||||
- id: black
|
||||
language_version: python3.10
|
||||
|
||||
- repo: https://github.com/pre-commit/mirrors-mypy
|
||||
rev: 'v1.3.0'
|
||||
hooks:
|
||||
- id: mypy
|
||||
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: autoflake
|
||||
name: autoflake
|
||||
entry: autoflake --in-place --remove-all-unused-imports --recursive --ignore-init-module-imports autogpt tests
|
||||
language: python
|
||||
types: [ python ]
|
||||
- id: pytest-check
|
||||
name: pytest-check
|
||||
entry: pytest --cov=autogpt --without-integration --without-slow-integration
|
||||
|
||||
BULLETIN.md (55 changes)
@@ -2,23 +2,46 @@
Check out *https://agpt.co*, the official news & updates site for Auto-GPT!
The documentation also has a place here, at *https://docs.agpt.co*

# 🚀 v0.3.0 Release 🚀
Over a week and 275 pull requests have passed since v0.2.2, and we are happy to announce
the release of v0.3.0! *From now on, we will be focusing on major improvements* rather
than bugfixes, as we feel stability has reached a reasonable level. Most remaining
issues relate to limitations in prompt generation and the memory system, which will be
the focus of our efforts for the next release.
# For contributors 👷🏼
Since releasing v0.3.0, we are working on re-architecting the Auto-GPT core to make
it more extensible and to make room for structural performance-oriented R&D.
In the meantime, we have less time to process incoming pull requests and issues,
so we focus on high-value contributions:
* significant bugfixes
* *major* improvements to existing functionality and/or docs (so no single-typo fixes)
* contributions that help us with re-architecture and other roadmapped items
We have to be somewhat selective in order to keep making progress, but this does not
mean you can't contribute. Check out the contribution guide on our wiki:
https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing

Highlights and notable changes in this release:
# 🚀 v0.4.0 Release 🚀
Two weeks and 76 pull requests have passed since v0.3.1, and we are happy to announce
the release of v0.4.0!

## Plugin support 🔌
Auto-GPT now has support for plugins! With plugins, you can extend Auto-GPT's abilities,
adding support for third-party services and more.
See https://github.com/Significant-Gravitas/Auto-GPT-Plugins for instructions and available plugins.
Highlights and notable changes since v0.3.0:

## Changes to Docker configuration 🐋
The workdir has been changed from */home/appuser* to */app*.
Be sure to update any volume mounts accordingly!
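For anyone running the image directly, a sketch of what an updated mount might look like (the image name and workspace folder are placeholders, adjust them to your own setup):

```shell
# Old mounts targeted /home/appuser/...; after this release they should target /app/...
docker run -it --env-file ./.env \
  -v "$PWD/auto_gpt_workspace:/app/auto_gpt_workspace" \
  <your-auto-gpt-image>
```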
## ⚠️ Command `send_tweet` is REMOVED
Twitter functionality (and more) is now covered by plugins.

# ⚠️ Command `send_tweet` is DEPRECATED, and will be removed in v0.4.0 ⚠️
Twitter functionality (and more) is now covered by plugins, see [Plugin support 🔌]
## ⚠️ Memory backend deprecation 💾
The Milvus, Pinecone and Weaviate memory backends were rendered incompatible
by work on the memory system, and have been removed in `master`. The Redis
memory store was also temporarily removed; we will merge a new implementation ASAP.
Whether built-in support for the others will be added back in the future is subject to
discussion, feel free to pitch in: https://github.com/Significant-Gravitas/Auto-GPT/discussions/4280

## Document support in `read_file` 📄
Auto-GPT can now read text from document files, with support added for PDF, DOCX, CSV,
HTML, TeX and more!

## Managing Auto-GPT's access to commands ❌🔧
You can now disable set of built-in commands through the *DISABLED_COMMAND_CATEGORIES*
variable in .env. Specific shell commands can also be disabled using *DENY_COMMANDS*,
or selectively enabled using *ALLOW_COMMANDS*.

## Further fixes and changes 🛠️
Other highlights include improvements to self-feedback mode and continuous mode,
documentation, docker and devcontainer setups, and much more. Most of the improvements
that were made are not yet visible to users, but will pay off in the long term.
Take a look at the Release Notes on Github for the full changelog!
https://github.com/Significant-Gravitas/Auto-GPT/releases
@@ -1 +1,14 @@
This document now lives at https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing
We maintain a knowledgebase at this [wiki](https://github.com/Significant-Gravitas/Nexus/wiki)

We would like to say "We value all contributions". After all, we are an open-source project, so we should say something fluffy like this, right?

However the reality is that some contributions are SUPER-valuable, while others create more trouble than they are worth and actually _create_ work for the core team.

If you wish to contribute, please look through the wiki [contributing](https://github.com/Significant-Gravitas/Nexus/wiki/Contributing) page.

If you wish to involve with the project (beyond just contributing PRs), please read the wiki [catalyzing](https://github.com/Significant-Gravitas/Nexus/wiki/Catalyzing) page.

In fact, why not just look through the whole wiki (it's only a few pages) and hop on our discord (you'll find it in the wiki).

❤️ & 🔆
The team @ Auto-GPT
@@ -38,5 +38,6 @@ WORKDIR /app
|
||||
ONBUILD COPY autogpt/ ./autogpt
|
||||
ONBUILD COPY scripts/ ./scripts
|
||||
ONBUILD COPY plugins/ ./plugins
|
||||
ONBUILD RUN mkdir ./data
|
||||
|
||||
FROM autogpt-${BUILD_TYPE} AS auto-gpt
|
||||
|
||||
@@ -1,20 +1,29 @@
|
||||
import signal
|
||||
import sys
|
||||
from datetime import datetime
|
||||
|
||||
from colorama import Fore, Style
|
||||
|
||||
from autogpt.app import execute_command, get_command
|
||||
from autogpt.commands.command import CommandRegistry
|
||||
from autogpt.config import Config
|
||||
from autogpt.config.ai_config import AIConfig
|
||||
from autogpt.json_utils.json_fix_llm import fix_json_using_multiple_techniques
|
||||
from autogpt.json_utils.utilities import LLM_DEFAULT_RESPONSE_FORMAT, validate_json
|
||||
from autogpt.llm import chat_with_ai, create_chat_completion, create_chat_message
|
||||
from autogpt.llm.token_counter import count_string_tokens
|
||||
from autogpt.llm.base import ChatSequence
|
||||
from autogpt.llm.chat import chat_with_ai, create_chat_completion
|
||||
from autogpt.llm.utils import count_string_tokens
|
||||
from autogpt.log_cycle.log_cycle import (
|
||||
FULL_MESSAGE_HISTORY_FILE_NAME,
|
||||
NEXT_ACTION_FILE_NAME,
|
||||
PROMPT_SUPERVISOR_FEEDBACK_FILE_NAME,
|
||||
SUPERVISOR_FEEDBACK_FILE_NAME,
|
||||
USER_INPUT_FILE_NAME,
|
||||
LogCycleHandler,
|
||||
)
|
||||
from autogpt.logs import logger, print_assistant_thoughts
|
||||
from autogpt.memory.message_history import MessageHistory
|
||||
from autogpt.memory.vector import VectorMemory
|
||||
from autogpt.speech import say_text
|
||||
from autogpt.spinner import Spinner
|
||||
from autogpt.utils import clean_input
|
||||
@@ -27,7 +36,6 @@ class Agent:
|
||||
Attributes:
|
||||
ai_name: The name of the agent.
|
||||
memory: The memory object to use.
|
||||
full_message_history: The full message history.
|
||||
next_action_count: The number of actions to execute.
|
||||
system_prompt: The system prompt is the initial prompt that defines everything
|
||||
the AI needs to know to achieve its task successfully.
|
||||
@@ -52,24 +60,19 @@ class Agent:
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
ai_name,
|
||||
memory,
|
||||
full_message_history,
|
||||
next_action_count,
|
||||
command_registry,
|
||||
config,
|
||||
system_prompt,
|
||||
triggering_prompt,
|
||||
workspace_directory,
|
||||
ai_name: str,
|
||||
memory: VectorMemory,
|
||||
next_action_count: int,
|
||||
command_registry: CommandRegistry,
|
||||
config: AIConfig,
|
||||
system_prompt: str,
|
||||
triggering_prompt: str,
|
||||
workspace_directory: str,
|
||||
):
|
||||
cfg = Config()
|
||||
self.ai_name = ai_name
|
||||
self.memory = memory
|
||||
self.summary_memory = (
|
||||
"I was created." # Initial memory necessary to avoid hallucination
|
||||
)
|
||||
self.last_memory_index = 0
|
||||
self.full_message_history = full_message_history
|
||||
self.history = MessageHistory(self)
|
||||
self.next_action_count = next_action_count
|
||||
self.command_registry = command_registry
|
||||
self.config = config
|
||||
@@ -88,6 +91,20 @@ class Agent:
|
||||
arguments = None
|
||||
user_input = ""
|
||||
|
||||
# Signal handler for interrupting y -N
|
||||
def signal_handler(signum, frame):
|
||||
if self.next_action_count == 0:
|
||||
sys.exit()
|
||||
else:
|
||||
print(
|
||||
Fore.RED
|
||||
+ "Interrupt signal received. Stopping continuous command execution."
|
||||
+ Style.RESET_ALL
|
||||
)
|
||||
self.next_action_count = 0
|
||||
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
|
||||
while True:
|
||||
# Discontinue if continuous limit is reached
|
||||
self.cycle_count += 1
|
||||
@@ -96,7 +113,7 @@ class Agent:
|
||||
self.config.ai_name,
|
||||
self.created_at,
|
||||
self.cycle_count,
|
||||
self.full_message_history,
|
||||
[m.raw() for m in self.history],
|
||||
FULL_MESSAGE_HISTORY_FILE_NAME,
|
||||
)
|
||||
if (
|
||||
@@ -109,15 +126,15 @@ class Agent:
|
||||
)
|
||||
break
|
||||
# Send message to AI, get response
|
||||
with Spinner("Thinking... "):
|
||||
with Spinner("Thinking... ", plain_output=cfg.plain_output):
|
||||
assistant_reply = chat_with_ai(
|
||||
cfg,
|
||||
self,
|
||||
self.system_prompt,
|
||||
self.triggering_prompt,
|
||||
self.full_message_history,
|
||||
self.memory,
|
||||
cfg.fast_token_limit,
|
||||
) # TODO: This hardcodes the model to use GPT3.5. Make this an argument
|
||||
cfg.fast_llm_model,
|
||||
)
|
||||
|
||||
assistant_reply_json = fix_json_using_multiple_techniques(assistant_reply)
|
||||
for plugin in cfg.plugins:
|
||||
@@ -242,9 +259,7 @@ class Agent:
|
||||
|
||||
# Execute command
|
||||
if command_name is not None and command_name.lower().startswith("error"):
|
||||
result = (
|
||||
f"Command {command_name} threw the following error: {arguments}"
|
||||
)
|
||||
result = f"Could not execute command: {arguments}"
|
||||
elif command_name == "human_feedback":
|
||||
result = f"Human feedback: {user_input}"
|
||||
elif command_name == "self_feedback":
|
||||
@@ -261,6 +276,7 @@ class Agent:
|
||||
command_name,
|
||||
arguments,
|
||||
self.config.prompt_generator,
|
||||
config=cfg,
|
||||
)
|
||||
result = f"Command {command_name} returned: " f"{command_result}"
|
||||
|
||||
@@ -268,7 +284,7 @@ class Agent:
|
||||
str(command_result), cfg.fast_llm_model
|
||||
)
|
||||
memory_tlength = count_string_tokens(
|
||||
str(self.summary_memory), cfg.fast_llm_model
|
||||
str(self.history.summary_message()), cfg.fast_llm_model
|
||||
)
|
||||
if result_tlength + memory_tlength + 600 > cfg.fast_token_limit:
|
||||
result = f"Failure: command {command_name} returned too much output. \
|
||||
@@ -284,12 +300,10 @@ class Agent:
|
||||
# Check if there's a result from the command append it to the message
|
||||
# history
|
||||
if result is not None:
|
||||
self.full_message_history.append(create_chat_message("system", result))
|
||||
self.history.add("system", result, "action_result")
|
||||
logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
|
||||
else:
|
||||
self.full_message_history.append(
|
||||
create_chat_message("system", "Unable to execute command")
|
||||
)
|
||||
self.history.add("system", "Unable to execute command", "action_result")
|
||||
logger.typewriter_log(
|
||||
"SYSTEM: ", Fore.YELLOW, "Unable to execute command"
|
||||
)
|
||||
@@ -324,7 +338,25 @@ class Agent:
|
||||
plan = thoughts.get("plan", "")
|
||||
thought = thoughts.get("thoughts", "")
|
||||
feedback_thoughts = thought + reasoning + plan
|
||||
return create_chat_completion(
|
||||
[{"role": "user", "content": feedback_prompt + feedback_thoughts}],
|
||||
llm_model,
|
||||
|
||||
prompt = ChatSequence.for_model(llm_model)
|
||||
prompt.add("user", feedback_prompt + feedback_thoughts)
|
||||
|
||||
self.log_cycle_handler.log_cycle(
|
||||
self.config.ai_name,
|
||||
self.created_at,
|
||||
self.cycle_count,
|
||||
prompt.raw(),
|
||||
PROMPT_SUPERVISOR_FEEDBACK_FILE_NAME,
|
||||
)
|
||||
|
||||
feedback = create_chat_completion(prompt)
|
||||
|
||||
self.log_cycle_handler.log_cycle(
|
||||
self.config.ai_name,
|
||||
self.created_at,
|
||||
self.cycle_count,
|
||||
feedback,
|
||||
SUPERVISOR_FEEDBACK_FILE_NAME,
|
||||
)
|
||||
return feedback
|
||||
|
||||
@@ -1,10 +1,9 @@
|
||||
"""Agent manager for managing GPT agents"""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List
|
||||
|
||||
from autogpt.config.config import Config
|
||||
from autogpt.llm import Message, create_chat_completion
|
||||
from autogpt.config import Config
|
||||
from autogpt.llm.base import ChatSequence
|
||||
from autogpt.llm.chat import Message, create_chat_completion
|
||||
from autogpt.singleton import Singleton
|
||||
|
||||
|
||||
@@ -13,55 +12,55 @@ class AgentManager(metaclass=Singleton):
|
||||
|
||||
def __init__(self):
|
||||
self.next_key = 0
|
||||
self.agents = {} # key, (task, full_message_history, model)
|
||||
self.agents: dict[
|
||||
int, tuple[str, list[Message], str]
|
||||
] = {} # key, (task, full_message_history, model)
|
||||
self.cfg = Config()
|
||||
|
||||
# Create new GPT agent
|
||||
# TODO: Centralise use of create_chat_completion() to globally enforce token limit
|
||||
|
||||
def create_agent(self, task: str, prompt: str, model: str) -> tuple[int, str]:
|
||||
def create_agent(
|
||||
self, task: str, creation_prompt: str, model: str
|
||||
) -> tuple[int, str]:
|
||||
"""Create a new agent and return its key
|
||||
|
||||
Args:
|
||||
task: The task to perform
|
||||
prompt: The prompt to use
|
||||
model: The model to use
|
||||
creation_prompt: Prompt passed to the LLM at creation
|
||||
model: The model to use to run this agent
|
||||
|
||||
Returns:
|
||||
The key of the new agent
|
||||
"""
|
||||
messages: List[Message] = [
|
||||
{"role": "user", "content": prompt},
|
||||
]
|
||||
messages = ChatSequence.for_model(model, [Message("user", creation_prompt)])
|
||||
|
||||
for plugin in self.cfg.plugins:
|
||||
if not plugin.can_handle_pre_instruction():
|
||||
continue
|
||||
if plugin_messages := plugin.pre_instruction(messages):
|
||||
messages.extend(iter(plugin_messages))
|
||||
if plugin_messages := plugin.pre_instruction(messages.raw()):
|
||||
messages.extend([Message(**raw_msg) for raw_msg in plugin_messages])
|
||||
# Start GPT instance
|
||||
agent_reply = create_chat_completion(
|
||||
model=model,
|
||||
messages=messages,
|
||||
)
|
||||
agent_reply = create_chat_completion(prompt=messages)
|
||||
|
||||
messages.append({"role": "assistant", "content": agent_reply})
|
||||
messages.add("assistant", agent_reply)
|
||||
|
||||
plugins_reply = ""
|
||||
for i, plugin in enumerate(self.cfg.plugins):
|
||||
if not plugin.can_handle_on_instruction():
|
||||
continue
|
||||
if plugin_result := plugin.on_instruction(messages):
|
||||
if plugin_result := plugin.on_instruction([m.raw() for m in messages]):
|
||||
sep = "\n" if i else ""
|
||||
plugins_reply = f"{plugins_reply}{sep}{plugin_result}"
|
||||
|
||||
if plugins_reply and plugins_reply != "":
|
||||
messages.append({"role": "assistant", "content": plugins_reply})
|
||||
messages.add("assistant", plugins_reply)
|
||||
key = self.next_key
|
||||
# This is done instead of len(agents) to make keys unique even if agents
|
||||
# are deleted
|
||||
self.next_key += 1
|
||||
|
||||
self.agents[key] = (task, messages, model)
|
||||
self.agents[key] = (task, list(messages), model)
|
||||
|
||||
for plugin in self.cfg.plugins:
|
||||
if not plugin.can_handle_post_instruction():
|
||||
@@ -83,33 +82,30 @@ class AgentManager(metaclass=Singleton):
|
||||
task, messages, model = self.agents[int(key)]
|
||||
|
||||
# Add user message to message history before sending to agent
|
||||
messages.append({"role": "user", "content": message})
|
||||
messages = ChatSequence.for_model(model, messages)
|
||||
messages.add("user", message)
|
||||
|
||||
for plugin in self.cfg.plugins:
|
||||
if not plugin.can_handle_pre_instruction():
|
||||
continue
|
||||
if plugin_messages := plugin.pre_instruction(messages):
|
||||
for plugin_message in plugin_messages:
|
||||
messages.append(plugin_message)
|
||||
if plugin_messages := plugin.pre_instruction([m.raw() for m in messages]):
|
||||
messages.extend([Message(**raw_msg) for raw_msg in plugin_messages])
|
||||
|
||||
# Start GPT instance
|
||||
agent_reply = create_chat_completion(
|
||||
model=model,
|
||||
messages=messages,
|
||||
)
|
||||
agent_reply = create_chat_completion(prompt=messages)
|
||||
|
||||
messages.append({"role": "assistant", "content": agent_reply})
|
||||
messages.add("assistant", agent_reply)
|
||||
|
||||
plugins_reply = agent_reply
|
||||
for i, plugin in enumerate(self.cfg.plugins):
|
||||
if not plugin.can_handle_on_instruction():
|
||||
continue
|
||||
if plugin_result := plugin.on_instruction(messages):
|
||||
if plugin_result := plugin.on_instruction([m.raw() for m in messages]):
|
||||
sep = "\n" if i else ""
|
||||
plugins_reply = f"{plugins_reply}{sep}{plugin_result}"
|
||||
# Update full message history
|
||||
if plugins_reply and plugins_reply != "":
|
||||
messages.append({"role": "assistant", "content": plugins_reply})
|
||||
messages.add("assistant", plugins_reply)
|
||||
|
||||
for plugin in self.cfg.plugins:
|
||||
if not plugin.can_handle_post_instruction():
|
||||
|
||||
@@ -1,21 +1,16 @@
|
||||
""" Command and Control """
|
||||
import json
|
||||
from typing import Dict, List, NoReturn, Union
|
||||
from typing import Dict, List, Union
|
||||
|
||||
from autogpt.agent.agent_manager import AgentManager
|
||||
from autogpt.commands.command import CommandRegistry, command
|
||||
from autogpt.commands.web_requests import scrape_links, scrape_text
|
||||
from autogpt.config import Config
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory import get_memory
|
||||
from autogpt.processing.text import summarize_text
|
||||
from autogpt.prompts.generator import PromptGenerator
|
||||
from autogpt.speech import say_text
|
||||
from autogpt.url_utils.validators import validate_url
|
||||
|
||||
CFG = Config()
|
||||
AGENT_MANAGER = AgentManager()
|
||||
|
||||
|
||||
def is_valid_int(value: str) -> bool:
|
||||
"""Check if the value is a valid integer
|
||||
@@ -94,6 +89,7 @@ def execute_command(
|
||||
command_name: str,
|
||||
arguments,
|
||||
prompt: PromptGenerator,
|
||||
config: Config,
|
||||
):
|
||||
"""Execute the command and return the result
|
||||
|
||||
@@ -109,29 +105,25 @@ def execute_command(
|
||||
|
||||
# If the command is found, call it with the provided arguments
|
||||
if cmd:
|
||||
return cmd(**arguments)
|
||||
return cmd(**arguments, config=config)
|
||||
|
||||
# TODO: Remove commands below after they are moved to the command registry.
|
||||
command_name = map_command_synonyms(command_name.lower())
|
||||
|
||||
if command_name == "memory_add":
|
||||
return get_memory(CFG).add(arguments["string"])
|
||||
|
||||
# TODO: Change these to take in a file rather than pasted code, if
|
||||
# non-file is given, return instructions "Input should be a python
|
||||
# filepath, write your code to file and try again
|
||||
else:
|
||||
for command in prompt.commands:
|
||||
if (
|
||||
command_name == command["label"].lower()
|
||||
or command_name == command["name"].lower()
|
||||
):
|
||||
return command["function"](**arguments)
|
||||
return (
|
||||
f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'"
|
||||
" list for available commands and only respond in the specified JSON"
|
||||
" format."
|
||||
)
|
||||
for command in prompt.commands:
|
||||
if (
|
||||
command_name == command["label"].lower()
|
||||
or command_name == command["name"].lower()
|
||||
):
|
||||
return command["function"](**arguments)
|
||||
return (
|
||||
f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'"
|
||||
" list for available commands and only respond in the specified JSON"
|
||||
" format."
|
||||
)
|
||||
except Exception as e:
|
||||
return f"Error: {str(e)}"
|
||||
|
||||
@@ -140,8 +132,8 @@ def execute_command(
|
||||
"get_text_summary", "Get text summary", '"url": "<url>", "question": "<question>"'
|
||||
)
|
||||
@validate_url
|
||||
def get_text_summary(url: str, question: str) -> str:
|
||||
"""Return the results of a Google search
|
||||
def get_text_summary(url: str, question: str, config: Config) -> str:
|
||||
"""Get the text summary of a webpage
|
||||
|
||||
Args:
|
||||
url (str): The url to scrape
|
||||
@@ -151,14 +143,15 @@ def get_text_summary(url: str, question: str) -> str:
|
||||
str: The summary of the text
|
||||
"""
|
||||
text = scrape_text(url)
|
||||
summary = summarize_text(url, text, question)
|
||||
summary, _ = summarize_text(text, question=question)
|
||||
|
||||
return f""" "Result" : {summary}"""
|
||||
|
||||
|
||||
@command("get_hyperlinks", "Get text summary", '"url": "<url>"')
|
||||
@command("get_hyperlinks", "Get hyperlinks", '"url": "<url>"')
|
||||
@validate_url
|
||||
def get_hyperlinks(url: str) -> Union[str, List[str]]:
|
||||
"""Return the results of a Google search
|
||||
def get_hyperlinks(url: str, config: Config) -> Union[str, List[str]]:
|
||||
"""Get all hyperlinks on a webpage
|
||||
|
||||
Args:
|
||||
url (str): The url to scrape
|
||||
@@ -166,7 +159,7 @@ def get_hyperlinks(url: str) -> Union[str, List[str]]:
|
||||
Returns:
|
||||
str or list: The hyperlinks on the page
|
||||
"""
|
||||
return scrape_links(url)
|
||||
return scrape_links(url, config)
|
||||
|
||||
|
||||
@command(
|
||||
@@ -174,7 +167,7 @@ def get_hyperlinks(url: str) -> Union[str, List[str]]:
|
||||
"Start GPT Agent",
|
||||
'"name": "<name>", "task": "<short_task_desc>", "prompt": "<prompt>"',
|
||||
)
|
||||
def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) -> str:
|
||||
def start_agent(name: str, task: str, prompt: str, config: Config, model=None) -> str:
|
||||
"""Start an agent with a given name, task, and prompt
|
||||
|
||||
Args:
|
||||
@@ -186,6 +179,8 @@ def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) ->
|
||||
Returns:
|
||||
str: The response of the agent
|
||||
"""
|
||||
agent_manager = AgentManager()
|
||||
|
||||
# Remove underscores from name
|
||||
voice_name = name.replace("_", " ")
|
||||
|
||||
@@ -193,48 +188,48 @@ def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) ->
|
||||
agent_intro = f"{voice_name} here, Reporting for duty!"
|
||||
|
||||
# Create agent
|
||||
if CFG.speak_mode:
|
||||
if config.speak_mode:
|
||||
say_text(agent_intro, 1)
|
||||
key, ack = AGENT_MANAGER.create_agent(task, first_message, model)
|
||||
key, ack = agent_manager.create_agent(task, first_message, model)
|
||||
|
||||
if CFG.speak_mode:
|
||||
if config.speak_mode:
|
||||
say_text(f"Hello {voice_name}. Your task is as follows. {task}.")
|
||||
|
||||
# Assign task (prompt), get response
|
||||
agent_response = AGENT_MANAGER.message_agent(key, prompt)
|
||||
agent_response = agent_manager.message_agent(key, prompt)
|
||||
|
||||
return f"Agent {name} created with key {key}. First response: {agent_response}"
|
||||
|
||||
|
||||
@command("message_agent", "Message GPT Agent", '"key": "<key>", "message": "<message>"')
|
||||
def message_agent(key: str, message: str) -> str:
|
||||
def message_agent(key: str, message: str, config: Config) -> str:
|
||||
"""Message an agent with a given key and message"""
|
||||
# Check if the key is a valid integer
|
||||
if is_valid_int(key):
|
||||
agent_response = AGENT_MANAGER.message_agent(int(key), message)
|
||||
agent_response = AgentManager().message_agent(int(key), message)
|
||||
else:
|
||||
return "Invalid key, must be an integer."
|
||||
|
||||
# Speak response
|
||||
if CFG.speak_mode:
|
||||
if config.speak_mode:
|
||||
say_text(agent_response, 1)
|
||||
return agent_response
|
||||
|
||||
|
||||
@command("list_agents", "List GPT Agents", "")
|
||||
def list_agents() -> str:
|
||||
@command("list_agents", "List GPT Agents", "() -> str")
|
||||
def list_agents(config: Config) -> str:
|
||||
"""List all agents
|
||||
|
||||
Returns:
|
||||
str: A list of all agents
|
||||
"""
|
||||
return "List of agents:\n" + "\n".join(
|
||||
[str(x[0]) + ": " + x[1] for x in AGENT_MANAGER.list_agents()]
|
||||
[str(x[0]) + ": " + x[1] for x in AgentManager().list_agents()]
|
||||
)
|
||||
|
||||
|
||||
@command("delete_agent", "Delete GPT Agent", '"key": "<key>"')
|
||||
def delete_agent(key: str) -> str:
|
||||
def delete_agent(key: str, config: Config) -> str:
|
||||
"""Delete an agent with a given key
|
||||
|
||||
Args:
|
||||
@@ -243,5 +238,5 @@ def delete_agent(key: str) -> str:
|
||||
Returns:
|
||||
str: A message indicating whether the agent was deleted or not
|
||||
"""
|
||||
result = AGENT_MANAGER.delete_agent(key)
|
||||
result = AgentManager().delete_agent(key)
|
||||
return f"Agent {key} deleted." if result else f"Agent {key} does not exist."
|
||||
|
||||
@@ -15,6 +15,11 @@ import click
|
||||
"-C",
|
||||
help="Specifies which ai_settings.yaml file to use, will also automatically skip the re-prompt.",
|
||||
)
|
||||
@click.option(
|
||||
"--prompt-settings",
|
||||
"-P",
|
||||
help="Specifies which prompt_settings.yaml file to use.",
|
||||
)
|
||||
@click.option(
|
||||
"-l",
|
||||
"--continuous-limit",
|
||||
@@ -66,6 +71,7 @@ def main(
|
||||
continuous: bool,
|
||||
continuous_limit: int,
|
||||
ai_settings: str,
|
||||
prompt_settings: str,
|
||||
skip_reprompt: bool,
|
||||
speak: bool,
|
||||
debug: bool,
|
||||
@@ -91,6 +97,7 @@ def main(
|
||||
continuous,
|
||||
continuous_limit,
|
||||
ai_settings,
|
||||
prompt_settings,
|
||||
skip_reprompt,
|
||||
speak,
|
||||
debug,
|
||||
|
||||
@@ -1,8 +1,13 @@
|
||||
"""Code evaluation module."""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.llm import call_ai_function
|
||||
from autogpt.llm.utils import call_ai_function
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
|
||||
@command(
|
||||
@@ -10,7 +15,7 @@ from autogpt.llm import call_ai_function
|
||||
"Analyze Code",
|
||||
'"code": "<full_code_string>"',
|
||||
)
|
||||
def analyze_code(code: str) -> list[str]:
|
||||
def analyze_code(code: str, config: Config) -> list[str]:
|
||||
"""
|
||||
A function that takes in a string and returns a response from create chat
|
||||
completion api call.
|
||||
@@ -28,4 +33,4 @@ def analyze_code(code: str) -> list[str]:
|
||||
"Analyzes the given code and returns a list of suggestions for improvements."
|
||||
)
|
||||
|
||||
return call_ai_function(function_string, args, description_string)
|
||||
return call_ai_function(function_string, args, description_string, config=config)
|
||||
|
||||
@@ -1,22 +1,25 @@
|
||||
"""Commands for converting audio to text."""
|
||||
import json
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import requests
|
||||
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.config import Config
|
||||
|
||||
CFG = Config()
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
|
||||
@command(
|
||||
"read_audio_from_file",
|
||||
"Convert Audio to text",
|
||||
'"filename": "<filename>"',
|
||||
CFG.huggingface_audio_to_text_model,
|
||||
"Configure huggingface_audio_to_text_model.",
|
||||
lambda config: config.huggingface_audio_to_text_model
|
||||
and config.huggingface_api_token,
|
||||
"Configure huggingface_audio_to_text_model and Hugging Face api token.",
|
||||
)
|
||||
def read_audio_from_file(filename: str) -> str:
|
||||
def read_audio_from_file(filename: str, config: Config) -> str:
|
||||
"""
|
||||
Convert audio to text.
|
||||
|
||||
@@ -28,10 +31,10 @@ def read_audio_from_file(filename: str) -> str:
|
||||
"""
|
||||
with open(filename, "rb") as audio_file:
|
||||
audio = audio_file.read()
|
||||
return read_audio(audio)
|
||||
return read_audio(audio, config)
|
||||
|
||||
|
||||
def read_audio(audio: bytes) -> str:
|
||||
def read_audio(audio: bytes, config: Config) -> str:
|
||||
"""
|
||||
Convert audio to text.
|
||||
|
||||
@@ -41,9 +44,9 @@ def read_audio(audio: bytes) -> str:
|
||||
Returns:
|
||||
str: The text from the audio
|
||||
"""
|
||||
model = CFG.huggingface_audio_to_text_model
|
||||
model = config.huggingface_audio_to_text_model
|
||||
api_url = f"https://api-inference.huggingface.co/models/{model}"
|
||||
api_token = CFG.huggingface_api_token
|
||||
api_token = config.huggingface_api_token
|
||||
headers = {"Authorization": f"Bearer {api_token}"}
|
||||
|
||||
if api_token is None:
|
||||
|
||||
@@ -3,6 +3,9 @@ import importlib
|
||||
import inspect
|
||||
from typing import Any, Callable, Optional
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.logs import logger
|
||||
|
||||
# Unique identifier for auto-gpt commands
|
||||
AUTO_GPT_COMMAND_IDENTIFIER = "auto_gpt_command"
|
||||
|
||||
@@ -22,19 +25,23 @@ class Command:
|
||||
description: str,
|
||||
method: Callable[..., Any],
|
||||
signature: str = "",
|
||||
enabled: bool = True,
|
||||
enabled: bool | Callable[[Config], bool] = True,
|
||||
disabled_reason: Optional[str] = None,
|
||||
):
|
||||
self.name = name
|
||||
self.description = description
|
||||
self.method = method
|
||||
self.signature = signature if signature else str(inspect.signature(self.method))
|
||||
self.signature = signature
|
||||
self.enabled = enabled
|
||||
self.disabled_reason = disabled_reason
|
||||
|
||||
def __call__(self, *args, **kwargs) -> Any:
|
||||
if hasattr(kwargs, "config") and callable(self.enabled):
|
||||
self.enabled = self.enabled(kwargs["config"])
|
||||
if not self.enabled:
|
||||
return f"Command '{self.name}' is disabled: {self.disabled_reason}"
|
||||
if self.disabled_reason:
|
||||
return f"Command '{self.name}' is disabled: {self.disabled_reason}"
|
||||
return f"Command '{self.name}' is disabled"
|
||||
return self.method(*args, **kwargs)
|
||||
|
||||
def __str__(self) -> str:
|
||||
@@ -59,6 +66,10 @@ class CommandRegistry:
|
||||
return importlib.reload(module)
|
||||
|
||||
def register(self, cmd: Command) -> None:
|
||||
if cmd.name in self.commands:
|
||||
logger.warn(
|
||||
f"Command '{cmd.name}' already registered and will be overwritten!"
|
||||
)
|
||||
self.commands[cmd.name] = cmd
|
||||
|
||||
def unregister(self, command_name: str):
|
||||
@@ -127,12 +138,22 @@ class CommandRegistry:
def command(
name: str,
description: str,
signature: str = "",
enabled: bool = True,
signature: str,
enabled: bool | Callable[[Config], bool] = True,
disabled_reason: Optional[str] = None,
) -> Callable[..., Any]:
"""The command decorator is used to create Command objects from ordinary functions."""

# TODO: Remove this in favor of better command management
CFG = Config()

if callable(enabled):
enabled = enabled(CFG)
if not enabled:
if disabled_reason is not None:
logger.debug(f"Command '{name}' is disabled: {disabled_reason}")
return lambda func: func

def decorator(func: Callable[..., Any]) -> Command:
cmd = Command(
name=name,

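With the decorator change above, enabled may now be a predicate over Config rather than a plain bool. A minimal sketch of how a predicate-gated command could be declared against this API (the command name and message below are illustrative, not from the repo):

    from autogpt.commands.command import command
    from autogpt.config import Config

    @command(
        "ping_service",                                   # hypothetical command name
        "Ping an external service",
        '"host": "<host>"',
        lambda config: config.execute_local_commands,     # enabled only when the config flag is set
        "EXECUTE_LOCAL_COMMANDS must be set to 'True' in your .env.",
    )
    def ping_service(host: str, config: Config) -> str:
        return f"pinged {host}"
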
@@ -10,11 +10,9 @@ from autogpt.commands.command import command
|
||||
from autogpt.config import Config
|
||||
from autogpt.logs import logger
|
||||
|
||||
CFG = Config()
|
||||
|
||||
|
||||
@command("execute_python_file", "Execute Python File", '"filename": "<filename>"')
|
||||
def execute_python_file(filename: str) -> str:
|
||||
def execute_python_file(filename: str, config: Config) -> str:
|
||||
"""Execute a Python file in a Docker container and return the output
|
||||
|
||||
Args:
|
||||
@@ -33,7 +31,7 @@ def execute_python_file(filename: str) -> str:
|
||||
|
||||
if we_are_running_in_a_docker_container():
|
||||
result = subprocess.run(
|
||||
f"python {filename}", capture_output=True, encoding="utf8", shell=True
|
||||
["python", filename], capture_output=True, encoding="utf8"
|
||||
)
|
||||
if result.returncode == 0:
|
||||
return result.stdout
|
||||
@@ -65,9 +63,9 @@ def execute_python_file(filename: str) -> str:
|
||||
logger.info(status)
|
||||
container = client.containers.run(
|
||||
image_name,
|
||||
f"python {Path(filename).relative_to(CFG.workspace_path)}",
|
||||
["python", str(Path(filename).relative_to(config.workspace_path))],
|
||||
volumes={
|
||||
CFG.workspace_path: {
|
||||
config.workspace_path: {
|
||||
"bind": "/workspace",
|
||||
"mode": "ro",
|
||||
}
|
||||
@@ -97,16 +95,42 @@ def execute_python_file(filename: str) -> str:
|
||||
return f"Error: {str(e)}"

def validate_command(command: str, config: Config) -> bool:
    """Validate a command to ensure it is allowed

    Args:
        command (str): The command to validate

    Returns:
        bool: True if the command is allowed, False otherwise
    """
    tokens = command.split()

    if not tokens:
        return False

    if config.deny_commands and tokens[0] not in config.deny_commands:
        return False

    for keyword in config.allow_commands:
        if keyword in tokens:
            return True
    if config.allow_commands:
        return False

    return True

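Reading the new helper literally, a quick illustration of how the allow list gates a shell command (the stand-in config below is hypothetical; in the real flow these lists come from the DENY_COMMANDS / ALLOW_COMMANDS settings):

    class FakeConfig:                      # stand-in for autogpt.config.Config
        deny_commands: list = []           # parsed from DENY_COMMANDS
        allow_commands: list = ["echo"]    # parsed from ALLOW_COMMANDS

    validate_command("echo hello", FakeConfig())   # True: "echo" is in the allow list
    validate_command("rm -rf /", FakeConfig())     # False: allow list is non-empty and unmatched
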
@command(
|
||||
"execute_shell",
|
||||
"Execute Shell Command, non-interactive commands only",
|
||||
'"command_line": "<command_line>"',
|
||||
CFG.execute_local_commands,
|
||||
lambda cfg: cfg.execute_local_commands,
|
||||
"You are not allowed to run local shell commands. To execute"
|
||||
" shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
|
||||
"in your config. Do not attempt to bypass the restriction.",
|
||||
"in your config file: .env - do not attempt to bypass the restriction.",
|
||||
)
|
||||
def execute_shell(command_line: str) -> str:
|
||||
def execute_shell(command_line: str, config: Config) -> str:
|
||||
"""Execute a shell command and return the output
|
||||
|
||||
Args:
|
||||
@@ -115,11 +139,14 @@ def execute_shell(command_line: str) -> str:
|
||||
Returns:
|
||||
str: The output of the command
|
||||
"""
|
||||
if not validate_command(command_line, config):
|
||||
logger.info(f"Command '{command_line}' not allowed")
|
||||
return "Error: This Shell Command is not allowed."
|
||||
|
||||
current_dir = Path.cwd()
|
||||
# Change dir into workspace if necessary
|
||||
if not current_dir.is_relative_to(CFG.workspace_path):
|
||||
os.chdir(CFG.workspace_path)
|
||||
if not current_dir.is_relative_to(config.workspace_path):
|
||||
os.chdir(config.workspace_path)
|
||||
|
||||
logger.info(
|
||||
f"Executing command '{command_line}' in working directory '{os.getcwd()}'"
|
||||
@@ -138,12 +165,12 @@ def execute_shell(command_line: str) -> str:
|
||||
"execute_shell_popen",
|
||||
"Execute Shell Command, non-interactive commands only",
|
||||
'"command_line": "<command_line>"',
|
||||
CFG.execute_local_commands,
|
||||
lambda config: config.execute_local_commands,
|
||||
"You are not allowed to run local shell commands. To execute"
|
||||
" shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
|
||||
"in your config. Do not attempt to bypass the restriction.",
|
||||
)
|
||||
def execute_shell_popen(command_line) -> str:
|
||||
def execute_shell_popen(command_line, config: Config) -> str:
|
||||
"""Execute a shell command with Popen and returns an english description
|
||||
of the event and the process id
|
||||
|
||||
@@ -153,11 +180,14 @@ def execute_shell_popen(command_line) -> str:
|
||||
Returns:
|
||||
str: Description of the fact that the process started and its id
|
||||
"""
|
||||
if not validate_command(command_line, config):
|
||||
logger.info(f"Command '{command_line}' not allowed")
|
||||
return "Error: This Shell Command is not allowed."
|
||||
|
||||
current_dir = os.getcwd()
|
||||
# Change dir into workspace if necessary
|
||||
if CFG.workspace_path not in current_dir:
|
||||
os.chdir(CFG.workspace_path)
|
||||
if config.workspace_path not in current_dir:
|
||||
os.chdir(config.workspace_path)
|
||||
|
||||
logger.info(
|
||||
f"Executing command '{command_line}' in working directory '{os.getcwd()}'"
|
||||
|
||||
@@ -4,20 +4,22 @@ from __future__ import annotations
|
||||
import hashlib
|
||||
import os
|
||||
import os.path
|
||||
from typing import Dict, Generator, Literal, Tuple
|
||||
from typing import TYPE_CHECKING, Generator, Literal
|
||||
|
||||
import charset_normalizer
|
||||
import requests
|
||||
from colorama import Back, Fore
|
||||
from requests.adapters import HTTPAdapter, Retry
|
||||
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.config import Config
|
||||
from autogpt.commands.file_operations_utils import read_textual_file
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory.vector import MemoryItem, VectorMemory
|
||||
from autogpt.spinner import Spinner
|
||||
from autogpt.utils import readable_file_size
|
||||
|
||||
CFG = Config()
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
|
||||
Operation = Literal["write", "append", "delete"]
|
||||
|
||||
@@ -27,7 +29,9 @@ def text_checksum(text: str) -> str:
|
||||
return hashlib.md5(text.encode("utf-8")).hexdigest()
|
||||
|
||||
|
||||
def operations_from_log(log_path: str) -> Generator[Tuple[Operation, str, str | None]]:
|
||||
def operations_from_log(
|
||||
log_path: str,
|
||||
) -> Generator[tuple[Operation, str, str | None], None, None]:
|
||||
"""Parse the file operations log and return a tuple containing the log entries"""
|
||||
try:
|
||||
log = open(log_path, "r", encoding="utf-8")
|
||||
@@ -44,6 +48,7 @@ def operations_from_log(log_path: str) -> Generator[Tuple[Operation, str, str |
|
||||
try:
|
||||
path, checksum = (x.strip() for x in tail.rsplit(" #", maxsplit=1))
|
||||
except ValueError:
|
||||
logger.warn(f"File log entry lacks checksum: '{line}'")
|
||||
path, checksum = tail.strip(), None
|
||||
yield (operation, path, checksum)
|
||||
elif operation == "delete":
|
||||
@@ -52,10 +57,10 @@ def operations_from_log(log_path: str) -> Generator[Tuple[Operation, str, str |
|
||||
log.close()
|
||||
|
||||
|
||||
def file_operations_state(log_path: str) -> Dict:
|
||||
def file_operations_state(log_path: str) -> dict[str, str]:
|
||||
"""Iterates over the operations log and returns the expected state.
|
||||
|
||||
Parses a log file at CFG.file_logger_path to construct a dictionary that maps
|
||||
Parses a log file at config.file_logger_path to construct a dictionary that maps
|
||||
each file path written or appended to its checksum. Deleted files are removed
|
||||
from the dictionary.
|
||||
|
||||
@@ -63,7 +68,7 @@ def file_operations_state(log_path: str) -> Dict:
|
||||
A dictionary mapping file paths to their checksums.
|
||||
|
||||
Raises:
|
||||
FileNotFoundError: If CFG.file_logger_path is not found.
|
||||
FileNotFoundError: If config.file_logger_path is not found.
|
||||
ValueError: If the log file content is not in the expected format.
|
||||
"""
|
||||
state = {}
|
||||
@@ -76,7 +81,7 @@ def file_operations_state(log_path: str) -> Dict:
|
||||
|
||||
|
||||
def is_duplicate_operation(
|
||||
operation: Operation, filename: str, checksum: str | None = None
|
||||
operation: Operation, filename: str, config: Config, checksum: str | None = None
|
||||
) -> bool:
|
||||
"""Check if the operation has already been performed
|
||||
|
||||
@@ -88,7 +93,7 @@ def is_duplicate_operation(
|
||||
Returns:
|
||||
True if the operation has already been performed on the file
|
||||
"""
|
||||
state = file_operations_state(CFG.file_logger_path)
|
||||
state = file_operations_state(config.file_logger_path)
|
||||
if operation == "delete" and filename not in state:
|
||||
return True
|
||||
if operation == "write" and state.get(filename) == checksum:
|
||||
@@ -96,7 +101,9 @@ def is_duplicate_operation(
|
||||
return False
|
||||
|
||||
|
||||
def log_operation(operation: str, filename: str, checksum: str | None = None) -> None:
def log_operation(
operation: str, filename: str, config: Config, checksum: str | None = None
) -> None:
"""Log the file operation to the file_logger.txt

Args:
@@ -108,7 +115,7 @@ def log_operation(operation: str, filename: str, checksum: str | None = None) ->
if checksum is not None:
log_entry += f" #{checksum}"
logger.debug(f"Logging file operation: {log_entry}")
append_to_file(CFG.file_logger_path, f"{log_entry}\n", should_log=False)
append_to_file(config.file_logger_path, f"{log_entry}\n", config, should_log=False)

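Given the entry format built in log_operation above (operation, filename, optional " #"-separated checksum), the file_logger.txt that operations_from_log parses would contain lines roughly like the following (paths and checksums are illustrative):

    write: plan.md #1f3870be274f6c49b3e31a0c6728957f
    append: plan.md #5d41402abc4b2a76b9719d911017c592
    delete: plan.md
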
def split_file(
@@ -131,7 +138,7 @@ def split_file(
while start < content_length:
end = start + max_length
if end + overlap < content_length:
chunk = content[start : end + overlap - 1]
chunk = content[start : end + max(overlap - 1, 0)]
else:
chunk = content[start:content_length]

@@ -143,8 +150,8 @@ def split_file(
start += max_length - overlap

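The max() guard added to the chunk slice matters when overlap is 0; with toy numbers (not from the repo):

    content = "abcdefghij"
    max_length, overlap = 4, 0
    end = 0 + max_length
    content[0 : end + overlap - 1]           # old slice -> "abc", silently drops a character
    content[0 : end + max(overlap - 1, 0)]   # new slice -> "abcd"
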
@command("read_file", "Read file", '"filename": "<filename>"')
|
||||
def read_file(filename: str) -> str:
|
||||
@command("read_file", "Read a file", '"filename": "<filename>"')
|
||||
def read_file(filename: str, config: Config) -> str:
|
||||
"""Read a file and return the contents
|
||||
|
||||
Args:
|
||||
@@ -154,50 +161,46 @@ def read_file(filename: str) -> str:
|
||||
str: The contents of the file
|
||||
"""
|
||||
try:
|
||||
charset_match = charset_normalizer.from_path(filename).best()
|
||||
encoding = charset_match.encoding
|
||||
logger.debug(f"Read file '{filename}' with encoding '{encoding}'")
|
||||
return str(charset_match)
|
||||
except Exception as err:
|
||||
return f"Error: {err}"
|
||||
content = read_textual_file(filename, logger)
|
||||
|
||||
# TODO: invalidate/update memory when file is edited
|
||||
file_memory = MemoryItem.from_text_file(content, filename)
|
||||
if len(file_memory.chunks) > 1:
|
||||
return file_memory.summary
|
||||
|
||||
return content
|
||||
except Exception as e:
|
||||
return f"Error: {str(e)}"
|
||||
|
||||
|
||||
def ingest_file(
|
||||
filename: str, memory, max_length: int = 4000, overlap: int = 200
|
||||
filename: str,
|
||||
memory: VectorMemory,
|
||||
) -> None:
|
||||
"""
|
||||
Ingest a file by reading its content, splitting it into chunks with a specified
|
||||
maximum length and overlap, and adding the chunks to the memory storage.
|
||||
|
||||
:param filename: The name of the file to ingest
|
||||
:param memory: An object with an add() method to store the chunks in memory
|
||||
:param max_length: The maximum length of each chunk, default is 4000
|
||||
:param overlap: The number of overlapping characters between chunks, default is 200
|
||||
Args:
|
||||
filename: The name of the file to ingest
|
||||
memory: An object with an add() method to store the chunks in memory
|
||||
"""
|
||||
try:
|
||||
logger.info(f"Working with file {filename}")
|
||||
logger.info(f"Ingesting file {filename}")
|
||||
content = read_file(filename)
|
||||
content_length = len(content)
|
||||
logger.info(f"File length: {content_length} characters")
|
||||
|
||||
chunks = list(split_file(content, max_length=max_length, overlap=overlap))
|
||||
# TODO: differentiate between different types of files
|
||||
file_memory = MemoryItem.from_text_file(content, filename)
|
||||
logger.debug(f"Created memory: {file_memory.dump()}")
|
||||
memory.add(file_memory)
|
||||
|
||||
num_chunks = len(chunks)
|
||||
for i, chunk in enumerate(chunks):
|
||||
logger.info(f"Ingesting chunk {i + 1} / {num_chunks} into memory")
|
||||
memory_to_add = (
|
||||
f"Filename: {filename}\n" f"Content part#{i + 1}/{num_chunks}: {chunk}"
|
||||
)
|
||||
|
||||
memory.add(memory_to_add)
|
||||
|
||||
logger.info(f"Done ingesting {num_chunks} chunks from {filename}.")
|
||||
logger.info(f"Ingested {len(file_memory.e_chunks)} chunks from {filename}")
|
||||
except Exception as err:
|
||||
logger.info(f"Error while ingesting file '{filename}': {err}")
|
||||
logger.warn(f"Error while ingesting file '{filename}': {err}")
|
||||
|
||||
|
||||
@command("write_to_file", "Write to file", '"filename": "<filename>", "text": "<text>"')
|
||||
def write_to_file(filename: str, text: str) -> str:
|
||||
def write_to_file(filename: str, text: str, config: Config) -> str:
|
||||
"""Write text to a file
|
||||
|
||||
Args:
|
||||
@@ -208,14 +211,14 @@ def write_to_file(filename: str, text: str) -> str:
|
||||
str: A message indicating success or failure
|
||||
"""
|
||||
checksum = text_checksum(text)
|
||||
if is_duplicate_operation("write", filename, checksum):
|
||||
if is_duplicate_operation("write", filename, config, checksum):
|
||||
return "Error: File has already been updated."
|
||||
try:
|
||||
directory = os.path.dirname(filename)
|
||||
os.makedirs(directory, exist_ok=True)
|
||||
with open(filename, "w", encoding="utf-8") as f:
|
||||
f.write(text)
|
||||
log_operation("write", filename, checksum)
|
||||
log_operation("write", filename, config, checksum)
|
||||
return "File written to successfully."
|
||||
except Exception as err:
|
||||
return f"Error: {err}"
|
||||
@@ -224,7 +227,9 @@ def write_to_file(filename: str, text: str) -> str:
|
||||
@command(
|
||||
"append_to_file", "Append to file", '"filename": "<filename>", "text": "<text>"'
|
||||
)
|
||||
def append_to_file(filename: str, text: str, should_log: bool = True) -> str:
|
||||
def append_to_file(
|
||||
filename: str, text: str, config: Config, should_log: bool = True
|
||||
) -> str:
|
||||
"""Append text to a file
|
||||
|
||||
Args:
|
||||
@@ -244,7 +249,7 @@ def append_to_file(filename: str, text: str, should_log: bool = True) -> str:
|
||||
if should_log:
|
||||
with open(filename, "r", encoding="utf-8") as f:
|
||||
checksum = text_checksum(f.read())
|
||||
log_operation("append", filename, checksum=checksum)
|
||||
log_operation("append", filename, config, checksum=checksum)
|
||||
|
||||
return "Text appended successfully."
|
||||
except Exception as err:
|
||||
@@ -252,7 +257,7 @@ def append_to_file(filename: str, text: str, should_log: bool = True) -> str:
|
||||
|
||||
|
||||
@command("delete_file", "Delete file", '"filename": "<filename>"')
|
||||
def delete_file(filename: str) -> str:
|
||||
def delete_file(filename: str, config: Config) -> str:
|
||||
"""Delete a file
|
||||
|
||||
Args:
|
||||
@@ -261,18 +266,18 @@ def delete_file(filename: str) -> str:
|
||||
Returns:
|
||||
str: A message indicating success or failure
|
||||
"""
|
||||
if is_duplicate_operation("delete", filename):
|
||||
if is_duplicate_operation("delete", filename, config):
|
||||
return "Error: File has already been deleted."
|
||||
try:
|
||||
os.remove(filename)
|
||||
log_operation("delete", filename)
|
||||
log_operation("delete", filename, config)
|
||||
return "File deleted successfully."
|
||||
except Exception as err:
|
||||
return f"Error: {err}"
|
||||
|
||||
|
||||
@command("list_files", "List Files in Directory", '"directory": "<directory>"')
|
||||
def list_files(directory: str) -> list[str]:
|
||||
def list_files(directory: str, config: Config) -> list[str]:
|
||||
"""lists files in a directory recursively
|
||||
|
||||
Args:
|
||||
@@ -288,7 +293,7 @@ def list_files(directory: str) -> list[str]:
|
||||
if file.startswith("."):
|
||||
continue
|
||||
relative_path = os.path.relpath(
|
||||
os.path.join(root, file), CFG.workspace_path
|
||||
os.path.join(root, file), config.workspace_path
|
||||
)
|
||||
found_files.append(relative_path)
|
||||
|
||||
@@ -299,10 +304,10 @@ def list_files(directory: str) -> list[str]:
|
||||
"download_file",
|
||||
"Download File",
|
||||
'"url": "<url>", "filename": "<filename>"',
|
||||
CFG.allow_downloads,
|
||||
lambda config: config.allow_downloads,
|
||||
"Error: You do not have user authorization to download files locally.",
|
||||
)
|
||||
def download_file(url, filename):
|
||||
def download_file(url, filename, config: Config):
|
||||
"""Downloads a file
|
||||
Args:
|
||||
url (str): URL of the file to download
|
||||
@@ -312,7 +317,7 @@ def download_file(url, filename):
|
||||
directory = os.path.dirname(filename)
|
||||
os.makedirs(directory, exist_ok=True)
|
||||
message = f"{Fore.YELLOW}Downloading file from {Back.LIGHTBLUE_EX}{url}{Back.RESET}{Fore.RESET}"
|
||||
with Spinner(message) as spinner:
|
||||
with Spinner(message, plain_output=config.plain_output) as spinner:
|
||||
session = requests.Session()
|
||||
retry = Retry(total=3, backoff_factor=1, status_forcelist=[502, 503, 504])
|
||||
adapter = HTTPAdapter(max_retries=retry)
|
||||
|
||||
autogpt/commands/file_operations_utils.py (new file, 159 lines)
@@ -0,0 +1,159 @@
|
||||
import json
|
||||
import os
|
||||
|
||||
import charset_normalizer
|
||||
import docx
|
||||
import markdown
|
||||
import PyPDF2
|
||||
import yaml
|
||||
from bs4 import BeautifulSoup
|
||||
from pylatexenc.latex2text import LatexNodes2Text
|
||||
|
||||
from autogpt import logs
|
||||
from autogpt.logs import logger
|
||||
|
||||
|
||||
class ParserStrategy:
|
||||
def read(self, file_path: str) -> str:
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
# Basic text file reading
|
||||
class TXTParser(ParserStrategy):
|
||||
def read(self, file_path: str) -> str:
|
||||
charset_match = charset_normalizer.from_path(file_path).best()
|
||||
logger.debug(f"Reading '{file_path}' with encoding '{charset_match.encoding}'")
|
||||
return str(charset_match)
|
||||
|
||||
|
||||
# Reading text from binary file using pdf parser
|
||||
class PDFParser(ParserStrategy):
|
||||
def read(self, file_path: str) -> str:
|
||||
parser = PyPDF2.PdfReader(file_path)
|
||||
text = ""
|
||||
for page_idx in range(len(parser.pages)):
|
||||
text += parser.pages[page_idx].extract_text()
|
||||
return text
|
||||
|
||||
|
||||
# Reading text from binary file using docs parser
|
||||
class DOCXParser(ParserStrategy):
|
||||
def read(self, file_path: str) -> str:
|
||||
doc_file = docx.Document(file_path)
|
||||
text = ""
|
||||
for para in doc_file.paragraphs:
|
||||
text += para.text
|
||||
return text
|
||||
|
||||
|
||||
# Reading as dictionary and returning string format
|
||||
class JSONParser(ParserStrategy):
|
||||
def read(self, file_path: str) -> str:
|
||||
with open(file_path, "r") as f:
|
||||
data = json.load(f)
|
||||
text = str(data)
|
||||
return text
|
||||
|
||||
|
||||
class XMLParser(ParserStrategy):
|
||||
def read(self, file_path: str) -> str:
|
||||
with open(file_path, "r") as f:
|
||||
soup = BeautifulSoup(f, "xml")
|
||||
text = soup.get_text()
|
||||
return text
|
||||
|
||||
|
||||
# Reading as dictionary and returning string format
|
||||
class YAMLParser(ParserStrategy):
|
||||
def read(self, file_path: str) -> str:
|
||||
with open(file_path, "r") as f:
|
||||
data = yaml.load(f, Loader=yaml.FullLoader)
|
||||
text = str(data)
|
||||
return text
|
||||
|
||||
|
||||
class HTMLParser(ParserStrategy):
|
||||
def read(self, file_path: str) -> str:
|
||||
with open(file_path, "r") as f:
|
||||
soup = BeautifulSoup(f, "html.parser")
|
||||
text = soup.get_text()
|
||||
return text
|
||||
|
||||
|
||||
class MarkdownParser(ParserStrategy):
|
||||
def read(self, file_path: str) -> str:
|
||||
with open(file_path, "r") as f:
|
||||
html = markdown.markdown(f.read())
|
||||
text = "".join(BeautifulSoup(html, "html.parser").findAll(string=True))
|
||||
return text
|
||||
|
||||
|
||||
class LaTeXParser(ParserStrategy):
|
||||
def read(self, file_path: str) -> str:
|
||||
with open(file_path, "r") as f:
|
||||
latex = f.read()
|
||||
text = LatexNodes2Text().latex_to_text(latex)
|
||||
return text
|
||||
|
||||
|
||||
class FileContext:
|
||||
def __init__(self, parser: ParserStrategy, logger: logs.Logger):
|
||||
self.parser = parser
|
||||
self.logger = logger
|
||||
|
||||
def set_parser(self, parser: ParserStrategy) -> None:
|
||||
self.logger.debug(f"Setting Context Parser to {parser}")
|
||||
self.parser = parser
|
||||
|
||||
def read_file(self, file_path) -> str:
|
||||
self.logger.debug(f"Reading file {file_path} with parser {self.parser}")
|
||||
return self.parser.read(file_path)
|
||||
|
||||
|
||||
extension_to_parser = {
|
||||
".txt": TXTParser(),
|
||||
".csv": TXTParser(),
|
||||
".pdf": PDFParser(),
|
||||
".docx": DOCXParser(),
|
||||
".json": JSONParser(),
|
||||
".xml": XMLParser(),
|
||||
".yaml": YAMLParser(),
|
||||
".yml": YAMLParser(),
|
||||
".html": HTMLParser(),
|
||||
".htm": HTMLParser(),
|
||||
".xhtml": HTMLParser(),
|
||||
".md": MarkdownParser(),
|
||||
".markdown": MarkdownParser(),
|
||||
".tex": LaTeXParser(),
|
||||
}
|
||||
|
||||
|
||||
def is_file_binary_fn(file_path: str):
|
||||
"""Given a file path load all its content and checks if the null bytes is present
|
||||
|
||||
Args:
|
||||
file_path (_type_): _description_
|
||||
|
||||
Returns:
|
||||
bool: is_binary
|
||||
"""
|
||||
with open(file_path, "rb") as f:
|
||||
file_data = f.read()
|
||||
if b"\x00" in file_data:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def read_textual_file(file_path: str, logger: logs.Logger) -> str:
    if not os.path.isfile(file_path):
        raise FileNotFoundError(f"{file_path} not found!")
    is_binary = is_file_binary_fn(file_path)
    file_extension = os.path.splitext(file_path)[1].lower()
    parser = extension_to_parser.get(file_extension)
    if not parser:
        if is_binary:
            raise ValueError(f"Unsupported binary file format: {file_extension}")
        # fallback to txt file parser (to support script and code files loading)
        parser = TXTParser()
    file_context = FileContext(parser, logger)
    return file_context.read_file(file_path)

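A minimal sketch of how the new parser dispatch might be used (file paths are hypothetical):

    from autogpt.commands.file_operations_utils import read_textual_file
    from autogpt.logs import logger

    text = read_textual_file("docs/report.pdf", logger)   # routed to PDFParser by extension
    code = read_textual_file("scripts/run.py", logger)    # unknown non-binary extension falls back to TXTParser
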
@@ -1,22 +1,25 @@
|
||||
"""Git operations for autogpt"""
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from git.repo import Repo
|
||||
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.config import Config
|
||||
from autogpt.url_utils.validators import validate_url
|
||||
|
||||
CFG = Config()
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
|
||||
@command(
|
||||
"clone_repository",
|
||||
"Clone Repository",
|
||||
'"url": "<repository_url>", "clone_path": "<clone_path>"',
|
||||
CFG.github_username and CFG.github_api_key,
|
||||
lambda config: config.github_username and config.github_api_key,
|
||||
"Configure github_username and github_api_key.",
|
||||
)
|
||||
@validate_url
|
||||
def clone_repository(url: str, clone_path: str) -> str:
|
||||
def clone_repository(url: str, clone_path: str, config: Config) -> str:
|
||||
"""Clone a GitHub repository locally.
|
||||
|
||||
Args:
|
||||
@@ -27,7 +30,9 @@ def clone_repository(url: str, clone_path: str) -> str:
|
||||
str: The result of the clone operation.
|
||||
"""
|
||||
split_url = url.split("//")
|
||||
auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url)
|
||||
auth_repo_url = f"//{config.github_username}:{config.github_api_key}@".join(
|
||||
split_url
|
||||
)
|
||||
try:
|
||||
Repo.clone_from(url=auth_repo_url, to_path=clone_path)
|
||||
return f"""Cloned {url} to {clone_path}"""
|
||||
|
||||
@@ -2,17 +2,24 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from itertools import islice
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from duckduckgo_search import ddg
|
||||
from duckduckgo_search import DDGS
|
||||
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.config import Config
|
||||
|
||||
CFG = Config()
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
|
||||
@command("google", "Google Search", '"query": "<query>"', not CFG.google_api_key)
|
||||
def google_search(query: str, num_results: int = 8) -> str:
|
||||
@command(
|
||||
"google",
|
||||
"Google Search",
|
||||
'"query": "<query>"',
|
||||
lambda config: not config.google_api_key,
|
||||
)
|
||||
def google_search(query: str, config: Config, num_results: int = 8) -> str:
|
||||
"""Return the results of a Google search
|
||||
|
||||
Args:
|
||||
@@ -26,12 +33,12 @@ def google_search(query: str, num_results: int = 8) -> str:
if not query:
return json.dumps(search_results)

results = ddg(query, max_results=num_results)
results = DDGS().text(query)
if not results:
return json.dumps(search_results)

for j in results:
search_results.append(j)
for item in islice(results, num_results):
search_results.append(item)

results = json.dumps(search_results, ensure_ascii=False, indent=4)
return safe_google_results(results)

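The switch from ddg() to DDGS().text() appears to trade an eagerly built list for a lazy iterator, which is why the result count is now capped with islice; the pattern in isolation (stand-in generator, not repo code):

    from itertools import islice

    def fake_results():              # stands in for DDGS().text(query)
        for i in range(100):
            yield {"title": f"result {i}"}

    capped = list(islice(fake_results(), 8))   # keep only the first num_results items
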
@@ -41,10 +48,12 @@ def google_search(query: str, num_results: int = 8) -> str:
|
||||
"google",
|
||||
"Google Search",
|
||||
'"query": "<query>"',
|
||||
bool(CFG.google_api_key),
|
||||
"Configure google_api_key.",
|
||||
lambda config: bool(config.google_api_key) and bool(config.custom_search_engine_id),
|
||||
"Configure google_api_key and custom_search_engine_id.",
|
||||
)
|
||||
def google_official_search(query: str, num_results: int = 8) -> str | list[str]:
|
||||
def google_official_search(
|
||||
query: str, config: Config, num_results: int = 8
|
||||
) -> str | list[str]:
|
||||
"""Return the results of a Google search using the official Google API
|
||||
|
||||
Args:
|
||||
@@ -60,8 +69,8 @@ def google_official_search(query: str, num_results: int = 8) -> str | list[str]:
|
||||
|
||||
try:
|
||||
# Get the Google API key and Custom Search Engine ID from the config file
|
||||
api_key = CFG.google_api_key
|
||||
custom_search_engine_id = CFG.custom_search_engine_id
|
||||
api_key = config.google_api_key
|
||||
custom_search_engine_id = config.custom_search_engine_id
|
||||
|
||||
# Initialize the Custom Search API service
|
||||
service = build("customsearch", "v1", developerKey=api_key)
|
||||
@@ -110,7 +119,7 @@ def safe_google_results(results: str | list) -> str:
|
||||
"""
|
||||
if isinstance(results, list):
|
||||
safe_message = json.dumps(
|
||||
[result.encode("utf-8", "ignore") for result in results]
|
||||
[result.encode("utf-8", "ignore").decode("utf-8") for result in results]
|
||||
)
|
||||
else:
|
||||
safe_message = results.encode("utf-8", "ignore").decode("utf-8")
|
||||
|
||||
@@ -1,7 +1,10 @@
|
||||
""" Image Generation Module for AutoGPT."""
|
||||
import io
|
||||
import json
|
||||
import time
|
||||
import uuid
|
||||
from base64 import b64decode
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import openai
|
||||
import requests
|
||||
@@ -11,11 +14,18 @@ from autogpt.commands.command import command
|
||||
from autogpt.config import Config
|
||||
from autogpt.logs import logger
|
||||
|
||||
CFG = Config()
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
|
||||
@command("generate_image", "Generate Image", '"prompt": "<prompt>"', CFG.image_provider)
|
||||
def generate_image(prompt: str, size: int = 256) -> str:
|
||||
@command(
|
||||
"generate_image",
|
||||
"Generate Image",
|
||||
'"prompt": "<prompt>"',
|
||||
lambda config: config.image_provider,
|
||||
"Requires a image provider to be set.",
|
||||
)
|
||||
def generate_image(prompt: str, config: Config, size: int = 256) -> str:
|
||||
"""Generate an image from a prompt.
|
||||
|
||||
Args:
|
||||
@@ -25,21 +35,21 @@ def generate_image(prompt: str, size: int = 256) -> str:
|
||||
Returns:
|
||||
str: The filename of the image
|
||||
"""
|
||||
filename = f"{CFG.workspace_path}/{str(uuid.uuid4())}.jpg"
|
||||
filename = f"{config.workspace_path}/{str(uuid.uuid4())}.jpg"
|
||||
|
||||
# DALL-E
|
||||
if CFG.image_provider == "dalle":
|
||||
return generate_image_with_dalle(prompt, filename, size)
|
||||
if config.image_provider == "dalle":
|
||||
return generate_image_with_dalle(prompt, filename, size, config)
|
||||
# HuggingFace
|
||||
elif CFG.image_provider == "huggingface":
|
||||
return generate_image_with_hf(prompt, filename)
|
||||
elif config.image_provider == "huggingface":
|
||||
return generate_image_with_hf(prompt, filename, config)
|
||||
# SD WebUI
|
||||
elif CFG.image_provider == "sdwebui":
|
||||
return generate_image_with_sd_webui(prompt, filename, size)
|
||||
elif config.image_provider == "sdwebui":
|
||||
return generate_image_with_sd_webui(prompt, filename, config, size)
|
||||
return "No Image Provider Set"
|
||||
|
||||
|
||||
def generate_image_with_hf(prompt: str, filename: str) -> str:
|
||||
def generate_image_with_hf(prompt: str, filename: str, config: Config) -> str:
|
||||
"""Generate an image with HuggingFace's API.
|
||||
|
||||
Args:
|
||||
@@ -50,34 +60,58 @@ def generate_image_with_hf(prompt: str, filename: str) -> str:
|
||||
str: The filename of the image
|
||||
"""
|
||||
API_URL = (
|
||||
f"https://api-inference.huggingface.co/models/{CFG.huggingface_image_model}"
|
||||
f"https://api-inference.huggingface.co/models/{config.huggingface_image_model}"
|
||||
)
|
||||
if CFG.huggingface_api_token is None:
|
||||
if config.huggingface_api_token is None:
|
||||
raise ValueError(
|
||||
"You need to set your Hugging Face API token in the config file."
|
||||
)
|
||||
headers = {
|
||||
"Authorization": f"Bearer {CFG.huggingface_api_token}",
|
||||
"Authorization": f"Bearer {config.huggingface_api_token}",
|
||||
"X-Use-Cache": "false",
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
API_URL,
|
||||
headers=headers,
|
||||
json={
|
||||
"inputs": prompt,
|
||||
},
|
||||
)
|
||||
retry_count = 0
|
||||
while retry_count < 10:
|
||||
response = requests.post(
|
||||
API_URL,
|
||||
headers=headers,
|
||||
json={
|
||||
"inputs": prompt,
|
||||
},
|
||||
)
|
||||
|
||||
image = Image.open(io.BytesIO(response.content))
|
||||
logger.info(f"Image Generated for prompt:{prompt}")
|
||||
if response.ok:
|
||||
try:
|
||||
image = Image.open(io.BytesIO(response.content))
|
||||
logger.info(f"Image Generated for prompt:{prompt}")
|
||||
image.save(filename)
|
||||
return f"Saved to disk:{filename}"
|
||||
except Exception as e:
|
||||
logger.error(e)
|
||||
break
|
||||
else:
|
||||
try:
|
||||
error = json.loads(response.text)
|
||||
if "estimated_time" in error:
|
||||
delay = error["estimated_time"]
|
||||
logger.debug(response.text)
|
||||
logger.info("Retrying in", delay)
|
||||
time.sleep(delay)
|
||||
else:
|
||||
break
|
||||
except Exception as e:
|
||||
logger.error(e)
|
||||
break
|
||||
|
||||
image.save(filename)
|
||||
retry_count += 1
|
||||
|
||||
return f"Saved to disk:{filename}"
|
||||
return f"Error creating image."
|
||||
|
||||
|
||||
def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str:
|
||||
def generate_image_with_dalle(
|
||||
prompt: str, filename: str, size: int, config: Config
|
||||
) -> str:
|
||||
"""Generate an image with DALL-E.
|
||||
|
||||
Args:
|
||||
@@ -102,7 +136,7 @@ def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str:
|
||||
n=1,
|
||||
size=f"{size}x{size}",
|
||||
response_format="b64_json",
|
||||
api_key=CFG.openai_api_key,
|
||||
api_key=config.openai_api_key,
|
||||
)
|
||||
|
||||
logger.info(f"Image Generated for prompt:{prompt}")
|
||||
@@ -118,6 +152,7 @@ def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str:
|
||||
def generate_image_with_sd_webui(
|
||||
prompt: str,
|
||||
filename: str,
|
||||
config: Config,
|
||||
size: int = 512,
|
||||
negative_prompt: str = "",
|
||||
extra: dict = {},
|
||||
@@ -134,13 +169,13 @@ def generate_image_with_sd_webui(
|
||||
"""
|
||||
# Create a session and set the basic auth if needed
|
||||
s = requests.Session()
|
||||
if CFG.sd_webui_auth:
|
||||
username, password = CFG.sd_webui_auth.split(":")
|
||||
if config.sd_webui_auth:
|
||||
username, password = config.sd_webui_auth.split(":")
|
||||
s.auth = (username, password or "")
|
||||
|
||||
# Generate the images
|
||||
response = requests.post(
|
||||
f"{CFG.sd_webui_url}/sdapi/v1/txt2img",
|
||||
f"{config.sd_webui_url}/sdapi/v1/txt2img",
|
||||
json={
|
||||
"prompt": prompt,
|
||||
"negative_prompt": negative_prompt,
|
||||
|
||||
@@ -1,9 +1,13 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.llm import call_ai_function
|
||||
from autogpt.llm.utils import call_ai_function
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
|
||||
@command(
|
||||
@@ -11,7 +15,7 @@ from autogpt.llm import call_ai_function
|
||||
"Get Improved Code",
|
||||
'"suggestions": "<list_of_suggestions>", "code": "<full_code_string>"',
|
||||
)
|
||||
def improve_code(suggestions: list[str], code: str) -> str:
|
||||
def improve_code(suggestions: list[str], code: str, config: Config) -> str:
|
||||
"""
|
||||
A function that takes in code and suggestions and returns a response from create
|
||||
chat completion api call.
|
||||
@@ -32,4 +36,4 @@ def improve_code(suggestions: list[str], code: str) -> str:
|
||||
" provided, making no other changes."
|
||||
)
|
||||
|
||||
return call_ai_function(function_string, args, description_string)
|
||||
return call_ai_function(function_string, args, description_string, config=config)
|
||||
|
||||
@@ -1,18 +1,21 @@
|
||||
"""Task Statuses module."""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import NoReturn
|
||||
from typing import TYPE_CHECKING, NoReturn
|
||||
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.logs import logger
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
|
||||
@command(
|
||||
"task_complete",
|
||||
"Task Complete (Shutdown)",
|
||||
'"reason": "<reason>"',
|
||||
)
|
||||
def task_complete(reason: str) -> NoReturn:
|
||||
def task_complete(reason: str, config: Config) -> NoReturn:
|
||||
"""
|
||||
A function that takes in a string and exits the program
|
||||
|
||||
|
||||
@@ -1,41 +0,0 @@
|
||||
"""A module that contains a command to send a tweet."""
|
||||
import os
|
||||
|
||||
import tweepy
|
||||
|
||||
from autogpt.commands.command import command
|
||||
|
||||
|
||||
@command(
|
||||
"send_tweet",
|
||||
"Send Tweet",
|
||||
'"tweet_text": "<tweet_text>"',
|
||||
)
|
||||
def send_tweet(tweet_text: str) -> str:
|
||||
"""
|
||||
A function that takes in a string and returns a response from create chat
|
||||
completion api call.
|
||||
|
||||
Args:
|
||||
tweet_text (str): Text to be tweeted.
|
||||
|
||||
Returns:
|
||||
A result from sending the tweet.
|
||||
"""
|
||||
consumer_key = os.environ.get("TW_CONSUMER_KEY")
|
||||
consumer_secret = os.environ.get("TW_CONSUMER_SECRET")
|
||||
access_token = os.environ.get("TW_ACCESS_TOKEN")
|
||||
access_token_secret = os.environ.get("TW_ACCESS_TOKEN_SECRET")
|
||||
# Authenticate to Twitter
|
||||
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
|
||||
auth.set_access_token(access_token, access_token_secret)
|
||||
|
||||
# Create API object
|
||||
api = tweepy.API(auth)
|
||||
|
||||
# Send tweet
|
||||
try:
|
||||
api.update_status(tweet_text)
|
||||
return "Tweet sent successfully!"
|
||||
except tweepy.TweepyException as e:
|
||||
return f"Error sending tweet: {e.reason}"
|
||||
@@ -9,15 +9,12 @@ from autogpt.config import Config
|
||||
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
|
||||
from autogpt.url_utils.validators import validate_url
|
||||
|
||||
CFG = Config()
|
||||
|
||||
session = requests.Session()
|
||||
session.headers.update({"User-Agent": CFG.user_agent})
|
||||
|
||||
|
||||
@validate_url
|
||||
def get_response(
|
||||
url: str, timeout: int = 10
|
||||
url: str, config: Config, timeout: int = 10
|
||||
) -> tuple[None, str] | tuple[Response, None]:
|
||||
"""Get the response from a URL
|
||||
|
||||
@@ -33,6 +30,7 @@ def get_response(
|
||||
requests.exceptions.RequestException: If the HTTP request fails
|
||||
"""
|
||||
try:
|
||||
session.headers.update({"User-Agent": config.user_agent})
|
||||
response = session.get(url, timeout=timeout)
|
||||
|
||||
# Check if the response contains an HTTP error
|
||||
@@ -50,7 +48,7 @@ def get_response(
|
||||
return None, f"Error: {str(re)}"
|
||||
|
||||
|
||||
def scrape_text(url: str) -> str:
|
||||
def scrape_text(url: str, config: Config) -> str:
|
||||
"""Scrape text from a webpage
|
||||
|
||||
Args:
|
||||
@@ -59,7 +57,7 @@ def scrape_text(url: str) -> str:
|
||||
Returns:
|
||||
str: The scraped text
|
||||
"""
|
||||
response, error_message = get_response(url)
|
||||
response, error_message = get_response(url, config)
|
||||
if error_message:
|
||||
return error_message
|
||||
if not response:
|
||||
@@ -78,7 +76,7 @@ def scrape_text(url: str) -> str:
|
||||
return text
|
||||
|
||||
|
||||
def scrape_links(url: str) -> str | list[str]:
|
||||
def scrape_links(url: str, config: Config) -> str | list[str]:
|
||||
"""Scrape links from a webpage
|
||||
|
||||
Args:
|
||||
@@ -87,7 +85,7 @@ def scrape_links(url: str) -> str | list[str]:
|
||||
Returns:
|
||||
str | list[str]: The scraped links
|
||||
"""
|
||||
response, error_message = get_response(url)
|
||||
response, error_message = get_response(url, config)
|
||||
if error_message:
|
||||
return error_message
|
||||
if not response:
|
||||
@@ -100,13 +98,3 @@ def scrape_links(url: str) -> str | list[str]:
|
||||
hyperlinks = extract_hyperlinks(soup, url)
|
||||
|
||||
return format_hyperlinks(hyperlinks)
|
||||
|
||||
|
||||
def create_message(chunk, question):
|
||||
"""Create a message for the user to summarize a chunk of text"""
|
||||
return {
|
||||
"role": "user",
|
||||
"content": f'"""{chunk}""" Using the above text, answer the following'
|
||||
f' question: "{question}" -- if the question cannot be answered using the'
|
||||
" text, summarize the text.",
|
||||
}
|
||||
|
||||
@@ -4,30 +4,41 @@ from __future__ import annotations
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from sys import platform
|
||||
from typing import TYPE_CHECKING, Optional, Type
|
||||
|
||||
from bs4 import BeautifulSoup
|
||||
from selenium import webdriver
|
||||
from selenium.common.exceptions import WebDriverException
|
||||
from selenium.webdriver.chrome.options import Options as ChromeOptions
|
||||
from selenium.webdriver.chrome.service import Service as ChromeDriverService
|
||||
from selenium.webdriver.chrome.webdriver import WebDriver as ChromeDriver
|
||||
from selenium.webdriver.common.by import By
|
||||
from selenium.webdriver.edge.options import Options as EdgeOptions
|
||||
from selenium.webdriver.edge.service import Service as EdgeDriverService
|
||||
from selenium.webdriver.edge.webdriver import WebDriver as EdgeDriver
|
||||
from selenium.webdriver.firefox.options import Options as FirefoxOptions
|
||||
from selenium.webdriver.firefox.service import Service as GeckoDriverService
|
||||
from selenium.webdriver.firefox.webdriver import WebDriver as FirefoxDriver
|
||||
from selenium.webdriver.remote.webdriver import WebDriver
|
||||
from selenium.webdriver.safari.options import Options as SafariOptions
|
||||
from selenium.webdriver.safari.webdriver import WebDriver as SafariDriver
|
||||
from selenium.webdriver.support import expected_conditions as EC
|
||||
from selenium.webdriver.support.wait import WebDriverWait
|
||||
from webdriver_manager.chrome import ChromeDriverManager
|
||||
from webdriver_manager.firefox import GeckoDriverManager
|
||||
from webdriver_manager.microsoft import EdgeChromiumDriverManager
|
||||
from webdriver_manager.microsoft import EdgeChromiumDriverManager as EdgeDriverManager
|
||||
|
||||
import autogpt.processing.text as summary
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.config import Config
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory.vector import MemoryItem, get_memory
|
||||
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
|
||||
from autogpt.url_utils.validators import validate_url
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
BrowserOptions = ChromeOptions | EdgeOptions | FirefoxOptions | SafariOptions
|
||||
|
||||
FILE_DIR = Path(__file__).parent.parent
|
||||
CFG = Config()
|
||||
|
||||
|
||||
@command(
|
||||
@@ -36,7 +47,7 @@ CFG = Config()
|
||||
'"url": "<url>", "question": "<what_you_want_to_find_on_website>"',
|
||||
)
|
||||
@validate_url
|
||||
def browse_website(url: str, question: str) -> str:
|
||||
def browse_website(url: str, question: str, config: Config) -> str:
|
||||
"""Browse a website and return the answer and links to the user
|
||||
|
||||
Args:
|
||||
@@ -47,7 +58,7 @@ def browse_website(url: str, question: str) -> str:
|
||||
Tuple[str, WebDriver]: The answer and links to the user and the webdriver
|
||||
"""
|
||||
try:
|
||||
driver, text = scrape_text_with_selenium(url)
|
||||
driver, text = scrape_text_with_selenium(url, config)
|
||||
except WebDriverException as e:
|
||||
# These errors are often quite long and include lots of context.
|
||||
# Just grab the first line.
|
||||
@@ -55,17 +66,17 @@ def browse_website(url: str, question: str) -> str:
|
||||
return f"Error: {msg}"
|
||||
|
||||
add_header(driver)
|
||||
summary_text = summary.summarize_text(url, text, question, driver)
|
||||
summary = summarize_memorize_webpage(url, text, question, config, driver)
|
||||
links = scrape_links_with_selenium(driver, url)
|
||||
|
||||
# Limit links to 5
|
||||
if len(links) > 5:
|
||||
links = links[:5]
|
||||
close_browser(driver)
|
||||
return f"Answer gathered from website: {summary_text} \n \n Links: {links}"
|
||||
return f"Answer gathered from website: {summary}\n\nLinks: {links}"
|
||||
|
||||
|
||||
def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
|
||||
def scrape_text_with_selenium(url: str, config: Config) -> tuple[WebDriver, str]:
|
||||
"""Scrape text from a website using selenium
|
||||
|
||||
Args:
|
||||
@@ -76,49 +87,49 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
|
||||
"""
|
||||
logging.getLogger("selenium").setLevel(logging.CRITICAL)
|
||||
|
||||
options_available = {
|
||||
options_available: dict[str, Type[BrowserOptions]] = {
|
||||
"chrome": ChromeOptions,
|
||||
"safari": SafariOptions,
|
||||
"firefox": FirefoxOptions,
|
||||
"edge": EdgeOptions,
|
||||
"firefox": FirefoxOptions,
|
||||
"safari": SafariOptions,
|
||||
}
|
||||
|
||||
options = options_available[CFG.selenium_web_browser]()
|
||||
options: BrowserOptions = options_available[config.selenium_web_browser]()
|
||||
options.add_argument(
|
||||
"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36"
|
||||
)
|
||||
|
||||
if CFG.selenium_web_browser == "firefox":
|
||||
if CFG.selenium_headless:
|
||||
if config.selenium_web_browser == "firefox":
|
||||
if config.selenium_headless:
|
||||
options.headless = True
|
||||
options.add_argument("--disable-gpu")
|
||||
driver = webdriver.Firefox(
|
||||
executable_path=GeckoDriverManager().install(), options=options
|
||||
driver = FirefoxDriver(
|
||||
service=GeckoDriverService(GeckoDriverManager().install()), options=options
|
||||
)
|
||||
elif CFG.selenium_web_browser == "safari":
|
||||
elif config.selenium_web_browser == "edge":
|
||||
driver = EdgeDriver(
|
||||
service=EdgeDriverService(EdgeDriverManager().install()), options=options
|
||||
)
|
||||
elif config.selenium_web_browser == "safari":
|
||||
# Requires a bit more setup on the users end
|
||||
# See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari
|
||||
driver = webdriver.Safari(options=options)
|
||||
elif CFG.selenium_web_browser == "edge":
|
||||
driver = webdriver.Edge(
|
||||
executable_path=EdgeChromiumDriverManager().install(), options=options
|
||||
)
|
||||
driver = SafariDriver(options=options)
|
||||
else:
|
||||
if platform == "linux" or platform == "linux2":
|
||||
options.add_argument("--disable-dev-shm-usage")
|
||||
options.add_argument("--remote-debugging-port=9222")
|
||||
|
||||
options.add_argument("--no-sandbox")
|
||||
if CFG.selenium_headless:
|
||||
if config.selenium_headless:
|
||||
options.add_argument("--headless=new")
|
||||
options.add_argument("--disable-gpu")
|
||||
|
||||
chromium_driver_path = Path("/usr/bin/chromedriver")
|
||||
|
||||
driver = webdriver.Chrome(
|
||||
executable_path=chromium_driver_path
|
||||
driver = ChromeDriver(
|
||||
service=ChromeDriverService(str(chromium_driver_path))
|
||||
if chromium_driver_path.exists()
|
||||
else ChromeDriverManager().install(),
|
||||
else ChromeDriverService(ChromeDriverManager().install()),
|
||||
options=options,
|
||||
)
|
||||
driver.get(url)
|
||||
@@ -188,3 +199,34 @@ def add_header(driver: WebDriver) -> None:
|
||||
driver.execute_script(overlay_script)
|
||||
except Exception as e:
|
||||
print(f"Error executing overlay.js: {e}")
|
||||
|
||||
|
||||
def summarize_memorize_webpage(
|
||||
url: str,
|
||||
text: str,
|
||||
question: str,
|
||||
config: Config,
|
||||
driver: Optional[WebDriver] = None,
|
||||
) -> str:
|
||||
"""Summarize text using the OpenAI API
|
||||
|
||||
Args:
|
||||
url (str): The url of the text
|
||||
text (str): The text to summarize
|
||||
question (str): The question to ask the model
|
||||
driver (WebDriver): The webdriver to use to scroll the page
|
||||
|
||||
Returns:
|
||||
str: The summary of the text
|
||||
"""
|
||||
if not text:
|
||||
return "Error: No text to summarize"
|
||||
|
||||
text_length = len(text)
|
||||
logger.info(f"Text length: {text_length} characters")
|
||||
|
||||
memory = get_memory(config)
|
||||
|
||||
new_memory = MemoryItem.from_webpage(text, url, question=question)
|
||||
memory.add(new_memory)
|
||||
return new_memory.summary
|
||||
|
||||
@@ -2,9 +2,13 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.llm import call_ai_function
|
||||
from autogpt.llm.utils import call_ai_function
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
|
||||
@command(
|
||||
@@ -12,7 +16,7 @@ from autogpt.llm import call_ai_function
|
||||
"Write Tests",
|
||||
'"code": "<full_code_string>", "focus": "<list_of_focus_areas>"',
|
||||
)
|
||||
def write_tests(code: str, focus: list[str]) -> str:
|
||||
def write_tests(code: str, focus: list[str], config: Config) -> str:
|
||||
"""
|
||||
A function that takes in code and focus topics and returns a response from create
|
||||
chat completion api call.
|
||||
@@ -34,4 +38,4 @@ def write_tests(code: str, focus: list[str]) -> str:
|
||||
" specific areas if required."
|
||||
)
|
||||
|
||||
return call_ai_function(function_string, args, description_string)
|
||||
return call_ai_function(function_string, args, description_string, config=config)
|
||||
|
||||
@@ -7,12 +7,14 @@ from __future__ import annotations
|
||||
import os
|
||||
import platform
|
||||
from pathlib import Path
|
||||
from typing import Any, Optional, Type
|
||||
from typing import TYPE_CHECKING, Optional
|
||||
|
||||
import distro
|
||||
import yaml
|
||||
|
||||
from autogpt.prompts.generator import PromptGenerator
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.commands.command import CommandRegistry
|
||||
from autogpt.prompts.generator import PromptGenerator
|
||||
|
||||
# Soon this will go in a folder where it remembers more stuff about the run(s)
|
||||
SAVE_FILE = str(Path(os.getcwd()) / "ai_settings.yaml")
|
||||
@@ -53,8 +55,8 @@ class AIConfig:
|
||||
self.ai_role = ai_role
|
||||
self.ai_goals = ai_goals
|
||||
self.api_budget = api_budget
|
||||
self.prompt_generator = None
|
||||
self.command_registry = None
|
||||
self.prompt_generator: PromptGenerator | None = None
|
||||
self.command_registry: CommandRegistry | None = None
|
||||
|
||||
@staticmethod
|
||||
def load(config_file: str = SAVE_FILE) -> "AIConfig":
|
||||
@@ -73,7 +75,7 @@ class AIConfig:
|
||||
|
||||
try:
|
||||
with open(config_file, encoding="utf-8") as file:
|
||||
config_params = yaml.load(file, Loader=yaml.FullLoader)
|
||||
config_params = yaml.load(file, Loader=yaml.FullLoader) or {}
|
||||
except FileNotFoundError:
|
||||
config_params = {}
|
||||
|
||||
|
||||
@@ -17,8 +17,8 @@ class Config(metaclass=Singleton):
|
||||
|
||||
def __init__(self) -> None:
|
||||
"""Initialize the Config class"""
|
||||
self.workspace_path = None
|
||||
self.file_logger_path = None
|
||||
self.workspace_path: str = None
|
||||
self.file_logger_path: str = None
|
||||
|
||||
self.debug_mode = False
|
||||
self.continuous_mode = False
|
||||
@@ -30,6 +30,7 @@ class Config(metaclass=Singleton):
|
||||
|
||||
self.authorise_key = os.getenv("AUTHORISE_COMMAND_KEY", "y")
|
||||
self.exit_key = os.getenv("EXIT_KEY", "n")
|
||||
self.plain_output = os.getenv("PLAIN_OUTPUT", "False") == "True"
|
||||
|
||||
disabled_command_categories = os.getenv("DISABLED_COMMAND_CATEGORIES")
|
||||
if disabled_command_categories:
|
||||
@@ -37,20 +38,33 @@ class Config(metaclass=Singleton):
|
||||
else:
|
||||
self.disabled_command_categories = []
|
||||
|
||||
deny_commands = os.getenv("DENY_COMMANDS")
|
||||
if deny_commands:
|
||||
self.deny_commands = deny_commands.split(",")
|
||||
else:
|
||||
self.deny_commands = []
|
||||
|
||||
allow_commands = os.getenv("ALLOW_COMMANDS")
|
||||
if allow_commands:
|
||||
self.allow_commands = allow_commands.split(",")
|
||||
else:
|
||||
self.allow_commands = []
|
||||
|
||||
self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml")
|
||||
self.prompt_settings_file = os.getenv(
|
||||
"PROMPT_SETTINGS_FILE", "prompt_settings.yaml"
|
||||
)
|
||||
self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
|
||||
self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
|
||||
self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
|
||||
self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
|
||||
self.embedding_model = os.getenv("EMBEDDING_MODEL", "text-embedding-ada-002")
|
||||
self.embedding_tokenizer = os.getenv("EMBEDDING_TOKENIZER", "cl100k_base")
|
||||
self.embedding_token_limit = int(os.getenv("EMBEDDING_TOKEN_LIMIT", 8191))
|
||||
self.browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", 3000))
|
||||
self.browse_spacy_language_model = os.getenv(
|
||||
"BROWSE_SPACY_LANGUAGE_MODEL", "en_core_web_sm"
|
||||
)
|
||||
|
||||
self.openai_api_key = os.getenv("OPENAI_API_KEY")
|
||||
self.openai_organization = os.getenv("OPENAI_ORGANIZATION")
|
||||
self.temperature = float(os.getenv("TEMPERATURE", "0"))
|
||||
self.use_azure = os.getenv("USE_AZURE") == "True"
|
||||
self.execute_local_commands = (
|
||||
@@ -66,6 +80,9 @@ class Config(metaclass=Singleton):
|
||||
openai.api_base = self.openai_api_base
|
||||
openai.api_version = self.openai_api_version
|
||||
|
||||
if self.openai_organization is not None:
|
||||
openai.organization = self.openai_organization
|
||||
|
||||
self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
|
||||
self.elevenlabs_voice_1_id = os.getenv("ELEVENLABS_VOICE_1_ID")
|
||||
self.elevenlabs_voice_2_id = os.getenv("ELEVENLABS_VOICE_2_ID")
|
||||
@@ -84,28 +101,6 @@ class Config(metaclass=Singleton):
|
||||
self.google_api_key = os.getenv("GOOGLE_API_KEY")
|
||||
self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID")
|
||||
|
||||
self.pinecone_api_key = os.getenv("PINECONE_API_KEY")
|
||||
self.pinecone_region = os.getenv("PINECONE_ENV")
|
||||
|
||||
self.weaviate_host = os.getenv("WEAVIATE_HOST")
|
||||
self.weaviate_port = os.getenv("WEAVIATE_PORT")
|
||||
self.weaviate_protocol = os.getenv("WEAVIATE_PROTOCOL", "http")
|
||||
self.weaviate_username = os.getenv("WEAVIATE_USERNAME", None)
|
||||
self.weaviate_password = os.getenv("WEAVIATE_PASSWORD", None)
|
||||
self.weaviate_scopes = os.getenv("WEAVIATE_SCOPES", None)
|
||||
self.weaviate_embedded_path = os.getenv("WEAVIATE_EMBEDDED_PATH")
|
||||
self.weaviate_api_key = os.getenv("WEAVIATE_API_KEY", None)
|
||||
self.use_weaviate_embedded = (
|
||||
os.getenv("USE_WEAVIATE_EMBEDDED", "False") == "True"
|
||||
)
|
||||
|
||||
# milvus or zilliz cloud configuration.
|
||||
self.milvus_addr = os.getenv("MILVUS_ADDR", "localhost:19530")
|
||||
self.milvus_username = os.getenv("MILVUS_USERNAME")
|
||||
self.milvus_password = os.getenv("MILVUS_PASSWORD")
|
||||
self.milvus_collection = os.getenv("MILVUS_COLLECTION", "autogpt")
|
||||
self.milvus_secure = os.getenv("MILVUS_SECURE") == "True"
|
||||
|
||||
self.image_provider = os.getenv("IMAGE_PROVIDER")
|
||||
self.image_size = int(os.getenv("IMAGE_SIZE", 256))
|
||||
self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN")
|
||||
@@ -131,14 +126,13 @@ class Config(metaclass=Singleton):
|
||||
" (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
|
||||
)
|
||||
|
||||
self.memory_backend = os.getenv("MEMORY_BACKEND", "json_file")
|
||||
self.memory_index = os.getenv("MEMORY_INDEX", "auto-gpt-memory")
|
||||
|
||||
self.redis_host = os.getenv("REDIS_HOST", "localhost")
|
||||
self.redis_port = os.getenv("REDIS_PORT", "6379")
|
||||
self.redis_port = int(os.getenv("REDIS_PORT", "6379"))
|
||||
self.redis_password = os.getenv("REDIS_PASSWORD", "")
|
||||
self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == "True"
|
||||
self.memory_index = os.getenv("MEMORY_INDEX", "auto-gpt")
|
||||
# Note that indexes must be created on db 0 in redis, this is not configurable.
|
||||
|
||||
self.memory_backend = os.getenv("MEMORY_BACKEND", "local")
|
||||
|
||||
self.plugins_dir = os.getenv("PLUGINS_DIR", "plugins")
|
||||
self.plugins: List[AutoGPTPluginTemplate] = []
|
||||
@@ -195,7 +189,7 @@ class Config(metaclass=Singleton):
|
||||
None
|
||||
"""
|
||||
with open(config_file) as file:
|
||||
config_params = yaml.load(file, Loader=yaml.FullLoader)
|
||||
config_params = yaml.load(file, Loader=yaml.FullLoader) or {}
|
||||
self.openai_api_type = config_params.get("azure_api_type") or "azure"
|
||||
self.openai_api_base = config_params.get("azure_api_base") or ""
|
||||
self.openai_api_version = (
|
||||
@@ -235,18 +229,6 @@ class Config(metaclass=Singleton):
|
||||
"""Set the model to use for creating embeddings."""
|
||||
self.embedding_model = value
|
||||
|
||||
def set_embedding_tokenizer(self, value: str) -> None:
|
||||
"""Set the tokenizer to use when creating embeddings."""
|
||||
self.embedding_tokenizer = value
|
||||
|
||||
def set_embedding_token_limit(self, value: int) -> None:
|
||||
"""Set the token limit for creating embeddings."""
|
||||
self.embedding_token_limit = value
|
||||
|
||||
def set_browse_chunk_max_length(self, value: int) -> None:
|
||||
"""Set the browse_website command chunk max length value."""
|
||||
self.browse_chunk_max_length = value
|
||||
|
||||
def set_openai_api_key(self, value: str) -> None:
|
||||
"""Set the OpenAI API key value."""
|
||||
self.openai_api_key = value
|
||||
@@ -271,14 +253,6 @@ class Config(metaclass=Singleton):
|
||||
"""Set the custom search engine id value."""
|
||||
self.custom_search_engine_id = value
|
||||
|
||||
def set_pinecone_api_key(self, value: str) -> None:
|
||||
"""Set the Pinecone API key value."""
|
||||
self.pinecone_api_key = value
|
||||
|
||||
def set_pinecone_region(self, value: str) -> None:
|
||||
"""Set the Pinecone region value."""
|
||||
self.pinecone_region = value
|
||||
|
||||
def set_debug_mode(self, value: bool) -> None:
|
||||
"""Set the debug mode value."""
|
||||
self.debug_mode = value
|
||||
|
||||
new file: autogpt/config/prompt_config.py (+53 lines)
@@ -0,0 +1,53 @@
# sourcery skip: do-not-use-staticmethod
"""
A module that contains the PromptConfig class object that contains the configuration
"""
import yaml
from colorama import Fore

from autogpt import utils
from autogpt.config.config import Config
from autogpt.logs import logger

CFG = Config()


class PromptConfig:
    """
    A class object that contains the configuration information for the prompt, which will be used by the prompt generator

    Attributes:
        constraints (list): Constraints list for the prompt generator.
        resources (list): Resources list for the prompt generator.
        performance_evaluations (list): Performance evaluation list for the prompt generator.
    """

    def __init__(
        self,
        config_file: str = CFG.prompt_settings_file,
    ) -> None:
        """
        Initialize a class instance with parameters (constraints, resources, performance_evaluations) loaded from
        yaml file if yaml file exists,
        else raises error.

        Parameters:
            constraints (list): Constraints list for the prompt generator.
            resources (list): Resources list for the prompt generator.
            performance_evaluations (list): Performance evaluation list for the prompt generator.
        Returns:
            None
        """
        # Validate file
        (validated, message) = utils.validate_yaml_file(config_file)
        if not validated:
            logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
            logger.double_check()
            exit(1)

        with open(config_file, encoding="utf-8") as file:
            config_params = yaml.load(file, Loader=yaml.FullLoader)

        self.constraints = config_params.get("constraints", [])
        self.resources = config_params.get("resources", [])
        self.performance_evaluations = config_params.get("performance_evaluations", [])
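
PromptConfig expects a YAML file with three list-valued keys. A minimal sketch that writes such a prompt_settings.yaml (the entries are placeholders, not the defaults shipped with the repo):

import yaml

settings = {
    "constraints": ["Example constraint for the prompt generator."],
    "resources": ["Example resource available to the agent."],
    "performance_evaluations": ["Example self-evaluation criterion."],
}

# PromptConfig reads these keys back via config_params.get(...)
with open("prompt_settings.yaml", "w", encoding="utf-8") as f:
    yaml.safe_dump(settings, f, sort_keys=False)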
@@ -1,19 +1,29 @@
|
||||
"""Configurator module."""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import click
|
||||
from colorama import Back, Fore, Style
|
||||
|
||||
from autogpt import utils
|
||||
from autogpt.config import Config
|
||||
from autogpt.llm.utils import check_model
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory import get_supported_memory_backends
|
||||
from autogpt.memory.vector import get_supported_memory_backends
|
||||
|
||||
CFG = Config()
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
GPT_4_MODEL = "gpt-4"
|
||||
GPT_3_MODEL = "gpt-3.5-turbo"
|
||||
|
||||
|
||||
def create_config(
|
||||
config: Config,
|
||||
continuous: bool,
|
||||
continuous_limit: int,
|
||||
ai_settings_file: str,
|
||||
prompt_settings_file: str,
|
||||
skip_reprompt: bool,
|
||||
speak: bool,
|
||||
debug: bool,
|
||||
@@ -30,6 +40,7 @@ def create_config(
|
||||
continuous (bool): Whether to run in continuous mode
|
||||
continuous_limit (int): The number of times to run in continuous mode
|
||||
ai_settings_file (str): The path to the ai_settings.yaml file
|
||||
prompt_settings_file (str): The path to the prompt_settings.yaml file
|
||||
skip_reprompt (bool): Whether to skip the re-prompting messages at the beginning of the script
|
||||
speak (bool): Whether to enable speak mode
|
||||
debug (bool): Whether to enable debug mode
|
||||
@@ -40,13 +51,13 @@ def create_config(
|
||||
allow_downloads (bool): Whether to allow Auto-GPT to download files natively
|
||||
skips_news (bool): Whether to suppress the output of latest news on startup
|
||||
"""
|
||||
CFG.set_debug_mode(False)
|
||||
CFG.set_continuous_mode(False)
|
||||
CFG.set_speak_mode(False)
|
||||
config.set_debug_mode(False)
|
||||
config.set_continuous_mode(False)
|
||||
config.set_speak_mode(False)
|
||||
|
||||
if debug:
|
||||
logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED")
|
||||
CFG.set_debug_mode(True)
|
||||
config.set_debug_mode(True)
|
||||
|
||||
if continuous:
|
||||
logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED")
|
||||
@@ -57,13 +68,13 @@ def create_config(
|
||||
" cause your AI to run forever or carry out actions you would not usually"
|
||||
" authorise. Use at your own risk.",
|
||||
)
|
||||
CFG.set_continuous_mode(True)
|
||||
config.set_continuous_mode(True)
|
||||
|
||||
if continuous_limit:
|
||||
logger.typewriter_log(
|
||||
"Continuous Limit: ", Fore.GREEN, f"{continuous_limit}"
|
||||
)
|
||||
CFG.set_continuous_limit(continuous_limit)
|
||||
config.set_continuous_limit(continuous_limit)
|
||||
|
||||
# Check if continuous limit is used without continuous mode
|
||||
if continuous_limit and not continuous:
|
||||
@@ -71,15 +82,28 @@ def create_config(
|
||||
|
||||
if speak:
|
||||
logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED")
|
||||
CFG.set_speak_mode(True)
|
||||
config.set_speak_mode(True)
|
||||
|
||||
# Set the default LLM models
|
||||
if gpt3only:
|
||||
logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
|
||||
CFG.set_smart_llm_model(CFG.fast_llm_model)
|
||||
# --gpt3only should always use gpt-3.5-turbo, despite user's FAST_LLM_MODEL config
|
||||
config.set_fast_llm_model(GPT_3_MODEL)
|
||||
config.set_smart_llm_model(GPT_3_MODEL)
|
||||
|
||||
if gpt4only:
|
||||
elif (
|
||||
gpt4only
|
||||
and check_model(GPT_4_MODEL, model_type="smart_llm_model") == GPT_4_MODEL
|
||||
):
|
||||
logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
|
||||
CFG.set_fast_llm_model(CFG.smart_llm_model)
|
||||
# --gpt4only should always use gpt-4, despite user's SMART_LLM_MODEL config
|
||||
config.set_fast_llm_model(GPT_4_MODEL)
|
||||
config.set_smart_llm_model(GPT_4_MODEL)
|
||||
else:
|
||||
config.set_fast_llm_model(check_model(config.fast_llm_model, "fast_llm_model"))
|
||||
config.set_smart_llm_model(
|
||||
check_model(config.smart_llm_model, "smart_llm_model")
|
||||
)
|
||||
|
||||
if memory_type:
|
||||
supported_memory = get_supported_memory_backends()
|
||||
@@ -90,13 +114,13 @@ def create_config(
|
||||
Fore.RED,
|
||||
f"{supported_memory}",
|
||||
)
|
||||
logger.typewriter_log("Defaulting to: ", Fore.YELLOW, CFG.memory_backend)
|
||||
logger.typewriter_log("Defaulting to: ", Fore.YELLOW, config.memory_backend)
|
||||
else:
|
||||
CFG.memory_backend = chosen
|
||||
config.memory_backend = chosen
|
||||
|
||||
if skip_reprompt:
|
||||
logger.typewriter_log("Skip Re-prompt: ", Fore.GREEN, "ENABLED")
|
||||
CFG.skip_reprompt = True
|
||||
config.skip_reprompt = True
|
||||
|
||||
if ai_settings_file:
|
||||
file = ai_settings_file
|
||||
@@ -109,11 +133,24 @@ def create_config(
|
||||
exit(1)
|
||||
|
||||
logger.typewriter_log("Using AI Settings File:", Fore.GREEN, file)
|
||||
CFG.ai_settings_file = file
|
||||
CFG.skip_reprompt = True
|
||||
config.ai_settings_file = file
|
||||
config.skip_reprompt = True
|
||||
|
||||
if prompt_settings_file:
|
||||
file = prompt_settings_file
|
||||
|
||||
# Validate file
|
||||
(validated, message) = utils.validate_yaml_file(file)
|
||||
if not validated:
|
||||
logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
|
||||
logger.double_check()
|
||||
exit(1)
|
||||
|
||||
logger.typewriter_log("Using Prompt Settings File:", Fore.GREEN, file)
|
||||
config.prompt_settings_file = file
|
||||
|
||||
if browser_name:
|
||||
CFG.selenium_web_browser = browser_name
|
||||
config.selenium_web_browser = browser_name
|
||||
|
||||
if allow_downloads:
|
||||
logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED")
|
||||
@@ -128,7 +165,7 @@ def create_config(
|
||||
Fore.YELLOW,
|
||||
f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}",
|
||||
)
|
||||
CFG.allow_downloads = True
|
||||
config.allow_downloads = True
|
||||
|
||||
if skip_news:
|
||||
CFG.skip_news = True
|
||||
config.skip_news = True
|
||||
|
||||
@@ -11,7 +11,7 @@ from regex import regex
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.json_utils.json_fix_general import correct_json
|
||||
from autogpt.llm import call_ai_function
|
||||
from autogpt.llm.utils import call_ai_function
|
||||
from autogpt.logs import logger
|
||||
from autogpt.speech import say_text
|
||||
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
from autogpt.llm.api_manager import ApiManager
|
||||
from autogpt.llm.base import (
|
||||
ChatModelInfo,
|
||||
ChatModelResponse,
|
||||
@@ -8,18 +7,8 @@ from autogpt.llm.base import (
|
||||
Message,
|
||||
ModelInfo,
|
||||
)
|
||||
from autogpt.llm.chat import chat_with_ai, create_chat_message, generate_context
|
||||
from autogpt.llm.llm_utils import (
|
||||
call_ai_function,
|
||||
chunked_tokens,
|
||||
create_chat_completion,
|
||||
get_ada_embedding,
|
||||
)
|
||||
from autogpt.llm.modelsinfo import COSTS
|
||||
from autogpt.llm.token_counter import count_message_tokens, count_string_tokens
|
||||
|
||||
__all__ = [
|
||||
"ApiManager",
|
||||
"Message",
|
||||
"ModelInfo",
|
||||
"ChatModelInfo",
|
||||
@@ -27,14 +16,4 @@ __all__ = [
|
||||
"LLMResponse",
|
||||
"ChatModelResponse",
|
||||
"EmbeddingModelResponse",
|
||||
"create_chat_message",
|
||||
"generate_context",
|
||||
"chat_with_ai",
|
||||
"call_ai_function",
|
||||
"create_chat_completion",
|
||||
"get_ada_embedding",
|
||||
"chunked_tokens",
|
||||
"COSTS",
|
||||
"count_message_tokens",
|
||||
"count_string_tokens",
|
||||
]
|
||||
|
||||
@@ -1,8 +1,12 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List, Optional
|
||||
|
||||
import openai
|
||||
from openai import Model
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.llm.base import MessageDict
|
||||
from autogpt.llm.modelsinfo import COSTS
|
||||
from autogpt.logs import logger
|
||||
from autogpt.singleton import Singleton
|
||||
@@ -14,16 +18,18 @@ class ApiManager(metaclass=Singleton):
|
||||
self.total_completion_tokens = 0
|
||||
self.total_cost = 0
|
||||
self.total_budget = 0
|
||||
self.models: Optional[list[Model]] = None
|
||||
|
||||
def reset(self):
|
||||
self.total_prompt_tokens = 0
|
||||
self.total_completion_tokens = 0
|
||||
self.total_cost = 0
|
||||
self.total_budget = 0.0
|
||||
self.models = None
|
||||
|
||||
def create_chat_completion(
|
||||
self,
|
||||
messages: list, # type: ignore
|
||||
messages: list[MessageDict],
|
||||
model: str | None = None,
|
||||
temperature: float = None,
|
||||
max_tokens: int | None = None,
|
||||
@@ -66,7 +72,7 @@ class ApiManager(metaclass=Singleton):
|
||||
self.update_cost(prompt_tokens, completion_tokens, model)
|
||||
return response
|
||||
|
||||
def update_cost(self, prompt_tokens, completion_tokens, model):
|
||||
def update_cost(self, prompt_tokens, completion_tokens, model: str):
|
||||
"""
|
||||
Update the total cost, prompt tokens, and completion tokens.
|
||||
|
||||
@@ -75,6 +81,9 @@ class ApiManager(metaclass=Singleton):
|
||||
completion_tokens (int): The number of tokens used in the completion.
|
||||
model (str): The model used for the API call.
|
||||
"""
|
||||
# the .model property in API responses can contain version suffixes like -v2
|
||||
model = model[:-3] if model.endswith("-v2") else model
|
||||
|
||||
self.total_prompt_tokens += prompt_tokens
|
||||
self.total_completion_tokens += completion_tokens
|
||||
self.total_cost += (
|
||||
@@ -127,3 +136,17 @@ class ApiManager(metaclass=Singleton):
|
||||
float: The total budget for API calls.
|
||||
"""
|
||||
return self.total_budget
|
||||
|
||||
def get_models(self) -> List[Model]:
|
||||
"""
|
||||
Get list of available GPT models.
|
||||
|
||||
Returns:
|
||||
list: List of available GPT models.
|
||||
|
||||
"""
|
||||
if self.models is None:
|
||||
all_models = openai.Model.list()["data"]
|
||||
self.models = [model for model in all_models if "gpt" in model["id"]]
|
||||
|
||||
return self.models
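
Since ApiManager is a Singleton, usage and cost tracking accumulate globally for the run. An illustrative sketch (it assumes the model name appears in the COSTS table):

api_manager = ApiManager()
api_manager.update_cost(prompt_tokens=1000, completion_tokens=200, model="gpt-3.5-turbo")
print(api_manager.get_total_cost())    # cumulative cost so far
print(api_manager.get_total_budget())  # 0 unless a budget has been set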
|
||||
|
||||
@@ -1,12 +1,28 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from typing import List, TypedDict
|
||||
from math import ceil, floor
|
||||
from typing import List, Literal, TypedDict
|
||||
|
||||
MessageRole = Literal["system", "user", "assistant"]
|
||||
MessageType = Literal["ai_response", "action_result"]
|
||||
|
||||
|
||||
class Message(TypedDict):
|
||||
class MessageDict(TypedDict):
|
||||
role: MessageRole
|
||||
content: str
|
||||
|
||||
|
||||
@dataclass
|
||||
class Message:
|
||||
"""OpenAI Message object containing a role and the message content"""
|
||||
|
||||
role: str
|
||||
role: MessageRole
|
||||
content: str
|
||||
type: MessageType | None = None
|
||||
|
||||
def raw(self) -> MessageDict:
|
||||
return {"role": self.role, "content": self.content}
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -28,7 +44,10 @@ class ModelInfo:
|
||||
class ChatModelInfo(ModelInfo):
|
||||
"""Struct for chat model information."""
|
||||
|
||||
pass
|
||||
|
||||
@dataclass
|
||||
class TextModelInfo(ModelInfo):
|
||||
"""Struct for text completion model information."""
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -38,6 +57,73 @@ class EmbeddingModelInfo(ModelInfo):
|
||||
embedding_dimensions: int
|
||||
|
||||
|
||||
@dataclass
|
||||
class ChatSequence:
|
||||
"""Utility container for a chat sequence"""
|
||||
|
||||
model: ChatModelInfo
|
||||
messages: list[Message] = field(default_factory=list)
|
||||
|
||||
def __getitem__(self, i: int):
|
||||
return self.messages[i]
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self.messages)
|
||||
|
||||
def __len__(self):
|
||||
return len(self.messages)
|
||||
|
||||
def append(self, message: Message):
|
||||
return self.messages.append(message)
|
||||
|
||||
def extend(self, messages: list[Message] | ChatSequence):
|
||||
return self.messages.extend(messages)
|
||||
|
||||
def insert(self, index: int, *messages: Message):
|
||||
for message in reversed(messages):
|
||||
self.messages.insert(index, message)
|
||||
|
||||
@classmethod
|
||||
def for_model(cls, model_name: str, messages: list[Message] | ChatSequence = []):
|
||||
from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS
|
||||
|
||||
if not model_name in OPEN_AI_CHAT_MODELS:
|
||||
raise ValueError(f"Unknown chat model '{model_name}'")
|
||||
|
||||
return ChatSequence(
|
||||
model=OPEN_AI_CHAT_MODELS[model_name], messages=list(messages)
|
||||
)
|
||||
|
||||
def add(self, message_role: MessageRole, content: str):
|
||||
self.messages.append(Message(message_role, content))
|
||||
|
||||
@property
|
||||
def token_length(self):
|
||||
from autogpt.llm.utils import count_message_tokens
|
||||
|
||||
return count_message_tokens(self.messages, self.model.name)
|
||||
|
||||
def raw(self) -> list[MessageDict]:
|
||||
return [m.raw() for m in self.messages]
|
||||
|
||||
def dump(self) -> str:
|
||||
SEPARATOR_LENGTH = 42
|
||||
|
||||
def separator(text: str):
|
||||
half_sep_len = (SEPARATOR_LENGTH - 2 - len(text)) / 2
|
||||
return f"{floor(half_sep_len)*'-'} {text.upper()} {ceil(half_sep_len)*'-'}"
|
||||
|
||||
formatted_messages = "\n".join(
|
||||
[f"{separator(m.role)}\n{m.content}" for m in self.messages]
|
||||
)
|
||||
return f"""
|
||||
============== ChatSequence ==============
|
||||
Length: {self.token_length} tokens; {len(self.messages)} messages
|
||||
{formatted_messages}
|
||||
==========================================
|
||||
"""
|
||||
|
||||
|
||||
@dataclass
|
||||
class LLMResponse:
|
||||
"""Standard response struct for a response from an LLM model."""
|
||||
|
||||
@@ -1,260 +1,202 @@
|
||||
import time
|
||||
from random import shuffle
|
||||
from __future__ import annotations
|
||||
|
||||
from openai.error import RateLimitError
|
||||
import time
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.agent.agent import Agent
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.llm.api_manager import ApiManager
|
||||
from autogpt.llm.base import Message
|
||||
from autogpt.llm.llm_utils import create_chat_completion
|
||||
from autogpt.llm.token_counter import count_message_tokens
|
||||
from autogpt.llm.base import ChatSequence, Message
|
||||
from autogpt.llm.utils import count_message_tokens, create_chat_completion
|
||||
from autogpt.log_cycle.log_cycle import CURRENT_CONTEXT_FILE_NAME
|
||||
from autogpt.logs import logger
|
||||
|
||||
cfg = Config()
|
||||
|
||||
|
||||
def create_chat_message(role, content) -> Message:
|
||||
"""
|
||||
Create a chat message with the given role and content.
|
||||
|
||||
Args:
|
||||
role (str): The role of the message sender, e.g., "system", "user", or "assistant".
|
||||
content (str): The content of the message.
|
||||
|
||||
Returns:
|
||||
dict: A dictionary containing the role and content of the message.
|
||||
"""
|
||||
return {"role": role, "content": content}
|
||||
|
||||
|
||||
def generate_context(prompt, relevant_memory, full_message_history, model):
|
||||
current_context = [
|
||||
create_chat_message("system", prompt),
|
||||
create_chat_message(
|
||||
"system", f"The current time and date is {time.strftime('%c')}"
|
||||
),
|
||||
# create_chat_message(
|
||||
# "system",
|
||||
# f"This reminds you of these events from your past:\n{relevant_memory}\n\n",
|
||||
# ),
|
||||
]
|
||||
|
||||
# Add messages from the full message history until we reach the token limit
|
||||
next_message_to_add_index = len(full_message_history) - 1
|
||||
insertion_index = len(current_context)
|
||||
# Count the currently used tokens
|
||||
current_tokens_used = count_message_tokens(current_context, model)
|
||||
return (
|
||||
next_message_to_add_index,
|
||||
current_tokens_used,
|
||||
insertion_index,
|
||||
current_context,
|
||||
)
|
||||
|
||||
|
||||
# TODO: Change debug from hardcode to argument
|
||||
def chat_with_ai(
|
||||
agent, prompt, user_input, full_message_history, permanent_memory, token_limit
|
||||
config: Config,
|
||||
agent: Agent,
|
||||
system_prompt: str,
|
||||
user_input: str,
|
||||
token_limit: int,
|
||||
model: str | None = None,
|
||||
):
|
||||
"""Interact with the OpenAI API, sending the prompt, user input, message history,
|
||||
and permanent memory."""
|
||||
while True:
|
||||
try:
|
||||
"""
|
||||
Interact with the OpenAI API, sending the prompt, user input,
|
||||
message history, and permanent memory.
|
||||
"""
|
||||
Interact with the OpenAI API, sending the prompt, user input,
|
||||
message history, and permanent memory.
|
||||
|
||||
Args:
|
||||
prompt (str): The prompt explaining the rules to the AI.
|
||||
user_input (str): The input from the user.
|
||||
full_message_history (list): The list of all messages sent between the
|
||||
user and the AI.
|
||||
permanent_memory (Obj): The memory object containing the permanent
|
||||
memory.
|
||||
token_limit (int): The maximum number of tokens allowed in the API call.
|
||||
Args:
|
||||
config (Config): The config to use.
|
||||
agent (Agent): The agent to use.
|
||||
system_prompt (str): The prompt explaining the rules to the AI.
|
||||
user_input (str): The input from the user.
|
||||
token_limit (int): The maximum number of tokens allowed in the API call.
|
||||
model (str, optional): The model to use. If None, the config.fast_llm_model will be used. Defaults to None.
|
||||
|
||||
Returns:
|
||||
str: The AI's response.
|
||||
"""
|
||||
model = cfg.fast_llm_model # TODO: Change model from hardcode to argument
|
||||
# Reserve 1000 tokens for the response
|
||||
logger.debug(f"Token limit: {token_limit}")
|
||||
send_token_limit = token_limit - 1000
|
||||
Returns:
|
||||
str: The AI's response.
|
||||
"""
|
||||
if model is None:
|
||||
model = config.fast_llm_model
|
||||
|
||||
# if len(full_message_history) == 0:
|
||||
# relevant_memory = ""
|
||||
# else:
|
||||
# recent_history = full_message_history[-5:]
|
||||
# shuffle(recent_history)
|
||||
# relevant_memories = permanent_memory.get_relevant(
|
||||
# str(recent_history), 5
|
||||
# )
|
||||
# if relevant_memories:
|
||||
# shuffle(relevant_memories)
|
||||
# relevant_memory = str(relevant_memories)
|
||||
relevant_memory = ""
|
||||
logger.debug(f"Memory Stats: {permanent_memory.get_stats()}")
|
||||
# Reserve 1000 tokens for the response
|
||||
logger.debug(f"Token limit: {token_limit}")
|
||||
send_token_limit = token_limit - 1000
|
||||
|
||||
(
|
||||
next_message_to_add_index,
|
||||
current_tokens_used,
|
||||
insertion_index,
|
||||
current_context,
|
||||
) = generate_context(prompt, relevant_memory, full_message_history, model)
|
||||
# if len(agent.history) == 0:
|
||||
# relevant_memory = ""
|
||||
# else:
|
||||
# recent_history = agent.history[-5:]
|
||||
# shuffle(recent_history)
|
||||
# relevant_memories = agent.memory.get_relevant(
|
||||
# str(recent_history), 5
|
||||
# )
|
||||
# if relevant_memories:
|
||||
# shuffle(relevant_memories)
|
||||
# relevant_memory = str(relevant_memories)
|
||||
# logger.debug(f"Memory Stats: {agent.memory.get_stats()}")
|
||||
relevant_memory = []
|
||||
|
||||
# while current_tokens_used > 2500:
|
||||
# # remove memories until we are under 2500 tokens
|
||||
# relevant_memory = relevant_memory[:-1]
|
||||
# (
|
||||
# next_message_to_add_index,
|
||||
# current_tokens_used,
|
||||
# insertion_index,
|
||||
# current_context,
|
||||
# ) = generate_context(
|
||||
# prompt, relevant_memory, full_message_history, model
|
||||
# )
|
||||
message_sequence = ChatSequence.for_model(
|
||||
model,
|
||||
[
|
||||
Message("system", system_prompt),
|
||||
Message("system", f"The current time and date is {time.strftime('%c')}"),
|
||||
# Message(
|
||||
# "system",
|
||||
# f"This reminds you of these events from your past:\n{relevant_memory}\n\n",
|
||||
# ),
|
||||
],
|
||||
)
|
||||
|
||||
current_tokens_used += count_message_tokens(
|
||||
[create_chat_message("user", user_input)], model
|
||||
) # Account for user input (appended later)
|
||||
# Add messages from the full message history until we reach the token limit
|
||||
next_message_to_add_index = len(agent.history) - 1
|
||||
insertion_index = len(message_sequence)
|
||||
# Count the currently used tokens
|
||||
current_tokens_used = message_sequence.token_length
|
||||
|
||||
current_tokens_used += 500 # Account for memory (appended later) TODO: The final memory may be less than 500 tokens
|
||||
# while current_tokens_used > 2500:
|
||||
# # remove memories until we are under 2500 tokens
|
||||
# relevant_memory = relevant_memory[:-1]
|
||||
# (
|
||||
# next_message_to_add_index,
|
||||
# current_tokens_used,
|
||||
# insertion_index,
|
||||
# current_context,
|
||||
# ) = generate_context(
|
||||
# prompt, relevant_memory, agent.history, model
|
||||
# )
|
||||
|
||||
# Add Messages until the token limit is reached or there are no more messages to add.
|
||||
while next_message_to_add_index >= 0:
|
||||
# print (f"CURRENT TOKENS USED: {current_tokens_used}")
|
||||
message_to_add = full_message_history[next_message_to_add_index]
|
||||
# Account for user input (appended later)
|
||||
user_input_msg = Message("user", user_input)
|
||||
current_tokens_used += count_message_tokens([user_input_msg], model)
|
||||
|
||||
tokens_to_add = count_message_tokens([message_to_add], model)
|
||||
if current_tokens_used + tokens_to_add > send_token_limit:
|
||||
# save_memory_trimmed_from_context_window(
|
||||
# full_message_history,
|
||||
# next_message_to_add_index,
|
||||
# permanent_memory,
|
||||
# )
|
||||
break
|
||||
current_tokens_used += 500 # Reserve space for new_summary_message
|
||||
|
||||
# Add the most recent message to the start of the current context,
|
||||
# after the two system prompts.
|
||||
current_context.insert(
|
||||
insertion_index, full_message_history[next_message_to_add_index]
|
||||
)
|
||||
# Add Messages until the token limit is reached or there are no more messages to add.
|
||||
for cycle in reversed(list(agent.history.per_cycle())):
|
||||
messages_to_add = [msg for msg in cycle if msg is not None]
|
||||
tokens_to_add = count_message_tokens(messages_to_add, model)
|
||||
if current_tokens_used + tokens_to_add > send_token_limit:
|
||||
break
|
||||
|
||||
# Count the currently used tokens
|
||||
current_tokens_used += tokens_to_add
|
||||
# Add the most recent message to the start of the chain,
|
||||
# after the system prompts.
|
||||
message_sequence.insert(insertion_index, *messages_to_add)
|
||||
current_tokens_used += tokens_to_add
|
||||
|
||||
# Move to the next most recent message in the full message history
|
||||
next_message_to_add_index -= 1
|
||||
from autogpt.memory_management.summary_memory import (
|
||||
get_newly_trimmed_messages,
|
||||
update_running_summary,
|
||||
)
|
||||
# Update & add summary of trimmed messages
|
||||
if len(agent.history) > 0:
|
||||
new_summary_message, trimmed_messages = agent.history.trim_messages(
|
||||
current_message_chain=list(message_sequence),
|
||||
)
|
||||
tokens_to_add = count_message_tokens([new_summary_message], model)
|
||||
message_sequence.insert(insertion_index, new_summary_message)
|
||||
current_tokens_used += tokens_to_add - 500
|
||||
|
||||
# Insert Memories
|
||||
if len(full_message_history) > 0:
|
||||
(
|
||||
newly_trimmed_messages,
|
||||
agent.last_memory_index,
|
||||
) = get_newly_trimmed_messages(
|
||||
full_message_history=full_message_history,
|
||||
current_context=current_context,
|
||||
last_memory_index=agent.last_memory_index,
|
||||
)
|
||||
# FIXME: uncomment when memory is back in use
|
||||
# memory_store = get_memory(cfg)
|
||||
# for _, ai_msg, result_msg in agent.history.per_cycle(trimmed_messages):
|
||||
# memory_to_add = MemoryItem.from_ai_action(ai_msg, result_msg)
|
||||
# logger.debug(f"Storing the following memory:\n{memory_to_add.dump()}")
|
||||
# memory_store.add(memory_to_add)
|
||||
|
||||
agent.summary_memory = update_running_summary(
|
||||
agent,
|
||||
current_memory=agent.summary_memory,
|
||||
new_events=newly_trimmed_messages,
|
||||
)
|
||||
current_context.insert(insertion_index, agent.summary_memory)
|
||||
api_manager = ApiManager()
|
||||
# inform the AI about its remaining budget (if it has one)
|
||||
if api_manager.get_total_budget() > 0.0:
|
||||
remaining_budget = api_manager.get_total_budget() - api_manager.get_total_cost()
|
||||
if remaining_budget < 0:
|
||||
remaining_budget = 0
|
||||
budget_message = f"Your remaining API budget is ${remaining_budget:.3f}" + (
|
||||
" BUDGET EXCEEDED! SHUT DOWN!\n\n"
|
||||
if remaining_budget == 0
|
||||
else " Budget very nearly exceeded! Shut down gracefully!\n\n"
|
||||
if remaining_budget < 0.005
|
||||
else " Budget nearly exceeded. Finish up.\n\n"
|
||||
if remaining_budget < 0.01
|
||||
else "\n\n"
|
||||
)
|
||||
logger.debug(budget_message)
|
||||
message_sequence.add("system", budget_message)
|
||||
current_tokens_used += count_message_tokens([message_sequence[-1]], model)
|
||||
|
||||
api_manager = ApiManager()
|
||||
# inform the AI about its remaining budget (if it has one)
|
||||
if api_manager.get_total_budget() > 0.0:
|
||||
remaining_budget = (
|
||||
api_manager.get_total_budget() - api_manager.get_total_cost()
|
||||
)
|
||||
if remaining_budget < 0:
|
||||
remaining_budget = 0
|
||||
system_message = (
|
||||
f"Your remaining API budget is ${remaining_budget:.3f}"
|
||||
+ (
|
||||
" BUDGET EXCEEDED! SHUT DOWN!\n\n"
|
||||
if remaining_budget == 0
|
||||
else " Budget very nearly exceeded! Shut down gracefully!\n\n"
|
||||
if remaining_budget < 0.005
|
||||
else " Budget nearly exceeded. Finish up.\n\n"
|
||||
if remaining_budget < 0.01
|
||||
else "\n\n"
|
||||
)
|
||||
)
|
||||
logger.debug(system_message)
|
||||
current_context.append(create_chat_message("system", system_message))
|
||||
# Append user input, the length of this is accounted for above
|
||||
message_sequence.append(user_input_msg)
|
||||
|
||||
# Append user input, the length of this is accounted for above
|
||||
current_context.extend([create_chat_message("user", user_input)])
|
||||
plugin_count = len(config.plugins)
|
||||
for i, plugin in enumerate(config.plugins):
|
||||
if not plugin.can_handle_on_planning():
|
||||
continue
|
||||
plugin_response = plugin.on_planning(
|
||||
agent.config.prompt_generator, message_sequence.raw()
|
||||
)
|
||||
if not plugin_response or plugin_response == "":
|
||||
continue
|
||||
tokens_to_add = count_message_tokens(
|
||||
[Message("system", plugin_response)], model
|
||||
)
|
||||
if current_tokens_used + tokens_to_add > send_token_limit:
|
||||
logger.debug(f"Plugin response too long, skipping: {plugin_response}")
|
||||
logger.debug(f"Plugins remaining at stop: {plugin_count - i}")
|
||||
break
|
||||
message_sequence.add("system", plugin_response)
|
||||
# Calculate remaining tokens
|
||||
tokens_remaining = token_limit - current_tokens_used
|
||||
# assert tokens_remaining >= 0, "Tokens remaining is negative.
|
||||
# This should never happen, please submit a bug report at
|
||||
# https://www.github.com/Torantulino/Auto-GPT"
|
||||
|
||||
plugin_count = len(cfg.plugins)
|
||||
for i, plugin in enumerate(cfg.plugins):
|
||||
if not plugin.can_handle_on_planning():
|
||||
continue
|
||||
plugin_response = plugin.on_planning(
|
||||
agent.config.prompt_generator, current_context
|
||||
)
|
||||
if not plugin_response or plugin_response == "":
|
||||
continue
|
||||
tokens_to_add = count_message_tokens(
|
||||
[create_chat_message("system", plugin_response)], model
|
||||
)
|
||||
if current_tokens_used + tokens_to_add > send_token_limit:
|
||||
logger.debug("Plugin response too long, skipping:", plugin_response)
|
||||
logger.debug("Plugins remaining at stop:", plugin_count - i)
|
||||
break
|
||||
current_context.append(create_chat_message("system", plugin_response))
|
||||
# Debug print the current context
|
||||
logger.debug(f"Token limit: {token_limit}")
|
||||
logger.debug(f"Send Token Count: {current_tokens_used}")
|
||||
logger.debug(f"Tokens remaining for response: {tokens_remaining}")
|
||||
logger.debug("------------ CONTEXT SENT TO AI ---------------")
|
||||
for message in message_sequence:
|
||||
# Skip printing the prompt
|
||||
if message.role == "system" and message.content == system_prompt:
|
||||
continue
|
||||
logger.debug(f"{message.role.capitalize()}: {message.content}")
|
||||
logger.debug("")
|
||||
logger.debug("----------- END OF CONTEXT ----------------")
|
||||
agent.log_cycle_handler.log_cycle(
|
||||
agent.config.ai_name,
|
||||
agent.created_at,
|
||||
agent.cycle_count,
|
||||
message_sequence.raw(),
|
||||
CURRENT_CONTEXT_FILE_NAME,
|
||||
)
|
||||
|
||||
# Calculate remaining tokens
|
||||
tokens_remaining = token_limit - current_tokens_used
|
||||
# assert tokens_remaining >= 0, "Tokens remaining is negative.
|
||||
# This should never happen, please submit a bug report at
|
||||
# https://www.github.com/Torantulino/Auto-GPT"
|
||||
# TODO: use a model defined elsewhere, so that model can contain
|
||||
# temperature and other settings we care about
|
||||
assistant_reply = create_chat_completion(
|
||||
prompt=message_sequence,
|
||||
max_tokens=tokens_remaining,
|
||||
)
|
||||
|
||||
# Debug print the current context
|
||||
logger.debug(f"Token limit: {token_limit}")
|
||||
logger.debug(f"Send Token Count: {current_tokens_used}")
|
||||
logger.debug(f"Tokens remaining for response: {tokens_remaining}")
|
||||
logger.debug("------------ CONTEXT SENT TO AI ---------------")
|
||||
for message in current_context:
|
||||
# Skip printing the prompt
|
||||
if message["role"] == "system" and message["content"] == prompt:
|
||||
continue
|
||||
logger.debug(f"{message['role'].capitalize()}: {message['content']}")
|
||||
logger.debug("")
|
||||
logger.debug("----------- END OF CONTEXT ----------------")
|
||||
agent.log_cycle_handler.log_cycle(
|
||||
agent.config.ai_name,
|
||||
agent.created_at,
|
||||
agent.cycle_count,
|
||||
current_context,
|
||||
CURRENT_CONTEXT_FILE_NAME,
|
||||
)
|
||||
# Update full message history
|
||||
agent.history.append(user_input_msg)
|
||||
agent.history.add("assistant", assistant_reply, "ai_response")
|
||||
|
||||
# TODO: use a model defined elsewhere, so that model can contain
|
||||
# temperature and other settings we care about
|
||||
assistant_reply = create_chat_completion(
|
||||
model=model,
|
||||
messages=current_context,
|
||||
max_tokens=tokens_remaining,
|
||||
)
|
||||
|
||||
# Update full message history
|
||||
full_message_history.append(create_chat_message("user", user_input))
|
||||
full_message_history.append(
|
||||
create_chat_message("assistant", assistant_reply)
|
||||
)
|
||||
|
||||
return assistant_reply
|
||||
except RateLimitError:
|
||||
# TODO: When we switch to langchain, this is built in
|
||||
logger.warn("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
|
||||
time.sleep(10)
|
||||
return assistant_reply
|
||||
|
||||
@@ -1,295 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import functools
|
||||
import time
|
||||
from itertools import islice
|
||||
from typing import List, Optional
|
||||
|
||||
import numpy as np
|
||||
import openai
|
||||
import tiktoken
|
||||
from colorama import Fore, Style
|
||||
from openai.error import APIError, RateLimitError, Timeout
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.llm.api_manager import ApiManager
|
||||
from autogpt.llm.base import Message
|
||||
from autogpt.logs import logger
|
||||
|
||||
|
||||
def retry_openai_api(
|
||||
num_retries: int = 10,
|
||||
backoff_base: float = 2.0,
|
||||
warn_user: bool = True,
|
||||
):
|
||||
"""Retry an OpenAI API call.
|
||||
|
||||
Args:
|
||||
num_retries int: Number of retries. Defaults to 10.
|
||||
backoff_base float: Base for exponential backoff. Defaults to 2.
|
||||
warn_user bool: Whether to warn the user. Defaults to True.
|
||||
"""
|
||||
retry_limit_msg = f"{Fore.RED}Error: " f"Reached rate limit, passing...{Fore.RESET}"
|
||||
api_key_error_msg = (
|
||||
f"Please double check that you have setup a "
|
||||
f"{Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. You can "
|
||||
f"read more here: {Fore.CYAN}https://docs.agpt.co/setup/#getting-an-api-key{Fore.RESET}"
|
||||
)
|
||||
backoff_msg = (
|
||||
f"{Fore.RED}Error: API Bad gateway. Waiting {{backoff}} seconds...{Fore.RESET}"
|
||||
)
|
||||
|
||||
def _wrapper(func):
|
||||
@functools.wraps(func)
|
||||
def _wrapped(*args, **kwargs):
|
||||
user_warned = not warn_user
|
||||
num_attempts = num_retries + 1 # +1 for the first attempt
|
||||
for attempt in range(1, num_attempts + 1):
|
||||
try:
|
||||
return func(*args, **kwargs)
|
||||
|
||||
except RateLimitError:
|
||||
if attempt == num_attempts:
|
||||
raise
|
||||
|
||||
logger.debug(retry_limit_msg)
|
||||
if not user_warned:
|
||||
logger.double_check(api_key_error_msg)
|
||||
user_warned = True
|
||||
|
||||
except APIError as e:
|
||||
if (e.http_status != 502) or (attempt == num_attempts):
|
||||
raise
|
||||
|
||||
backoff = backoff_base ** (attempt + 2)
|
||||
logger.debug(backoff_msg.format(backoff=backoff))
|
||||
time.sleep(backoff)
|
||||
|
||||
return _wrapped
|
||||
|
||||
return _wrapper
|
||||
|
||||
|
||||
def call_ai_function(
|
||||
function: str, args: list, description: str, model: str | None = None
|
||||
) -> str:
|
||||
"""Call an AI function
|
||||
|
||||
This is a magic function that can do anything with no-code. See
|
||||
https://github.com/Torantulino/AI-Functions for more info.
|
||||
|
||||
Args:
|
||||
function (str): The function to call
|
||||
args (list): The arguments to pass to the function
|
||||
description (str): The description of the function
|
||||
model (str, optional): The model to use. Defaults to None.
|
||||
|
||||
Returns:
|
||||
str: The response from the function
|
||||
"""
|
||||
cfg = Config()
|
||||
if model is None:
|
||||
model = cfg.smart_llm_model
|
||||
# For each arg, if any are None, convert to "None":
|
||||
args = [str(arg) if arg is not None else "None" for arg in args]
|
||||
# parse args to comma separated string
|
||||
args: str = ", ".join(args)
|
||||
messages: List[Message] = [
|
||||
{
|
||||
"role": "system",
|
||||
"content": f"You are now the following python function: ```# {description}"
|
||||
f"\n{function}```\n\nOnly respond with your `return` value.",
|
||||
},
|
||||
{"role": "user", "content": args},
|
||||
]
|
||||
|
||||
return create_chat_completion(model=model, messages=messages, temperature=0)
|
||||
|
||||
|
||||
# Overly simple abstraction until we create something better
|
||||
# simple retry mechanism when getting a rate error or a bad gateway
|
||||
def create_chat_completion(
|
||||
messages: List[Message], # type: ignore
|
||||
model: Optional[str] = None,
|
||||
temperature: float = None,
|
||||
max_tokens: Optional[int] = None,
|
||||
) -> str:
|
||||
"""Create a chat completion using the OpenAI API
|
||||
|
||||
Args:
|
||||
messages (List[Message]): The messages to send to the chat completion
|
||||
model (str, optional): The model to use. Defaults to None.
|
||||
temperature (float, optional): The temperature to use. Defaults to 0.9.
|
||||
max_tokens (int, optional): The max tokens to use. Defaults to None.
|
||||
|
||||
Returns:
|
||||
str: The response from the chat completion
|
||||
"""
|
||||
cfg = Config()
|
||||
if temperature is None:
|
||||
temperature = cfg.temperature
|
||||
|
||||
num_retries = 10
|
||||
warned_user = False
|
||||
logger.debug(
|
||||
f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}"
|
||||
)
|
||||
for plugin in cfg.plugins:
|
||||
if plugin.can_handle_chat_completion(
|
||||
messages=messages,
|
||||
model=model,
|
||||
temperature=temperature,
|
||||
max_tokens=max_tokens,
|
||||
):
|
||||
message = plugin.handle_chat_completion(
|
||||
messages=messages,
|
||||
model=model,
|
||||
temperature=temperature,
|
||||
max_tokens=max_tokens,
|
||||
)
|
||||
if message is not None:
|
||||
return message
|
||||
api_manager = ApiManager()
|
||||
response = None
|
||||
for attempt in range(num_retries):
|
||||
backoff = 2 ** (attempt + 2)
|
||||
try:
|
||||
if cfg.use_azure:
|
||||
response = api_manager.create_chat_completion(
|
||||
deployment_id=cfg.get_azure_deployment_id_for_model(model),
|
||||
model=model,
|
||||
messages=messages,
|
||||
temperature=temperature,
|
||||
max_tokens=max_tokens,
|
||||
)
|
||||
else:
|
||||
response = api_manager.create_chat_completion(
|
||||
model=model,
|
||||
messages=messages,
|
||||
temperature=temperature,
|
||||
max_tokens=max_tokens,
|
||||
)
|
||||
break
|
||||
except RateLimitError:
|
||||
logger.debug(
|
||||
f"{Fore.RED}Error: ", f"Reached rate limit, passing...{Fore.RESET}"
|
||||
)
|
||||
if not warned_user:
|
||||
logger.double_check(
|
||||
f"Please double check that you have setup a {Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. "
|
||||
+ f"You can read more here: {Fore.CYAN}https://docs.agpt.co/setup/#getting-an-api-key{Fore.RESET}"
|
||||
)
|
||||
warned_user = True
|
||||
except (APIError, Timeout) as e:
|
||||
if e.http_status != 502:
|
||||
raise
|
||||
if attempt == num_retries - 1:
|
||||
raise
|
||||
logger.debug(
|
||||
f"{Fore.RED}Error: ",
|
||||
f"API Bad gateway. Waiting {backoff} seconds...{Fore.RESET}",
|
||||
)
|
||||
time.sleep(backoff)
|
||||
if response is None:
|
||||
logger.typewriter_log(
|
||||
"FAILED TO GET RESPONSE FROM OPENAI",
|
||||
Fore.RED,
|
||||
"Auto-GPT has failed to get a response from OpenAI's services. "
|
||||
+ f"Try running Auto-GPT again, and if the problem the persists try running it with `{Fore.CYAN}--debug{Fore.RESET}`.",
|
||||
)
|
||||
logger.double_check()
|
||||
if cfg.debug_mode:
|
||||
raise RuntimeError(f"Failed to get response after {num_retries} retries")
|
||||
else:
|
||||
quit(1)
|
||||
resp = response.choices[0].message["content"]
|
||||
for plugin in cfg.plugins:
|
||||
if not plugin.can_handle_on_response():
|
||||
continue
|
||||
resp = plugin.on_response(resp)
|
||||
return resp
|
||||
|
||||
|
||||
def batched(iterable, n):
|
||||
"""Batch data into tuples of length n. The last batch may be shorter."""
|
||||
# batched('ABCDEFG', 3) --> ABC DEF G
|
||||
if n < 1:
|
||||
raise ValueError("n must be at least one")
|
||||
it = iter(iterable)
|
||||
while batch := tuple(islice(it, n)):
|
||||
yield batch
|
||||
|
||||
|
||||
def chunked_tokens(text, tokenizer_name, chunk_length):
|
||||
tokenizer = tiktoken.get_encoding(tokenizer_name)
|
||||
tokens = tokenizer.encode(text)
|
||||
chunks_iterator = batched(tokens, chunk_length)
|
||||
yield from chunks_iterator
|
||||
|
||||
|
||||
def get_ada_embedding(text: str) -> List[float]:
|
||||
"""Get an embedding from the ada model.
|
||||
|
||||
Args:
|
||||
text (str): The text to embed.
|
||||
|
||||
Returns:
|
||||
List[float]: The embedding.
|
||||
"""
|
||||
cfg = Config()
|
||||
model = cfg.embedding_model
|
||||
text = text.replace("\n", " ")
|
||||
|
||||
if cfg.use_azure:
|
||||
kwargs = {"engine": cfg.get_azure_deployment_id_for_model(model)}
|
||||
else:
|
||||
kwargs = {"model": model}
|
||||
|
||||
embedding = create_embedding(text, **kwargs)
|
||||
return embedding
|
||||
|
||||
|
||||
@retry_openai_api()
|
||||
def create_embedding(
|
||||
text: str,
|
||||
*_,
|
||||
**kwargs,
|
||||
) -> openai.Embedding:
|
||||
"""Create an embedding using the OpenAI API
|
||||
|
||||
Args:
|
||||
text (str): The text to embed.
|
||||
kwargs: Other arguments to pass to the OpenAI API embedding creation call.
|
||||
|
||||
Returns:
|
||||
openai.Embedding: The embedding object.
|
||||
"""
|
||||
cfg = Config()
|
||||
chunk_embeddings = []
|
||||
chunk_lengths = []
|
||||
for chunk in chunked_tokens(
|
||||
text,
|
||||
tokenizer_name=cfg.embedding_tokenizer,
|
||||
chunk_length=cfg.embedding_token_limit,
|
||||
):
|
||||
embedding = openai.Embedding.create(
|
||||
input=[chunk],
|
||||
api_key=cfg.openai_api_key,
|
||||
**kwargs,
|
||||
)
|
||||
api_manager = ApiManager()
|
||||
api_manager.update_cost(
|
||||
prompt_tokens=embedding.usage.prompt_tokens,
|
||||
completion_tokens=0,
|
||||
model=cfg.embedding_model,
|
||||
)
|
||||
chunk_embeddings.append(embedding["data"][0]["embedding"])
|
||||
chunk_lengths.append(len(chunk))
|
||||
|
||||
# do weighted avg
|
||||
chunk_embeddings = np.average(chunk_embeddings, axis=0, weights=chunk_lengths)
|
||||
chunk_embeddings = chunk_embeddings / np.linalg.norm(
|
||||
chunk_embeddings
|
||||
) # normalize the length to one
|
||||
chunk_embeddings = chunk_embeddings.tolist()
|
||||
return chunk_embeddings
|
||||
@@ -7,4 +7,5 @@ COSTS = {
|
||||
"gpt-4-32k": {"prompt": 0.06, "completion": 0.12},
|
||||
"gpt-4-32k-0314": {"prompt": 0.06, "completion": 0.12},
|
||||
"text-embedding-ada-002": {"prompt": 0.0004, "completion": 0.0},
|
||||
"text-davinci-003": {"prompt": 0.02, "completion": 0.02},
|
||||
}
|
||||
|
||||
@@ -1,37 +1,74 @@
|
||||
from autogpt.llm.base import ChatModelInfo, EmbeddingModelInfo
|
||||
from autogpt.llm.base import ChatModelInfo, EmbeddingModelInfo, TextModelInfo
|
||||
|
||||
OPEN_AI_CHAT_MODELS = {
|
||||
"gpt-3.5-turbo": ChatModelInfo(
|
||||
name="gpt-3.5-turbo",
|
||||
prompt_token_cost=0.002,
|
||||
completion_token_cost=0.002,
|
||||
max_tokens=4096,
|
||||
),
|
||||
"gpt-4": ChatModelInfo(
|
||||
name="gpt-4",
|
||||
prompt_token_cost=0.03,
|
||||
completion_token_cost=0.06,
|
||||
max_tokens=8192,
|
||||
),
|
||||
"gpt-4-32k": ChatModelInfo(
|
||||
name="gpt-4-32k",
|
||||
prompt_token_cost=0.06,
|
||||
completion_token_cost=0.12,
|
||||
max_tokens=32768,
|
||||
),
|
||||
info.name: info
|
||||
for info in [
|
||||
ChatModelInfo(
|
||||
name="gpt-3.5-turbo",
|
||||
prompt_token_cost=0.002,
|
||||
completion_token_cost=0.002,
|
||||
max_tokens=4096,
|
||||
),
|
||||
ChatModelInfo(
|
||||
name="gpt-3.5-turbo-0301",
|
||||
prompt_token_cost=0.002,
|
||||
completion_token_cost=0.002,
|
||||
max_tokens=4096,
|
||||
),
|
||||
ChatModelInfo(
|
||||
name="gpt-4",
|
||||
prompt_token_cost=0.03,
|
||||
completion_token_cost=0.06,
|
||||
max_tokens=8192,
|
||||
),
|
||||
ChatModelInfo(
|
||||
name="gpt-4-0314",
|
||||
prompt_token_cost=0.03,
|
||||
completion_token_cost=0.06,
|
||||
max_tokens=8192,
|
||||
),
|
||||
ChatModelInfo(
|
||||
name="gpt-4-32k",
|
||||
prompt_token_cost=0.06,
|
||||
completion_token_cost=0.12,
|
||||
max_tokens=32768,
|
||||
),
|
||||
ChatModelInfo(
|
||||
name="gpt-4-32k-0314",
|
||||
prompt_token_cost=0.06,
|
||||
completion_token_cost=0.12,
|
||||
max_tokens=32768,
|
||||
),
|
||||
]
|
||||
}
|
||||
|
||||
OPEN_AI_TEXT_MODELS = {
|
||||
info.name: info
|
||||
for info in [
|
||||
TextModelInfo(
|
||||
name="text-davinci-003",
|
||||
prompt_token_cost=0.02,
|
||||
completion_token_cost=0.02,
|
||||
max_tokens=4097,
|
||||
),
|
||||
]
|
||||
}
|
||||
|
||||
OPEN_AI_EMBEDDING_MODELS = {
|
||||
"text-embedding-ada-002": EmbeddingModelInfo(
|
||||
name="text-embedding-ada-002",
|
||||
prompt_token_cost=0.0004,
|
||||
completion_token_cost=0.0,
|
||||
max_tokens=8191,
|
||||
embedding_dimensions=1536,
|
||||
),
|
||||
info.name: info
|
||||
for info in [
|
||||
EmbeddingModelInfo(
|
||||
name="text-embedding-ada-002",
|
||||
prompt_token_cost=0.0004,
|
||||
completion_token_cost=0.0,
|
||||
max_tokens=8191,
|
||||
embedding_dimensions=1536,
|
||||
),
|
||||
]
|
||||
}
|
||||
|
||||
OPEN_AI_MODELS = {
|
||||
OPEN_AI_MODELS: dict[str, ChatModelInfo | EmbeddingModelInfo | TextModelInfo] = {
|
||||
**OPEN_AI_CHAT_MODELS,
|
||||
**OPEN_AI_TEXT_MODELS,
|
||||
**OPEN_AI_EMBEDDING_MODELS,
|
||||
}
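
These registries let the rest of the codebase look up model metadata by name, for example (illustrative):

from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS, OPEN_AI_MODELS

gpt4 = OPEN_AI_CHAT_MODELS["gpt-4"]
print(gpt4.max_tokens, gpt4.prompt_token_cost)  # 8192 0.03

ada = OPEN_AI_MODELS["text-embedding-ada-002"]
print(ada.embedding_dimensions)  # 1536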
|
||||
|
||||
new file: autogpt/llm/utils/__init__.py (+266 lines)
@@ -0,0 +1,266 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import functools
|
||||
import time
|
||||
from typing import List, Literal, Optional
|
||||
from unittest.mock import patch
|
||||
|
||||
import openai
|
||||
import openai.api_resources.abstract.engine_api_resource as engine_api_resource
|
||||
import openai.util
|
||||
from colorama import Fore, Style
|
||||
from openai.error import APIError, RateLimitError
|
||||
from openai.openai_object import OpenAIObject
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.logs import logger
|
||||
|
||||
from ..api_manager import ApiManager
|
||||
from ..base import ChatSequence, Message
|
||||
from .token_counter import *
|
||||
|
||||
|
||||
def metered(func):
|
||||
"""Adds ApiManager metering to functions which make OpenAI API calls"""
|
||||
api_manager = ApiManager()
|
||||
|
||||
openai_obj_processor = openai.util.convert_to_openai_object
|
||||
|
||||
def update_usage_with_response(response: OpenAIObject):
|
||||
try:
|
||||
usage = response.usage
|
||||
logger.debug(f"Reported usage from call to model {response.model}: {usage}")
|
||||
api_manager.update_cost(
|
||||
response.usage.prompt_tokens,
|
||||
response.usage.completion_tokens if "completion_tokens" in usage else 0,
|
||||
response.model,
|
||||
)
|
||||
except Exception as err:
|
||||
logger.warn(f"Failed to update API costs: {err.__class__.__name__}: {err}")
|
||||
|
||||
def metering_wrapper(*args, **kwargs):
|
||||
openai_obj = openai_obj_processor(*args, **kwargs)
|
||||
if isinstance(openai_obj, OpenAIObject) and "usage" in openai_obj:
|
||||
update_usage_with_response(openai_obj)
|
||||
return openai_obj
|
||||
|
||||
def metered_func(*args, **kwargs):
|
||||
with patch.object(
|
||||
engine_api_resource.util,
|
||||
"convert_to_openai_object",
|
||||
side_effect=metering_wrapper,
|
||||
):
|
||||
return func(*args, **kwargs)
|
||||
|
||||
return metered_func
|
||||
|
||||
|
||||
def retry_openai_api(
|
||||
num_retries: int = 10,
|
||||
backoff_base: float = 2.0,
|
||||
warn_user: bool = True,
|
||||
):
|
||||
"""Retry an OpenAI API call.
|
||||
|
||||
Args:
|
||||
num_retries int: Number of retries. Defaults to 10.
|
||||
backoff_base float: Base for exponential backoff. Defaults to 2.
|
||||
warn_user bool: Whether to warn the user. Defaults to True.
|
||||
"""
|
||||
retry_limit_msg = f"{Fore.RED}Error: " f"Reached rate limit, passing...{Fore.RESET}"
|
||||
api_key_error_msg = (
|
||||
f"Please double check that you have setup a "
|
||||
f"{Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. You can "
|
||||
f"read more here: {Fore.CYAN}https://docs.agpt.co/setup/#getting-an-api-key{Fore.RESET}"
|
||||
)
|
||||
backoff_msg = (
|
||||
f"{Fore.RED}Error: API Bad gateway. Waiting {{backoff}} seconds...{Fore.RESET}"
|
||||
)
|
||||
|
||||
def _wrapper(func):
|
||||
@functools.wraps(func)
|
||||
def _wrapped(*args, **kwargs):
|
||||
user_warned = not warn_user
|
||||
num_attempts = num_retries + 1 # +1 for the first attempt
|
||||
for attempt in range(1, num_attempts + 1):
|
||||
try:
|
||||
return func(*args, **kwargs)
|
||||
|
||||
except RateLimitError:
|
||||
if attempt == num_attempts:
|
||||
raise
|
||||
|
||||
logger.debug(retry_limit_msg)
|
||||
if not user_warned:
|
||||
logger.double_check(api_key_error_msg)
|
||||
user_warned = True
|
||||
|
||||
except APIError as e:
|
||||
if (e.http_status not in [502, 429]) or (attempt == num_attempts):
|
||||
raise
|
||||
|
||||
backoff = backoff_base ** (attempt + 2)
|
||||
logger.debug(backoff_msg.format(backoff=backoff))
|
||||
time.sleep(backoff)
|
||||
|
||||
return _wrapped
|
||||
|
||||
return _wrapper
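
A hedged usage sketch for the decorator: any helper that calls the OpenAI API can be wrapped so rate limits and retryable HTTP errors are retried with exponential backoff (the wrapped function below is hypothetical):

@retry_openai_api(num_retries=3, backoff_base=2.0, warn_user=False)
def list_gpt_models() -> list:
    # Requires a configured OpenAI API key.
    return [m["id"] for m in openai.Model.list()["data"] if "gpt" in m["id"]]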
|
||||
|
||||
|
||||
def call_ai_function(
|
||||
function: str,
|
||||
args: list,
|
||||
description: str,
|
||||
model: str | None = None,
|
||||
config: Config = None,
|
||||
) -> str:
|
||||
"""Call an AI function
|
||||
|
||||
This is a magic function that can do anything with no-code. See
|
||||
https://github.com/Torantulino/AI-Functions for more info.
|
||||
|
||||
Args:
|
||||
function (str): The function to call
|
||||
args (list): The arguments to pass to the function
|
||||
description (str): The description of the function
|
||||
model (str, optional): The model to use. Defaults to None.
|
||||
|
||||
Returns:
|
||||
str: The response from the function
|
||||
"""
|
||||
if model is None:
|
||||
model = config.smart_llm_model
|
||||
# For each arg, if any are None, convert to "None":
|
||||
args = [str(arg) if arg is not None else "None" for arg in args]
|
||||
# parse args to comma separated string
|
||||
arg_str: str = ", ".join(args)
|
||||
|
||||
prompt = ChatSequence.for_model(
|
||||
model,
|
||||
[
|
||||
Message(
|
||||
"system",
|
||||
f"You are now the following python function: ```# {description}"
|
||||
f"\n{function}```\n\nOnly respond with your `return` value.",
|
||||
),
|
||||
Message("user", arg_str),
|
||||
],
|
||||
)
|
||||
return create_chat_completion(prompt=prompt, temperature=0)
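
A minimal usage sketch: the "function" is only a signature the model role-plays, and the model's reply is returned verbatim as the pretend return value. The argument values below are illustrative, not taken from the codebase:

from autogpt.config import Config
from autogpt.llm.utils import call_ai_function

suggestions = call_ai_function(
    function="def analyze_code(code: str) -> list[str]:",
    args=["def add(a, b): return a - b"],
    description="Analyzes the given code and returns a list of suggestions.",
    config=Config(),  # config is required so smart_llm_model can be resolved
)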
|
||||
|
||||
|
||||
@metered
|
||||
@retry_openai_api()
|
||||
def create_text_completion(
|
||||
prompt: str,
|
||||
model: Optional[str],
|
||||
temperature: Optional[float],
|
||||
max_output_tokens: Optional[int],
|
||||
) -> str:
|
||||
cfg = Config()
|
||||
if model is None:
|
||||
model = cfg.fast_llm_model
|
||||
if temperature is None:
|
||||
temperature = cfg.temperature
|
||||
|
||||
if cfg.use_azure:
|
||||
kwargs = {"deployment_id": cfg.get_azure_deployment_id_for_model(model)}
|
||||
else:
|
||||
kwargs = {"model": model}
|
||||
|
||||
response = openai.Completion.create(
|
||||
**kwargs,
|
||||
prompt=prompt,
|
||||
temperature=temperature,
|
||||
max_tokens=max_output_tokens,
|
||||
api_key=cfg.openai_api_key,
|
||||
)
|
||||
return response.choices[0].text
|
||||
|
||||
|
||||
# Overly simple abstraction until we create something better
|
||||
# simple retry mechanism when getting a rate error or a bad gateway
|
||||
@metered
|
||||
@retry_openai_api()
|
||||
def create_chat_completion(
|
||||
prompt: ChatSequence,
|
||||
model: Optional[str] = None,
|
||||
temperature: float = None,
|
||||
max_tokens: Optional[int] = None,
|
||||
) -> str:
|
||||
"""Create a chat completion using the OpenAI API
|
||||
|
||||
Args:
|
||||
messages (List[Message]): The messages to send to the chat completion
|
||||
model (str, optional): The model to use. Defaults to None.
|
||||
temperature (float, optional): The temperature to use. Defaults to 0.9.
|
||||
max_tokens (int, optional): The max tokens to use. Defaults to None.
|
||||
|
||||
Returns:
|
||||
str: The response from the chat completion
|
||||
"""
|
||||
cfg = Config()
|
||||
if model is None:
|
||||
model = prompt.model.name
|
||||
if temperature is None:
|
||||
temperature = cfg.temperature
|
||||
|
||||
logger.debug(
|
||||
f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}"
|
||||
)
|
||||
for plugin in cfg.plugins:
|
||||
if plugin.can_handle_chat_completion(
|
||||
messages=prompt.raw(),
|
||||
model=model,
|
||||
temperature=temperature,
|
||||
max_tokens=max_tokens,
|
||||
):
|
||||
message = plugin.handle_chat_completion(
|
||||
messages=prompt.raw(),
|
||||
model=model,
|
||||
temperature=temperature,
|
||||
max_tokens=max_tokens,
|
||||
)
|
||||
if message is not None:
|
||||
return message
|
||||
api_manager = ApiManager()
|
||||
response = None
|
||||
|
||||
if cfg.use_azure:
|
||||
kwargs = {"deployment_id": cfg.get_azure_deployment_id_for_model(model)}
|
||||
else:
|
||||
kwargs = {"model": model}
|
||||
|
||||
response = api_manager.create_chat_completion(
|
||||
**kwargs,
|
||||
messages=prompt.raw(),
|
||||
temperature=temperature,
|
||||
max_tokens=max_tokens,
|
||||
)
|
||||
|
||||
resp = response.choices[0].message["content"]
|
||||
for plugin in cfg.plugins:
|
||||
if not plugin.can_handle_on_response():
|
||||
continue
|
||||
resp = plugin.on_response(resp)
|
||||
return resp
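
A minimal usage sketch, assuming the ChatSequence and Message types imported at the top of this module:

from autogpt.llm.base import ChatSequence, Message
from autogpt.llm.utils import create_chat_completion

prompt = ChatSequence.for_model(
    "gpt-3.5-turbo",
    [
        Message("system", "You are a terse assistant."),
        Message("user", "Say hello in one word."),
    ],
)
reply = create_chat_completion(prompt, temperature=0)  # plain string response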
|
||||
|
||||
|
||||
def check_model(
|
||||
model_name: str, model_type: Literal["smart_llm_model", "fast_llm_model"]
|
||||
) -> str:
|
||||
"""Check if model is available for use. If not, return gpt-3.5-turbo."""
|
||||
api_manager = ApiManager()
|
||||
models = api_manager.get_models()
|
||||
|
||||
if any(model_name in m["id"] for m in models):
|
||||
return model_name
|
||||
|
||||
logger.typewriter_log(
|
||||
"WARNING: ",
|
||||
Fore.YELLOW,
|
||||
f"You do not have access to {model_name}. Setting {model_type} to "
|
||||
f"gpt-3.5-turbo.",
|
||||
)
|
||||
return "gpt-3.5-turbo"
|
||||
@@ -53,7 +53,7 @@ def count_message_tokens(
     num_tokens = 0
     for message in messages:
         num_tokens += tokens_per_message
-        for key, value in message.items():
+        for key, value in message.raw().items():
             num_tokens += len(encoding.encode(value))
             if key == "name":
                 num_tokens += tokens_per_name
@@ -10,6 +10,8 @@ CURRENT_CONTEXT_FILE_NAME = "current_context.json"
|
||||
NEXT_ACTION_FILE_NAME = "next_action.json"
|
||||
PROMPT_SUMMARY_FILE_NAME = "prompt_summary.json"
|
||||
SUMMARY_FILE_NAME = "summary.txt"
|
||||
SUPERVISOR_FEEDBACK_FILE_NAME = "supervisor_feedback.txt"
|
||||
PROMPT_SUPERVISOR_FEEDBACK_FILE_NAME = "prompt_supervisor_feedback.json"
|
||||
USER_INPUT_FILE_NAME = "user_input.txt"
|
||||
|
||||
|
||||
|
||||
@@ -287,5 +287,8 @@ def print_assistant_thoughts(
             logger.typewriter_log("- ", Fore.GREEN, line.strip())
     logger.typewriter_log("CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}")
     # Speak the assistant's thoughts
-    if speak_mode and assistant_thoughts_speak:
-        say_text(assistant_thoughts_speak)
+    if assistant_thoughts_speak:
+        if speak_mode:
+            say_text(assistant_thoughts_speak)
+        else:
+            logger.typewriter_log("SPEAK:", Fore.YELLOW, f"{assistant_thoughts_speak}")
@@ -5,27 +5,44 @@ from pathlib import Path
|
||||
|
||||
from colorama import Fore, Style
|
||||
|
||||
from autogpt.agent.agent import Agent
|
||||
from autogpt.agent import Agent
|
||||
from autogpt.commands.command import CommandRegistry
|
||||
from autogpt.config import Config, check_openai_api_key
|
||||
from autogpt.configurator import create_config
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory import get_memory
|
||||
from autogpt.memory.vector import get_memory
|
||||
from autogpt.plugins import scan_plugins
|
||||
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT, construct_main_ai_config
|
||||
from autogpt.utils import (
|
||||
get_current_git_branch,
|
||||
get_latest_bulletin,
|
||||
get_legal_warning,
|
||||
markdown_to_ansi_style,
|
||||
)
|
||||
from autogpt.workspace import Workspace
|
||||
from scripts.install_plugin_deps import install_plugin_dependencies
|
||||
|
||||
COMMAND_CATEGORIES = [
|
||||
"autogpt.commands.analyze_code",
|
||||
"autogpt.commands.audio_text",
|
||||
"autogpt.commands.execute_code",
|
||||
"autogpt.commands.file_operations",
|
||||
"autogpt.commands.git_operations",
|
||||
"autogpt.commands.google_search",
|
||||
"autogpt.commands.image_gen",
|
||||
"autogpt.commands.improve_code",
|
||||
"autogpt.commands.web_selenium",
|
||||
"autogpt.commands.write_tests",
|
||||
"autogpt.app",
|
||||
"autogpt.commands.task_statuses",
|
||||
]
|
||||
|
||||
|
||||
def run_auto_gpt(
|
||||
continuous: bool,
|
||||
continuous_limit: int,
|
||||
ai_settings: str,
|
||||
prompt_settings: str,
|
||||
skip_reprompt: bool,
|
||||
speak: bool,
|
||||
debug: bool,
|
||||
@@ -45,10 +62,13 @@ def run_auto_gpt(
|
||||
cfg = Config()
|
||||
# TODO: fill in llm values here
|
||||
check_openai_api_key()
|
||||
|
||||
create_config(
|
||||
cfg,
|
||||
continuous,
|
||||
continuous_limit,
|
||||
ai_settings,
|
||||
prompt_settings,
|
||||
skip_reprompt,
|
||||
speak,
|
||||
debug,
|
||||
@@ -60,6 +80,10 @@ def run_auto_gpt(
|
||||
skip_news,
|
||||
)
|
||||
|
||||
if cfg.continuous_mode:
|
||||
for line in get_legal_warning().split("\n"):
|
||||
logger.warn(markdown_to_ansi_style(line), "LEGAL:", Fore.RED)
|
||||
|
||||
if not cfg.skip_news:
|
||||
motd, is_new_motd = get_latest_bulletin()
|
||||
if motd:
|
||||
@@ -119,39 +143,27 @@ def run_auto_gpt(
|
||||
# Create a CommandRegistry instance and scan default folder
|
||||
command_registry = CommandRegistry()
|
||||
|
||||
command_categories = [
|
||||
"autogpt.commands.analyze_code",
|
||||
"autogpt.commands.audio_text",
|
||||
"autogpt.commands.execute_code",
|
||||
"autogpt.commands.file_operations",
|
||||
"autogpt.commands.git_operations",
|
||||
"autogpt.commands.google_search",
|
||||
"autogpt.commands.image_gen",
|
||||
"autogpt.commands.improve_code",
|
||||
"autogpt.commands.twitter",
|
||||
"autogpt.commands.web_selenium",
|
||||
"autogpt.commands.write_tests",
|
||||
"autogpt.app",
|
||||
"autogpt.commands.task_statuses",
|
||||
]
|
||||
logger.debug(
|
||||
f"The following command categories are disabled: {cfg.disabled_command_categories}"
|
||||
)
|
||||
command_categories = [
|
||||
x for x in command_categories if x not in cfg.disabled_command_categories
|
||||
enabled_command_categories = [
|
||||
x for x in COMMAND_CATEGORIES if x not in cfg.disabled_command_categories
|
||||
]
|
||||
|
||||
logger.debug(f"The following command categories are enabled: {command_categories}")
|
||||
logger.debug(
|
||||
f"The following command categories are enabled: {enabled_command_categories}"
|
||||
)
|
||||
|
||||
for command_category in command_categories:
|
||||
for command_category in enabled_command_categories:
|
||||
command_registry.import_commands(command_category)
|
||||
|
||||
ai_name = ""
|
||||
ai_config = construct_main_ai_config()
|
||||
ai_config.command_registry = command_registry
|
||||
if ai_config.ai_name:
|
||||
ai_name = ai_config.ai_name
|
||||
# print(prompt)
|
||||
# Initialize variables
|
||||
full_message_history = []
|
||||
next_action_count = 0
|
||||
|
||||
# add chat plugins capable of report to logger
|
||||
@@ -175,7 +187,6 @@ def run_auto_gpt(
|
||||
agent = Agent(
|
||||
ai_name=ai_name,
|
||||
memory=memory,
|
||||
full_message_history=full_message_history,
|
||||
next_action_count=next_action_count,
|
||||
command_registry=command_registry,
|
||||
config=ai_config,
|
||||
|
||||
@@ -1,96 +0,0 @@
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory.local import LocalCache
|
||||
from autogpt.memory.no_memory import NoMemory
|
||||
|
||||
# List of supported memory backends
|
||||
# Add a backend to this list if the import attempt is successful
|
||||
supported_memory = ["local", "no_memory"]
|
||||
|
||||
try:
|
||||
from autogpt.memory.redismem import RedisMemory
|
||||
|
||||
supported_memory.append("redis")
|
||||
except ImportError:
|
||||
RedisMemory = None
|
||||
|
||||
try:
|
||||
from autogpt.memory.pinecone import PineconeMemory
|
||||
|
||||
supported_memory.append("pinecone")
|
||||
except ImportError:
|
||||
PineconeMemory = None
|
||||
|
||||
try:
|
||||
from autogpt.memory.weaviate import WeaviateMemory
|
||||
|
||||
supported_memory.append("weaviate")
|
||||
except ImportError:
|
||||
WeaviateMemory = None
|
||||
|
||||
try:
|
||||
from autogpt.memory.milvus import MilvusMemory
|
||||
|
||||
supported_memory.append("milvus")
|
||||
except ImportError:
|
||||
MilvusMemory = None
|
||||
|
||||
|
||||
def get_memory(cfg, init=False):
|
||||
memory = None
|
||||
if cfg.memory_backend == "pinecone":
|
||||
if not PineconeMemory:
|
||||
logger.warn(
|
||||
"Error: Pinecone is not installed. Please install pinecone"
|
||||
" to use Pinecone as a memory backend."
|
||||
)
|
||||
else:
|
||||
memory = PineconeMemory(cfg)
|
||||
if init:
|
||||
memory.clear()
|
||||
elif cfg.memory_backend == "redis":
|
||||
if not RedisMemory:
|
||||
logger.warn(
|
||||
"Error: Redis is not installed. Please install redis-py to"
|
||||
" use Redis as a memory backend."
|
||||
)
|
||||
else:
|
||||
memory = RedisMemory(cfg)
|
||||
elif cfg.memory_backend == "weaviate":
|
||||
if not WeaviateMemory:
|
||||
logger.warn(
|
||||
"Error: Weaviate is not installed. Please install weaviate-client to"
|
||||
" use Weaviate as a memory backend."
|
||||
)
|
||||
else:
|
||||
memory = WeaviateMemory(cfg)
|
||||
elif cfg.memory_backend == "milvus":
|
||||
if not MilvusMemory:
|
||||
logger.warn(
|
||||
"Error: pymilvus sdk is not installed."
|
||||
"Please install pymilvus to use Milvus or Zilliz Cloud as memory backend."
|
||||
)
|
||||
else:
|
||||
memory = MilvusMemory(cfg)
|
||||
elif cfg.memory_backend == "no_memory":
|
||||
memory = NoMemory(cfg)
|
||||
|
||||
if memory is None:
|
||||
memory = LocalCache(cfg)
|
||||
if init:
|
||||
memory.clear()
|
||||
return memory
|
||||
|
||||
|
||||
def get_supported_memory_backends():
|
||||
return supported_memory
|
||||
|
||||
|
||||
__all__ = [
|
||||
"get_memory",
|
||||
"LocalCache",
|
||||
"RedisMemory",
|
||||
"PineconeMemory",
|
||||
"NoMemory",
|
||||
"MilvusMemory",
|
||||
"WeaviateMemory",
|
||||
]
|
||||
@@ -1,31 +0,0 @@
|
||||
"""Base class for memory providers."""
|
||||
import abc
|
||||
|
||||
from autogpt.singleton import AbstractSingleton
|
||||
|
||||
|
||||
class MemoryProviderSingleton(AbstractSingleton):
|
||||
@abc.abstractmethod
|
||||
def add(self, data):
|
||||
"""Adds to memory"""
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def get(self, data):
|
||||
"""Gets from memory"""
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def clear(self):
|
||||
"""Clears memory"""
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def get_relevant(self, data, num_relevant=5):
|
||||
"""Gets relevant memory for"""
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def get_stats(self):
|
||||
"""Get stats from memory"""
|
||||
pass
|
||||
@@ -1,126 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import dataclasses
|
||||
from pathlib import Path
|
||||
from typing import Any, List
|
||||
|
||||
import numpy as np
|
||||
import orjson
|
||||
|
||||
from autogpt.llm import get_ada_embedding
|
||||
from autogpt.memory.base import MemoryProviderSingleton
|
||||
|
||||
EMBED_DIM = 1536
|
||||
SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS
|
||||
|
||||
|
||||
def create_default_embeddings():
|
||||
return np.zeros((0, EMBED_DIM)).astype(np.float32)
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
class CacheContent:
|
||||
texts: List[str] = dataclasses.field(default_factory=list)
|
||||
embeddings: np.ndarray = dataclasses.field(
|
||||
default_factory=create_default_embeddings
|
||||
)
|
||||
|
||||
|
||||
class LocalCache(MemoryProviderSingleton):
|
||||
"""A class that stores the memory in a local file"""
|
||||
|
||||
def __init__(self, cfg) -> None:
|
||||
"""Initialize a class instance
|
||||
|
||||
Args:
|
||||
cfg: Config object
|
||||
|
||||
Returns:
|
||||
None
|
||||
"""
|
||||
workspace_path = Path(cfg.workspace_path)
|
||||
self.filename = workspace_path / f"{cfg.memory_index}.json"
|
||||
|
||||
self.filename.touch(exist_ok=True)
|
||||
|
||||
file_content = b"{}"
|
||||
with self.filename.open("w+b") as f:
|
||||
f.write(file_content)
|
||||
|
||||
self.data = CacheContent()
|
||||
|
||||
def add(self, text: str):
|
||||
"""
|
||||
Add text to our list of texts, add embedding as row to our
|
||||
embeddings-matrix
|
||||
|
||||
Args:
|
||||
text: str
|
||||
|
||||
Returns: None
|
||||
"""
|
||||
if "Command Error:" in text:
|
||||
return ""
|
||||
self.data.texts.append(text)
|
||||
|
||||
embedding = get_ada_embedding(text)
|
||||
|
||||
vector = np.array(embedding).astype(np.float32)
|
||||
vector = vector[np.newaxis, :]
|
||||
self.data.embeddings = np.concatenate(
|
||||
[
|
||||
self.data.embeddings,
|
||||
vector,
|
||||
],
|
||||
axis=0,
|
||||
)
|
||||
|
||||
with open(self.filename, "wb") as f:
|
||||
out = orjson.dumps(self.data, option=SAVE_OPTIONS)
|
||||
f.write(out)
|
||||
return text
|
||||
|
||||
def clear(self) -> str:
|
||||
"""
|
||||
Clears the data in memory.
|
||||
|
||||
Returns: A message indicating that the memory has been cleared.
|
||||
"""
|
||||
self.data = CacheContent()
|
||||
return "Obliviated"
|
||||
|
||||
def get(self, data: str) -> list[Any] | None:
|
||||
"""
|
||||
Gets the data from the memory that is most relevant to the given data.
|
||||
|
||||
Args:
|
||||
data: The data to compare to.
|
||||
|
||||
Returns: The most relevant data.
|
||||
"""
|
||||
return self.get_relevant(data, 1)
|
||||
|
||||
def get_relevant(self, text: str, k: int) -> list[Any]:
|
||||
""" "
|
||||
matrix-vector mult to find score-for-each-row-of-matrix
|
||||
get indices for top-k winning scores
|
||||
return texts for those indices
|
||||
Args:
|
||||
text: str
|
||||
k: int
|
||||
|
||||
Returns: List[str]
|
||||
"""
|
||||
embedding = get_ada_embedding(text)
|
||||
|
||||
scores = np.dot(self.data.embeddings, embedding)
|
||||
|
||||
top_k_indices = np.argsort(scores)[-k:][::-1]
|
||||
|
||||
return [self.data.texts[i] for i in top_k_indices]
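
A standalone illustration of the scoring described in the docstring above: dot each stored embedding against the query embedding, then take the k highest-scoring rows. The vectors are made up for the example:

import numpy as np

embeddings = np.array([[0.1, 0.9], [0.8, 0.2], [0.5, 0.5]], dtype=np.float32)
query = np.array([0.9, 0.1], dtype=np.float32)

scores = np.dot(embeddings, query)             # one relevance score per stored text
top_k_indices = np.argsort(scores)[-2:][::-1]  # indices of the 2 best matches, best first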
|
||||
|
||||
def get_stats(self) -> tuple[int, tuple[int, ...]]:
|
||||
"""
|
||||
Returns: The stats of the local cache.
|
||||
"""
|
||||
return len(self.data.texts), self.data.embeddings.shape
|
||||
autogpt/memory/message_history.py (new file, 204 lines)
@@ -0,0 +1,204 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import copy
|
||||
import json
|
||||
from dataclasses import dataclass, field
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.agent import Agent
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.json_utils.utilities import (
|
||||
LLM_DEFAULT_RESPONSE_FORMAT,
|
||||
is_string_valid_json,
|
||||
)
|
||||
from autogpt.llm.base import ChatSequence, Message, MessageRole, MessageType
|
||||
from autogpt.llm.utils import create_chat_completion
|
||||
from autogpt.log_cycle.log_cycle import PROMPT_SUMMARY_FILE_NAME, SUMMARY_FILE_NAME
|
||||
from autogpt.logs import logger
|
||||
|
||||
|
||||
@dataclass
|
||||
class MessageHistory:
|
||||
agent: Agent
|
||||
|
||||
messages: list[Message] = field(default_factory=list)
|
||||
summary: str = "I was created"
|
||||
|
||||
last_trimmed_index: int = 0
|
||||
|
||||
def __getitem__(self, i: int):
|
||||
return self.messages[i]
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self.messages)
|
||||
|
||||
def __len__(self):
|
||||
return len(self.messages)
|
||||
|
||||
def add(
|
||||
self,
|
||||
role: MessageRole,
|
||||
content: str,
|
||||
type: MessageType | None = None,
|
||||
):
|
||||
return self.append(Message(role, content, type))
|
||||
|
||||
def append(self, message: Message):
|
||||
return self.messages.append(message)
|
||||
|
||||
def trim_messages(
|
||||
self,
|
||||
current_message_chain: list[Message],
|
||||
) -> tuple[Message, list[Message]]:
|
||||
"""
|
||||
Returns a list of trimmed messages: messages which are in the message history
|
||||
but not in current_message_chain.
|
||||
|
||||
Args:
|
||||
current_message_chain (list[Message]): The messages currently in the context.
|
||||
|
||||
Returns:
|
||||
Message: A message with the new running summary after adding the trimmed messages.
|
||||
list[Message]: A list of messages that are in full_message_history with an index higher than last_trimmed_index and absent from current_message_chain.
|
||||
"""
|
||||
# Select messages in full_message_history with an index higher than last_trimmed_index
|
||||
new_messages = [
|
||||
msg for i, msg in enumerate(self) if i > self.last_trimmed_index
|
||||
]
|
||||
|
||||
# Remove messages that are already present in current_message_chain
|
||||
new_messages_not_in_chain = [
|
||||
msg for msg in new_messages if msg not in current_message_chain
|
||||
]
|
||||
|
||||
if not new_messages_not_in_chain:
|
||||
return self.summary_message(), []
|
||||
|
||||
new_summary_message = self.update_running_summary(
|
||||
new_events=new_messages_not_in_chain
|
||||
)
|
||||
|
||||
# Find the index of the last message processed
|
||||
last_message = new_messages_not_in_chain[-1]
|
||||
self.last_trimmed_index = self.messages.index(last_message)
|
||||
|
||||
return new_summary_message, new_messages_not_in_chain
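
A minimal sketch of the intended call pattern, assuming `history` is an agent's MessageHistory and `current_context` is the list of Message objects currently in the prompt (both names are placeholders):

# Fold messages that fell out of the context window into the running summary,
# then surface that summary at the top of the next prompt.
summary_msg, newly_trimmed = history.trim_messages(current_message_chain=current_context)
current_context.insert(0, summary_msg)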
|
||||
|
||||
def per_cycle(self, messages: list[Message] | None = None):
|
||||
"""
|
||||
Yields:
|
||||
Message: a message containing user input
|
||||
Message: a message from the AI containing a proposed action
|
||||
Message: the message containing the result of the AI's proposed action
|
||||
"""
|
||||
messages = messages or self.messages
|
||||
for i in range(0, len(messages) - 1):
|
||||
ai_message = messages[i]
|
||||
if ai_message.type != "ai_response":
|
||||
continue
|
||||
user_message = (
|
||||
messages[i - 1] if i > 0 and messages[i - 1].role == "user" else None
|
||||
)
|
||||
result_message = messages[i + 1]
|
||||
try:
|
||||
assert is_string_valid_json(
|
||||
ai_message.content, LLM_DEFAULT_RESPONSE_FORMAT
|
||||
), "AI response is not a valid JSON object"
|
||||
assert result_message.type == "action_result"
|
||||
|
||||
yield user_message, ai_message, result_message
|
||||
except AssertionError as err:
|
||||
logger.debug(
|
||||
f"Invalid item in message history: {err}; Messages: {messages[i-1:i+2]}"
|
||||
)
|
||||
|
||||
def summary_message(self) -> Message:
|
||||
return Message(
|
||||
"system",
|
||||
f"This reminds you of these events from your past: \n{self.summary}",
|
||||
)
|
||||
|
||||
def update_running_summary(self, new_events: list[Message]) -> Message:
|
||||
"""
|
||||
This function takes a list of dictionaries representing new events and combines them with the current summary,
|
||||
focusing on key and potentially important information to remember. The updated summary is returned in a message
|
||||
formatted in the 1st person past tense.
|
||||
|
||||
Args:
|
||||
new_events (List[Dict]): A list of dictionaries containing the latest events to be added to the summary.
|
||||
|
||||
Returns:
|
||||
str: A message containing the updated summary of actions, formatted in the 1st person past tense.
|
||||
|
||||
Example:
|
||||
new_events = [{"event": "entered the kitchen."}, {"event": "found a scrawled note with the number 7"}]
|
||||
update_running_summary(new_events)
|
||||
# Returns: "This reminds you of these events from your past: \nI entered the kitchen and found a scrawled note saying 7."
|
||||
"""
|
||||
cfg = Config()
|
||||
|
||||
if not new_events:
|
||||
return self.summary_message()
|
||||
|
||||
# Create a copy of the new_events list to prevent modifying the original list
|
||||
new_events = copy.deepcopy(new_events)
|
||||
|
||||
# Replace "assistant" with "you". This produces much better first person past tense results.
|
||||
for event in new_events:
|
||||
if event.role.lower() == "assistant":
|
||||
event.role = "you"
|
||||
|
||||
# Remove "thoughts" dictionary from "content"
|
||||
try:
|
||||
content_dict = json.loads(event.content)
|
||||
if "thoughts" in content_dict:
|
||||
del content_dict["thoughts"]
|
||||
event.content = json.dumps(content_dict)
|
||||
except json.decoder.JSONDecodeError:
|
||||
if cfg.debug_mode:
|
||||
logger.error(f"Error: Invalid JSON: {event.content}\n")
|
||||
|
||||
elif event.role.lower() == "system":
|
||||
event.role = "your computer"
|
||||
|
||||
# Delete all user messages
|
||||
elif event.role == "user":
|
||||
new_events.remove(event)
|
||||
|
||||
prompt = f'''Your task is to create a concise running summary of actions and information results in the provided text, focusing on key and potentially important information to remember.
|
||||
|
||||
You will receive the current summary and your latest actions. Combine them, adding relevant key information from the latest development in 1st person past tense and keeping the summary concise.
|
||||
|
||||
Summary So Far:
|
||||
"""
|
||||
{self.summary}
|
||||
"""
|
||||
|
||||
Latest Development:
|
||||
"""
|
||||
{new_events or "Nothing new happened."}
|
||||
"""
|
||||
'''
|
||||
|
||||
prompt = ChatSequence.for_model(cfg.fast_llm_model, [Message("user", prompt)])
|
||||
self.agent.log_cycle_handler.log_cycle(
|
||||
self.agent.config.ai_name,
|
||||
self.agent.created_at,
|
||||
self.agent.cycle_count,
|
||||
prompt.raw(),
|
||||
PROMPT_SUMMARY_FILE_NAME,
|
||||
)
|
||||
|
||||
self.summary = create_chat_completion(prompt)
|
||||
|
||||
self.agent.log_cycle_handler.log_cycle(
|
||||
self.agent.config.ai_name,
|
||||
self.agent.created_at,
|
||||
self.agent.cycle_count,
|
||||
self.summary,
|
||||
SUMMARY_FILE_NAME,
|
||||
)
|
||||
|
||||
return self.summary_message()
|
||||
@@ -1,162 +0,0 @@
|
||||
""" Milvus memory storage provider."""
|
||||
import re
|
||||
|
||||
from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, connections
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.llm import get_ada_embedding
|
||||
from autogpt.memory.base import MemoryProviderSingleton
|
||||
|
||||
|
||||
class MilvusMemory(MemoryProviderSingleton):
|
||||
"""Milvus memory storage provider."""
|
||||
|
||||
def __init__(self, cfg: Config) -> None:
|
||||
"""Construct a milvus memory storage connection.
|
||||
|
||||
Args:
|
||||
cfg (Config): Auto-GPT global config.
|
||||
"""
|
||||
self.configure(cfg)
|
||||
|
||||
connect_kwargs = {}
|
||||
if self.username:
|
||||
connect_kwargs["user"] = self.username
|
||||
connect_kwargs["password"] = self.password
|
||||
|
||||
connections.connect(
|
||||
**connect_kwargs,
|
||||
uri=self.uri or "",
|
||||
address=self.address or "",
|
||||
secure=self.secure,
|
||||
)
|
||||
|
||||
self.init_collection()
|
||||
|
||||
def configure(self, cfg: Config) -> None:
|
||||
# init with configuration.
|
||||
self.uri = None
|
||||
self.address = cfg.milvus_addr
|
||||
self.secure = cfg.milvus_secure
|
||||
self.username = cfg.milvus_username
|
||||
self.password = cfg.milvus_password
|
||||
self.collection_name = cfg.milvus_collection
|
||||
# use HNSW by default.
|
||||
self.index_params = {
|
||||
"metric_type": "IP",
|
||||
"index_type": "HNSW",
|
||||
"params": {"M": 8, "efConstruction": 64},
|
||||
}
|
||||
|
||||
if (self.username is None) != (self.password is None):
|
||||
raise ValueError(
|
||||
"Both username and password must be set to use authentication for Milvus"
|
||||
)
|
||||
|
||||
# configured address may be a full URL.
|
||||
if re.match(r"^(https?|tcp)://", self.address) is not None:
|
||||
self.uri = self.address
|
||||
self.address = None
|
||||
|
||||
if self.uri.startswith("https"):
|
||||
self.secure = True
|
||||
|
||||
# Zilliz Cloud requires AutoIndex.
|
||||
if re.match(r"^https://(.*)\.zillizcloud\.(com|cn)", self.uri) is not None:
|
||||
self.index_params = {
|
||||
"metric_type": "IP",
|
||||
"index_type": "AUTOINDEX",
|
||||
"params": {},
|
||||
}
|
||||
|
||||
def init_collection(self) -> None:
|
||||
"""Initialize collection in vector database."""
|
||||
fields = [
|
||||
FieldSchema(name="pk", dtype=DataType.INT64, is_primary=True, auto_id=True),
|
||||
FieldSchema(name="embeddings", dtype=DataType.FLOAT_VECTOR, dim=1536),
|
||||
FieldSchema(name="raw_text", dtype=DataType.VARCHAR, max_length=65535),
|
||||
]
|
||||
|
||||
# create collection if not exist and load it.
|
||||
self.schema = CollectionSchema(fields, "auto-gpt memory storage")
|
||||
self.collection = Collection(self.collection_name, self.schema)
|
||||
# create index if not exist.
|
||||
if not self.collection.has_index():
|
||||
self.collection.release()
|
||||
self.collection.create_index(
|
||||
"embeddings",
|
||||
self.index_params,
|
||||
index_name="embeddings",
|
||||
)
|
||||
self.collection.load()
|
||||
|
||||
def add(self, data) -> str:
|
||||
"""Add an embedding of data into memory.
|
||||
|
||||
Args:
|
||||
data (str): The raw text to construct embedding index.
|
||||
|
||||
Returns:
|
||||
str: log.
|
||||
"""
|
||||
embedding = get_ada_embedding(data)
|
||||
result = self.collection.insert([[embedding], [data]])
|
||||
_text = (
|
||||
"Inserting data into memory at primary key: "
|
||||
f"{result.primary_keys[0]}:\n data: {data}"
|
||||
)
|
||||
return _text
|
||||
|
||||
def get(self, data):
|
||||
"""Return the most relevant data in memory.
|
||||
Args:
|
||||
data: The data to compare to.
|
||||
"""
|
||||
return self.get_relevant(data, 1)
|
||||
|
||||
def clear(self) -> str:
|
||||
"""Drop the index in memory.
|
||||
|
||||
Returns:
|
||||
str: log.
|
||||
"""
|
||||
self.collection.drop()
|
||||
self.collection = Collection(self.collection_name, self.schema)
|
||||
self.collection.create_index(
|
||||
"embeddings",
|
||||
self.index_params,
|
||||
index_name="embeddings",
|
||||
)
|
||||
self.collection.load()
|
||||
return "Obliviated"
|
||||
|
||||
def get_relevant(self, data: str, num_relevant: int = 5):
|
||||
"""Return the top-k relevant data in memory.
|
||||
Args:
|
||||
data: The data to compare to.
|
||||
num_relevant (int, optional): The max number of relevant data.
|
||||
Defaults to 5.
|
||||
|
||||
Returns:
|
||||
list: The top-k relevant data.
|
||||
"""
|
||||
# search the embedding and return the most relevant text.
|
||||
embedding = get_ada_embedding(data)
|
||||
search_params = {
|
||||
"metrics_type": "IP",
|
||||
"params": {"nprobe": 8},
|
||||
}
|
||||
result = self.collection.search(
|
||||
[embedding],
|
||||
"embeddings",
|
||||
search_params,
|
||||
num_relevant,
|
||||
output_fields=["raw_text"],
|
||||
)
|
||||
return [item.entity.value_of_field("raw_text") for item in result[0]]
|
||||
|
||||
def get_stats(self) -> str:
|
||||
"""
|
||||
Returns: The stats of the milvus cache.
|
||||
"""
|
||||
return f"Entities num: {self.collection.num_entities}"
|
||||
@@ -1,73 +0,0 @@
|
||||
"""A class that does not store any data. This is the default memory provider."""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
from autogpt.memory.base import MemoryProviderSingleton
|
||||
|
||||
|
||||
class NoMemory(MemoryProviderSingleton):
|
||||
"""
|
||||
A class that does not store any data. This is the default memory provider.
|
||||
"""
|
||||
|
||||
def __init__(self, cfg):
|
||||
"""
|
||||
Initializes the NoMemory provider.
|
||||
|
||||
Args:
|
||||
cfg: The config object.
|
||||
|
||||
Returns: None
|
||||
"""
|
||||
pass
|
||||
|
||||
def add(self, data: str) -> str:
|
||||
"""
|
||||
Adds a data point to the memory. No action is taken in NoMemory.
|
||||
|
||||
Args:
|
||||
data: The data to add.
|
||||
|
||||
Returns: An empty string.
|
||||
"""
|
||||
return ""
|
||||
|
||||
def get(self, data: str) -> list[Any] | None:
|
||||
"""
|
||||
Gets the data from the memory that is most relevant to the given data.
|
||||
NoMemory always returns None.
|
||||
|
||||
Args:
|
||||
data: The data to compare to.
|
||||
|
||||
Returns: None
|
||||
"""
|
||||
return None
|
||||
|
||||
def clear(self) -> str:
|
||||
"""
|
||||
Clears the memory. No action is taken in NoMemory.
|
||||
|
||||
Returns: An empty string.
|
||||
"""
|
||||
return ""
|
||||
|
||||
def get_relevant(self, data: str, num_relevant: int = 5) -> list[Any] | None:
|
||||
"""
|
||||
Returns all the data in the memory that is relevant to the given data.
|
||||
NoMemory always returns None.
|
||||
|
||||
Args:
|
||||
data: The data to compare to.
|
||||
num_relevant: The number of relevant data to return.
|
||||
|
||||
Returns: None
|
||||
"""
|
||||
return None
|
||||
|
||||
def get_stats(self):
|
||||
"""
|
||||
Returns: An empty dictionary as there are no stats in NoMemory.
|
||||
"""
|
||||
return {}
|
||||
@@ -1,78 +0,0 @@
|
||||
import pinecone
|
||||
from colorama import Fore, Style
|
||||
|
||||
from autogpt.llm import get_ada_embedding
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory.base import MemoryProviderSingleton
|
||||
|
||||
|
||||
class PineconeMemory(MemoryProviderSingleton):
|
||||
def __init__(self, cfg):
|
||||
pinecone_api_key = cfg.pinecone_api_key
|
||||
pinecone_region = cfg.pinecone_region
|
||||
pinecone.init(api_key=pinecone_api_key, environment=pinecone_region)
|
||||
dimension = 1536
|
||||
metric = "cosine"
|
||||
pod_type = "p1"
|
||||
table_name = "auto-gpt"
|
||||
# this assumes we don't start with memory.
|
||||
# for now this works.
|
||||
# we'll need a more complicated and robust system if we want to start with
|
||||
# memory.
|
||||
self.vec_num = 0
|
||||
|
||||
try:
|
||||
pinecone.whoami()
|
||||
except Exception as e:
|
||||
logger.typewriter_log(
|
||||
"FAILED TO CONNECT TO PINECONE",
|
||||
Fore.RED,
|
||||
Style.BRIGHT + str(e) + Style.RESET_ALL,
|
||||
)
|
||||
logger.double_check(
|
||||
"Please ensure you have setup and configured Pinecone properly for use."
|
||||
+ f"You can check out {Fore.CYAN + Style.BRIGHT}"
|
||||
"https://docs.agpt.co/configuration/memory/#pinecone-api-key-setup"
|
||||
f"{Style.RESET_ALL} to ensure you've set up everything correctly."
|
||||
)
|
||||
exit(1)
|
||||
|
||||
if table_name not in pinecone.list_indexes():
|
||||
logger.typewriter_log(
|
||||
"Connecting Pinecone. This may take some time...", Fore.MAGENTA, ""
|
||||
)
|
||||
pinecone.create_index(
|
||||
table_name, dimension=dimension, metric=metric, pod_type=pod_type
|
||||
)
|
||||
self.index = pinecone.Index(table_name)
|
||||
|
||||
def add(self, data):
|
||||
vector = get_ada_embedding(data)
|
||||
# no metadata here. We may wish to change that long term.
|
||||
self.index.upsert([(str(self.vec_num), vector, {"raw_text": data})])
|
||||
_text = f"Inserting data into memory at index: {self.vec_num}:\n data: {data}"
|
||||
self.vec_num += 1
|
||||
return _text
|
||||
|
||||
def get(self, data):
|
||||
return self.get_relevant(data, 1)
|
||||
|
||||
def clear(self):
|
||||
self.index.delete(deleteAll=True)
|
||||
return "Obliviated"
|
||||
|
||||
def get_relevant(self, data, num_relevant=5):
|
||||
"""
|
||||
Returns all the data in the memory that is relevant to the given data.
|
||||
:param data: The data to compare to.
|
||||
:param num_relevant: The number of relevant data to return. Defaults to 5
|
||||
"""
|
||||
query_embedding = get_ada_embedding(data)
|
||||
results = self.index.query(
|
||||
query_embedding, top_k=num_relevant, include_metadata=True
|
||||
)
|
||||
sorted_results = sorted(results.matches, key=lambda x: x.score)
|
||||
return [str(item["metadata"]["raw_text"]) for item in sorted_results]
|
||||
|
||||
def get_stats(self):
|
||||
return self.index.describe_index_stats()
|
||||
@@ -1,156 +0,0 @@
|
||||
"""Redis memory provider."""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
import numpy as np
|
||||
import redis
|
||||
from colorama import Fore, Style
|
||||
from redis.commands.search.field import TextField, VectorField
|
||||
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
|
||||
from redis.commands.search.query import Query
|
||||
|
||||
from autogpt.llm import get_ada_embedding
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory.base import MemoryProviderSingleton
|
||||
|
||||
SCHEMA = [
|
||||
TextField("data"),
|
||||
VectorField(
|
||||
"embedding",
|
||||
"HNSW",
|
||||
{"TYPE": "FLOAT32", "DIM": 1536, "DISTANCE_METRIC": "COSINE"},
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
class RedisMemory(MemoryProviderSingleton):
|
||||
def __init__(self, cfg):
|
||||
"""
|
||||
Initializes the Redis memory provider.
|
||||
|
||||
Args:
|
||||
cfg: The config object.
|
||||
|
||||
Returns: None
|
||||
"""
|
||||
redis_host = cfg.redis_host
|
||||
redis_port = cfg.redis_port
|
||||
redis_password = cfg.redis_password
|
||||
self.dimension = 1536
|
||||
self.redis = redis.Redis(
|
||||
host=redis_host,
|
||||
port=redis_port,
|
||||
password=redis_password,
|
||||
db=0, # Cannot be changed
|
||||
)
|
||||
self.cfg = cfg
|
||||
|
||||
# Check redis connection
|
||||
try:
|
||||
self.redis.ping()
|
||||
except redis.ConnectionError as e:
|
||||
logger.typewriter_log(
|
||||
"FAILED TO CONNECT TO REDIS",
|
||||
Fore.RED,
|
||||
Style.BRIGHT + str(e) + Style.RESET_ALL,
|
||||
)
|
||||
logger.double_check(
|
||||
"Please ensure you have setup and configured Redis properly for use. "
|
||||
+ f"You can check out {Fore.CYAN + Style.BRIGHT}"
|
||||
f"https://docs.agpt.co/configuration/memory/#redis-setup{Style.RESET_ALL}"
|
||||
" to ensure you've set up everything correctly."
|
||||
)
|
||||
exit(1)
|
||||
|
||||
if cfg.wipe_redis_on_start:
|
||||
self.redis.flushall()
|
||||
try:
|
||||
self.redis.ft(f"{cfg.memory_index}").create_index(
|
||||
fields=SCHEMA,
|
||||
definition=IndexDefinition(
|
||||
prefix=[f"{cfg.memory_index}:"], index_type=IndexType.HASH
|
||||
),
|
||||
)
|
||||
except Exception as e:
|
||||
logger.warn("Error creating Redis search index: ", e)
|
||||
existing_vec_num = self.redis.get(f"{cfg.memory_index}-vec_num")
|
||||
self.vec_num = int(existing_vec_num.decode("utf-8")) if existing_vec_num else 0
|
||||
|
||||
def add(self, data: str) -> str:
|
||||
"""
|
||||
Adds a data point to the memory.
|
||||
|
||||
Args:
|
||||
data: The data to add.
|
||||
|
||||
Returns: Message indicating that the data has been added.
|
||||
"""
|
||||
if "Command Error:" in data:
|
||||
return ""
|
||||
vector = get_ada_embedding(data)
|
||||
vector = np.array(vector).astype(np.float32).tobytes()
|
||||
data_dict = {b"data": data, "embedding": vector}
|
||||
pipe = self.redis.pipeline()
|
||||
pipe.hset(f"{self.cfg.memory_index}:{self.vec_num}", mapping=data_dict)
|
||||
_text = (
|
||||
f"Inserting data into memory at index: {self.vec_num}:\n" f"data: {data}"
|
||||
)
|
||||
self.vec_num += 1
|
||||
pipe.set(f"{self.cfg.memory_index}-vec_num", self.vec_num)
|
||||
pipe.execute()
|
||||
return _text
|
||||
|
||||
def get(self, data: str) -> list[Any] | None:
|
||||
"""
|
||||
Gets the data from the memory that is most relevant to the given data.
|
||||
|
||||
Args:
|
||||
data: The data to compare to.
|
||||
|
||||
Returns: The most relevant data.
|
||||
"""
|
||||
return self.get_relevant(data, 1)
|
||||
|
||||
def clear(self) -> str:
|
||||
"""
|
||||
Clears the redis server.
|
||||
|
||||
Returns: A message indicating that the memory has been cleared.
|
||||
"""
|
||||
self.redis.flushall()
|
||||
return "Obliviated"
|
||||
|
||||
def get_relevant(self, data: str, num_relevant: int = 5) -> list[Any] | None:
|
||||
"""
|
||||
Returns all the data in the memory that is relevant to the given data.
|
||||
Args:
|
||||
data: The data to compare to.
|
||||
num_relevant: The number of relevant data to return.
|
||||
|
||||
Returns: A list of the most relevant data.
|
||||
"""
|
||||
query_embedding = get_ada_embedding(data)
|
||||
base_query = f"*=>[KNN {num_relevant} @embedding $vector AS vector_score]"
|
||||
query = (
|
||||
Query(base_query)
|
||||
.return_fields("data", "vector_score")
|
||||
.sort_by("vector_score")
|
||||
.dialect(2)
|
||||
)
|
||||
query_vector = np.array(query_embedding).astype(np.float32).tobytes()
|
||||
|
||||
try:
|
||||
results = self.redis.ft(f"{self.cfg.memory_index}").search(
|
||||
query, query_params={"vector": query_vector}
|
||||
)
|
||||
except Exception as e:
|
||||
logger.warn("Error calling Redis search: ", e)
|
||||
return None
|
||||
return [result.data for result in results.docs]
|
||||
|
||||
def get_stats(self):
|
||||
"""
|
||||
Returns: The stats of the memory index.
|
||||
"""
|
||||
return self.redis.ft(f"{self.cfg.memory_index}").info()
|
||||
autogpt/memory/vector/__init__.py (new file, 138 lines)
@@ -0,0 +1,138 @@
|
||||
from autogpt.config import Config
|
||||
from autogpt.logs import logger
|
||||
|
||||
from .memory_item import MemoryItem, MemoryItemRelevance
|
||||
from .providers.base import VectorMemoryProvider as VectorMemory
|
||||
from .providers.json_file import JSONFileMemory
|
||||
from .providers.no_memory import NoMemory
|
||||
|
||||
# List of supported memory backends
|
||||
# Add a backend to this list if the import attempt is successful
|
||||
supported_memory = ["json_file", "no_memory"]
|
||||
|
||||
# try:
|
||||
# from .providers.redis import RedisMemory
|
||||
|
||||
# supported_memory.append("redis")
|
||||
# except ImportError:
|
||||
# RedisMemory = None
|
||||
|
||||
# try:
|
||||
# from .providers.pinecone import PineconeMemory
|
||||
|
||||
# supported_memory.append("pinecone")
|
||||
# except ImportError:
|
||||
# PineconeMemory = None
|
||||
|
||||
# try:
|
||||
# from .providers.weaviate import WeaviateMemory
|
||||
|
||||
# supported_memory.append("weaviate")
|
||||
# except ImportError:
|
||||
# WeaviateMemory = None
|
||||
|
||||
# try:
|
||||
# from .providers.milvus import MilvusMemory
|
||||
|
||||
# supported_memory.append("milvus")
|
||||
# except ImportError:
|
||||
# MilvusMemory = None
|
||||
|
||||
|
||||
def get_memory(cfg: Config, init=False) -> VectorMemory:
|
||||
memory = None
|
||||
|
||||
match cfg.memory_backend:
|
||||
case "json_file":
|
||||
memory = JSONFileMemory(cfg)
|
||||
|
||||
case "pinecone":
|
||||
raise NotImplementedError(
|
||||
"The Pinecone memory backend has been rendered incompatible by work on "
|
||||
"the memory system, and was removed. Whether support will be added back "
|
||||
"in the future is subject to discussion, feel free to pitch in: "
|
||||
"https://github.com/Significant-Gravitas/Auto-GPT/discussions/4280"
|
||||
)
|
||||
# if not PineconeMemory:
|
||||
# logger.warn(
|
||||
# "Error: Pinecone is not installed. Please install pinecone"
|
||||
# " to use Pinecone as a memory backend."
|
||||
# )
|
||||
# else:
|
||||
# memory = PineconeMemory(cfg)
|
||||
# if init:
|
||||
# memory.clear()
|
||||
|
||||
case "redis":
|
||||
raise NotImplementedError(
|
||||
"The Redis memory backend has been rendered incompatible by work on "
|
||||
"the memory system, and has been removed temporarily."
|
||||
)
|
||||
# if not RedisMemory:
|
||||
# logger.warn(
|
||||
# "Error: Redis is not installed. Please install redis-py to"
|
||||
# " use Redis as a memory backend."
|
||||
# )
|
||||
# else:
|
||||
# memory = RedisMemory(cfg)
|
||||
|
||||
case "weaviate":
|
||||
raise NotImplementedError(
|
||||
"The Weaviate memory backend has been rendered incompatible by work on "
|
||||
"the memory system, and was removed. Whether support will be added back "
|
||||
"in the future is subject to discussion, feel free to pitch in: "
|
||||
"https://github.com/Significant-Gravitas/Auto-GPT/discussions/4280"
|
||||
)
|
||||
# if not WeaviateMemory:
|
||||
# logger.warn(
|
||||
# "Error: Weaviate is not installed. Please install weaviate-client to"
|
||||
# " use Weaviate as a memory backend."
|
||||
# )
|
||||
# else:
|
||||
# memory = WeaviateMemory(cfg)
|
||||
|
||||
case "milvus":
|
||||
raise NotImplementedError(
|
||||
"The Milvus memory backend has been rendered incompatible by work on "
|
||||
"the memory system, and was removed. Whether support will be added back "
|
||||
"in the future is subject to discussion, feel free to pitch in: "
|
||||
"https://github.com/Significant-Gravitas/Auto-GPT/discussions/4280"
|
||||
)
|
||||
# if not MilvusMemory:
|
||||
# logger.warn(
|
||||
# "Error: pymilvus sdk is not installed."
|
||||
# "Please install pymilvus to use Milvus or Zilliz Cloud as memory backend."
|
||||
# )
|
||||
# else:
|
||||
# memory = MilvusMemory(cfg)
|
||||
|
||||
case "no_memory":
|
||||
memory = NoMemory()
|
||||
|
||||
case _:
|
||||
raise ValueError(
|
||||
f"Unknown memory backend '{cfg.memory_backend}'. Please check your config."
|
||||
)
|
||||
|
||||
if memory is None:
|
||||
memory = JSONFileMemory(cfg)
|
||||
|
||||
return memory
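
A minimal usage sketch: with MEMORY_BACKEND=json_file (the default) this returns a JSONFileMemory, while the removed backends above raise NotImplementedError instead.

from autogpt.config import Config
from autogpt.memory.vector import get_memory

cfg = Config()
memory = get_memory(cfg)
print(memory.get_stats())  # (n_memories, n_chunks)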
|
||||
|
||||
|
||||
def get_supported_memory_backends():
|
||||
return supported_memory
|
||||
|
||||
|
||||
__all__ = [
|
||||
"get_memory",
|
||||
"MemoryItem",
|
||||
"MemoryItemRelevance",
|
||||
"JSONFileMemory",
|
||||
"NoMemory",
|
||||
"VectorMemory",
|
||||
# "RedisMemory",
|
||||
# "PineconeMemory",
|
||||
# "MilvusMemory",
|
||||
# "WeaviateMemory",
|
||||
]
|
||||
autogpt/memory/vector/memory_item.py (new file, 223 lines)
@@ -0,0 +1,223 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import dataclasses
|
||||
import json
|
||||
from typing import Literal
|
||||
|
||||
import numpy as np
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.llm import Message
|
||||
from autogpt.llm.utils import count_string_tokens
|
||||
from autogpt.logs import logger
|
||||
from autogpt.processing.text import chunk_content, split_text, summarize_text
|
||||
|
||||
from .utils import Embedding, get_embedding
|
||||
|
||||
MemoryDocType = Literal["webpage", "text_file", "code_file", "agent_history"]
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
class MemoryItem:
|
||||
"""Memory object containing raw content as well as embeddings"""
|
||||
|
||||
raw_content: str
|
||||
summary: str
|
||||
chunks: list[str]
|
||||
chunk_summaries: list[str]
|
||||
e_summary: Embedding
|
||||
e_chunks: list[Embedding]
|
||||
metadata: dict
|
||||
|
||||
def relevance_for(self, query: str, e_query: Embedding | None = None):
|
||||
return MemoryItemRelevance.of(self, query, e_query)
|
||||
|
||||
@staticmethod
|
||||
def from_text(
|
||||
text: str,
|
||||
source_type: MemoryDocType,
|
||||
metadata: dict = {},
|
||||
how_to_summarize: str | None = None,
|
||||
question_for_summary: str | None = None,
|
||||
):
|
||||
cfg = Config()
|
||||
logger.debug(f"Memorizing text:\n{'-'*32}\n{text}\n{'-'*32}\n")
|
||||
|
||||
chunks = [
|
||||
chunk
|
||||
for chunk, _ in (
|
||||
split_text(text, cfg.embedding_model)
|
||||
if source_type != "code_file"
|
||||
else chunk_content(text, cfg.embedding_model)
|
||||
)
|
||||
]
|
||||
logger.debug("Chunks: " + str(chunks))
|
||||
|
||||
chunk_summaries = [
|
||||
summary
|
||||
for summary, _ in [
|
||||
summarize_text(
|
||||
text_chunk,
|
||||
instruction=how_to_summarize,
|
||||
question=question_for_summary,
|
||||
)
|
||||
for text_chunk in chunks
|
||||
]
|
||||
]
|
||||
logger.debug("Chunk summaries: " + str(chunk_summaries))
|
||||
|
||||
e_chunks = get_embedding(chunks)
|
||||
|
||||
summary = (
|
||||
chunk_summaries[0]
|
||||
if len(chunks) == 1
|
||||
else summarize_text(
|
||||
"\n\n".join(chunk_summaries),
|
||||
instruction=how_to_summarize,
|
||||
question=question_for_summary,
|
||||
)[0]
|
||||
)
|
||||
logger.debug("Total summary: " + summary)
|
||||
|
||||
# TODO: investigate search performance of weighted average vs summary
|
||||
# e_average = np.average(e_chunks, axis=0, weights=[len(c) for c in chunks])
|
||||
e_summary = get_embedding(summary)
|
||||
|
||||
metadata["source_type"] = source_type
|
||||
|
||||
return MemoryItem(
|
||||
text,
|
||||
summary,
|
||||
chunks,
|
||||
chunk_summaries,
|
||||
e_summary,
|
||||
e_chunks,
|
||||
metadata=metadata,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def from_text_file(content: str, path: str):
|
||||
return MemoryItem.from_text(content, "text_file", {"location": path})
|
||||
|
||||
@staticmethod
|
||||
def from_code_file(content: str, path: str):
|
||||
# TODO: implement tailored code memories
|
||||
return MemoryItem.from_text(content, "code_file", {"location": path})
|
||||
|
||||
@staticmethod
|
||||
def from_ai_action(ai_message: Message, result_message: Message):
|
||||
# The result_message contains either user feedback
|
||||
# or the result of the command specified in ai_message
|
||||
|
||||
if ai_message["role"] != "assistant":
|
||||
raise ValueError(f"Invalid role on 'ai_message': {ai_message['role']}")
|
||||
|
||||
result = (
|
||||
result_message["content"]
|
||||
if result_message["content"].startswith("Command")
|
||||
else "None"
|
||||
)
|
||||
user_input = (
|
||||
result_message["content"]
|
||||
if result_message["content"].startswith("Human feedback")
|
||||
else "None"
|
||||
)
|
||||
memory_content = (
|
||||
f"Assistant Reply: {ai_message['content']}"
|
||||
"\n\n"
|
||||
f"Result: {result}"
|
||||
"\n\n"
|
||||
f"Human Feedback: {user_input}"
|
||||
)
|
||||
|
||||
return MemoryItem.from_text(
|
||||
text=memory_content,
|
||||
source_type="agent_history",
|
||||
how_to_summarize="if possible, also make clear the link between the command in the assistant's response and the command result. Do not mention the human feedback if there is none",
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def from_webpage(content: str, url: str, question: str | None = None):
|
||||
return MemoryItem.from_text(
|
||||
text=content,
|
||||
source_type="webpage",
|
||||
metadata={"location": url},
|
||||
question_for_summary=question,
|
||||
)
|
||||
|
||||
def dump(self) -> str:
|
||||
token_length = count_string_tokens(self.raw_content, Config().embedding_model)
|
||||
return f"""
|
||||
=============== MemoryItem ===============
|
||||
Length: {token_length} tokens in {len(self.e_chunks)} chunks
|
||||
Metadata: {json.dumps(self.metadata, indent=2)}
|
||||
---------------- SUMMARY -----------------
|
||||
{self.summary}
|
||||
------------------ RAW -------------------
|
||||
{self.raw_content}
|
||||
==========================================
|
||||
"""
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
class MemoryItemRelevance:
|
||||
"""
|
||||
Class that encapsulates memory relevance search functionality and data.
|
||||
Instances contain a MemoryItem and its relevance scores for a given query.
|
||||
"""
|
||||
|
||||
memory_item: MemoryItem
|
||||
for_query: str
|
||||
summary_relevance_score: float
|
||||
chunk_relevance_scores: list[float]
|
||||
|
||||
@staticmethod
|
||||
def of(
|
||||
memory_item: MemoryItem, for_query: str, e_query: Embedding | None = None
|
||||
) -> MemoryItemRelevance:
|
||||
e_query = e_query or get_embedding(for_query)
|
||||
_, srs, crs = MemoryItemRelevance.calculate_scores(memory_item, e_query)
|
||||
return MemoryItemRelevance(
|
||||
for_query=for_query,
|
||||
memory_item=memory_item,
|
||||
summary_relevance_score=srs,
|
||||
chunk_relevance_scores=crs,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def calculate_scores(
|
||||
memory: MemoryItem, compare_to: Embedding
|
||||
) -> tuple[float, float, list[float]]:
|
||||
"""
|
||||
Calculates similarity between given embedding and all embeddings of the memory
|
||||
|
||||
Returns:
|
||||
float: the aggregate (max) relevance score of the memory
|
||||
float: the relevance score of the memory summary
|
||||
list: the relevance scores of the memory chunks
|
||||
"""
|
||||
summary_relevance_score = np.dot(memory.e_summary, compare_to)
|
||||
chunk_relevance_scores = np.dot(memory.e_chunks, compare_to)
|
||||
logger.debug(f"Relevance of summary: {summary_relevance_score}")
|
||||
logger.debug(f"Relevance of chunks: {chunk_relevance_scores}")
|
||||
|
||||
relevance_scores = [summary_relevance_score, *chunk_relevance_scores]
|
||||
logger.debug(f"Relevance scores: {relevance_scores}")
|
||||
return max(relevance_scores), summary_relevance_score, chunk_relevance_scores
|
||||
|
||||
@property
|
||||
def score(self) -> float:
|
||||
"""The aggregate relevance score of the memory item for the given query"""
|
||||
return max([self.summary_relevance_score, *self.chunk_relevance_scores])
|
||||
|
||||
@property
|
||||
def most_relevant_chunk(self) -> tuple[str, float]:
|
||||
"""The most relevant chunk of the memory item + its score for the given query"""
|
||||
i_relmax = np.argmax(self.chunk_relevance_scores)
|
||||
return self.memory_item.chunks[i_relmax], self.chunk_relevance_scores[i_relmax]
|
||||
|
||||
def __str__(self):
|
||||
return (
|
||||
f"{self.memory_item.summary} ({self.summary_relevance_score}) "
|
||||
f"{self.chunk_relevance_scores}"
|
||||
)
|
||||
autogpt/memory/vector/providers/__init__.py (new file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
from .json_file import JSONFileMemory
|
||||
from .no_memory import NoMemory
|
||||
|
||||
__all__ = [
|
||||
"JSONFileMemory",
|
||||
"NoMemory",
|
||||
]
|
||||
autogpt/memory/vector/providers/base.py (new file, 74 lines)
@@ -0,0 +1,74 @@
|
||||
import abc
|
||||
import functools
|
||||
from typing import MutableSet, Sequence
|
||||
|
||||
import numpy as np
|
||||
|
||||
from autogpt.config.config import Config
|
||||
from autogpt.logs import logger
|
||||
from autogpt.singleton import AbstractSingleton
|
||||
|
||||
from .. import MemoryItem, MemoryItemRelevance
|
||||
from ..utils import Embedding, get_embedding
|
||||
|
||||
|
||||
class VectorMemoryProvider(MutableSet[MemoryItem], AbstractSingleton):
|
||||
@abc.abstractmethod
|
||||
def __init__(self, config: Config):
|
||||
pass
|
||||
|
||||
def get(self, query: str) -> MemoryItemRelevance | None:
|
||||
"""
|
||||
Gets the data from the memory that is most relevant to the given query.
|
||||
|
||||
Args:
|
||||
data: The data to compare to.
|
||||
|
||||
Returns: The most relevant Memory
|
||||
"""
|
||||
result = self.get_relevant(query, 1)
|
||||
return result[0] if result else None
|
||||
|
||||
def get_relevant(self, query: str, k: int) -> Sequence[MemoryItemRelevance]:
|
||||
"""
|
||||
Returns the top-k most relevant memories for the given query
|
||||
|
||||
Args:
|
||||
query: the query to compare stored memories to
|
||||
k: the number of relevant memories to fetch
|
||||
|
||||
Returns:
|
||||
list[MemoryItemRelevance] containing the top [k] relevant memories
|
||||
"""
|
||||
if len(self) < 1:
|
||||
return []
|
||||
|
||||
logger.debug(
|
||||
f"Searching for {k} relevant memories for query '{query}'; "
|
||||
f"{len(self)} memories in index"
|
||||
)
|
||||
|
||||
relevances = self.score_memories_for_relevance(query)
|
||||
logger.debug(f"Memory relevance scores: {[str(r) for r in relevances]}")
|
||||
|
||||
# take last k items and reverse
|
||||
top_k_indices = np.argsort([r.score for r in relevances])[-k:][::-1]
|
||||
|
||||
return [relevances[i] for i in top_k_indices]
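
A minimal usage sketch against the JSONFileMemory backend defined later in this diff; MemoryItem.from_text calls the OpenAI API, so an API key is assumed, and the stored text is made up:

from autogpt.config import Config
from autogpt.memory.vector import JSONFileMemory, MemoryItem

cfg = Config()
memory = JSONFileMemory(cfg)
memory.add(MemoryItem.from_text("The deploy script lives in scripts/deploy.sh", "text_file"))

best = memory.get("where is the deploy script?")   # MemoryItemRelevance or None
if best is not None:
    print(best.memory_item.summary, best.score)
top = memory.get_relevant("deployment", k=1)       # top-1 relevant memories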
|
||||
|
||||
def score_memories_for_relevance(
|
||||
self, for_query: str
|
||||
) -> Sequence[MemoryItemRelevance]:
|
||||
"""
|
||||
Returns MemoryItemRelevance for every memory in the index.
|
||||
Implementations may override this function for performance purposes.
|
||||
"""
|
||||
e_query: Embedding = get_embedding(for_query)
|
||||
return [m.relevance_for(for_query, e_query) for m in self]
|
||||
|
||||
def get_stats(self) -> tuple[int, int]:
|
||||
"""
|
||||
Returns:
|
||||
tuple (n_memories: int, n_chunks: int): the stats of the memory index
|
||||
"""
|
||||
return len(self), functools.reduce(lambda t, m: t + len(m.e_chunks), self, 0)
|
||||
autogpt/memory/vector/providers/json_file.py (new file, 68 lines)
@@ -0,0 +1,68 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Iterator
|
||||
|
||||
import orjson
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.logs import logger
|
||||
|
||||
from ..memory_item import MemoryItem
|
||||
from .base import VectorMemoryProvider
|
||||
|
||||
|
||||
class JSONFileMemory(VectorMemoryProvider):
|
||||
"""Memory backend that stores memories in a JSON file"""
|
||||
|
||||
SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS
|
||||
|
||||
file_path: Path
|
||||
memories: list[MemoryItem]
|
||||
|
||||
def __init__(self, cfg: Config) -> None:
|
||||
"""Initialize a class instance
|
||||
|
||||
Args:
|
||||
cfg: Config object
|
||||
|
||||
Returns:
|
||||
None
|
||||
"""
|
||||
workspace_path = Path(cfg.workspace_path)
|
||||
self.file_path = workspace_path / f"{cfg.memory_index}.json"
|
||||
self.file_path.touch()
|
||||
logger.debug(f"Initialized {__name__} with index path {self.file_path}")
|
||||
|
||||
self.memories = []
|
||||
self.save_index()
|
||||
|
||||
def __iter__(self) -> Iterator[MemoryItem]:
|
||||
return iter(self.memories)
|
||||
|
||||
def __contains__(self, x: MemoryItem) -> bool:
|
||||
return x in self.memories
|
||||
|
||||
def __len__(self) -> int:
|
||||
return len(self.memories)
|
||||
|
||||
def add(self, item: MemoryItem):
|
||||
self.memories.append(item)
|
||||
self.save_index()
|
||||
return len(self.memories)
|
||||
|
||||
def discard(self, item: MemoryItem):
|
||||
try:
|
||||
self.remove(item)
|
||||
except:
|
||||
pass
|
||||
|
||||
def clear(self):
|
||||
"""Clears the data in memory."""
|
||||
self.memories.clear()
|
||||
self.save_index()
|
||||
|
||||
def save_index(self):
|
||||
logger.debug(f"Saving memory index to file {self.file_path}")
|
||||
with self.file_path.open("wb") as f:
|
||||
return f.write(orjson.dumps(self.memories, option=self.SAVE_OPTIONS))
|
||||
autogpt/memory/vector/providers/no_memory.py (new file, 36 lines)
@@ -0,0 +1,36 @@
|
||||
"""A class that does not store any data. This is the default memory provider."""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Iterator, Optional
|
||||
|
||||
from autogpt.config.config import Config
|
||||
|
||||
from .. import MemoryItem
|
||||
from .base import VectorMemoryProvider
|
||||
|
||||
|
||||
class NoMemory(VectorMemoryProvider):
|
||||
"""
|
||||
A class that does not store any data. This is the default memory provider.
|
||||
"""
|
||||
|
||||
def __init__(self, config: Optional[Config] = None):
|
||||
pass
|
||||
|
||||
def __iter__(self) -> Iterator[MemoryItem]:
|
||||
return iter([])
|
||||
|
||||
def __contains__(self, x: MemoryItem) -> bool:
|
||||
return False
|
||||
|
||||
def __len__(self) -> int:
|
||||
return 0
|
||||
|
||||
def add(self, item: MemoryItem):
|
||||
pass
|
||||
|
||||
def discard(self, item: MemoryItem):
|
||||
pass
|
||||
|
||||
def clear(self):
|
||||
pass
|
||||
autogpt/memory/vector/utils.py (new file, 70 lines)
@@ -0,0 +1,70 @@
from typing import Any, overload

import numpy as np
import openai

from autogpt.config import Config
from autogpt.llm.utils import metered, retry_openai_api
from autogpt.logs import logger

Embedding = list[np.float32] | np.ndarray[Any, np.dtype[np.float32]]
"""Embedding vector"""
TText = list[int]
"""Token array representing text"""


@overload
def get_embedding(input: str | TText) -> Embedding:
    ...


@overload
def get_embedding(input: list[str] | list[TText]) -> list[Embedding]:
    ...


@metered
@retry_openai_api()
def get_embedding(
    input: str | TText | list[str] | list[TText],
) -> Embedding | list[Embedding]:
    """Get an embedding from the ada model.

    Args:
        input: Input text to get embeddings for, encoded as a string or array of tokens.
            Multiple inputs may be given as a list of strings or token arrays.

    Returns:
        List[float]: The embedding.
    """
    cfg = Config()
    multiple = isinstance(input, list) and all(not isinstance(i, int) for i in input)

    if isinstance(input, str):
        input = input.replace("\n", " ")
    elif multiple and isinstance(input[0], str):
        input = [text.replace("\n", " ") for text in input]

    model = cfg.embedding_model
    if cfg.use_azure:
        kwargs = {"engine": cfg.get_azure_deployment_id_for_model(model)}
    else:
        kwargs = {"model": model}

    logger.debug(
        f"Getting embedding{f's for {len(input)} inputs' if multiple else ''}"
        f" with model '{model}'"
        + (f" via Azure deployment '{kwargs['engine']}'" if cfg.use_azure else "")
    )

    embeddings = openai.Embedding.create(
        input=input,
        api_key=cfg.openai_api_key,
        **kwargs,
    ).data

    if not multiple:
        return embeddings[0]["embedding"]

    embeddings = sorted(embeddings, key=lambda x: x["index"])
    return [d["embedding"] for d in embeddings]
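An aside (not part of the diff): a usage sketch for the `get_embedding` overloads above. It assumes a valid OpenAI API key is configured, since the call goes through `openai.Embedding.create`:

```python
from autogpt.memory.vector.utils import get_embedding

# Single input -> a single embedding vector
query_embedding = get_embedding("What is Tesla's 2022 revenue?")

# Multiple inputs -> one vector per input, returned in the original order
chunk_embeddings = get_embedding(["first chunk of text", "second chunk of text"])

print(len(query_embedding), len(chunk_embeddings))  # e.g. 1536 2
```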
@@ -1,127 +0,0 @@
|
||||
import weaviate
|
||||
from weaviate import Client
|
||||
from weaviate.embedded import EmbeddedOptions
|
||||
from weaviate.util import generate_uuid5
|
||||
|
||||
from autogpt.llm import get_ada_embedding
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory.base import MemoryProviderSingleton
|
||||
|
||||
|
||||
def default_schema(weaviate_index):
|
||||
return {
|
||||
"class": weaviate_index,
|
||||
"properties": [
|
||||
{
|
||||
"name": "raw_text",
|
||||
"dataType": ["text"],
|
||||
"description": "original text for the embedding",
|
||||
}
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
class WeaviateMemory(MemoryProviderSingleton):
|
||||
def __init__(self, cfg):
|
||||
auth_credentials = self._build_auth_credentials(cfg)
|
||||
|
||||
url = f"{cfg.weaviate_protocol}://{cfg.weaviate_host}:{cfg.weaviate_port}"
|
||||
|
||||
if cfg.use_weaviate_embedded:
|
||||
self.client = Client(
|
||||
embedded_options=EmbeddedOptions(
|
||||
hostname=cfg.weaviate_host,
|
||||
port=int(cfg.weaviate_port),
|
||||
persistence_data_path=cfg.weaviate_embedded_path,
|
||||
)
|
||||
)
|
||||
|
||||
logger.info(
|
||||
f"Weaviate Embedded running on: {url} with persistence path: {cfg.weaviate_embedded_path}"
|
||||
)
|
||||
else:
|
||||
self.client = Client(url, auth_client_secret=auth_credentials)
|
||||
|
||||
self.index = WeaviateMemory.format_classname(cfg.memory_index)
|
||||
self._create_schema()
|
||||
|
||||
@staticmethod
|
||||
def format_classname(index):
|
||||
# weaviate uses capitalised index names
|
||||
# The python client uses the following code to format
|
||||
# index names before the corresponding class is created
|
||||
index = index.replace("-", "_")
|
||||
if len(index) == 1:
|
||||
return index.capitalize()
|
||||
return index[0].capitalize() + index[1:]
|
||||
|
||||
def _create_schema(self):
|
||||
schema = default_schema(self.index)
|
||||
if not self.client.schema.contains(schema):
|
||||
self.client.schema.create_class(schema)
|
||||
|
||||
def _build_auth_credentials(self, cfg):
|
||||
if cfg.weaviate_username and cfg.weaviate_password:
|
||||
return weaviate.AuthClientPassword(
|
||||
cfg.weaviate_username, cfg.weaviate_password
|
||||
)
|
||||
if cfg.weaviate_api_key:
|
||||
return weaviate.AuthApiKey(api_key=cfg.weaviate_api_key)
|
||||
else:
|
||||
return None
|
||||
|
||||
def add(self, data):
|
||||
vector = get_ada_embedding(data)
|
||||
|
||||
doc_uuid = generate_uuid5(data, self.index)
|
||||
data_object = {"raw_text": data}
|
||||
|
||||
with self.client.batch as batch:
|
||||
batch.add_data_object(
|
||||
uuid=doc_uuid,
|
||||
data_object=data_object,
|
||||
class_name=self.index,
|
||||
vector=vector,
|
||||
)
|
||||
|
||||
return f"Inserting data into memory at uuid: {doc_uuid}:\n data: {data}"
|
||||
|
||||
def get(self, data):
|
||||
return self.get_relevant(data, 1)
|
||||
|
||||
def clear(self):
|
||||
self.client.schema.delete_all()
|
||||
|
||||
# weaviate does not yet have a neat way to just remove the items in an index
|
||||
# without removing the entire schema, therefore we need to re-create it
|
||||
# after a call to delete_all
|
||||
self._create_schema()
|
||||
|
||||
return "Obliterated"
|
||||
|
||||
def get_relevant(self, data, num_relevant=5):
|
||||
query_embedding = get_ada_embedding(data)
|
||||
try:
|
||||
results = (
|
||||
self.client.query.get(self.index, ["raw_text"])
|
||||
.with_near_vector({"vector": query_embedding, "certainty": 0.7})
|
||||
.with_limit(num_relevant)
|
||||
.do()
|
||||
)
|
||||
|
||||
if len(results["data"]["Get"][self.index]) > 0:
|
||||
return [
|
||||
str(item["raw_text"]) for item in results["data"]["Get"][self.index]
|
||||
]
|
||||
else:
|
||||
return []
|
||||
|
||||
except Exception as err:
|
||||
logger.warn(f"Unexpected error {err=}, {type(err)=}")
|
||||
return []
|
||||
|
||||
def get_stats(self):
|
||||
result = self.client.query.aggregate(self.index).with_meta_count().do()
|
||||
class_data = result["data"]["Aggregate"][self.index]
|
||||
|
||||
return class_data[0]["meta"] if class_data else {}
|
||||
@@ -1,33 +0,0 @@
|
||||
from autogpt.json_utils.utilities import (
|
||||
LLM_DEFAULT_RESPONSE_FORMAT,
|
||||
is_string_valid_json,
|
||||
)
|
||||
from autogpt.logs import logger
|
||||
|
||||
|
||||
def format_memory(assistant_reply, next_message_content):
|
||||
# the next_message_content is a variable to stores either the user_input or the command following the assistant_reply
|
||||
result = (
|
||||
"None" if next_message_content.startswith("Command") else next_message_content
|
||||
)
|
||||
user_input = (
|
||||
"None"
|
||||
if next_message_content.startswith("Human feedback")
|
||||
else next_message_content
|
||||
)
|
||||
|
||||
return f"Assistant Reply: {assistant_reply}\nResult: {result}\nHuman Feedback:{user_input}"
|
||||
|
||||
|
||||
def save_memory_trimmed_from_context_window(
|
||||
full_message_history, next_message_to_add_index, permanent_memory
|
||||
):
|
||||
while next_message_to_add_index >= 0:
|
||||
message_content = full_message_history[next_message_to_add_index]["content"]
|
||||
if is_string_valid_json(message_content, LLM_DEFAULT_RESPONSE_FORMAT):
|
||||
next_message = full_message_history[next_message_to_add_index + 1]
|
||||
memory_to_add = format_memory(message_content, next_message["content"])
|
||||
logger.debug(f"Storing the following memory: {memory_to_add}")
|
||||
permanent_memory.add(memory_to_add)
|
||||
|
||||
next_message_to_add_index -= 1
|
||||
@@ -1,143 +0,0 @@
|
||||
import copy
|
||||
import json
|
||||
from typing import Dict, List, Tuple
|
||||
|
||||
from autogpt.agent import Agent
|
||||
from autogpt.config import Config
|
||||
from autogpt.llm.llm_utils import create_chat_completion
|
||||
from autogpt.log_cycle.log_cycle import PROMPT_SUMMARY_FILE_NAME, SUMMARY_FILE_NAME
|
||||
from autogpt.logs import logger
|
||||
|
||||
cfg = Config()
|
||||
|
||||
|
||||
def get_newly_trimmed_messages(
|
||||
full_message_history: List[Dict[str, str]],
|
||||
current_context: List[Dict[str, str]],
|
||||
last_memory_index: int,
|
||||
) -> Tuple[List[Dict[str, str]], int]:
|
||||
"""
|
||||
This function returns a list of dictionaries contained in full_message_history
|
||||
with an index higher than prev_index that are absent from current_context.
|
||||
|
||||
Args:
|
||||
full_message_history (list): A list of dictionaries representing the full message history.
|
||||
current_context (list): A list of dictionaries representing the current context.
|
||||
last_memory_index (int): An integer representing the previous index.
|
||||
|
||||
Returns:
|
||||
list: A list of dictionaries that are in full_message_history with an index higher than last_memory_index and absent from current_context.
|
||||
int: The new index value for use in the next loop.
|
||||
"""
|
||||
# Select messages in full_message_history with an index higher than last_memory_index
|
||||
new_messages = [
|
||||
msg for i, msg in enumerate(full_message_history) if i > last_memory_index
|
||||
]
|
||||
|
||||
# Remove messages that are already present in current_context
|
||||
new_messages_not_in_context = [
|
||||
msg for msg in new_messages if msg not in current_context
|
||||
]
|
||||
|
||||
# Find the index of the last message processed
|
||||
new_index = last_memory_index
|
||||
if new_messages_not_in_context:
|
||||
last_message = new_messages_not_in_context[-1]
|
||||
new_index = full_message_history.index(last_message)
|
||||
|
||||
return new_messages_not_in_context, new_index
|
||||
|
||||
|
||||
def update_running_summary(
|
||||
agent: Agent, current_memory: str, new_events: List[Dict[str, str]]
|
||||
) -> str:
|
||||
"""
|
||||
This function takes a list of dictionaries representing new events and combines them with the current summary,
|
||||
focusing on key and potentially important information to remember. The updated summary is returned in a message
|
||||
formatted in the 1st person past tense.
|
||||
|
||||
Args:
|
||||
new_events (List[Dict]): A list of dictionaries containing the latest events to be added to the summary.
|
||||
|
||||
Returns:
|
||||
str: A message containing the updated summary of actions, formatted in the 1st person past tense.
|
||||
|
||||
Example:
|
||||
new_events = [{"event": "entered the kitchen."}, {"event": "found a scrawled note with the number 7"}]
|
||||
update_running_summary(new_events)
|
||||
# Returns: "This reminds you of these events from your past: \nI entered the kitchen and found a scrawled note saying 7."
|
||||
"""
|
||||
# Create a copy of the new_events list to prevent modifying the original list
|
||||
new_events = copy.deepcopy(new_events)
|
||||
|
||||
# Replace "assistant" with "you". This produces much better first person past tense results.
|
||||
for event in new_events:
|
||||
if event["role"].lower() == "assistant":
|
||||
event["role"] = "you"
|
||||
|
||||
# Remove "thoughts" dictionary from "content"
|
||||
try:
|
||||
content_dict = json.loads(event["content"])
|
||||
if "thoughts" in content_dict:
|
||||
del content_dict["thoughts"]
|
||||
event["content"] = json.dumps(content_dict)
|
||||
except json.decoder.JSONDecodeError:
|
||||
if cfg.debug_mode:
|
||||
logger.error(f"Error: Invalid JSON: {event['content']}\n")
|
||||
|
||||
elif event["role"].lower() == "system":
|
||||
event["role"] = "your computer"
|
||||
|
||||
# Delete all user messages
|
||||
elif event["role"] == "user":
|
||||
new_events.remove(event)
|
||||
|
||||
# This can happen at any point during execution, not just the beginning
|
||||
if len(new_events) == 0:
|
||||
new_events = "Nothing new happened."
|
||||
|
||||
prompt = f'''Your task is to create a concise running summary of actions and information results in the provided text, focusing on key and potentially important information to remember.
|
||||
|
||||
You will receive the current summary and the your latest actions. Combine them, adding relevant key information from the latest development in 1st person past tense and keeping the summary concise.
|
||||
|
||||
Summary So Far:
|
||||
"""
|
||||
{current_memory}
|
||||
"""
|
||||
|
||||
Latest Development:
|
||||
"""
|
||||
{new_events}
|
||||
"""
|
||||
'''
|
||||
|
||||
messages = [
|
||||
{
|
||||
"role": "user",
|
||||
"content": prompt,
|
||||
}
|
||||
]
|
||||
agent.log_cycle_handler.log_cycle(
|
||||
agent.config.ai_name,
|
||||
agent.created_at,
|
||||
agent.cycle_count,
|
||||
messages,
|
||||
PROMPT_SUMMARY_FILE_NAME,
|
||||
)
|
||||
|
||||
current_memory = create_chat_completion(messages, cfg.fast_llm_model)
|
||||
|
||||
agent.log_cycle_handler.log_cycle(
|
||||
agent.config.ai_name,
|
||||
agent.created_at,
|
||||
agent.cycle_count,
|
||||
current_memory,
|
||||
SUMMARY_FILE_NAME,
|
||||
)
|
||||
|
||||
message_to_return = {
|
||||
"role": "system",
|
||||
"content": f"This reminds you of these events from your past: \n{current_memory}",
|
||||
}
|
||||
|
||||
return message_to_return
|
||||
@@ -68,7 +68,6 @@ class BaseOpenAIPlugin(AutoGPTPluginTemplate):
|
||||
prompt (PromptGenerator): The prompt generator.
|
||||
messages (List[str]): The list of messages.
|
||||
"""
|
||||
pass
|
||||
|
||||
def can_handle_post_planning(self) -> bool:
|
||||
"""This method is called to check that the plugin can
|
||||
@@ -116,7 +115,6 @@ class BaseOpenAIPlugin(AutoGPTPluginTemplate):
|
||||
Returns:
|
||||
Optional[str]: The resulting message.
|
||||
"""
|
||||
pass
|
||||
|
||||
def can_handle_post_instruction(self) -> bool:
|
||||
"""This method is called to check that the plugin can
|
||||
@@ -196,4 +194,56 @@ class BaseOpenAIPlugin(AutoGPTPluginTemplate):
|
||||
Returns:
|
||||
str: The resulting response.
|
||||
"""
|
||||
pass
|
||||
|
||||
def can_handle_text_embedding(self, text: str) -> bool:
|
||||
"""This method is called to check that the plugin can
|
||||
handle the text_embedding method.
|
||||
Args:
|
||||
text (str): The text to be converted to an embedding.
|
||||
Returns:
|
||||
bool: True if the plugin can handle the text_embedding method."""
|
||||
return False
|
||||
|
||||
def handle_text_embedding(self, text: str) -> list:
|
||||
"""This method is called when the chat completion is done.
|
||||
Args:
|
||||
text (str): The text to be converted to an embedding.
|
||||
Returns:
|
||||
list: The text embedding.
|
||||
"""
|
||||
|
||||
def can_handle_user_input(self, user_input: str) -> bool:
|
||||
"""This method is called to check that the plugin can
|
||||
handle the user_input method.
|
||||
|
||||
Args:
|
||||
user_input (str): The user input.
|
||||
|
||||
Returns:
|
||||
bool: True if the plugin can handle the user_input method."""
|
||||
return False
|
||||
|
||||
def user_input(self, user_input: str) -> str:
|
||||
"""This method is called to request user input to the user.
|
||||
|
||||
Args:
|
||||
user_input (str): The question or prompt to ask the user.
|
||||
|
||||
Returns:
|
||||
str: The user input.
|
||||
"""
|
||||
|
||||
def can_handle_report(self) -> bool:
|
||||
"""This method is called to check that the plugin can
|
||||
handle the report method.
|
||||
|
||||
Returns:
|
||||
bool: True if the plugin can handle the report method."""
|
||||
return False
|
||||
|
||||
def report(self, message: str) -> None:
|
||||
"""This method is called to report a message to the user.
|
||||
|
||||
Args:
|
||||
message (str): The message to report.
|
||||
"""
|
||||
|
||||
@@ -1,18 +1,18 @@
|
||||
"""Handles loading of plugins."""
|
||||
|
||||
import importlib
|
||||
import importlib.util
|
||||
import json
|
||||
import os
|
||||
import zipfile
|
||||
from pathlib import Path
|
||||
from typing import List, Optional, Tuple
|
||||
from typing import List
|
||||
from urllib.parse import urlparse
|
||||
from zipimport import zipimporter
|
||||
|
||||
import openapi_python_client
|
||||
import requests
|
||||
from auto_gpt_plugin_template import AutoGPTPluginTemplate
|
||||
from openapi_python_client.cli import Config as OpenAPIConfig
|
||||
from openapi_python_client.config import Config as OpenAPIConfig
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.logs import logger
|
||||
@@ -152,7 +152,7 @@ def initialize_openai_plugins(
|
||||
)
|
||||
prev_cwd = Path.cwd()
|
||||
os.chdir(openai_plugin_client_dir)
|
||||
Path("ai-plugin.json")
|
||||
|
||||
if not os.path.exists("client"):
|
||||
client_results = openapi_python_client.create_new_client(
|
||||
url=manifest_spec["manifest"]["api"]["url"],
|
||||
@@ -170,9 +170,13 @@ def initialize_openai_plugins(
|
||||
"client", "client/client/client.py"
|
||||
)
|
||||
module = importlib.util.module_from_spec(spec)
|
||||
spec.loader.exec_module(module)
|
||||
|
||||
try:
|
||||
spec.loader.exec_module(module)
|
||||
finally:
|
||||
os.chdir(prev_cwd)
|
||||
|
||||
client = module.Client(base_url=url)
|
||||
os.chdir(prev_cwd)
|
||||
manifest_spec["client"] = client
|
||||
return manifests_specs
|
||||
|
||||
@@ -262,10 +266,14 @@ def denylist_allowlist_check(plugin_name: str, cfg: Config) -> bool:
|
||||
True or False
|
||||
"""
|
||||
logger.debug(f"Checking if plugin {plugin_name} should be loaded")
|
||||
if plugin_name in cfg.plugins_denylist:
|
||||
if (
|
||||
plugin_name in cfg.plugins_denylist
|
||||
or "all" in cfg.plugins_denylist
|
||||
or "none" in cfg.plugins_allowlist
|
||||
):
|
||||
logger.debug(f"Not loading plugin {plugin_name} as it was in the denylist.")
|
||||
return False
|
||||
if plugin_name in cfg.plugins_allowlist:
|
||||
if plugin_name in cfg.plugins_allowlist or "all" in cfg.plugins_allowlist:
|
||||
logger.debug(f"Loading plugin {plugin_name} as it was in the allowlist.")
|
||||
return True
|
||||
ack = input(
|
||||
|
||||
@@ -1,170 +1,234 @@
|
||||
"""Text processing functions"""
|
||||
from typing import Dict, Generator, Optional
|
||||
from math import ceil
|
||||
from typing import Optional
|
||||
|
||||
import spacy
|
||||
from selenium.webdriver.remote.webdriver import WebDriver
|
||||
import tiktoken
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.llm import count_message_tokens, create_chat_completion
|
||||
from autogpt.llm.base import ChatSequence
|
||||
from autogpt.llm.providers.openai import OPEN_AI_MODELS
|
||||
from autogpt.llm.utils import count_string_tokens, create_chat_completion
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory import get_memory
|
||||
from autogpt.utils import batch
|
||||
|
||||
CFG = Config()
|
||||
|
||||
|
||||
def _max_chunk_length(model: str, max: Optional[int] = None) -> int:
|
||||
model_max_input_tokens = OPEN_AI_MODELS[model].max_tokens - 1
|
||||
if max is not None and max > 0:
|
||||
return min(max, model_max_input_tokens)
|
||||
return model_max_input_tokens
|
||||
|
||||
|
||||
def must_chunk_content(
|
||||
text: str, for_model: str, max_chunk_length: Optional[int] = None
|
||||
) -> bool:
|
||||
return count_string_tokens(text, for_model) > _max_chunk_length(
|
||||
for_model, max_chunk_length
|
||||
)
|
||||
|
||||
|
||||
def chunk_content(
|
||||
content: str,
|
||||
for_model: str,
|
||||
max_chunk_length: Optional[int] = None,
|
||||
with_overlap=True,
|
||||
):
|
||||
"""Split content into chunks of approximately equal token length."""
|
||||
|
||||
MAX_OVERLAP = 200 # limit overlap to save tokens
|
||||
|
||||
if not must_chunk_content(content, for_model, max_chunk_length):
|
||||
yield content, count_string_tokens(content, for_model)
|
||||
return
|
||||
|
||||
max_chunk_length = max_chunk_length or _max_chunk_length(for_model)
|
||||
|
||||
tokenizer = tiktoken.encoding_for_model(for_model)
|
||||
|
||||
tokenized_text = tokenizer.encode(content)
|
||||
total_length = len(tokenized_text)
|
||||
n_chunks = ceil(total_length / max_chunk_length)
|
||||
|
||||
chunk_length = ceil(total_length / n_chunks)
|
||||
overlap = min(max_chunk_length - chunk_length, MAX_OVERLAP) if with_overlap else 0
|
||||
|
||||
for token_batch in batch(tokenized_text, chunk_length + overlap, overlap):
|
||||
yield tokenizer.decode(token_batch), len(token_batch)
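An aside (not part of the diff): a quick sketch of how the chunking generator above behaves. Exact token counts depend on the tokenizer tiktoken selects for the chosen model; this assumes `chunk_content` from this module is in scope:

```python
text = "some long document " * 2000  # long enough to require chunking

for chunk, chunk_token_length in chunk_content(text, "gpt-3.5-turbo", max_chunk_length=1000):
    # Each chunk stays within the requested token budget, with a small
    # overlap (up to 200 tokens) carried over from the previous chunk.
    print(chunk_token_length)
```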
|
||||
|
||||
|
||||
def summarize_text(
|
||||
text: str, instruction: Optional[str] = None, question: Optional[str] = None
|
||||
) -> tuple[str, None | list[tuple[str, str]]]:
|
||||
"""Summarize text using the OpenAI API
|
||||
|
||||
Args:
|
||||
text (str): The text to summarize
|
||||
instruction (str): Additional instruction for summarization, e.g. "focus on information related to polar bears", "omit personal information contained in the text"
|
||||
|
||||
Returns:
|
||||
str: The summary of the text
|
||||
list[(summary, chunk)]: Text chunks and their summary, if the text was chunked.
|
||||
None otherwise.
|
||||
"""
|
||||
if not text:
|
||||
raise ValueError("No text to summarize")
|
||||
|
||||
if instruction and question:
|
||||
raise ValueError("Parameters 'question' and 'instructions' cannot both be set")
|
||||
|
||||
model = CFG.fast_llm_model
|
||||
|
||||
if question:
|
||||
instruction = (
|
||||
f'include any information that can be used to answer the question "{question}". '
|
||||
"Do not directly answer the question itself"
|
||||
)
|
||||
|
||||
summarization_prompt = ChatSequence.for_model(model)
|
||||
|
||||
token_length = count_string_tokens(text, model)
|
||||
logger.info(f"Text length: {token_length} tokens")
|
||||
|
||||
# reserve 50 tokens for summary prompt, 500 for the response
|
||||
max_chunk_length = _max_chunk_length(model) - 550
|
||||
logger.info(f"Max chunk length: {max_chunk_length} tokens")
|
||||
|
||||
if not must_chunk_content(text, model, max_chunk_length):
|
||||
# summarization_prompt.add("user", text)
|
||||
summarization_prompt.add(
|
||||
"user",
|
||||
"Write a concise summary of the following text"
|
||||
f"{f'; {instruction}' if instruction is not None else ''}:"
|
||||
"\n\n\n"
|
||||
f'LITERAL TEXT: """{text}"""'
|
||||
"\n\n\n"
|
||||
"CONCISE SUMMARY: The text is best summarized as"
|
||||
# "Only respond with a concise summary or description of the user message."
|
||||
)
|
||||
|
||||
logger.debug(f"Summarizing with {model}:\n{summarization_prompt.dump()}\n")
|
||||
summary = create_chat_completion(
|
||||
summarization_prompt, temperature=0, max_tokens=500
|
||||
)
|
||||
|
||||
logger.debug(f"\n{'-'*16} SUMMARY {'-'*17}\n{summary}\n{'-'*42}\n")
|
||||
return summary.strip(), None
|
||||
|
||||
summaries: list[str] = []
|
||||
chunks = list(split_text(text, for_model=model, max_chunk_length=max_chunk_length))
|
||||
|
||||
for i, (chunk, chunk_length) in enumerate(chunks):
|
||||
logger.info(
|
||||
f"Summarizing chunk {i + 1} / {len(chunks)} of length {chunk_length} tokens"
|
||||
)
|
||||
summary, _ = summarize_text(chunk, instruction)
|
||||
summaries.append(summary)
|
||||
|
||||
logger.info(f"Summarized {len(chunks)} chunks")
|
||||
|
||||
summary, _ = summarize_text("\n\n".join(summaries))
|
||||
|
||||
return summary.strip(), [
|
||||
(summaries[i], chunks[i][0]) for i in range(0, len(chunks))
|
||||
]
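An aside (not part of the diff): a sketch of the new `summarize_text` return shape. It needs an OpenAI API key, `instruction` and `question` are mutually exclusive, and `report.txt` below is just a hypothetical input file:

```python
# Assumes summarize_text from this module is in scope.
long_text = open("report.txt").read()  # any long document

summary, chunk_summaries = summarize_text(
    long_text, question="What was the total revenue in 2022?"
)

print(summary)
if chunk_summaries is not None:
    # The text had to be chunked: each entry pairs a per-chunk summary with its chunk.
    for chunk_summary, chunk in chunk_summaries:
        print("-", chunk_summary)
```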
|
||||
|
||||
|
||||
def split_text(
|
||||
text: str,
|
||||
max_length: int = CFG.browse_chunk_max_length,
|
||||
model: str = CFG.fast_llm_model,
|
||||
question: str = "",
|
||||
) -> Generator[str, None, None]:
|
||||
"""Split text into chunks of a maximum length
|
||||
for_model: str = CFG.fast_llm_model,
|
||||
with_overlap=True,
|
||||
max_chunk_length: Optional[int] = None,
|
||||
):
|
||||
"""Split text into chunks of sentences, with each chunk not exceeding the maximum length
|
||||
|
||||
Args:
|
||||
text (str): The text to split
|
||||
max_length (int, optional): The maximum length of each chunk. Defaults to 8192.
|
||||
for_model (str): The model to chunk for; determines tokenizer and constraints
|
||||
max_length (int, optional): The maximum length of each chunk
|
||||
|
||||
Yields:
|
||||
str: The next chunk of text
|
||||
|
||||
Raises:
|
||||
ValueError: If the text is longer than the maximum length
|
||||
ValueError: when a sentence is longer than the maximum length
|
||||
"""
|
||||
flattened_paragraphs = " ".join(text.split("\n"))
|
||||
nlp = spacy.load(CFG.browse_spacy_language_model)
|
||||
max_length = _max_chunk_length(for_model, max_chunk_length)
|
||||
|
||||
# flatten paragraphs to improve performance
|
||||
text = text.replace("\n", " ")
|
||||
text_length = count_string_tokens(text, for_model)
|
||||
|
||||
if text_length < max_length:
|
||||
yield text, text_length
|
||||
return
|
||||
|
||||
n_chunks = ceil(text_length / max_length)
|
||||
target_chunk_length = ceil(text_length / n_chunks)
|
||||
|
||||
nlp: spacy.language.Language = spacy.load(CFG.browse_spacy_language_model)
|
||||
nlp.add_pipe("sentencizer")
|
||||
doc = nlp(flattened_paragraphs)
|
||||
sentences = [sent.text.strip() for sent in doc.sents]
|
||||
doc = nlp(text)
|
||||
sentences = [sentence.text.strip() for sentence in doc.sents]
|
||||
|
||||
current_chunk = []
|
||||
current_chunk: list[str] = []
|
||||
current_chunk_length = 0
|
||||
last_sentence = None
|
||||
last_sentence_length = 0
|
||||
|
||||
for sentence in sentences:
|
||||
message_with_additional_sentence = [
|
||||
create_message(" ".join(current_chunk) + " " + sentence, question)
|
||||
]
|
||||
i = 0
|
||||
while i < len(sentences):
|
||||
sentence = sentences[i]
|
||||
sentence_length = count_string_tokens(sentence, for_model)
|
||||
expected_chunk_length = current_chunk_length + 1 + sentence_length
|
||||
|
||||
expected_token_usage = (
|
||||
count_message_tokens(messages=message_with_additional_sentence, model=model)
|
||||
+ 1
|
||||
)
|
||||
if expected_token_usage <= max_length:
|
||||
if (
|
||||
expected_chunk_length < max_length
|
||||
# try to create chunks of approximately equal size
|
||||
and expected_chunk_length - (sentence_length / 2) < target_chunk_length
|
||||
):
|
||||
current_chunk.append(sentence)
|
||||
else:
|
||||
yield " ".join(current_chunk)
|
||||
current_chunk = [sentence]
|
||||
message_this_sentence_only = [
|
||||
create_message(" ".join(current_chunk), question)
|
||||
current_chunk_length = expected_chunk_length
|
||||
|
||||
elif sentence_length < max_length:
|
||||
if last_sentence:
|
||||
yield " ".join(current_chunk), current_chunk_length
|
||||
current_chunk = []
|
||||
current_chunk_length = 0
|
||||
|
||||
if with_overlap:
|
||||
overlap_max_length = max_length - sentence_length - 1
|
||||
if last_sentence_length < overlap_max_length:
|
||||
current_chunk += [last_sentence]
|
||||
current_chunk_length += last_sentence_length + 1
|
||||
elif overlap_max_length > 5:
|
||||
# add as much from the end of the last sentence as fits
|
||||
current_chunk += [
|
||||
list(
|
||||
chunk_content(
|
||||
last_sentence,
|
||||
for_model,
|
||||
overlap_max_length,
|
||||
)
|
||||
).pop()[0],
|
||||
]
|
||||
current_chunk_length += overlap_max_length + 1
|
||||
|
||||
current_chunk += [sentence]
|
||||
current_chunk_length += sentence_length
|
||||
|
||||
else: # sentence longer than maximum length -> chop up and try again
|
||||
sentences[i : i + 1] = [
|
||||
chunk
|
||||
for chunk, _ in chunk_content(sentence, for_model, target_chunk_length)
|
||||
]
|
||||
expected_token_usage = (
|
||||
count_message_tokens(messages=message_this_sentence_only, model=model)
|
||||
+ 1
|
||||
)
|
||||
if expected_token_usage > max_length:
|
||||
raise ValueError(
|
||||
f"Sentence is too long in webpage: {expected_token_usage} tokens."
|
||||
)
|
||||
continue
|
||||
|
||||
i += 1
|
||||
last_sentence = sentence
|
||||
last_sentence_length = sentence_length
|
||||
|
||||
if current_chunk:
|
||||
yield " ".join(current_chunk)
|
||||
|
||||
|
||||
def summarize_text(
|
||||
url: str, text: str, question: str, driver: Optional[WebDriver] = None
|
||||
) -> str:
|
||||
"""Summarize text using the OpenAI API
|
||||
|
||||
Args:
|
||||
url (str): The url of the text
|
||||
text (str): The text to summarize
|
||||
question (str): The question to ask the model
|
||||
driver (WebDriver): The webdriver to use to scroll the page
|
||||
|
||||
Returns:
|
||||
str: The summary of the text
|
||||
"""
|
||||
if not text:
|
||||
return "Error: No text to summarize"
|
||||
|
||||
model = CFG.fast_llm_model
|
||||
text_length = len(text)
|
||||
logger.info(f"Text length: {text_length} characters")
|
||||
|
||||
summaries = []
|
||||
chunks = list(
|
||||
split_text(
|
||||
text, max_length=CFG.browse_chunk_max_length, model=model, question=question
|
||||
),
|
||||
)
|
||||
scroll_ratio = 1 / len(chunks)
|
||||
|
||||
for i, chunk in enumerate(chunks):
|
||||
if driver:
|
||||
scroll_to_percentage(driver, scroll_ratio * i)
|
||||
logger.info(f"Adding chunk {i + 1} / {len(chunks)} to memory")
|
||||
|
||||
memory_to_add = f"Source: {url}\n" f"Raw content part#{i + 1}: {chunk}"
|
||||
|
||||
memory = get_memory(CFG)
|
||||
memory.add(memory_to_add)
|
||||
|
||||
messages = [create_message(chunk, question)]
|
||||
tokens_for_chunk = count_message_tokens(messages, model)
|
||||
logger.info(
|
||||
f"Summarizing chunk {i + 1} / {len(chunks)} of length {len(chunk)} characters, or {tokens_for_chunk} tokens"
|
||||
)
|
||||
|
||||
summary = create_chat_completion(
|
||||
model=model,
|
||||
messages=messages,
|
||||
)
|
||||
summaries.append(summary)
|
||||
logger.info(
|
||||
f"Added chunk {i + 1} summary to memory, of length {len(summary)} characters"
|
||||
)
|
||||
|
||||
memory_to_add = f"Source: {url}\n" f"Content summary part#{i + 1}: {summary}"
|
||||
|
||||
memory.add(memory_to_add)
|
||||
|
||||
logger.info(f"Summarized {len(chunks)} chunks.")
|
||||
|
||||
combined_summary = "\n".join(summaries)
|
||||
messages = [create_message(combined_summary, question)]
|
||||
|
||||
return create_chat_completion(
|
||||
model=model,
|
||||
messages=messages,
|
||||
)
|
||||
|
||||
|
||||
def scroll_to_percentage(driver: WebDriver, ratio: float) -> None:
|
||||
"""Scroll to a percentage of the page
|
||||
|
||||
Args:
|
||||
driver (WebDriver): The webdriver to use
|
||||
ratio (float): The percentage to scroll to
|
||||
|
||||
Raises:
|
||||
ValueError: If the ratio is not between 0 and 1
|
||||
"""
|
||||
if ratio < 0 or ratio > 1:
|
||||
raise ValueError("Percentage should be between 0 and 1")
|
||||
driver.execute_script(f"window.scrollTo(0, document.body.scrollHeight * {ratio});")
|
||||
|
||||
|
||||
def create_message(chunk: str, question: str) -> Dict[str, str]:
|
||||
"""Create a message for the chat completion
|
||||
|
||||
Args:
|
||||
chunk (str): The chunk of text to summarize
|
||||
question (str): The question to answer
|
||||
|
||||
Returns:
|
||||
Dict[str, str]: The message to send to the chat completion
|
||||
"""
|
||||
return {
|
||||
"role": "user",
|
||||
"content": f'"""{chunk}""" Using the above text, answer the following'
|
||||
f' question: "{question}" -- if the question cannot be answered using the text,'
|
||||
" summarize the text.",
|
||||
}
|
||||
yield " ".join(current_chunk), current_chunk_length
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
""" A module for generating custom prompt strings."""
|
||||
import json
|
||||
from typing import Any, Callable, Dict, List, Optional
|
||||
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.commands.command import CommandRegistry
|
||||
|
||||
|
||||
class PromptGenerator:
|
||||
@@ -19,7 +22,7 @@ class PromptGenerator:
|
||||
self.resources = []
|
||||
self.performance_evaluation = []
|
||||
self.goals = []
|
||||
self.command_registry = None
|
||||
self.command_registry: CommandRegistry | None = None
|
||||
self.name = "Bob"
|
||||
self.role = "AI"
|
||||
self.response_format = {
|
||||
|
||||
@@ -2,7 +2,8 @@ from colorama import Fore
|
||||
|
||||
from autogpt.config.ai_config import AIConfig
|
||||
from autogpt.config.config import Config
|
||||
from autogpt.llm import ApiManager
|
||||
from autogpt.config.prompt_config import PromptConfig
|
||||
from autogpt.llm.api_manager import ApiManager
|
||||
from autogpt.logs import logger
|
||||
from autogpt.prompts.generator import PromptGenerator
|
||||
from autogpt.setup import prompt_user
|
||||
@@ -27,46 +28,21 @@ def build_default_prompt_generator() -> PromptGenerator:
|
||||
# Initialize the PromptGenerator object
|
||||
prompt_generator = PromptGenerator()
|
||||
|
||||
# Initialize the PromptConfig object and load the file set in the main config (default: prompts_settings.yaml)
|
||||
prompt_config = PromptConfig(CFG.prompt_settings_file)
|
||||
|
||||
# Add constraints to the PromptGenerator object
|
||||
prompt_generator.add_constraint(
|
||||
"~4000 word limit for short term memory. Your short term memory is short, so"
|
||||
" immediately save important information to files."
|
||||
)
|
||||
prompt_generator.add_constraint(
|
||||
"If you are unsure how you previously did something or want to recall past"
|
||||
" events, thinking about similar events will help you remember."
|
||||
)
|
||||
prompt_generator.add_constraint("No user assistance")
|
||||
prompt_generator.add_constraint(
|
||||
'Exclusively use the commands listed in double quotes e.g. "command name"'
|
||||
)
|
||||
for constraint in prompt_config.constraints:
|
||||
prompt_generator.add_constraint(constraint)
|
||||
|
||||
# Add resources to the PromptGenerator object
|
||||
prompt_generator.add_resource(
|
||||
"Internet access for searches and information gathering."
|
||||
)
|
||||
prompt_generator.add_resource("Long Term memory management.")
|
||||
prompt_generator.add_resource(
|
||||
"GPT-3.5 powered Agents for delegation of simple tasks."
|
||||
)
|
||||
prompt_generator.add_resource("File output.")
|
||||
for resource in prompt_config.resources:
|
||||
prompt_generator.add_resource(resource)
|
||||
|
||||
# Add performance evaluations to the PromptGenerator object
|
||||
prompt_generator.add_performance_evaluation(
|
||||
"Continuously review and analyze your actions to ensure you are performing to"
|
||||
" the best of your abilities."
|
||||
)
|
||||
prompt_generator.add_performance_evaluation(
|
||||
"Constructively self-criticize your big-picture behavior constantly."
|
||||
)
|
||||
prompt_generator.add_performance_evaluation(
|
||||
"Reflect on past decisions and strategies to refine your approach."
|
||||
)
|
||||
prompt_generator.add_performance_evaluation(
|
||||
"Every command has a cost, so be smart and efficient. Aim to complete tasks in"
|
||||
" the least number of steps."
|
||||
)
|
||||
prompt_generator.add_performance_evaluation("Write all code to a file.")
|
||||
for performance_evaluation in prompt_config.performance_evaluations:
|
||||
prompt_generator.add_performance_evaluation(performance_evaluation)
|
||||
|
||||
return prompt_generator
|
||||
|
||||
|
||||
@@ -108,6 +84,12 @@ Continue ({CFG.authorise_key}/{CFG.exit_key}): """
|
||||
config = prompt_user()
|
||||
config.save(CFG.ai_settings_file)
|
||||
|
||||
if CFG.restrict_to_workspace:
|
||||
logger.typewriter_log(
|
||||
"NOTE:All files/directories created by this agent can be found inside its workspace at:",
|
||||
Fore.YELLOW,
|
||||
f"{CFG.workspace_path}",
|
||||
)
|
||||
# set the total api budget
|
||||
api_manager = ApiManager()
|
||||
api_manager.set_total_budget(config.api_budget)
|
||||
|
||||
@@ -7,7 +7,8 @@ from jinja2 import Template
|
||||
from autogpt import utils
|
||||
from autogpt.config import Config
|
||||
from autogpt.config.ai_config import AIConfig
|
||||
from autogpt.llm import create_chat_completion
|
||||
from autogpt.llm.base import ChatSequence, Message
|
||||
from autogpt.llm.chat import create_chat_completion
|
||||
from autogpt.logs import logger
|
||||
from autogpt.prompts.default_prompts import (
|
||||
DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC,
|
||||
@@ -175,17 +176,15 @@ def generate_aiconfig_automatic(user_prompt) -> AIConfig:
|
||||
DEFAULT_TASK_PROMPT_AICONFIG_AUTOMATIC
|
||||
).render(user_prompt=user_prompt)
|
||||
# Call LLM with the string as user input
|
||||
messages = [
|
||||
{
|
||||
"role": "system",
|
||||
"content": system_prompt,
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": prompt_ai_config_automatic,
|
||||
},
|
||||
]
|
||||
output = create_chat_completion(messages, CFG.fast_llm_model)
|
||||
output = create_chat_completion(
|
||||
ChatSequence.for_model(
|
||||
CFG.fast_llm_model,
|
||||
[
|
||||
Message("system", system_prompt),
|
||||
Message("user", prompt_ai_config_automatic),
|
||||
],
|
||||
)
|
||||
)
|
||||
|
||||
# Debug LLM Output
|
||||
logger.debug(f"AI Config Generator Raw Output: {output}")
|
||||
|
||||
@@ -20,5 +20,3 @@ class AbstractSingleton(abc.ABC, metaclass=Singleton):
|
||||
"""
|
||||
Abstract singleton class for ensuring only one instance of a class.
|
||||
"""
|
||||
|
||||
pass
|
||||
|
||||
@@ -37,7 +37,6 @@ class VoiceBase(AbstractSingleton):
|
||||
"""
|
||||
Setup the voices, API key, etc.
|
||||
"""
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def _speech(self, text: str, voice_index: int = 0) -> bool:
|
||||
@@ -47,4 +46,3 @@ class VoiceBase(AbstractSingleton):
|
||||
Args:
|
||||
text (str): The text to play.
|
||||
"""
|
||||
pass
|
||||
|
||||
@@ -12,7 +12,6 @@ class BrianSpeech(VoiceBase):
|
||||
|
||||
def _setup(self) -> None:
|
||||
"""Setup the voices, API key, etc."""
|
||||
pass
|
||||
|
||||
def _speech(self, text: str, _: int = 0) -> bool:
|
||||
"""Speak text using Brian with the streamelements API
|
||||
|
||||
@@ -4,7 +4,7 @@ import os
|
||||
import requests
|
||||
from playsound import playsound
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.config.config import Config
|
||||
from autogpt.speech.base import VoiceBase
|
||||
|
||||
PLACEHOLDERS = {"your-voice-id"}
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
import threading
|
||||
from threading import Semaphore
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.config.config import Config
|
||||
from autogpt.speech.base import VoiceBase
|
||||
from autogpt.speech.brian import BrianSpeech
|
||||
from autogpt.speech.eleven_labs import ElevenLabsSpeech
|
||||
|
||||
@@ -8,13 +8,20 @@ import time
|
||||
class Spinner:
|
||||
"""A simple spinner class"""
|
||||
|
||||
def __init__(self, message: str = "Loading...", delay: float = 0.1) -> None:
|
||||
def __init__(
|
||||
self,
|
||||
message: str = "Loading...",
|
||||
delay: float = 0.1,
|
||||
plain_output: bool = False,
|
||||
) -> None:
|
||||
"""Initialize the spinner class
|
||||
|
||||
Args:
|
||||
message (str): The message to display.
|
||||
delay (float): The delay between each spinner update.
|
||||
plain_output (bool): Whether to display the spinner or not.
|
||||
"""
|
||||
self.plain_output = plain_output
|
||||
self.spinner = itertools.cycle(["-", "/", "|", "\\"])
|
||||
self.delay = delay
|
||||
self.message = message
|
||||
@@ -23,11 +30,17 @@ class Spinner:
|
||||
|
||||
def spin(self) -> None:
|
||||
"""Spin the spinner"""
|
||||
if self.plain_output:
|
||||
self.print_message()
|
||||
return
|
||||
while self.running:
|
||||
sys.stdout.write(f"{next(self.spinner)} {self.message}\r")
|
||||
sys.stdout.flush()
|
||||
self.print_message()
|
||||
time.sleep(self.delay)
|
||||
sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")
|
||||
|
||||
def print_message(self):
|
||||
sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")
|
||||
sys.stdout.write(f"{next(self.spinner)} {self.message}\r")
|
||||
sys.stdout.flush()
|
||||
|
||||
def __enter__(self):
|
||||
"""Start the spinner"""
|
||||
@@ -57,9 +70,7 @@ class Spinner:
|
||||
new_message (str): New message to display.
|
||||
delay (float): The delay in seconds between each spinner update.
|
||||
"""
|
||||
time.sleep(delay)
|
||||
sys.stdout.write(
|
||||
f"\r{' ' * (len(self.message) + 2)}\r"
|
||||
) # Clear the current message
|
||||
sys.stdout.flush()
|
||||
self.delay = delay
|
||||
self.message = new_message
|
||||
if self.plain_output:
|
||||
self.print_message()
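An aside (not part of the diff): a usage sketch for the updated spinner, assuming the module path `autogpt.spinner`:

```python
import time

from autogpt.spinner import Spinner

# With plain_output=True the message is printed once instead of being animated,
# which avoids garbled output in logs and non-interactive terminals.
with Spinner("Thinking...", plain_output=True):
    time.sleep(2)  # stand-in for real work
```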
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import functools
|
||||
import re
|
||||
from typing import Any, Callable
|
||||
from urllib.parse import urljoin, urlparse
|
||||
|
||||
@@ -23,13 +24,16 @@ def validate_url(func: Callable[..., Any]) -> Any:
|
||||
ValueError if the url fails any of the validation tests
|
||||
"""
|
||||
# Most basic check if the URL is valid:
|
||||
if not url.startswith("http://") and not url.startswith("https://"):
|
||||
if not re.match(r"^https?://", url):
|
||||
raise ValueError("Invalid URL format")
|
||||
if not is_valid_url(url):
|
||||
raise ValueError("Missing Scheme or Network location")
|
||||
# Restrict access to local files
|
||||
if check_local_file_access(url):
|
||||
raise ValueError("Access to local files is restricted")
|
||||
# Check URL length
|
||||
if len(url) > 2000:
|
||||
raise ValueError("URL is too long")
|
||||
|
||||
return func(sanitize_url(url), *args, **kwargs)
|
||||
|
||||
|
||||
@@ -6,15 +6,17 @@ import yaml
|
||||
from colorama import Fore, Style
|
||||
from git.repo import Repo
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.logs import logger
|
||||
|
||||
# Use readline if available (for clean_input)
|
||||
try:
|
||||
import readline
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from autogpt.config import Config
|
||||
def batch(iterable, max_batch_length: int, overlap: int = 0):
|
||||
"""Batch data from iterable into slices of length N. The last batch may be shorter."""
|
||||
# batched('ABCDEFG', 3) --> ABC DEF G
|
||||
if max_batch_length < 1:
|
||||
raise ValueError("n must be at least one")
|
||||
for i in range(0, len(iterable), max_batch_length - overlap):
|
||||
yield iterable[i : i + max_batch_length]
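An aside (not part of the diff): the `batched('ABCDEFG', 3)` comment above generalizes as follows once `overlap` is used, as `chunk_content` does:

```python
print(list(batch("ABCDEFG", 3)))             # ['ABC', 'DEF', 'G']
print(list(batch("ABCDEFG", 3, overlap=1)))  # ['ABC', 'CDE', 'EFG', 'G']
```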
|
||||
|
||||
|
||||
def clean_input(prompt: str = "", talk=False):
|
||||
@@ -153,3 +155,24 @@ def markdown_to_ansi_style(markdown: str):
|
||||
|
||||
ansi_lines.append(f"{line_style}{line}{Style.RESET_ALL}")
|
||||
return "\n".join(ansi_lines)
|
||||
|
||||
|
||||
def get_legal_warning() -> str:
|
||||
legal_text = """
|
||||
## DISCLAIMER AND INDEMNIFICATION AGREEMENT
|
||||
### PLEASE READ THIS DISCLAIMER AND INDEMNIFICATION AGREEMENT CAREFULLY BEFORE USING THE AUTOGPT SYSTEM. BY USING THE AUTOGPT SYSTEM, YOU AGREE TO BE BOUND BY THIS AGREEMENT.
|
||||
|
||||
## Introduction
|
||||
AutoGPT (the "System") is a project that connects a GPT-like artificial intelligence system to the internet and allows it to automate tasks. While the System is designed to be useful and efficient, there may be instances where the System could perform actions that may cause harm or have unintended consequences.
|
||||
|
||||
## No Liability for Actions of the System
|
||||
The developers, contributors, and maintainers of the AutoGPT project (collectively, the "Project Parties") make no warranties or representations, express or implied, about the System's performance, accuracy, reliability, or safety. By using the System, you understand and agree that the Project Parties shall not be liable for any actions taken by the System or any consequences resulting from such actions.
|
||||
|
||||
## User Responsibility and Respondeat Superior Liability
|
||||
As a user of the System, you are responsible for supervising and monitoring the actions of the System while it is operating on your
|
||||
behalf. You acknowledge that using the System could expose you to potential liability including but not limited to respondeat superior and you agree to assume all risks and liabilities associated with such potential liability.
|
||||
|
||||
## Indemnification
|
||||
By using the System, you agree to indemnify, defend, and hold harmless the Project Parties from and against any and all claims, liabilities, damages, losses, or expenses (including reasonable attorneys' fees and costs) arising out of or in connection with your use of the System, including, without limitation, any actions taken by the System on your behalf, any failure to properly supervise or monitor the System, and any resulting harm or unintended consequences.
|
||||
"""
|
||||
return legal_text
|
||||
|
||||
@@ -120,7 +120,8 @@ class Workspace:
|
||||
|
||||
logger.debug(f"Resolved root as '{root}'")
|
||||
|
||||
if relative_path.is_absolute():
|
||||
# Allow exception for absolute paths if they are contained in your workspace directory.
|
||||
if relative_path.is_absolute() and not relative_path.is_relative_to(root):
|
||||
raise ValueError(
|
||||
f"Attempted to access absolute path '{relative_path}' in workspace '{root}'."
|
||||
)
|
||||
|
||||
@@ -75,14 +75,13 @@ Needs improvement.
|
||||
Not what I need."""
|
||||
# TODO: add questions above, to distract it even more.
|
||||
|
||||
command = f"{sys.executable} -m autogpt"
|
||||
command = [sys.executable, "-m", "autogpt"]
|
||||
|
||||
process = subprocess.Popen(
|
||||
command,
|
||||
stdin=subprocess.PIPE,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
shell=True,
|
||||
)
|
||||
|
||||
stdout_output, stderr_output = process.communicate(input_data.encode())
|
||||
|
||||
@@ -3,7 +3,7 @@ import logging
|
||||
|
||||
from autogpt.commands.file_operations import ingest_file, list_files
|
||||
from autogpt.config import Config
|
||||
from autogpt.memory import get_memory
|
||||
from autogpt.memory.vector import VectorMemory, get_memory
|
||||
|
||||
cfg = Config()
|
||||
|
||||
@@ -21,14 +21,14 @@ def configure_logging():
|
||||
return logging.getLogger("AutoGPT-Ingestion")
|
||||
|
||||
|
||||
def ingest_directory(directory, memory, args):
|
||||
def ingest_directory(directory: str, memory: VectorMemory, args):
|
||||
"""
|
||||
Ingest all files in a directory by calling the ingest_file function for each file.
|
||||
|
||||
:param directory: The directory containing the files to ingest
|
||||
:param memory: An object with an add() method to store the chunks in memory
|
||||
"""
|
||||
global logger
|
||||
logger = logging.getLogger("AutoGPT-Ingestion")
|
||||
try:
|
||||
files = list_files(directory)
|
||||
for file in files:
|
||||
|
||||
@@ -5,17 +5,9 @@ version: "3.9"
|
||||
|
||||
services:
|
||||
auto-gpt:
|
||||
depends_on:
|
||||
- redis
|
||||
build: ./
|
||||
env_file:
|
||||
- .env
|
||||
environment:
|
||||
MEMORY_BACKEND: ${MEMORY_BACKEND:-redis}
|
||||
REDIS_HOST: ${REDIS_HOST:-redis}
|
||||
volumes:
|
||||
- ./:/app
|
||||
profiles: ["exclude-from-up"]
|
||||
|
||||
redis:
|
||||
image: "redis/redis-stack-server:latest"
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Creating Challenges for AutoGPT
|
||||
# Creating Challenges for Auto-GPT
|
||||
|
||||
🏹 We're on the hunt for talented Challenge Creators! 🎯
|
||||
|
||||
@@ -14,7 +14,7 @@ Are you ready to play a pivotal role in Auto-GPT's journey? Apply now to become
|
||||
|
||||
|
||||
# Getting Started
|
||||
Clone the original AutoGPT repo and checkout to master branch
|
||||
Clone the original Auto-GPT repo and checkout to master branch
|
||||
|
||||
|
||||
The challenges are not written using a specific framework. They try to be very agnostic
|
||||
@@ -33,7 +33,7 @@ Create your agent fixture.
|
||||
|
||||
```python
|
||||
def kubernetes_agent(
|
||||
agent_test_config, memory_local_cache, workspace: Workspace
|
||||
agent_test_config, memory_json_file, workspace: Workspace
|
||||
):
|
||||
# Please choose the commands your agent will need to beat the challenges, the full list is available in the main.py
|
||||
# (we 're working on a better way to design this, for now you have to look at main.py)
|
||||
@@ -56,7 +56,7 @@ def kubernetes_agent(
|
||||
agent = Agent(
|
||||
# We also give the AI a name
|
||||
ai_name="Kubernetes-Demo",
|
||||
memory=memory_local_cache,
|
||||
memory=memory_json_file,
|
||||
full_message_history=[],
|
||||
command_registry=command_registry,
|
||||
config=ai_config,
|
||||
@@ -101,7 +101,6 @@ def input_generator(input_sequence: list) -> Generator[str, None, None]:
|
||||
@pytest.mark.skip("This challenge hasn't been beaten yet.")
|
||||
@pytest.mark.vcr
|
||||
@requires_api_key("OPENAI_API_KEY")
|
||||
@run_multiple_times(3)
|
||||
def test_information_retrieval_challenge_a(kubernetes_agent, monkeypatch) -> None:
|
||||
"""
|
||||
Test the challenge_a function in a given agent by mocking user inputs
|
||||
@@ -131,5 +130,3 @@ def test_information_retrieval_challenge_a(kubernetes_agent, monkeypatch) -> Non
|
||||
|
||||
|
||||
```
|
||||
|
||||
|
||||
|
||||
@@ -1,16 +1,19 @@
|
||||
# Information Retrieval Challenge A
|
||||
|
||||
**Status**: Current level to beat: level 1
|
||||
**Status**: Current level to beat: level 2
|
||||
|
||||
**Command to try**:
|
||||
|
||||
```
|
||||
pytest -s tests/integration/challenges/information_retrieval/test_information_retrieval_challenge_a.py
|
||||
pytest -s tests/integration/challenges/information_retrieval/test_information_retrieval_challenge_a.py --level=2
|
||||
```
|
||||
|
||||
## Description
|
||||
|
||||
The agent's goal is to find the revenue of Tesla in 2022.
|
||||
The agent's goal is to find the revenue of Tesla:
|
||||
- level 1 asks the revenue of Tesla in 2022 and explicitly asks to search for 'tesla revenue 2022'
|
||||
- level 2 is identical but doesn't ask to search for 'tesla revenue 2022'
|
||||
- level 3 asks for tesla's revenue by year since its creation.
|
||||
|
||||
It should write the result in a file called output.txt.
|
||||
|
||||
|
||||
docs/challenges/information_retrieval/challenge_b.md (new file, 22 lines)
@@ -0,0 +1,22 @@
# Information Retrieval Challenge B

**Status**: Beaten

**Command to try**:

```
pytest -s tests/integration/challenges/information_retrieval/test_information_retrieval_challenge_b.py
```

## Description

The agent's goal is to find the names, affiliated universities, and discoveries of the individuals who won the Nobel Prize for physics in 2010.

It should write the result in a file called 2010_nobel_prize_winners.txt.

The agent should be able to beat this test consistently (this is the hardest part).

## Objective

The objective of this challenge is to test the agent's ability to retrieve multiple pieces of related information in a consistent way.
The agent should not use Google to perform the task, because it should already know the answer. This is why the task fails after 2 cycles (1 cycle to retrieve the information, 1 cycle to write the file).
@@ -1,3 +1,9 @@
|
||||
!!! warning
|
||||
The Pinecone, Milvus and Weaviate memory backends were rendered incompatible
|
||||
by work on the memory system, and have been removed in `master`.
|
||||
Whether support will be added back in the future is subject to discussion,
|
||||
feel free to pitch in: https://github.com/Significant-Gravitas/Auto-GPT/discussions/4280
|
||||
|
||||
## Setting Your Cache Type
|
||||
|
||||
By default, Auto-GPT set up with Docker Compose will use Redis as its memory backend.
|
||||
@@ -6,7 +12,7 @@ Otherwise, the default is LocalCache (which stores memory in a JSON file).
|
||||
To switch to a different backend, change the `MEMORY_BACKEND` in `.env`
|
||||
to the value that you want:
|
||||
|
||||
* `local` uses a local JSON cache file
|
||||
* `json_file` uses a local JSON cache file
|
||||
* `pinecone` uses the Pinecone.io account you configured in your ENV settings
|
||||
* `redis` will use the redis cache that you configured
|
||||
* `milvus` will use the milvus cache that you configured
|
||||
|
||||
@@ -2,4 +2,6 @@
|
||||
|
||||
Welcome to Auto-GPT. Please follow the [Installation](/setup/) guide to get started.
|
||||
|
||||
It is recommended to use a virtual machine for tasks that require high security measures to prevent any potential harm to the main computer's system and data.
|
||||
NOTE: It is recommended to use a virtual machine/container (Docker) for tasks that require high security measures to prevent any potential harm to the main computer's system and data. If you are considering using Auto-GPT outside a virtualized/containerized environment, you are *strongly* advised to use a separate user account just for running Auto-GPT. This is even more important if you are going to allow Auto-GPT to write/execute scripts and run shell commands!
|
||||
|
||||
It is for these reasons that executing Python scripts is explicitly disabled when running outside a container environment.
|
||||
|
||||
@@ -34,40 +34,43 @@ Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](htt
|
||||
### Set up with Docker
|
||||
|
||||
1. Make sure you have Docker installed, see [requirements](#requirements)
|
||||
2. Pull the latest image from [Docker Hub]
|
||||
2. Create a project directory for Auto-GPT
|
||||
|
||||
:::shell
|
||||
docker pull significantgravitas/auto-gpt
|
||||
mkdir Auto-GPT
|
||||
cd Auto-GPT
|
||||
|
||||
3. Create a folder for Auto-GPT
|
||||
4. In the folder, create a file called `docker-compose.yml` with the following contents:
|
||||
3. In the project directory, create a file called `docker-compose.yml` with the following contents:
|
||||
|
||||
:::yaml
|
||||
version: "3.9"
|
||||
services:
|
||||
auto-gpt:
|
||||
image: significantgravitas/auto-gpt
|
||||
depends_on:
|
||||
- redis
|
||||
env_file:
|
||||
- .env
|
||||
environment:
|
||||
MEMORY_BACKEND: ${MEMORY_BACKEND:-redis}
|
||||
REDIS_HOST: ${REDIS_HOST:-redis}
|
||||
profiles: ["exclude-from-up"]
|
||||
volumes:
|
||||
- ./auto_gpt_workspace:/app/autogpt/auto_gpt_workspace
|
||||
- ./data:/app/data
|
||||
## allow auto-gpt to write logs to disk
|
||||
- ./logs:/app/logs
|
||||
## uncomment following lines if you have / want to make use of these files
|
||||
#- ./azure.yaml:/app/azure.yaml
|
||||
#- ./ai_settings.yaml:/app/ai_settings.yaml
|
||||
redis:
|
||||
image: "redis/redis-stack-server:latest"
|
||||
## uncomment following lines if you want to make use of these files
|
||||
## you must have them existing in the same folder as this docker-compose.yml
|
||||
#- type: bind
|
||||
# source: ./azure.yaml
|
||||
# target: /app/azure.yaml
|
||||
#- type: bind
|
||||
# source: ./ai_settings.yaml
|
||||
# target: /app/ai_settings.yaml
|
||||
|
||||
5. Create the necessary [configuration](#configuration) files. If needed, you can find
|
||||
4. Create the necessary [configuration](#configuration) files. If needed, you can find
|
||||
templates in the [repository].
|
||||
5. Pull the latest image from [Docker Hub]
|
||||
|
||||
:::shell
|
||||
docker pull significantgravitas/auto-gpt
|
||||
|
||||
6. Continue to [Run with Docker](#run-with-docker)
|
||||
|
||||
!!! note "Docker only supports headless browsing"
|
||||
@@ -101,7 +104,7 @@ Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](htt
|
||||
### Set up without Git/Docker
|
||||
|
||||
!!! warning
|
||||
We recommend to use Git or Docker, to make updating easier.
|
||||
We recommend using Git or Docker to make updating easier. Also note that some features, such as Python execution, will only work inside Docker for security reasons.
|
||||
|
||||
1. Download `Source code (zip)` from the [latest stable release](https://github.com/Significant-Gravitas/Auto-GPT/releases/latest)
|
||||
2. Extract the zip-file into a folder
|
||||
@@ -138,9 +141,9 @@ Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](htt
|
||||
|
||||
:::yaml
|
||||
# Please specify all of these values as double-quoted strings
|
||||
# Replace string in angled brackets (<>) to your own ID
|
||||
# Replace string in angled brackets (<>) to your own deployment Name
|
||||
azure_model_map:
|
||||
fast_llm_model_deployment_id: "<my-fast-llm-deployment-id>"
|
||||
fast_llm_model_deployment_id: "<auto-gpt-deployment>"
|
||||
...
|
||||
|
||||
Details can be found in the [openai-python docs], and in the [Azure OpenAI docs] for the embedding model.
|
||||
@@ -169,7 +172,7 @@ If you need to upgrade Docker Compose to a newer version, you can follow the ins
|
||||
|
||||
Once you have a recent version of docker-compose, run the commands below in your Auto-GPT folder.
|
||||
|
||||
1. Build the image. If you have pulled the image from Docker Hub, skip this step.
|
||||
1. Build the image. If you have pulled the image from Docker Hub, skip this step (NOTE: You *will* need to do this if you are modifying requirements.txt to add/remove dependencies like Python libs/frameworks)
|
||||
|
||||
:::shell
|
||||
docker-compose build auto-gpt
|
||||
@@ -211,6 +214,19 @@ docker run -it --env-file=.env -v $PWD:/app --rm auto-gpt --gpt3only --continuou
|
||||
|
||||
### Run without Docker
|
||||
|
||||
#### Create a Virtual Environment
|
||||
|
||||
Create a virtual environment to run in.
|
||||
|
||||
``` shell
|
||||
python -m venv venvAutoGPT
|
||||
source venvAutoGPT/bin/activate
|
||||
pip3 install --upgrade pip
|
||||
```
|
||||
|
||||
!!! warning
|
||||
For security reasons, certain features (like Python execution) are disabled by default when running without Docker. So even if you want to run the program outside a Docker container, you currently still need Docker to actually run scripts.
|
||||
|
||||
Simply run the startup script in your terminal. This will install any necessary Python
|
||||
packages and launch Auto-GPT.
|
||||
|
||||
|
||||
@@ -23,10 +23,13 @@ Running with `--help` lists all the possible command line arguments you can pass
|
||||
Here are some common arguments you can use when running Auto-GPT:
|
||||
|
||||
* Run Auto-GPT with a different AI Settings file
|
||||
|
||||
:::shell
|
||||
./run.sh --ai-settings <filename>
|
||||
|
||||
``` shell
|
||||
./run.sh --ai-settings <filename>
|
||||
```
|
||||
* Run Auto-GPT with a different Prompt Settings file
|
||||
``` shell
|
||||
./run.sh --prompt-settings <filename>
|
||||
```
|
||||
* Specify a memory backend
|
||||
|
||||
:::shell
|
||||
|
||||
hooks/post-checkout (new file, 2 lines)
@@ -0,0 +1,2 @@
#!/bin/sh
git submodule update --init --remote --recursive
hooks/post-rewrite (new file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/sh
case "$1" in
    rebase) git submodule update --init --recursive ;;
esac
Some files were not shown because too many files have changed in this diff.