Merge branch 'master' of https://github.com/BillSchumacher/Auto-GPT into plugin-support
@@ -5,15 +5,10 @@
EXECUTE_LOCAL_COMMANDS=False
# BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunk stored in memory
BROWSE_CHUNK_MAX_LENGTH=8192
# BROWSE_SUMMARY_MAX_TOKEN - Define the maximum length of the summary generated by GPT agent when browsing website
BROWSE_SUMMARY_MAX_TOKEN=300
# USER_AGENT - Define the user-agent used by the requests library to browse website (string)
# USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
# AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml)
AI_SETTINGS_FILE=ai_settings.yaml
# USE_WEB_BROWSER - Sets the web-browser drivers to use with selenium (defaults to chrome).
# Note: set this to either 'chrome', 'firefox', or 'safari' depending on your current browser
# USE_WEB_BROWSER=chrome

################################################################################
### LLM PROVIDER

@@ -54,6 +49,7 @@ SMART_TOKEN_LIMIT=8000
# local - Default
# pinecone - Pinecone (if configured)
# redis - Redis (if configured)
# milvus - Milvus (if configured)
MEMORY_BACKEND=local

### PINECONE

@@ -63,7 +59,7 @@ PINECONE_API_KEY=your-pinecone-api-key
PINECONE_ENV=your-pinecone-region

### REDIS
# REDIS_HOST - Redis host (Default: localhost)
# REDIS_HOST - Redis host (Default: localhost, use "redis" for docker-compose)
# REDIS_PORT - Redis port (Default: 6379)
# REDIS_PASSWORD - Redis password (Default: "")
# WIPE_REDIS_ON_START - Wipes data / index on start (Default: False)

@@ -135,9 +131,16 @@ GITHUB_API_KEY=github_pat_123
GITHUB_USERNAME=your-github-username

################################################################################
### SEARCH PROVIDER
### WEB BROWSING
################################################################################

### BROWSER
# USE_WEB_BROWSER - Sets the web-browser drivers to use with selenium (defaults to chrome).
# HEADLESS_BROWSER - Whether to run the browser in headless mode (defaults to True)
# Note: set this to either 'chrome', 'firefox', or 'safari' depending on your current browser
# USE_WEB_BROWSER=chrome
# HEADLESS_BROWSER=True

### GOOGLE
# GOOGLE_API_KEY - Google API key (Example: my-google-api-key)
# CUSTOM_SEARCH_ENGINE_ID - Custom search engine ID (Example: my-custom-search-engine-id)
.envrc | 4 (new file)
@@ -0,0 +1,4 @@
# Upon entering the directory, direnv asks for the user's permission once, then automatically loads the project dependencies from then on.
# This removes the need for Nix users to run "nix develop github:superherointj/nix-auto-gpt" to develop/use Auto-GPT.

[[ -z $IN_NIX_SHELL ]] && use flake github:superherointj/nix-auto-gpt
.flake8 | 10
@@ -1,12 +1,12 @@
[flake8]
max-line-length = 88
extend-ignore = E203
select = "E303, W293, W291, W292, E305, E231, E302"
exclude =
    .tox,
    __pycache__,
    *.pyc,
    .env
    venv/*
    .venv/*
    reports/*
    dist/*
    venv*/*,
    .venv/*,
    reports/*,
    dist/*,
.github/ISSUE_TEMPLATE/1.bug.yml | 69
@@ -2,6 +2,20 @@ name: Bug report 🐛
description: Create a bug report for Auto-GPT.
labels: ['status: needs triage']
body:
  - type: markdown
    attributes:
      value: |
        ### ⚠️ Before you continue
        * Check out our [backlog], [roadmap] and join our [discord] to discuss what's going on
        * If you need help, you can ask in the [discussions] section or in [#tech-support]
        * **Thoroughly search the [existing issues] before creating a new one**

        [backlog]: https://github.com/orgs/Significant-Gravitas/projects/1
        [roadmap]: https://github.com/orgs/Significant-Gravitas/projects/2
        [discord]: https://discord.gg/autogpt
        [discussions]: https://github.com/Significant-Gravitas/Auto-GPT/discussions
        [#tech-support]: https://discord.com/channels/1092243196446249134/1092275629602394184
        [existing issues]: https://github.com/Significant-Gravitas/Auto-GPT/issues?q=is%3Aissue
  - type: checkboxes
    attributes:
      label: ⚠️ Search for existing issues first ⚠️
@@ -28,14 +42,32 @@ body:
        - Provide commit-hash (`git rev-parse HEAD` gets it)
        - If it's a pip/packages issue, provide pip version, python version
        - If it's a crash, provide traceback.
  - type: checkboxes
  - type: dropdown
    attributes:
      label: GPT-3 or GPT-4
      label: Which Operating System are you using?
      description: >
        Please select the operating system you were using to run Auto-GPT when this problem occurred.
      options:
        - Windows
        - Linux
        - MacOS
        - Docker
        - Devcontainer / Codespace
        - Windows Subsystem for Linux (WSL)
        - Other (Please specify in your problem)
    validations:
      required: true
  - type: dropdown
    attributes:
      label: GPT-3 or GPT-4?
      description: >
        If you are using Auto-GPT with `--gpt3only`, your problems may be caused by
        the limitations of GPT-3.5
        the [limitations](https://github.com/Significant-Gravitas/Auto-GPT/issues?q=is%3Aissue+label%3A%22AI+model+limitation%22) of GPT-3.5.
      options:
        - label: I am using Auto-GPT with GPT-3 (GPT-3.5)
        - GPT-3.5
        - GPT-4
    validations:
      required: true
  - type: textarea
    attributes:
      label: Steps to reproduce 🕹
@@ -52,9 +84,34 @@ body:
  - type: textarea
    attributes:
      label: Your prompt 📝
      description: |
        If applicable please provide the prompt you are using. You can find your last-used prompt in last_run_ai_settings.yaml.
      description: >
        If applicable please provide the prompt you are using. Your prompt is stored in your `ai_settings.yaml` file.
      value: |
        ```yaml
        # Paste your prompt here
        ```
  - type: textarea
    attributes:
      label: Your Logs 📒
      description: |
        Please include the log showing your error and the command that caused it, if applicable.
        You can copy it from your terminal or from `logs/activity.log`.
        This will help us understand your issue better!

        <details>
        <summary><i>Example</i></summary>
        ```log
        INFO  NEXT ACTION:  COMMAND = execute_shell  ARGUMENTS = {'command_line': 'some_command'}
        INFO  -=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=
        Traceback (most recent call last):
          File "/home/anaconda3/lib/python3.9/site-packages/openai/api_requestor.py", line 619, in _interpret_response
            self._interpret_response_line(
          File "/home/anaconda3/lib/python3.9/site-packages/openai/api_requestor.py", line 682, in _interpret_response_line
            raise self.handle_error_response(
        openai.error.InvalidRequestError: This model's maximum context length is 8191 tokens, however you requested 10982 tokens (10982 in your prompt; 0 for the completion). Please reduce your prompt; or completion length.
        ```
        </details>
      value: |
        ```log
        <insert your logs here>
        ```
.github/PULL_REQUEST_TEMPLATE.md | 9
@@ -1,3 +1,10 @@
<!-- ⚠️ At the moment any non-essential commands are not being merged.
If you want to add non-essential commands to Auto-GPT, please create a plugin instead.
We are expecting to ship plugin support within the week (PR #757).
Resources:
* https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template
-->

<!-- 📢 Announcement
We've recently noticed an increase in pull requests focusing on combining multiple changes. While the intentions behind these PRs are appreciated, it's essential to maintain a clean and manageable git history. To ensure the quality of our repository, we kindly ask you to adhere to the following guidelines when submitting PRs:

@@ -30,4 +37,4 @@ By following these guidelines, your PRs are more likely to be merged quickly aft

<!-- If you haven't added tests, please explain why. If you have, check the appropriate box. If you've ensured your PR is atomic and well-documented, check the corresponding boxes. -->

<!-- By submitting this, I agree that my pull request should be closed if I do not fill this out or follow the guide lines. -->
<!-- By submitting this, I agree that my pull request should be closed if I do not fill this out or follow the guidelines. -->
.github/workflows/ci.yml | 15
@@ -6,7 +6,10 @@ on:
      - master
  pull_request:
    branches:
      - master
      - '**'
  pull_request_target:
    branches:
      - '**'

jobs:
  build:
@@ -32,7 +35,15 @@ jobs:

      - name: Lint with flake8
        continue-on-error: false
        run: flake8 autogpt/ tests/ --select E303,W293,W291,W292,E305,E231,E302
        run: flake8

      - name: Check black formatting
        continue-on-error: false
        run: black . --check

      - name: Check isort formatting
        continue-on-error: false
        run: isort . --check

      - name: Run unittest tests with coverage
        run: |
.github/workflows/pr-label.yml | 28 (new file)
@@ -0,0 +1,28 @@
name: "Pull Request auto-label"
on:
  # So that PRs touching the same files as the push are updated
  push:
  # So that the `dirtyLabel` is removed if conflicts are resolved
  # We recommend `pull_request_target` so that GitHub secrets are available.
  # In `pull_request` we wouldn't be able to change labels of fork PRs
  pull_request_target:
    types: [opened, synchronize]
concurrency:
  group: ${{ format('pr-label-{0}', github.event.pull_request.number || github.sha) }}
  cancel-in-progress: true

jobs:
  conflicts:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      pull-requests: write
    steps:
      - name: Update PRs with conflict labels
        uses: eps1lon/actions-label-merge-conflict@releases/2.x
        with:
          dirtyLabel: "conflicts"
          #removeOnDirtyLabel: "PR: ready to ship"
          repoToken: "${{ secrets.GITHUB_TOKEN }}"
          commentOnDirty: "This pull request has conflicts with the base branch, please resolve those so we can evaluate the pull request."
          commentOnClean: "Conflicts have been resolved! 🎉 A maintainer will review the pull request shortly."
.gitignore | 4
@@ -127,6 +127,7 @@ celerybeat.pid
*.sage.py

# Environments
.direnv/
.env
.venv
env/
@@ -158,3 +159,6 @@ vicuna-*
.DS_Store

openai/

# news
CURRENT_BULLETIN.md
.isort.cfg | 10 (deleted)
@@ -1,10 +0,0 @@
[settings]
profile = black
multi_line_output = 3
include_trailing_comma = True
force_grid_wrap = 0
use_parentheses = True
ensure_newline_before_comments = True
line_length = 88
skip = venv,env,node_modules,.env,.venv,dist
sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER
@@ -1,36 +1,29 @@
repos:
  - repo: https://github.com/sourcery-ai/sourcery
    rev: v1.1.0 # Get the latest tag from https://github.com/sourcery-ai/sourcery/tags
    hooks:
      - id: sourcery

  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v0.9.2
    hooks:
      - id: check-added-large-files
        args: [ '--maxkb=500' ]
        args: ['--maxkb=500']
      - id: check-byte-order-marker
      - id: check-case-conflict
      - id: check-merge-conflict
      - id: check-symlinks
      - id: debug-statements

  - repo: local
  - repo: https://github.com/pycqa/isort
    rev: 5.12.0
    hooks:
      - id: isort
        name: isort-local
        entry: isort
        language: python
        types: [ python ]
        exclude: .+/(dist|.venv|venv|build)/.+
        pass_filenames: true
        language_version: python3.10

  - repo: https://github.com/psf/black
    rev: 23.3.0
    hooks:
      - id: black
        name: black-local
        entry: black
        language: python
        types: [ python ]
        exclude: .+/(dist|.venv|venv|build)/.+
        pass_filenames: true
        language_version: python3.10

  - repo: local
    hooks:
      - id: pytest-check
        name: pytest-check
        entry: pytest --cov=autogpt --without-integration --without-slow-integration
BULLETIN.md | 2 (new file)
@@ -0,0 +1,2 @@
Welcome to Auto-GPT! We'll keep you informed of the latest news and features by printing messages here.
If you don't wish to see this message, you can run Auto-GPT with the --skip-news flag
@@ -20,6 +20,12 @@ This document provides guidelines and best practices to help you contribute effe

By participating in this project, you agree to abide by our [Code of Conduct](CODE_OF_CONDUCT.md). Please read it to understand the expectations we have for everyone who contributes to this project.

## 📢 A Quick Word
Right now we will not be accepting any Contributions that add non-essential commands to Auto-GPT.

However, you absolutely can still add these commands to Auto-GPT in the form of plugins. Please check out this [template](https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template).
> ⚠️ Plugin support is expected to ship within the week. You can follow PR #757 for more updates!

## Getting Started

To start contributing, follow these steps:
Dockerfile | 15
@@ -5,6 +5,16 @@ FROM python:3.11-slim
RUN apt-get -y update
RUN apt-get -y install git chromium-driver

# Install Xvfb and other dependencies for headless browser testing
RUN apt-get update \
    && apt-get install -y wget gnupg2 libgtk-3-0 libdbus-glib-1-2 dbus-x11 xvfb ca-certificates

# Install Firefox / Chromium
RUN wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - \
    && echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list \
    && apt-get update \
    && apt-get install -y chromium firefox-esr

# Set environment variables
ENV PIP_NO_CACHE_DIR=yes \
    PYTHONUNBUFFERED=1 \
@@ -17,8 +27,9 @@ RUN chown appuser:appuser /home/appuser
USER appuser

# Copy the requirements.txt file and install the requirements
COPY --chown=appuser:appuser requirements-docker.txt .
RUN pip install --no-cache-dir --user -r requirements-docker.txt
COPY --chown=appuser:appuser requirements.txt .
RUN sed -i '/Items below this point will not be included in the Docker Image/,$d' requirements.txt && \
    pip install --no-cache-dir --user -r requirements.txt

# Copy the application files
COPY --chown=appuser:appuser autogpt/ ./autogpt
@@ -1,3 +1,10 @@
"""Auto-GPT: A GPT powered AI Assistant"""
import autogpt.cli

if __name__ == "__main__":
    autogpt.cli.main()


"""Main script for the autogpt package."""
import logging

@@ -71,7 +78,3 @@ def main() -> None:
        triggering_prompt=triggering_prompt,
    )
    agent.start_interaction_loop()


if __name__ == "__main__":
    main()
@@ -130,10 +130,12 @@ class Agent:
                console_input = clean_input(
                    Fore.MAGENTA + "Input:" + Style.RESET_ALL
                )

                if console_input.lower().rstrip() == "y":
                if console_input.lower().strip() == "y":
                    user_input = "GENERATE NEXT COMMAND JSON"
                    break
                elif console_input.lower().strip() == "":
                    print("Invalid input format.")
                    continue
                elif console_input.lower().startswith("y -"):
                    try:
                        self.next_action_count = abs(
@@ -2,6 +2,9 @@
from __future__ import annotations

from typing import List

from typing import Union

from autogpt.config.config import Config, Singleton
from autogpt.llm_utils import create_chat_completion
from autogpt.types.openai import Message
@@ -1,14 +1,20 @@
""" Command and Control """
import json
from typing import List, NoReturn, Union, Dict
from typing import Dict, List, NoReturn, Union

from autogpt.agent.agent_manager import AgentManager
from autogpt.commands.audio_text import read_audio_from_file
from autogpt.commands.command import CommandRegistry, command
from autogpt.commands.evaluate_code import evaluate_code
from autogpt.commands.execute_code import execute_python_file, execute_shell
from autogpt.commands.execute_code import (
    execute_python_file,
    execute_shell,
    execute_shell_popen,
)
from autogpt.commands.file_operations import (
    append_to_file,
    delete_file,
    download_file,
    read_file,
    search_files,
    write_to_file,
@@ -118,9 +124,8 @@ def execute_command(
        arguments (dict): The arguments for the command

    Returns:
        str: The result of the command"""
    memory = get_memory(CFG)

        str: The result of the command
    """
    try:
        cmd = command_registry.commands.get(command_name)

@@ -129,7 +134,7 @@ def execute_command(
            return cmd(**arguments)

        # TODO: Remove commands below after they are moved to the command registry.
        command_name = map_command_synonyms(command_name)
        command_name = map_command_synonyms(command_name.lower())

        if command_name == "memory_add":
            return memory.add(arguments["string"])
@@ -141,7 +146,15 @@ def execute_command(
            if not CFG.allow_downloads:
                return "Error: You do not have user authorization to download files locally."
            return download_file(arguments["url"], arguments["file"])

        elif command_name == "execute_shell_popen":
            if CFG.execute_local_commands:
                return execute_shell_popen(arguments["command_line"])
            else:
                return (
                    "You are not allowed to run local shell commands. To execute"
                    " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
                    "in your config. Do not attempt to bypass the restriction."
                )
        # TODO: Change these to take in a file rather than pasted code, if
        # non-file is given, return instructions "Input should be a python
        # filepath, write your code to file and try again
@@ -163,7 +176,7 @@ def execute_command(


def get_text_summary(url: str, question: str) -> str:
    """Return the results of a google search
    """Return the results of a Google search

    Args:
        url (str): The url to scrape
@@ -178,7 +191,7 @@ def get_text_summary(url: str, question: str) -> str:


def get_hyperlinks(url: str) -> Union[str, List[str]]:
    """Return the results of a google search
    """Return the results of a Google search

    Args:
        url (str): The url to scrape
autogpt/cli.py | 137 (new file)
@@ -0,0 +1,137 @@
"""Main script for the autogpt package."""
import click


@click.group(invoke_without_command=True)
@click.option("-c", "--continuous", is_flag=True, help="Enable Continuous Mode")
@click.option(
    "--skip-reprompt",
    "-y",
    is_flag=True,
    help="Skips the re-prompting messages at the beginning of the script",
)
@click.option(
    "--ai-settings",
    "-C",
    help="Specifies which ai_settings.yaml file to use, will also automatically skip the re-prompt.",
)
@click.option(
    "-l",
    "--continuous-limit",
    type=int,
    help="Defines the number of times to run in continuous mode",
)
@click.option("--speak", is_flag=True, help="Enable Speak Mode")
@click.option("--debug", is_flag=True, help="Enable Debug Mode")
@click.option("--gpt3only", is_flag=True, help="Enable GPT3.5 Only Mode")
@click.option("--gpt4only", is_flag=True, help="Enable GPT4 Only Mode")
@click.option(
    "--use-memory",
    "-m",
    "memory_type",
    type=str,
    help="Defines which Memory backend to use",
)
@click.option(
    "-b",
    "--browser-name",
    help="Specifies which web-browser to use when using selenium to scrape the web.",
)
@click.option(
    "--allow-downloads",
    is_flag=True,
    help="Dangerous: Allows Auto-GPT to download files natively.",
)
@click.option(
    "--skip-news",
    is_flag=True,
    help="Specifies whether to suppress the output of latest news on startup.",
)
@click.pass_context
def main(
    ctx: click.Context,
    continuous: bool,
    continuous_limit: int,
    ai_settings: str,
    skip_reprompt: bool,
    speak: bool,
    debug: bool,
    gpt3only: bool,
    gpt4only: bool,
    memory_type: str,
    browser_name: str,
    allow_downloads: bool,
    skip_news: bool,
) -> None:
    """
    Welcome to AutoGPT an experimental open-source application showcasing the capabilities of the GPT-4 pushing the boundaries of AI.

    Start an Auto-GPT assistant.
    """
    # Put imports inside function to avoid importing everything when starting the CLI
    import logging

    from colorama import Fore

    from autogpt.agent.agent import Agent
    from autogpt.config import Config, check_openai_api_key
    from autogpt.configurator import create_config
    from autogpt.logs import logger
    from autogpt.memory import get_memory
    from autogpt.prompt import construct_prompt
    from autogpt.utils import get_latest_bulletin

    if ctx.invoked_subcommand is None:
        cfg = Config()
        # TODO: fill in llm values here
        check_openai_api_key()
        create_config(
            continuous,
            continuous_limit,
            ai_settings,
            skip_reprompt,
            speak,
            debug,
            gpt3only,
            gpt4only,
            memory_type,
            browser_name,
            allow_downloads,
            skip_news,
        )
        logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO)
        ai_name = ""
        if not cfg.skip_news:
            motd = get_latest_bulletin()
            if motd:
                logger.typewriter_log("NEWS: ", Fore.GREEN, motd)
        system_prompt = construct_prompt()
        # print(prompt)
        # Initialize variables
        full_message_history = []
        next_action_count = 0
        # Make a constant:
        triggering_prompt = (
            "Determine which next command to use, and respond using the"
            " format specified above:"
        )
        # Initialize memory and make sure it is empty.
        # this is particularly important for indexing and referencing pinecone memory
        memory = get_memory(cfg, init=True)
        logger.typewriter_log(
            "Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
        )
        logger.typewriter_log("Using Browser:", Fore.GREEN, cfg.selenium_web_browser)
        agent = Agent(
            ai_name=ai_name,
            memory=memory,
            full_message_history=full_message_history,
            next_action_count=next_action_count,
            system_prompt=system_prompt,
            triggering_prompt=triggering_prompt,
        )
        agent.start_interaction_loop()


if __name__ == "__main__":
    main()
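The new `autogpt/cli.py` moves all argument handling onto Click decorators. A quick way to sanity-check such an entry point without touching the OpenAI API is Click's built-in test runner; the sketch below is illustrative and only exercises `--help`, which short-circuits before any configuration or API key is needed:

```python
# Minimal sketch: drive the Click entry point in-process with CliRunner.
# Assumes only that autogpt.cli.main is importable.
from click.testing import CliRunner

from autogpt.cli import main

runner = CliRunner()
result = runner.invoke(main, ["--help"])
assert result.exit_code == 0
print(result.output)  # lists the options declared via @click.option above
```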
@@ -13,7 +13,7 @@ CFG = Config()


@command("execute_python_file", "Execute Python File", '"file": "<file>"')
def execute_python_file(file: str):
def execute_python_file(file: str) -> str:
    """Execute a Python file in a Docker container and return the output

    Args:
@@ -45,7 +45,10 @@ def execute_python_file(file: str):
    try:
        client = docker.from_env()

        image_name = "python:3.10"
        # You can replace this with the desired Python image/version
        # You can find available Python images on Docker Hub:
        # https://hub.docker.com/_/python
        image_name = "python:3-alpine"
        try:
            client.images.get(image_name)
            print(f"Image '{image_name}' found locally")
@@ -62,9 +65,6 @@ def execute_python_file(file: str):
        elif status:
            print(status)

        # You can replace 'python:3.8' with the desired Python image/version
        # You can find available Python images on Docker Hub:
        # https://hub.docker.com/_/python
        container = client.containers.run(
            image_name,
            f"python {file}",
@@ -135,6 +135,35 @@ def execute_shell(command_line: str) -> str:
    return output


def execute_shell_popen(command_line) -> str:
    """Execute a shell command with Popen and return an English description
    of the event and the process id

    Args:
        command_line (str): The command line to execute

    Returns:
        str: Description of the fact that the process started and its id
    """
    current_dir = os.getcwd()
    # Change dir into workspace if necessary
    if str(WORKSPACE_PATH) not in current_dir:
        os.chdir(WORKSPACE_PATH)

    print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'")

    do_not_show_output = subprocess.DEVNULL
    process = subprocess.Popen(
        command_line, shell=True, stdout=do_not_show_output, stderr=do_not_show_output
    )

    # Change back to whatever the prior working dir was
    os.chdir(current_dir)

    return f"Subprocess started with PID:'{str(process.pid)}'"
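Unlike `execute_shell`, which blocks until the command finishes, the new `execute_shell_popen` returns as soon as the child is spawned and only reports its PID. A minimal sketch of that fire-and-forget pattern (the `sleep 5` command line is just a placeholder):

```python
# Fire-and-forget shell execution, as in execute_shell_popen: Popen returns
# immediately with a process handle while the child keeps running.
import subprocess

process = subprocess.Popen(
    "sleep 5",  # placeholder long-running command
    shell=True,
    stdout=subprocess.DEVNULL,
    stderr=subprocess.DEVNULL,
)
print(f"Subprocess started with PID:'{process.pid}'")
print(process.poll())  # None while the child is still running
```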

def we_are_running_in_a_docker_container() -> bool:
    """Check if we are running in a Docker container
@@ -5,10 +5,11 @@ import os
import os.path
from pathlib import Path
from typing import Generator, List

import requests
from requests.adapters import HTTPAdapter
from requests.adapters import Retry
from colorama import Fore, Back
from colorama import Back, Fore
from requests.adapters import HTTPAdapter, Retry

from autogpt.spinner import Spinner
from autogpt.utils import readable_file_size

@@ -72,9 +73,14 @@ def split_file(
    while start < content_length:
        end = start + max_length
        if end + overlap < content_length:
            chunk = content[start : end + overlap]
            chunk = content[start : end + overlap - 1]
        else:
            chunk = content[start:content_length]

            # Account for the case where the last chunk is shorter than the overlap, so it has already been consumed
            if len(chunk) <= overlap:
                break

        yield chunk
        start += max_length - overlap
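The `- 1` caps each chunk at `max_length + overlap - 1` characters, and the new length check stops the generator once the remaining tail was already covered by the previous chunk's overlap. A standalone re-implementation, for illustration only, shows the behaviour on a small input:

```python
# Illustrative re-implementation of split_file's overlapping chunking.
def split(content: str, max_length: int, overlap: int):
    start = 0
    content_length = len(content)
    while start < content_length:
        end = start + max_length
        if end + overlap < content_length:
            chunk = content[start : end + overlap - 1]
        else:
            chunk = content[start:content_length]
            # The tail may already be fully covered by the previous overlap.
            if len(chunk) <= overlap:
                break
        yield chunk
        start += max_length - overlap


print(list(split("abcdefghijklmnop", max_length=8, overlap=4)))
# ['abcdefghijk', 'efghijklmnop', 'ijklmnop'] -- the final 'mnop' tail is
# skipped because the previous chunk already contained it.
```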
@@ -16,7 +16,7 @@ CFG = Config()
    "Configure github_username and github_api_key.",
)
def clone_repository(repo_url: str, clone_path: str) -> str:
    """Clone a github repository locally
    """Clone a GitHub repository locally

    Args:
        repo_url (str): The URL of the repository to clone
@@ -13,7 +13,7 @@ CFG = Config()

@command("google", "Google Search", '"query": "<search>"', not CFG.google_api_key)
def google_search(query: str, num_results: int = 8) -> str:
    """Return the results of a google search
    """Return the results of a Google search

    Args:
        query (str): The search query.
@@ -45,7 +45,7 @@ def google_search(query: str, num_results: int = 8) -> str:
    "Configure google_api_key.",
)
def google_official_search(query: str, num_results: int = 8) -> str | list[str]:
    """Return the results of a google search using the official Google API
    """Return the results of a Google search using the official Google API

    Args:
        query (str): The search query.
@@ -58,9 +58,28 @@ def check_local_file_access(url: str) -> bool:
    """
    local_prefixes = [
        "file:///",
        "file://localhost/",
        "file://localhost",
        "http://localhost",
        "http://localhost/",
        "https://localhost",
        "https://localhost/",
        "http://2130706433",
        "http://2130706433/",
        "https://2130706433",
        "https://2130706433/",
        "http://127.0.0.1/",
        "http://127.0.0.1",
        "https://127.0.0.1/",
        "https://127.0.0.1",
        "https://0.0.0.0/",
        "https://0.0.0.0",
        "http://0.0.0.0/",
        "http://0.0.0.0",
        "http://0000",
        "http://0000/",
        "https://0000",
        "https://0000/",
    ]
    return any(url.startswith(prefix) for prefix in local_prefixes)
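The expanded list blocks the common loopback spellings (localhost, 127.0.0.1, 0.0.0.0, and 2130706433, the decimal form of 127.0.0.1) over both http and https. A small illustration of the same prefix check:

```python
# Illustrative subset of check_local_file_access's prefix matching.
local_prefixes = ["file:///", "http://localhost", "http://127.0.0.1", "http://2130706433"]


def is_local(url: str) -> bool:
    return any(url.startswith(prefix) for prefix in local_prefixes)


assert is_local("http://localhost:8000/admin")
assert is_local("http://2130706433/")  # 2130706433 == 0x7F000001 == 127.0.0.1
assert not is_local("https://example.com")
```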
@@ -3,6 +3,7 @@ from __future__ import annotations

import logging
from pathlib import Path
from sys import platform

from bs4 import BeautifulSoup
from selenium import webdriver
@@ -83,7 +84,15 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
        # See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari
        driver = webdriver.Safari(options=options)
    else:
        if platform == "linux" or platform == "linux2":
            options.add_argument("--disable-dev-shm-usage")
            options.add_argument("--remote-debugging-port=9222")

        options.add_argument("--no-sandbox")
        if CFG.selenium_headless:
            options.add_argument("--headless")
            options.add_argument("--disable-gpu")

        driver = webdriver.Chrome(
            executable_path=ChromeDriverManager().install(), options=options
        )
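With `HEADLESS_BROWSER=True` (the new default), the Chrome branch adds `--headless` and `--disable-gpu`. A minimal standalone sketch of the same pattern, assuming Chrome and a matching chromedriver are installed:

```python
# Minimal headless-Chrome scrape mirroring the options set above (illustrative).
from bs4 import BeautifulSoup
from selenium import webdriver

options = webdriver.ChromeOptions()
options.add_argument("--no-sandbox")
options.add_argument("--headless")
options.add_argument("--disable-gpu")

driver = webdriver.Chrome(options=options)  # assumes chromedriver on PATH
try:
    driver.get("https://example.com")
    soup = BeautifulSoup(driver.page_source, "html.parser")
    print(soup.get_text(strip=True)[:200])
finally:
    driver.quit()
```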
@@ -26,15 +26,14 @@ class Config(metaclass=Singleton):
        self.speak_mode = False
        self.skip_reprompt = False
        self.allow_downloads = False
        self.skip_news = False

        self.selenium_web_browser = os.getenv("USE_WEB_BROWSER", "chrome")
        self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml")
        self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
        self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
        self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
        self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
        self.browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", 8192))
        self.browse_summary_max_token = int(os.getenv("BROWSE_SUMMARY_MAX_TOKEN", 300))

        self.openai_api_key = os.getenv("OPENAI_API_KEY")
        self.temperature = float(os.getenv("TEMPERATURE", "1"))
@@ -90,7 +89,11 @@ class Config(metaclass=Singleton):
            "HUGGINGFACE_AUDIO_TO_TEXT_MODEL"
        )

        # User agent headers to use when browsing web
        # Selenium browser settings
        self.selenium_web_browser = os.getenv("USE_WEB_BROWSER", "chrome")
        self.selenium_headless = os.getenv("HEADLESS_BROWSER", "True") == "True"

        # User agent header to use when making HTTP requests
        # Some websites might just completely deny request with an error code if
        # no user agent was found.
        self.user_agent = os.getenv(
@@ -98,6 +101,7 @@ class Config(metaclass=Singleton):
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36"
            " (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
        )

        self.redis_host = os.getenv("REDIS_HOST", "localhost")
        self.redis_port = os.getenv("REDIS_PORT", "6379")
        self.redis_password = os.getenv("REDIS_PASSWORD", "")
@@ -197,10 +201,6 @@ class Config(metaclass=Singleton):
        """Set the browse_website command chunk max length value."""
        self.browse_chunk_max_length = value

    def set_browse_summary_max_token(self, value: int) -> None:
        """Set the browse_website command summary max token value."""
        self.browse_summary_max_token = value

    def set_openai_api_key(self, value: str) -> None:
        """Set the OpenAI API key value."""
        self.openai_api_key = value
@@ -250,5 +250,5 @@ def check_openai_api_key() -> None:
            Fore.RED
            + "Please set your OpenAI API key in .env or as an environment variable."
        )
        print("You can get your key from https://beta.openai.com/account/api-keys")
        print("You can get your key from https://platform.openai.com/account/api-keys")
        exit(1)
@@ -1,7 +1,7 @@
"""This module contains the argument parsing logic for the script."""
import argparse
"""Configurator module."""
import click
from colorama import Back, Fore, Style

from colorama import Fore, Back, Style
from autogpt import utils
from autogpt.config import Config
from autogpt.logs import logger
@@ -10,72 +10,45 @@ from autogpt.memory import get_supported_memory_backends
CFG = Config()


def parse_arguments() -> None:
    """Parses the arguments passed to the script
def create_config(
    continuous: bool,
    continuous_limit: int,
    ai_settings_file: str,
    skip_reprompt: bool,
    speak: bool,
    debug: bool,
    gpt3only: bool,
    gpt4only: bool,
    memory_type: str,
    browser_name: str,
    allow_downloads: bool,
    skip_news: bool,
) -> None:
    """Updates the config object with the given arguments.

    Returns:
        None
    Args:
        continuous (bool): Whether to run in continuous mode
        continuous_limit (int): The number of times to run in continuous mode
        ai_settings_file (str): The path to the ai_settings.yaml file
        skip_reprompt (bool): Whether to skip the re-prompting messages at the beginning of the script
        speak (bool): Whether to enable speak mode
        debug (bool): Whether to enable debug mode
        gpt3only (bool): Whether to enable GPT3.5 only mode
        gpt4only (bool): Whether to enable GPT4 only mode
        memory_type (str): The type of memory backend to use
        browser_name (str): The name of the browser to use when using selenium to scrape the web
        allow_downloads (bool): Whether to allow Auto-GPT to download files natively
        skip_news (bool): Whether to suppress the output of latest news on startup
    """
    CFG.set_debug_mode(False)
    CFG.set_continuous_mode(False)
    CFG.set_speak_mode(False)

    parser = argparse.ArgumentParser(description="Process arguments.")
    parser.add_argument(
        "--continuous", "-c", action="store_true", help="Enable Continuous Mode"
    )
    parser.add_argument(
        "--continuous-limit",
        "-l",
        type=int,
        dest="continuous_limit",
        help="Defines the number of times to run in continuous mode",
    )
    parser.add_argument("--speak", action="store_true", help="Enable Speak Mode")
    parser.add_argument("--debug", action="store_true", help="Enable Debug Mode")
    parser.add_argument(
        "--gpt3only", action="store_true", help="Enable GPT3.5 Only Mode"
    )
    parser.add_argument("--gpt4only", action="store_true", help="Enable GPT4 Only Mode")
    parser.add_argument(
        "--use-memory",
        "-m",
        dest="memory_type",
        help="Defines which Memory backend to use",
    )
    parser.add_argument(
        "--skip-reprompt",
        "-y",
        dest="skip_reprompt",
        action="store_true",
        help="Skips the re-prompting messages at the beginning of the script",
    )
    parser.add_argument(
        "--use-browser",
        "-b",
        dest="browser_name",
        help="Specifies which web-browser to use when using selenium to scrape the web.",
    )
    parser.add_argument(
        "--ai-settings",
        "-C",
        dest="ai_settings_file",
        help="Specifies which ai_settings.yaml file to use, will also automatically"
        " skip the re-prompt.",
    )
    parser.add_argument(
        "--allow-downloads",
        action="store_true",
        dest="allow_downloads",
        help="Dangerous: Allows Auto-GPT to download files natively.",
    )
    args = parser.parse_args()

    if args.debug:
    if debug:
        logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED")
        CFG.set_debug_mode(True)

    if args.continuous:
    if continuous:
        logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED")
        logger.typewriter_log(
            "WARNING: ",
@@ -86,31 +59,31 @@ def parse_arguments() -> None:
        )
        CFG.set_continuous_mode(True)

    if args.continuous_limit:
    if continuous_limit:
        logger.typewriter_log(
            "Continuous Limit: ", Fore.GREEN, f"{args.continuous_limit}"
            "Continuous Limit: ", Fore.GREEN, f"{continuous_limit}"
        )
        CFG.set_continuous_limit(args.continuous_limit)
        CFG.set_continuous_limit(continuous_limit)

    # Check if continuous limit is used without continuous mode
    if args.continuous_limit and not args.continuous:
        parser.error("--continuous-limit can only be used with --continuous")
    if continuous_limit and not continuous:
        raise click.UsageError("--continuous-limit can only be used with --continuous")

    if args.speak:
    if speak:
        logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED")
        CFG.set_speak_mode(True)

    if args.gpt3only:
    if gpt3only:
        logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
        CFG.set_smart_llm_model(CFG.fast_llm_model)

    if args.gpt4only:
    if gpt4only:
        logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
        CFG.set_fast_llm_model(CFG.smart_llm_model)

    if args.memory_type:
    if memory_type:
        supported_memory = get_supported_memory_backends()
        chosen = args.memory_type
        chosen = memory_type
        if chosen not in supported_memory:
            logger.typewriter_log(
                "ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED: ",
@@ -121,12 +94,12 @@ def parse_arguments() -> None:
        else:
            CFG.memory_backend = chosen

    if args.skip_reprompt:
    if skip_reprompt:
        logger.typewriter_log("Skip Re-prompt: ", Fore.GREEN, "ENABLED")
        CFG.skip_reprompt = True

    if args.ai_settings_file:
        file = args.ai_settings_file
    if ai_settings_file:
        file = ai_settings_file

        # Validate file
        (validated, message) = utils.validate_yaml_file(file)
@@ -139,7 +112,7 @@ def parse_arguments() -> None:
        CFG.ai_settings_file = file
        CFG.skip_reprompt = True

    if args.allow_downloads:
    if allow_downloads:
        logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED")
        logger.typewriter_log(
            "WARNING: ",
@@ -154,5 +127,8 @@ def parse_arguments() -> None:
        )
        CFG.allow_downloads = True

    if args.browser_name:
        CFG.selenium_web_browser = args.browser_name
    if skip_news:
        CFG.skip_news = True

    if browser_name:
        CFG.selenium_web_browser = browser_name
@@ -45,7 +45,7 @@ def fix_json(json_string: str, schema: str) -> str:
    try:
        json.loads(result_string)  # just check the validity
        return result_string
    except json.JSONDecodeError:  # noqa: E722
    except json.JSONDecodeError:
        # Get the call stack:
        # import traceback
        # call_stack = traceback.format_exc()
@@ -4,6 +4,7 @@ from __future__ import annotations

import contextlib
import json
from typing import Optional

from autogpt.config import Config

CFG = Config()

@@ -10,10 +10,9 @@ CFG = Config()
def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]:
    from autogpt.json_fixes.parsing import (
        attempt_to_fix_json_by_finding_outermost_brackets,
        fix_and_parse_json,
    )

    from autogpt.json_fixes.parsing import fix_and_parse_json

    # Parse and print Assistant response
    assistant_reply_json = fix_and_parse_json(assistant_reply)
    if assistant_reply_json == {}:
@@ -4,8 +4,10 @@ from __future__ import annotations

import contextlib
import json
from typing import Any, Dict, Union

from colorama import Fore
from regex import regex

from autogpt.config import Config
from autogpt.json_fixes.auto_fix import fix_json
from autogpt.json_fixes.bracket_termination import balance_braces
@@ -1,5 +1,7 @@
import json

from jsonschema import Draft7Validator

from autogpt.config import Config
from autogpt.logs import logger
@@ -4,11 +4,12 @@ import time
from typing import List, Optional

import openai
from colorama import Fore
from colorama import Fore, Style
from openai.error import APIError, RateLimitError

from autogpt.config import Config
from autogpt.types.openai import Message
from autogpt.logs import logger

CFG = Config()

@@ -70,6 +71,7 @@ def create_chat_completion(
        str: The response from the chat completion
    """
    num_retries = 10
    warned_user = False
    if CFG.debug_mode:
        print(
            f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}"
@@ -112,6 +114,12 @@ def create_chat_completion(
            print(
                f"{Fore.RED}Error: ", f"Reached rate limit, passing...{Fore.RESET}"
            )
            if not warned_user:
                logger.double_check(
                    f"Please double check that you have setup a {Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. "
                    + f"You can read more here: {Fore.CYAN}https://github.com/Significant-Gravitas/Auto-GPT#openai-api-keys-configuration{Fore.RESET}"
                )
                warned_user = True
        except APIError as e:
            if e.http_status != 502:
                raise
@@ -124,7 +132,17 @@ def create_chat_completion(
            )
            time.sleep(backoff)
    if response is None:
        logger.typewriter_log(
            "FAILED TO GET RESPONSE FROM OPENAI",
            Fore.RED,
            "Auto-GPT has failed to get a response from OpenAI's services. "
            + f"Try running Auto-GPT again, and if the problem persists try running it with `{Fore.CYAN}--debug{Fore.RESET}`.",
        )
        logger.double_check()
        if CFG.debug_mode:
            raise RuntimeError(f"Failed to get response after {num_retries} retries")
        else:
            quit(1)
    resp = response.choices[0].message["content"]
    for plugin in CFG.plugins:
        if not plugin.can_handle_on_response():
@@ -134,7 +152,7 @@ def create_chat_completion(


def create_embedding_with_ada(text) -> list:
    """Create a embedding with text-ada-002 using the OpenAI SDK"""
    """Create an embedding with text-ada-002 using the OpenAI SDK"""
    num_retries = 10
    for attempt in range(num_retries):
        backoff = 2 ** (attempt + 2)
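Both retry loops in this module share the same schedule: up to `num_retries` attempts with an exponential backoff of `2 ** (attempt + 2)` seconds, i.e. 4s, 8s, 16s, and so on. The generic shape of that loop, sketched without the OpenAI specifics:

```python
# Generic sketch of the retry-with-exponential-backoff pattern used above.
import time


class TransientError(Exception):
    """Hypothetical stand-in for RateLimitError/APIError."""


def with_retries(fn, num_retries: int = 10):
    for attempt in range(num_retries):
        backoff = 2 ** (attempt + 2)  # 4s, 8s, 16s, ...
        try:
            return fn()
        except TransientError:
            print(f"attempt {attempt + 1} failed, sleeping {backoff}s")
            time.sleep(backoff)
    return None  # caller reports total failure, as create_chat_completion now does
```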
@@ -2,7 +2,7 @@ from __future__ import annotations

import dataclasses
import os
from typing import Any
from typing import Any, List

import numpy as np
import orjson

@@ -40,7 +40,7 @@ class MilvusMemory(MemoryProviderSingleton):
        self.collection.load()

    def add(self, data) -> str:
        """Add a embedding of data into memory.
        """Add an embedding of data into memory.

        Args:
            data (str): The raw text to construct embedding index.
@@ -43,9 +43,18 @@ class WeaviateMemory(MemoryProviderSingleton):
        else:
            self.client = Client(url, auth_client_secret=auth_credentials)

        self.index = cfg.memory_index
        self.index = WeaviateMemory.format_classname(cfg.memory_index)
        self._create_schema()

    @staticmethod
    def format_classname(index):
        # weaviate uses capitalised index names
        # The python client uses the following code to format
        # index names before the corresponding class is created
        if len(index) == 1:
            return index.capitalize()
        return index[0].capitalize() + index[1:]

    def _create_schema(self):
        schema = default_schema(self.index)
        if not self.client.schema.contains(schema):
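Weaviate capitalises class names internally, so an index created as `autogpt` comes back as `Autogpt`; normalising the name once in the constructor keeps later lookups consistent. Note that only the first character is upper-cased while the tail is preserved (`str.capitalize()` alone would lowercase the rest):

```python
# Illustrative mirror of WeaviateMemory.format_classname.
def format_classname(index: str) -> str:
    if len(index) == 1:
        return index.capitalize()
    return index[0].capitalize() + index[1:]


print(format_classname("autogpt"))       # Autogpt
print(format_classname("AutogptTests"))  # AutogptTests (tail kept as-is)
print("AutogptTests".capitalize())       # Autogpttests (why capitalize() alone won't do)
```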
@@ -80,7 +80,6 @@ def summarize_text(
        summary = create_chat_completion(
            model=CFG.fast_llm_model,
            messages=messages,
            max_tokens=CFG.browse_summary_max_token,
        )
        summaries.append(summary)
        print(f"Added chunk {i + 1} summary to memory")
@@ -97,7 +96,6 @@ def summarize_text(
    return create_chat_completion(
        model=CFG.fast_llm_model,
        messages=messages,
        max_tokens=CFG.browse_summary_max_token,
    )
@@ -1,4 +1,4 @@
"""Setup the AI and its goals"""
"""Set up the AI and its goals"""
from colorama import Fore, Style

from autogpt import utils
@@ -17,6 +17,13 @@ def prompt_user() -> AIConfig:
    logger.typewriter_log(
        "Welcome to Auto-GPT! ",
        Fore.GREEN,
        "run with '--help' for more information.",
        speak_text=True,
    )

    logger.typewriter_log(
        "Create an AI-Assistant:",
        Fore.GREEN,
        "Enter the name of your AI and its role below. Entering nothing will load"
        " defaults.",
        speak_text=True,
@@ -14,7 +14,7 @@ class BrianSpeech(VoiceBase):
        """Setup the voices, API key, etc."""
        pass

    def _speech(self, text: str) -> bool:
    def _speech(self, text: str, _: int = 0) -> bool:
        """Speak text using Brian with the streamelements API

        Args:

@@ -14,7 +14,7 @@ class ElevenLabsSpeech(VoiceBase):
    """ElevenLabs speech class"""

    def _setup(self) -> None:
        """Setup the voices, API key, etc.
        """Set up the voices, API key, etc.

        Returns:
            None: None
@@ -1,3 +1,6 @@
import os

import requests
import yaml
from colorama import Fore

@@ -37,3 +40,28 @@ def readable_file_size(size, decimal_places=2):
            break
        size /= 1024.0
    return f"{size:.{decimal_places}f} {unit}"


def get_bulletin_from_web() -> str:
    try:
        response = requests.get(
            "https://raw.githubusercontent.com/Significant-Gravitas/Auto-GPT/master/BULLETIN.md"
        )
        if response.status_code == 200:
            return response.text
    except:
        return ""


def get_latest_bulletin() -> str:
    exists = os.path.exists("CURRENT_BULLETIN.md")
    current_bulletin = ""
    if exists:
        current_bulletin = open("CURRENT_BULLETIN.md", "r", encoding="utf-8").read()
    new_bulletin = get_bulletin_from_web()
    is_new_news = new_bulletin != current_bulletin

    if new_bulletin and is_new_news:
        open("CURRENT_BULLETIN.md", "w", encoding="utf-8").write(new_bulletin)
        return f" {Fore.RED}::UPDATED:: {Fore.CYAN}{new_bulletin}{Fore.RESET}"
    return current_bulletin
@@ -35,7 +35,7 @@ def safe_path_join(base: Path, *paths: str | Path) -> Path:
    """
    joined_path = base.joinpath(*paths).resolve()

    if not str(joined_path.absolute()).startswith(str(base.absolute())):
    if not joined_path.is_relative_to(base):
        raise ValueError(
            f"Attempted to access path '{joined_path}' outside of working directory '{base}'."
        )
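`Path.is_relative_to` (available since Python 3.9) closes a corner case in the old string-prefix check: a sibling directory whose name merely starts with the base path would previously pass. For example (with hypothetical paths):

```python
# Why is_relative_to beats a string-prefix comparison (illustrative paths).
from pathlib import Path

base = Path("/workspace")
sibling = Path("/workspace_evil/data.txt")

print(str(sibling).startswith(str(base)))  # True  -- the prefix check is fooled
print(sibling.is_relative_to(base))        # False -- correctly rejected
```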
@@ -483,7 +483,7 @@ How to Become a Freelance Artificial Intelligence Engineer

Springboard
https://www.springboard.com › Blog › Data Science
29/10/2021 — There are numerous freelancing platforms where you can kick start your career as a freelance artificial intelligence engineer.
29/10/2021 — There are numerous freelancing platforms where you can kick-start your career as a freelance artificial intelligence engineer.
More to ask
Is AI good for freelancing?
What business can I start with AI?
@@ -8,4 +8,33 @@ readme = "README.md"
line-length = 88
target-version = ['py310']
include = '\.pyi?$'
extend-exclude = ""
packages = ["autogpt"]
extend-exclude = '.+/(dist|.venv|venv|build)/.+'


[tool.isort]
profile = "black"
multi_line_output = 3
include_trailing_comma = true
force_grid_wrap = 0
use_parentheses = true
ensure_newline_before_comments = true
line_length = 88
sections = [
    "FUTURE",
    "STDLIB",
    "THIRDPARTY",
    "FIRSTPARTY",
    "LOCALFOLDER"
]
skip = '''
    .tox
    __pycache__
    *.pyc
    .env
    venv*/*
    .venv/*
    reports/*
    dist/*
'''
@@ -1,27 +0,0 @@
beautifulsoup4
colorama==0.4.6
openai==0.27.2
playsound==1.2.2
python-dotenv==1.0.0
pyyaml==6.0
readability-lxml==0.8.1
requests
tiktoken==0.3.3
gTTS==2.3.1
docker
duckduckgo-search
google-api-python-client #(https://developers.google.com/custom-search/v1/overview)
pinecone-client==2.2.1
redis
orjson
Pillow
selenium
webdriver-manager
coverage
flake8
numpy
pre-commit
black
isort
gitpython==3.1.31
tweepy
@@ -19,6 +19,7 @@ selenium
webdriver-manager
jsonschema
tweepy
click

##Dev
coverage
@@ -32,6 +33,8 @@ gitpython==3.1.31
abstract-singleton
auto-gpt-plugin-template

# Items below this point will not be included in the Docker Image

# Testing dependencies
pytest
asynctest
@@ -1,3 +1,3 @@
#!/bin/bash
argument="--continuous"
./run.sh "$argument"

./run.sh --continuous $@
@@ -12,20 +12,10 @@ from autogpt.memory.base import get_ada_embedding
from autogpt.memory.weaviate import WeaviateMemory


@mock.patch.dict(
    os.environ,
    {
        "WEAVIATE_HOST": "127.0.0.1",
        "WEAVIATE_PROTOCOL": "http",
        "WEAVIATE_PORT": "8080",
        "WEAVIATE_USERNAME": "",
        "WEAVIATE_PASSWORD": "",
        "MEMORY_INDEX": "AutogptTests",
    },
)
class TestWeaviateMemory(unittest.TestCase):
    cfg = None
    client = None
    index = None

    @classmethod
    def setUpClass(cls):
@@ -47,6 +37,8 @@ class TestWeaviateMemory(unittest.TestCase):
            f"{cls.cfg.weaviate_protocol}://{cls.cfg.weaviate_host}:{cls.cfg.weaviate_port}"
        )

        cls.index = WeaviateMemory.format_classname(cls.cfg.memory_index)

    """
    In order to run these tests you will need a local instance of
    Weaviate running. Refer to https://weaviate.io/developers/weaviate/installation/docker-compose
@@ -59,7 +51,7 @@ class TestWeaviateMemory(unittest.TestCase):

    def setUp(self):
        try:
            self.client.schema.delete_class(self.cfg.memory_index)
            self.client.schema.delete_class(self.index)
        except:
            pass

@@ -68,8 +60,8 @@ class TestWeaviateMemory(unittest.TestCase):
    def test_add(self):
        doc = "You are a Titan name Thanos and you are looking for the Infinity Stones"
        self.memory.add(doc)
        result = self.client.query.get(self.cfg.memory_index, ["raw_text"]).do()
        actual = result["data"]["Get"][self.cfg.memory_index]
        result = self.client.query.get(self.index, ["raw_text"]).do()
        actual = result["data"]["Get"][self.index]

        self.assertEqual(len(actual), 1)
        self.assertEqual(actual[0]["raw_text"], doc)
@@ -81,7 +73,7 @@ class TestWeaviateMemory(unittest.TestCase):
        batch.add_data_object(
            uuid=get_valid_uuid(uuid4()),
            data_object={"raw_text": doc},
            class_name=self.cfg.memory_index,
            class_name=self.index,
            vector=get_ada_embedding(doc),
        )
@@ -26,7 +26,7 @@ try:

    def setUp(self) -> None:
        """Set up the test environment"""
        self.cfg = MockConfig()
        self.cfg = mock_config()
        self.memory = MilvusMemory(self.cfg)

    def test_add(self) -> None: