diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
index 379f6310..02f580a0 100644
--- a/.devcontainer/Dockerfile
+++ b/.devcontainer/Dockerfile
@@ -1,10 +1,10 @@
-# [Choice] Python version (use -bullseye variants on local arm64/Apple Silicon): 3, 3.10, 3.9, 3.8, 3.7, 3.6, 3-bullseye, 3.10-bullseye, 3.9-bullseye, 3.8-bullseye, 3.7-bullseye, 3.6-bullseye, 3-buster, 3.10-buster, 3.9-buster, 3.8-buster, 3.7-buster, 3.6-buster
+# [Choice] Python version (use -bullseye variants on local arm64/Apple Silicon): 3, 3.10, 3-bullseye, 3.10-bullseye, 3-buster, 3.10-buster
 ARG VARIANT=3-bullseye
-FROM --platform=linux/amd64 python:3.8
+FROM --platform=linux/amd64 python:3.10
 
 RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
     # Remove imagemagick due to https://security-tracker.debian.org/tracker/CVE-2019-10131
-    && apt-get purge -y imagemagick imagemagick-6-common
+    && apt-get purge -y imagemagick imagemagick-6-common
 
 # Temporary: Upgrade python packages due to https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-40897
 # They are installed by the base image (python) which does not have the patch.
@@ -25,4 +25,4 @@ RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
 #    && apt-get -y install --no-install-recommends
 
 # [Optional] Uncomment this line to install global node packages.
-# RUN su vscode -c "source /usr/local/share/nvm/nvm.sh && npm install -g " 2>&1
\ No newline at end of file
+# RUN su vscode -c "source /usr/local/share/nvm/nvm.sh && npm install -g " 2>&1
diff --git a/.env.template b/.env.template
index 09deeb93..6e521af1 100644
--- a/.env.template
+++ b/.env.template
@@ -3,15 +3,14 @@
 ################################################################################
 # EXECUTE_LOCAL_COMMANDS - Allow local command execution (Example: False)
 EXECUTE_LOCAL_COMMANDS=False
+# RESTRICT_TO_WORKSPACE - Restrict file operations to workspace ./auto_gpt_workspace (Default: True)
+RESTRICT_TO_WORKSPACE=True
 # BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunk stored in memory
 BROWSE_CHUNK_MAX_LENGTH=8192
 # USER_AGENT - Define the user-agent used by the requests library to browse website (string)
 # USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
 # AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml)
 AI_SETTINGS_FILE=ai_settings.yaml
-# USE_WEB_BROWSER - Sets the web-browser drivers to use with selenium (defaults to chrome).
-# Note: set this to either 'chrome', 'firefox', or 'safari' depending on your current browser
-# USE_WEB_BROWSER=chrome
 
 ################################################################################
 ### LLM PROVIDER
@@ -108,14 +107,22 @@ MILVUS_COLLECTION=autogpt
 ### OPEN AI
 # IMAGE_PROVIDER - Image provider (Example: dalle)
 IMAGE_PROVIDER=dalle
+# IMAGE_SIZE - Image size (Example: 256)
+# DALLE: 256, 512, 1024
+IMAGE_SIZE=256
 
 ### HUGGINGFACE
-# STABLE DIFFUSION
-# (Default URL: https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4)
-# Set in image_gen.py)
+# HUGGINGFACE_IMAGE_MODEL - Text-to-image model from Huggingface (Default: CompVis/stable-diffusion-v1-4)
+HUGGINGFACE_IMAGE_MODEL=CompVis/stable-diffusion-v1-4
 # HUGGINGFACE_API_TOKEN - HuggingFace API token (Example: my-huggingface-api-token)
 HUGGINGFACE_API_TOKEN=your-huggingface-api-token
 
+### STABLE DIFFUSION WEBUI
+# SD_WEBUI_URL - Stable diffusion webui API URL (Example: http://127.0.0.1:7860)
+SD_WEBUI_URL=http://127.0.0.1:7860
+# SD_WEBUI_AUTH - Stable diffusion webui username:password pair (Example: username:password)
+SD_WEBUI_AUTH=
+
 ################################################################################
 ### AUDIO TO TEXT PROVIDER
 ################################################################################
@@ -134,9 +141,16 @@ GITHUB_API_KEY=github_pat_123
 GITHUB_USERNAME=your-github-username
 
 ################################################################################
-### SEARCH PROVIDER
+### WEB BROWSING
 ################################################################################
 
+### BROWSER
+# USE_WEB_BROWSER - Sets the web-browser drivers to use with selenium (defaults to chrome).
+# HEADLESS_BROWSER - Whether to run the browser in headless mode (defaults to True)
+# Note: set this to either 'chrome', 'firefox', or 'safari' depending on your current browser
+# USE_WEB_BROWSER=chrome
+# HEADLESS_BROWSER=True
+
 ### GOOGLE
 # GOOGLE_API_KEY - Google API key (Example: my-google-api-key)
 # CUSTOM_SEARCH_ENGINE_ID - Custom search engine ID (Example: my-custom-search-engine-id)
diff --git a/.github/ISSUE_TEMPLATE/1.bug.yml b/.github/ISSUE_TEMPLATE/1.bug.yml
index 7f1d2771..6645142e 100644
--- a/.github/ISSUE_TEMPLATE/1.bug.yml
+++ b/.github/ISSUE_TEMPLATE/1.bug.yml
@@ -2,6 +2,20 @@ name: Bug report 🐛
 description: Create a bug report for Auto-GPT.
 labels: ['status: needs triage']
 body:
+  - type: markdown
+    attributes:
+      value: |
+        ### ⚠️ Before you continue
+        * Check out our [backlog], [roadmap] and join our [discord] to discuss what's going on
+        * If you need help, you can ask in the [discussions] section or in [#tech-support]
+        * **Thoroughly search the [existing issues] before creating a new one**
+
+        [backlog]: https://github.com/orgs/Significant-Gravitas/projects/1
+        [roadmap]: https://github.com/orgs/Significant-Gravitas/projects/2
+        [discord]: https://discord.gg/autogpt
+        [discussions]: https://github.com/Significant-Gravitas/Auto-GPT/discussions
+        [#tech-support]: https://discord.com/channels/1092243196446249134/1092275629602394184
+        [existing issues]: https://github.com/Significant-Gravitas/Auto-GPT/issues?q=is%3Aissue
   - type: checkboxes
     attributes:
       label: ⚠️ Search for existing issues first ⚠️
@@ -28,14 +42,32 @@ body:
         - Provide commit-hash (`git rev-parse HEAD` gets it)
         - If it's a pip/packages issue, provide pip version, python version
         - If it's a crash, provide traceback.
-  - type: checkboxes
+  - type: dropdown
     attributes:
-      label: GPT-3 or GPT-4
+      label: Which Operating System are you using?
+      description: >
+        Please select the operating system you were using to run Auto-GPT when this problem occurred.
+      options:
+        - Windows
+        - Linux
+        - MacOS
+        - Docker
+        - Devcontainer / Codespace
+        - Windows Subsystem for Linux (WSL)
+        - Other (Please specify in your problem)
+    validations:
+      required: true
+  - type: dropdown
+    attributes:
+      label: GPT-3 or GPT-4?
       description: >
         If you are using Auto-GPT with `--gpt3only`, your problems may be caused by
-        the limitations of GPT-3.5
+        the [limitations](https://github.com/Significant-Gravitas/Auto-GPT/issues?q=is%3Aissue+label%3A%22AI+model+limitation%22) of GPT-3.5.
       options:
-        - label: I am using Auto-GPT with GPT-3 (GPT-3.5)
+        - GPT-3.5
+        - GPT-4
+    validations:
+      required: true
   - type: textarea
     attributes:
       label: Steps to reproduce 🕹
@@ -52,9 +84,34 @@ body:
   - type: textarea
     attributes:
       label: Your prompt 📝
-      description: |
-        If applicable please provide the prompt you are using. You can find your last-used prompt in last_run_ai_settings.yaml.
+      description: >
+        If applicable please provide the prompt you are using. Your prompt is stored in your `ai_settings.yaml` file.
       value: |
         ```yaml
         # Paste your prompt here
         ```
+  - type: textarea
+    attributes:
+      label: Your Logs 📒
+      description: |
+        Please include the log showing your error and the command that caused it, if applicable.
+        You can copy it from your terminal or from `logs/activity.log`.
+        This will help us understand your issue better!
+
+        <details>
+        <summary>Example</summary>
+        ```log
+        INFO NEXT ACTION: COMMAND = execute_shell ARGUMENTS = {'command_line': 'some_command'}
+        INFO -=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=
+        Traceback (most recent call last):
+          File "/home/anaconda3/lib/python3.9/site-packages/openai/api_requestor.py", line 619, in _interpret_response
+            self._interpret_response_line(
+          File "/home/anaconda3/lib/python3.9/site-packages/openai/api_requestor.py", line 682, in _interpret_response_line
+            raise self.handle_error_response(
+        openai.error.InvalidRequestError: This model's maximum context length is 8191 tokens, however you requested 10982 tokens (10982 in your prompt; 0 for the completion). Please reduce your prompt; or completion length.
+        ```
+        </details>
+      value: |
+        ```log
+
+        ```
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index cf7ffbf3..a4f28a3d 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,3 +1,10 @@
+
+
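
For context on how the new image-generation settings in `.env.template` hang together, here is a minimal, hypothetical Python sketch of reading `SD_WEBUI_URL`, `SD_WEBUI_AUTH`, and `IMAGE_SIZE` and requesting an image from a locally running Stable Diffusion WebUI. It is illustrative only: it is not part of this diff and not the repository's actual `image_gen.py`, and it assumes an AUTOMATIC1111-style WebUI exposing the `/sdapi/v1/txt2img` endpoint with optional HTTP basic auth.

```python
# Illustrative sketch only; not the code added by this PR.
# Assumes a running Stable Diffusion WebUI with its API enabled.
import base64
import os

import requests

SD_WEBUI_URL = os.getenv("SD_WEBUI_URL", "http://127.0.0.1:7860")
SD_WEBUI_AUTH = os.getenv("SD_WEBUI_AUTH", "")    # "username:password" or empty
IMAGE_SIZE = int(os.getenv("IMAGE_SIZE", "256"))  # 256, 512 or 1024


def generate_image(prompt: str) -> bytes:
    """Request a txt2img generation and return the raw PNG bytes."""
    session = requests.Session()
    if SD_WEBUI_AUTH and ":" in SD_WEBUI_AUTH:
        # The template documents SD_WEBUI_AUTH as a username:password pair,
        # which maps naturally onto HTTP basic auth.
        user, password = SD_WEBUI_AUTH.split(":", 1)
        session.auth = (user, password)

    response = session.post(
        f"{SD_WEBUI_URL}/sdapi/v1/txt2img",
        json={"prompt": prompt, "width": IMAGE_SIZE, "height": IMAGE_SIZE, "steps": 20},
    )
    response.raise_for_status()
    # The WebUI API returns base64-encoded images in the "images" list.
    return base64.b64decode(response.json()["images"][0])


if __name__ == "__main__":
    with open("sample.png", "wb") as f:
        f.write(generate_image("a lighthouse at dawn"))
```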
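Likewise, the new `USE_WEB_BROWSER` and `HEADLESS_BROWSER` options under the WEB BROWSING section only make sense in terms of how a Selenium driver gets built. The sketch below is an illustrative guess at that wiring, not the browsing code this PR ships; the `make_driver` function name is hypothetical and the 'safari' option is omitted for brevity.

```python
# Illustrative sketch only; not the browsing code added by this PR.
import os

from selenium import webdriver
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.firefox.options import Options as FirefoxOptions

USE_WEB_BROWSER = os.getenv("USE_WEB_BROWSER", "chrome")
HEADLESS_BROWSER = os.getenv("HEADLESS_BROWSER", "True").lower() == "true"


def make_driver() -> webdriver.Remote:
    """Build a Selenium driver according to the env settings above."""
    if USE_WEB_BROWSER == "firefox":
        options = FirefoxOptions()
        if HEADLESS_BROWSER:
            options.add_argument("--headless")
        return webdriver.Firefox(options=options)

    # Default: Chrome.
    options = ChromeOptions()
    if HEADLESS_BROWSER:
        options.add_argument("--headless")
    return webdriver.Chrome(options=options)


if __name__ == "__main__":
    driver = make_driver()
    driver.get("https://example.com")
    print(driver.title)
    driver.quit()
```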