Merge branch 'master' into pinecone-memory

This commit is contained in:
Toran Bruce Richards
2023-04-06 11:05:01 +01:00
committed by GitHub
14 changed files with 216 additions and 42 deletions

View File

@@ -6,3 +6,7 @@ SMART_LLM_MODEL="gpt-4"
FAST_LLM_MODEL="gpt-3.5-turbo" FAST_LLM_MODEL="gpt-3.5-turbo"
GOOGLE_API_KEY= GOOGLE_API_KEY=
CUSTOM_SEARCH_ENGINE_ID= CUSTOM_SEARCH_ENGINE_ID=
USE_AZURE=False
OPENAI_API_BASE=your-base-url-for-azure
OPENAI_API_VERSION=api-version-for-azure
OPENAI_DEPLOYMENT_ID=deployment-id-for-azure

39
.github/ISSUE_TEMPLATE/1.bug.yml vendored Normal file
View File

@@ -0,0 +1,39 @@
name: Bug report 🐛
description: Create a bug report for Auto-GPT.
labels: ['status: needs triage']
body:
- type: markdown
attributes:
value: |
Please provide a searchable summary of the issue in the title above ⬆️.
Thanks for contributing by creating an issue! ❤️
- type: checkboxes
attributes:
label: Duplicates
description: Please [search the history](https://github.com/Torantulino/Auto-GPT/issues) to see if an issue already exists for the same problem.
options:
- label: I have searched the existing issues
required: true
- type: textarea
attributes:
label: Steps to reproduce 🕹
description: |
**⚠️ Issues that we can't reproduce will be closed.**
- type: textarea
attributes:
label: Current behavior 😯
description: Describe what happens instead of the expected behavior.
- type: textarea
attributes:
label: Expected behavior 🤔
description: Describe what should happen.
- type: textarea
attributes:
label: Your prompt 📝
description: |
Please provide the prompt you are using. You can find your last-used prompt in last_run_ai_settings.yaml.
value: |
```yaml
# Paste your prompt here
```

29
.github/ISSUE_TEMPLATE/2.feature.yml vendored Normal file
View File

@@ -0,0 +1,29 @@
name: Feature request 🚀
description: Suggest a new idea for Auto-GPT.
labels: ['status: needs triage']
body:
- type: markdown
attributes:
value: |
Please provide a searchable summary of the issue in the title above ⬆️.
Thanks for contributing by creating an issue! ❤️
- type: checkboxes
attributes:
label: Duplicates
description: Please [search the history](https://github.com/Torantulino/Auto-GPT/issues) to see if an issue already exists for the same problem.
options:
- label: I have searched the existing issues
required: true
- type: textarea
attributes:
label: Summary 💡
description: Describe how it should work.
- type: textarea
attributes:
label: Examples 🌈
description: Provide a link to other implementations, or screenshots of the expected behavior.
- type: textarea
attributes:
label: Motivation 🔦
description: What are you trying to accomplish? How has the lack of this feature affected you? Providing context helps us come up with a solution that is more useful in the real world.

2
.gitignore vendored
View File

@@ -4,7 +4,7 @@ scripts/node_modules/
scripts/__pycache__/keys.cpython-310.pyc scripts/__pycache__/keys.cpython-310.pyc
package-lock.json package-lock.json
*.pyc *.pyc
scripts/auto_gpt_workspace/* auto_gpt_workspace/*
*.mpeg *.mpeg
.env .env
last_run_ai_settings.yaml last_run_ai_settings.yaml

View File

@@ -92,6 +92,7 @@ pip install -r requirements.txt
4. Rename `.env.template` to `.env` and fill in your `OPENAI_API_KEY`. If you plan to use Speech Mode, fill in your `ELEVEN_LABS_API_KEY` as well. 4. Rename `.env.template` to `.env` and fill in your `OPENAI_API_KEY`. If you plan to use Speech Mode, fill in your `ELEVEN_LABS_API_KEY` as well.
- Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys. - Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys.
- Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website. - Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website.
- If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and provide the `OPENAI_API_BASE`, `OPENAI_API_VERSION` and `OPENAI_DEPLOYMENT_ID` values as explained here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section
## 🔧 Usage ## 🔧 Usage

View File

@@ -6,7 +6,7 @@ from llm_utils import create_chat_completion
cfg = Config() cfg = Config()
def scrape_text(url): def scrape_text(url):
response = requests.get(url) response = requests.get(url, headers=cfg.user_agent_header)
# Check if the response contains an HTTP error # Check if the response contains an HTTP error
if response.status_code >= 400: if response.status_code >= 400:
@@ -40,7 +40,7 @@ def format_hyperlinks(hyperlinks):
def scrape_links(url): def scrape_links(url):
response = requests.get(url) response = requests.get(url, headers=cfg.user_agent_header)
# Check if the response contains an HTTP error # Check if the response contains an HTTP error
if response.status_code >= 400: if response.status_code >= 400:

View File

@@ -6,7 +6,7 @@ import agent_manager as agents
import speak import speak
from config import Config from config import Config
import ai_functions as ai import ai_functions as ai
from file_operations import read_file, write_to_file, append_to_file, delete_file from file_operations import read_file, write_to_file, append_to_file, delete_file, search_files
from execute_code import execute_python_file from execute_code import execute_python_file
from json_parser import fix_and_parse_json from json_parser import fix_and_parse_json
from duckduckgo_search import ddg from duckduckgo_search import ddg
@@ -16,6 +16,13 @@ from googleapiclient.errors import HttpError
cfg = Config() cfg = Config()
def is_valid_int(value):
try:
int(value)
return True
except ValueError:
return False
def get_command(response): def get_command(response):
try: try:
response_json = fix_and_parse_json(response) response_json = fix_and_parse_json(response)
@@ -80,6 +87,8 @@ def execute_command(command_name, arguments):
return append_to_file(arguments["file"], arguments["text"]) return append_to_file(arguments["file"], arguments["text"])
elif command_name == "delete_file": elif command_name == "delete_file":
return delete_file(arguments["file"]) return delete_file(arguments["file"])
elif command_name == "search_files":
return search_files(arguments["directory"])
elif command_name == "browse_website": elif command_name == "browse_website":
return browse_website(arguments["url"], arguments["question"]) return browse_website(arguments["url"], arguments["question"])
# TODO: Change these to take in a file rather than pasted code, if # TODO: Change these to take in a file rather than pasted code, if
@@ -173,6 +182,49 @@ def get_hyperlinks(url):
return link_list return link_list
def commit_memory(string):
_text = f"""Committing memory with string "{string}" """
mem.permanent_memory.append(string)
return _text
def delete_memory(key):
if key >= 0 and key < len(mem.permanent_memory):
_text = "Deleting memory with key " + str(key)
del mem.permanent_memory[key]
print(_text)
return _text
else:
print("Invalid key, cannot delete memory.")
return None
def overwrite_memory(key, string):
# Check if the key is a valid integer
if is_valid_int(key):
key_int = int(key)
# Check if the integer key is within the range of the permanent_memory list
if 0 <= key_int < len(mem.permanent_memory):
_text = "Overwriting memory with key " + str(key) + " and string " + string
# Overwrite the memory slot with the given integer key and string
mem.permanent_memory[key_int] = string
print(_text)
return _text
else:
print(f"Invalid key '{key}', out of range.")
return None
# Check if the key is a valid string
elif isinstance(key, str):
_text = "Overwriting memory with key " + key + " and string " + string
# Overwrite the memory slot with the given string key and string
mem.permanent_memory[key] = string
print(_text)
return _text
else:
print(f"Invalid key '{key}', must be an integer or a string.")
return None
def shutdown(): def shutdown():
print("Shutting down...") print("Shutting down...")
quit() quit()
@@ -203,13 +255,20 @@ def start_agent(name, task, prompt, model=cfg.fast_llm_model):
def message_agent(key, message): def message_agent(key, message):
global cfg global cfg
# Check if the key is a valid integer
if is_valid_int(key):
agent_response = agents.message_agent(int(key), message)
# Check if the key is a valid string
elif isinstance(key, str):
agent_response = agents.message_agent(key, message) agent_response = agents.message_agent(key, message)
else:
return "Invalid key, must be an integer or a string."
# Speak response # Speak response
if cfg.speak_mode: if cfg.speak_mode:
speak.say_text(agent_response, 1) speak.say_text(agent_response, 1)
return agent_response
return f"Agent {key} responded: {agent_response}"
def list_agents(): def list_agents():

View File

@@ -35,6 +35,16 @@ class Config(metaclass=Singleton):
self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000)) self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
self.openai_api_key = os.getenv("OPENAI_API_KEY") self.openai_api_key = os.getenv("OPENAI_API_KEY")
self.use_azure = False
self.use_azure = os.getenv("USE_AZURE") == 'True'
if self.use_azure:
self.openai_api_base = os.getenv("OPENAI_API_BASE")
self.openai_api_version = os.getenv("OPENAI_API_VERSION")
self.openai_deployment_id = os.getenv("OPENAI_DEPLOYMENT_ID")
openai.api_type = "azure"
openai.api_base = self.openai_api_base
openai.api_version = self.openai_api_version
self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY") self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
self.google_api_key = os.getenv("GOOGLE_API_KEY") self.google_api_key = os.getenv("GOOGLE_API_KEY")
@@ -43,6 +53,10 @@ class Config(metaclass=Singleton):
self.pinecone_api_key = os.getenv("PINECONE_API_KEY") self.pinecone_api_key = os.getenv("PINECONE_API_KEY")
self.pinecone_region = os.getenv("PINECONE_ENV") self.pinecone_region = os.getenv("PINECONE_ENV")
# User agent headers to use when browsing web
# Some websites might just completely deny request with an error code if no user agent was found.
self.user_agent_header = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"}
# Initialize the OpenAI API client # Initialize the OpenAI API client
openai.api_key = self.openai_api_key openai.api_key = self.openai_api_key

View File

@@ -1,15 +1,14 @@
import os import os
from pathlib import Path from pathlib import Path
SRC_DIR = Path(__file__).parent
def load_prompt(): def load_prompt():
try: try:
# get directory of this file: # get directory of this file:
file_dir = Path(os.path.dirname(os.path.realpath(__file__))) file_dir = Path(__file__).parent
data_dir = file_dir / "data" prompt_file_path = file_dir / "data" / "prompt.txt"
prompt_file = data_dir / "prompt.txt"
# Load the promt from data/prompt.txt # Load the prompt from data/prompt.txt
with open(SRC_DIR/ "data/prompt.txt", "r") as prompt_file: with open(prompt_file_path, "r") as prompt_file:
prompt = prompt_file.read() prompt = prompt_file.read()
return prompt return prompt

View File

@@ -8,20 +8,21 @@ CONSTRAINTS:
COMMANDS: COMMANDS:
1. Google Search: "google", args: "input": "<search>" 1. Google Search: "google", args: "input": "<search>"
2. Browse Website: "browse_website", args: "url": "<url>", "question": "<what_you_want_to_find_on_website>" 5. Browse Website: "browse_website", args: "url": "<url>", "question": "<what_you_want_to_find_on_website>"
3. Start GPT Agent: "start_agent", args: "name": <name>, "task": "<short_task_desc>", "prompt": "<prompt>" 6. Start GPT Agent: "start_agent", args: "name": <name>, "task": "<short_task_desc>", "prompt": "<prompt>"
4. Message GPT Agent: "message_agent", args: "key": "<key>", "message": "<message>" 7. Message GPT Agent: "message_agent", args: "key": "<key>", "message": "<message>"
5. List GPT Agents: "list_agents", args: "" 8. List GPT Agents: "list_agents", args: ""
6. Delete GPT Agent: "delete_agent", args: "key": "<key>" 9. Delete GPT Agent: "delete_agent", args: "key": "<key>"
7. Write to file: "write_to_file", args: "file": "<file>", "text": "<text>" 10. Write to file: "write_to_file", args: "file": "<file>", "text": "<text>"
8. Read file: "read_file", args: "file": "<file>" 11. Read file: "read_file", args: "file": "<file>"
9. Append to file: "append_to_file", args: "file": "<file>", "text": "<text>" 12. Append to file: "append_to_file", args: "file": "<file>", "text": "<text>"
10. Delete file: "delete_file", args: "file": "<file>" 13. Delete file: "delete_file", args: "file": "<file>"
11. Evaluate Code: "evaluate_code", args: "code": "<full_code_string>" 14. Search Files: "search_files", args: "directory": "<directory>"
12. Get Improved Code: "improve_code", args: "suggestions": "<list_of_suggestions>", "code": "<full_code_string>" 15. Evaluate Code: "evaluate_code", args: "code": "<full_code_string>"
13. Write Tests: "write_tests", args: "code": "<full_code_string>", "focus": "<list_of_focus_areas>" 16. Get Improved Code: "improve_code", args: "suggestions": "<list_of_suggestions>", "code": "<full_code_string>"
14. Execute Python File: "execute_python_file", args: "file": "<file>" 17. Write Tests: "write_tests", args: "code": "<full_code_string>", "focus": "<list_of_focus_areas>"
15. Task Complete (Shutdown): "task_complete", args: "reason": "<reason>" 18. Execute Python File: "execute_python_file", args: "file": "<file>"
19. Task Complete (Shutdown): "task_complete", args: "reason": "<reason>"
RESOURCES: RESOURCES:
@@ -41,12 +42,6 @@ You should only respond in JSON format as described below
RESPONSE FORMAT: RESPONSE FORMAT:
{ {
"command": {
"name": "command name",
"args":{
"arg name": "value"
}
},
"thoughts": "thoughts":
{ {
"text": "thought", "text": "thought",
@@ -54,6 +49,12 @@ RESPONSE FORMAT:
"plan": "- short bulleted\n- list that conveys\n- long-term plan", "plan": "- short bulleted\n- list that conveys\n- long-term plan",
"criticism": "constructive self-criticism", "criticism": "constructive self-criticism",
"speak": "thoughts summary to say to user" "speak": "thoughts summary to say to user"
},
"command": {
"name": "command name",
"args":{
"arg name": "value"
}
} }
} }

View File

@@ -58,3 +58,20 @@ def delete_file(filename):
return "File deleted successfully." return "File deleted successfully."
except Exception as e: except Exception as e:
return "Error: " + str(e) return "Error: " + str(e)
def search_files(directory):
found_files = []
if directory == "" or directory == "/":
search_directory = working_directory
else:
search_directory = safe_join(working_directory, directory)
for root, _, files in os.walk(search_directory):
for file in files:
if file.startswith('.'):
continue
relative_path = os.path.relpath(os.path.join(root, file), working_directory)
found_files.append(relative_path)
return found_files

View File

@@ -24,6 +24,7 @@ def fix_and_parse_json(json_str: str, try_to_fix_with_gpt: bool = True):
""" """
try: try:
json_str = json_str.replace('\t', '')
return json.loads(json_str) return json.loads(json_str)
except Exception as e: except Exception as e:
# Let's do something manually - sometimes GPT responds with something BEFORE the braces: # Let's do something manually - sometimes GPT responds with something BEFORE the braces:
@@ -67,7 +68,8 @@ def fix_json(json_str: str, schema: str, debug=False) -> str:
print(f"Fixed JSON: {result_string}") print(f"Fixed JSON: {result_string}")
print("----------- END OF FIX ATTEMPT ----------------") print("----------- END OF FIX ATTEMPT ----------------")
try: try:
return json.loads(result_string) json.loads(result_string) # just check the validity
return result_string
except: except:
# Get the call stack: # Get the call stack:
# import traceback # import traceback

View File

@@ -6,6 +6,15 @@ openai.api_key = cfg.openai_api_key
# Overly simple abstraction until we create something better # Overly simple abstraction until we create something better
def create_chat_completion(messages, model=None, temperature=None, max_tokens=None)->str: def create_chat_completion(messages, model=None, temperature=None, max_tokens=None)->str:
if cfg.use_azure:
response = openai.ChatCompletion.create(
deployment_id=cfg.openai_deployment_id,
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens
)
else:
response = openai.ChatCompletion.create( response = openai.ChatCompletion.create(
model=model, model=model,
messages=messages, messages=messages,

View File

@@ -42,7 +42,7 @@ def say_text(text, voice_index=0):
if not cfg.elevenlabs_api_key: if not cfg.elevenlabs_api_key:
gtts_speech(text) gtts_speech(text)
else: else:
success = eleven_labs_speech(text) success = eleven_labs_speech(text, voice_index)
if not success: if not success:
gtts_speech(text) gtts_speech(text)