Merge remote-tracking branch 'upstream/master' into fix-user-feedback-json-error

Wlad
2023-04-11 01:27:27 +02:00
11 changed files with 80 additions and 43 deletions

View File

@@ -2,13 +2,14 @@ PINECONE_API_KEY=your-pinecone-api-key
PINECONE_ENV=your-pinecone-region
OPENAI_API_KEY=your-openai-api-key
ELEVENLABS_API_KEY=your-elevenlabs-api-key
SMART_LLM_MODEL="gpt-4"
FAST_LLM_MODEL="gpt-3.5-turbo"
SMART_LLM_MODEL=gpt-4
FAST_LLM_MODEL=gpt-3.5-turbo
GOOGLE_API_KEY=
CUSTOM_SEARCH_ENGINE_ID=
USE_AZURE=False
OPENAI_API_BASE=your-base-url-for-azure
OPENAI_API_VERSION=api-version-for-azure
OPENAI_DEPLOYMENT_ID=deployment-id-for-azure
OPENAI_AZURE_API_BASE=your-base-url-for-azure
OPENAI_AZURE_API_VERSION=api-version-for-azure
OPENAI_AZURE_DEPLOYMENT_ID=deployment-id-for-azure
IMAGE_PROVIDER=dalle
HUGGINGFACE_API_TOKEN=
USE_MAC_OS_TTS=False

View File

@@ -59,7 +59,7 @@ Your support is greatly appreciated
## 📋 Requirements
- [Python 3.8 or later](https://www.tutorialspoint.com/how-to-install-python-in-windows)
- OpenAI API key
- PINECONE API key
- [PINECONE API key](https://www.pinecone.io/)
Optional:
- ElevenLabs Key (If you want the AI to speak)
@@ -92,8 +92,8 @@ pip install -r requirements.txt
4. Rename `.env.template` to `.env` and fill in your `OPENAI_API_KEY`. If you plan to use Speech Mode, fill in your `ELEVEN_LABS_API_KEY` as well.
- Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys.
- Obtain your ElevenLabs API key from: https://beta.elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website.
- If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and provide the `OPENAI_API_BASE`, `OPENAI_API_VERSION` and `OPENAI_DEPLOYMENT_ID` values as explained here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section
- Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website.
- If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and provide the `OPENAI_AZURE_API_BASE`, `OPENAI_AZURE_API_VERSION` and `OPENAI_AZURE_DEPLOYMENT_ID` values as explained here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section
## 🔧 Usage
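For reference, a minimal sketch of what the Azure branch in `scripts/config.py` (see the hunk further down in this commit) does with the renamed variables; the placeholder comments are illustrative only:

```python
import os
import openai

# Sketch only: mirrors how the renamed OPENAI_AZURE_* variables are consumed in scripts/config.py.
if os.getenv("USE_AZURE") == "True":
    openai.api_type = "azure"
    openai.api_base = os.getenv("OPENAI_AZURE_API_BASE")        # e.g. https://<your-resource>.openai.azure.com/
    openai.api_version = os.getenv("OPENAI_AZURE_API_VERSION")  # the API version your Azure resource supports
    deployment_id = os.getenv("OPENAI_AZURE_DEPLOYMENT_ID")     # stored by Config and passed along with chat requests
```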
@@ -179,8 +179,7 @@ MEMORY_INDEX=whatever
## 🌲 Pinecone API Key Setup
Pinecone enable a vector based memory so a vast memory can be stored and only relevant memories
are loaded for the agent at any given time.
Pinecone enables the storage of vast amounts of vector-based memory, allowing for only relevant memories to be loaded for the agent at any given time.
1. Go to app.pinecone.io and make an account if you don't already have one.
2. Choose the `Starter` plan to avoid being charged.

View File

@@ -5,12 +5,21 @@ from llm_utils import create_chat_completion
cfg = Config()
# Define and check for local file address prefixes
def check_local_file_access(url):
local_prefixes = ['file:///', 'file://localhost', 'http://localhost', 'https://localhost']
return any(url.startswith(prefix) for prefix in local_prefixes)
def scrape_text(url):
"""Scrape text from a webpage"""
# Most basic check if the URL is valid:
if not url.startswith('http'):
return "Error: Invalid URL"
# Restrict access to local files
if check_local_file_access(url):
return "Error: Access to local files is restricted"
try:
response = requests.get(url, headers=cfg.user_agent_header)
except requests.exceptions.RequestException as e:
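A minimal, self-contained sketch of how the new `check_local_file_access` guard behaves; the prefixes are copied from the hunk above, and the example URLs are hypothetical:

```python
def check_local_file_access(url):
    # Prefixes treated as local resources, as added in this hunk
    local_prefixes = ['file:///', 'file://localhost', 'http://localhost', 'https://localhost']
    return any(url.startswith(prefix) for prefix in local_prefixes)

# Hypothetical examples:
print(check_local_file_access('file:///etc/passwd'))        # True  -> scrape_text returns the restriction error
print(check_local_file_access('http://localhost:8000/'))    # True  -> blocked as well
print(check_local_file_access('https://example.com/page'))  # False -> the request proceeds
```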

View File

@@ -42,9 +42,6 @@ def get_command(response):
# Use an empty dictionary if 'args' field is not present in 'command' object
arguments = command.get("args", {})
if not arguments:
arguments = {}
return command_name, arguments
except json.decoder.JSONDecodeError:
return "Error:", "Invalid JSON"
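The `if not arguments` fallback removed in this hunk appears redundant: `dict.get` with a default already yields an empty dict when the `"args"` key is missing. A one-line illustration with a hypothetical command object:

```python
command = {"name": "browse_website"}   # no "args" key present
arguments = command.get("args", {})    # -> {} without any extra fallback check
```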

View File

@@ -46,15 +46,18 @@ class Config(metaclass=Singleton):
self.use_azure = False
self.use_azure = os.getenv("USE_AZURE") == 'True'
if self.use_azure:
self.openai_api_base = os.getenv("OPENAI_API_BASE")
self.openai_api_version = os.getenv("OPENAI_API_VERSION")
self.openai_deployment_id = os.getenv("OPENAI_DEPLOYMENT_ID")
self.openai_api_base = os.getenv("OPENAI_AZURE_API_BASE")
self.openai_api_version = os.getenv("OPENAI_AZURE_API_VERSION")
self.openai_deployment_id = os.getenv("OPENAI_AZURE_DEPLOYMENT_ID")
openai.api_type = "azure"
openai.api_base = self.openai_api_base
openai.api_version = self.openai_api_version
self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
self.use_mac_os_tts = False
self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS")
self.google_api_key = os.getenv("GOOGLE_API_KEY")
self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID")

View File

@@ -1,6 +1,7 @@
import json
import random
import commands as cmd
import utils
from memory import get_memory
import data
import chat
@@ -16,9 +17,18 @@ from ai_config import AIConfig
import traceback
import yaml
import argparse
import logging
cfg = Config()
def configure_logging():
logging.basicConfig(filename='log.txt',
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.DEBUG)
return logging.getLogger('AutoGPT')
def check_openai_api_key():
"""Check if the OpenAI API key is set in config.py or as an environment variable."""
if not cfg.openai_api_key:
@@ -29,7 +39,6 @@ def check_openai_api_key():
print("You can get your key from https://beta.openai.com/account/api-keys")
exit(1)
def print_to_console(
title,
title_color,
@@ -39,10 +48,12 @@ def print_to_console(
max_typing_speed=0.01):
"""Prints text to the console with a typing effect"""
global cfg
global logger
if speak_text and cfg.speak_mode:
speak.say_text(f"{title}. {content}")
print(title_color + title + " " + Style.RESET_ALL, end="")
if content:
logger.info(title + ': ' + content)
if isinstance(content, list):
content = " ".join(content)
words = content.split()
@@ -140,6 +151,8 @@ def print_assistant_thoughts(assistant_reply):
# Speak the assistant's thoughts
if cfg.speak_mode and assistant_thoughts_speak:
speak.say_text(assistant_thoughts_speak)
return assistant_reply_json
except json.decoder.JSONDecodeError as e:
print_to_console("Error: Invalid JSON\n", Fore.RED, assistant_reply)
if cfg.speak_mode:
@@ -166,12 +179,12 @@ def load_variables(config_file="config.yaml"):
# Prompt the user for input if config file is missing or empty values
if not ai_name:
ai_name = input("Name your AI: ")
ai_name = utils.clean_input("Name your AI: ")
if ai_name == "":
ai_name = "Entrepreneur-GPT"
if not ai_role:
ai_role = input(f"{ai_name} is: ")
ai_role = utils.clean_input(f"{ai_name} is: ")
if ai_role == "":
ai_role = "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth."
@@ -181,7 +194,7 @@ def load_variables(config_file="config.yaml"):
print("Enter nothing to load defaults, enter nothing when finished.")
ai_goals = []
for i in range(5):
ai_goal = input(f"Goal {i+1}: ")
ai_goal = utils.clean_input(f"Goal {i+1}: ")
if ai_goal == "":
break
ai_goals.append(ai_goal)
@@ -214,7 +227,7 @@ def construct_prompt():
Fore.GREEN,
f"Would you like me to return to being {config.ai_name}?",
speak_text=True)
should_continue = input(f"""Continue with the last settings?
should_continue = utils.clean_input(f"""Continue with the last settings?
Name: {config.ai_name}
Role: {config.ai_role}
Goals: {config.ai_goals}
@@ -249,7 +262,7 @@ def prompt_user():
"Name your AI: ",
Fore.GREEN,
"For example, 'Entrepreneur-GPT'")
ai_name = input("AI Name: ")
ai_name = utils.clean_input("AI Name: ")
if ai_name == "":
ai_name = "Entrepreneur-GPT"
@@ -264,7 +277,7 @@ def prompt_user():
"Describe your AI's role: ",
Fore.GREEN,
"For example, 'an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.'")
ai_role = input(f"{ai_name} is: ")
ai_role = utils.clean_input(f"{ai_name} is: ")
if ai_role == "":
ai_role = "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth."
@@ -276,7 +289,7 @@ def prompt_user():
print("Enter nothing to load defaults, enter nothing when finished.", flush=True)
ai_goals = []
for i in range(5):
ai_goal = input(f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: ")
ai_goal = utils.clean_input(f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: ")
if ai_goal == "":
break
ai_goals.append(ai_goal)
@@ -328,6 +341,7 @@ def parse_arguments():
# TODO: fill in llm values here
check_openai_api_key()
cfg = Config()
logger = configure_logging()
parse_arguments()
ai_name = ""
prompt = construct_prompt()
@@ -356,7 +370,7 @@ while True:
cfg.fast_token_limit) # TODO: This hardcodes the model to use GPT3.5. Make this an argument
# Print Assistant thoughts
print_assistant_thoughts(assistant_reply)
assistant_reply = print_assistant_thoughts(assistant_reply)
# Get command name and arguments
try:
@@ -379,7 +393,7 @@ while True:
f"Enter 'y' to authorise command, 'y -N' to run N continuous commands, 'n' to exit program, or enter feedback for {ai_name}...",
flush=True)
while True:
console_input = input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)
console_input = utils.clean_input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)
if console_input.lower() == "y":
user_input = "GENERATE NEXT COMMAND JSON"
break
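For context on the logging wired into main.py above, a brief sketch of how the new `configure_logging` helper and the `logger.info` call in `print_to_console` fit together; the message content and timestamp are illustrative:

```python
import logging

def configure_logging():
    # Same configuration as the helper added above: append to log.txt at DEBUG level
    logging.basicConfig(filename='log.txt',
                        filemode='a',
                        format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
                        datefmt='%H:%M:%S',
                        level=logging.DEBUG)
    return logging.getLogger('AutoGPT')

logger = configure_logging()
logger.info('THOUGHTS' + ': ' + 'I should start by browsing the web')
# log.txt then contains a line like:
# 14:32:07,123 AutoGPT INFO THOUGHTS: I should start by browsing the web
```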

View File

@@ -39,8 +39,14 @@ def gtts_speech(text):
playsound("speech.mp3")
os.remove("speech.mp3")
def macos_tts_speech(text):
os.system(f'say "{text}"')
def say_text(text, voice_index=0):
if not cfg.elevenlabs_api_key:
if cfg.use_mac_os_tts == 'True':
macos_tts_speech(text)
else:
gtts_speech(text)
else:
success = eleven_labs_speech(text, voice_index)
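One detail worth noting in this dispatch: `cfg.use_mac_os_tts` comes straight from `os.getenv` in config.py, so it is a string and is compared against `'True'` rather than a boolean. A minimal sketch of the resulting selection logic, using a hypothetical helper for illustration:

```python
import os

def choose_tts_backend(elevenlabs_api_key, use_mac_os_tts):
    # Mirrors the say_text logic added in this hunk (illustrative helper, not part of the commit)
    if not elevenlabs_api_key:
        if use_mac_os_tts == 'True':   # env values are strings, hence the string comparison
            return 'macos_say'         # would run: os.system(f'say "{text}"')
        return 'gtts'                  # falls back to Google TTS playback
    return 'elevenlabs'                # keyed ElevenLabs voice

print(choose_tts_backend(None, os.getenv("USE_MAC_OS_TTS", "False")))  # -> 'gtts' unless USE_MAC_OS_TTS is set to True
```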

scripts/utils.py (new file, 8 lines)
View File

@@ -0,0 +1,8 @@
def clean_input(prompt: str=''):
try:
return input(prompt)
except KeyboardInterrupt:
print("You interrupted Auto-GPT")
print("Quitting...")
exit(0)
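The new scripts/utils.py helper simply wraps `input()` so a Ctrl+C during any prompt exits cleanly instead of raising a traceback. A hypothetical usage sketch, assuming the scripts directory is on the import path as it is for main.py:

```python
from utils import clean_input

ai_name = clean_input("Name your AI: ")   # pressing Ctrl+C here prints a friendly message and exits
if ai_name == "":
    ai_name = "Entrepreneur-GPT"
```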