resolved latest conflicts

This commit is contained in:
cs0lar
2023-04-12 18:52:06 +01:00
31 changed files with 527 additions and 97 deletions

scripts/__init__.py (new file, 0 lines changed)

@@ -13,7 +13,7 @@ def create_agent(task, prompt, model):
messages = [{"role": "user", "content": prompt}, ]
# Start GTP3 instance
# Start GPT instance
agent_reply = create_chat_completion(
model=model,
messages=messages,
@@ -41,7 +41,7 @@ def message_agent(key, message):
# Add user message to message history before sending to agent
messages.append({"role": "user", "content": message})
# Start GTP3 instance
# Start GPT instance
agent_reply = create_chat_completion(
model=model,
messages=messages,
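
Both hunks above fix the same comment around the agent manager's completion calls. For orientation, a minimal sketch of the surrounding pattern, assuming a create_chat_completion wrapper like the one in llm_utils (the stub below stands in for it, and the bookkeeping names are illustrative, not the file's exact code): each sub-agent is a (task, message history, model) tuple keyed by an integer, and every exchange is appended to that history before the next call.

def create_chat_completion(messages, model=None):
    # Stub standing in for the project's OpenAI wrapper.
    return f"(reply from {model} to {len(messages)} messages)"

next_key = 0
agents = {}  # key -> (task, messages, model)

def create_agent(task, prompt, model):
    global next_key
    messages = [{"role": "user", "content": prompt}]
    agent_reply = create_chat_completion(messages=messages, model=model)
    messages.append({"role": "assistant", "content": agent_reply})
    key = next_key
    next_key += 1
    agents[key] = (task, messages, model)
    return key, agent_reply

def message_agent(key, message):
    task, messages, model = agents[key]
    # Add the user message to the history before sending to the agent
    messages.append({"role": "user", "content": message})
    agent_reply = create_chat_completion(messages=messages, model=model)
    messages.append({"role": "assistant", "content": agent_reply})
    return agent_reply

key, _ = create_agent("summarise", "Summarise this repo.", "gpt-3.5-turbo")
print(message_agent(key, "Now list the main files."))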


@@ -42,7 +42,7 @@ class AIConfig:
config_file (int): The path to the config yaml file. DEFAULT: "../ai_settings.yaml"
Returns:
cls (object): A instance of given cls object
cls (object): An instance of given cls object
"""
try:
@@ -80,7 +80,7 @@ class AIConfig:
None
Returns:
full_prompt (str): A string containing the intitial prompt for the user including the ai_name, ai_role and ai_goals.
full_prompt (str): A string containing the initial prompt for the user including the ai_name, ai_role and ai_goals.
"""
prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""
@@ -92,4 +92,3 @@ class AIConfig:
full_prompt += f"\n\n{data.load_prompt()}"
return full_prompt
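
The docstring fixes above sit in AIConfig.construct_full_prompt. A hedged sketch of what that method assembles, written as a standalone function: the prompt_start text and the f-string skeleton come from this diff, while the goal-numbering loop and the base_prompt argument (standing in for data.load_prompt()) are assumptions.

def construct_full_prompt(ai_name, ai_role, ai_goals, base_prompt):
    prompt_start = (
        "Your decisions must always be made independently without seeking "
        "user assistance. Play to your strengths as an LLM and pursue "
        "simple strategies with no legal complications."
    )
    full_prompt = f"You are {ai_name}, {ai_role}\n{prompt_start}\n\nGOALS:\n\n"
    for i, goal in enumerate(ai_goals):
        full_prompt += f"{i + 1}. {goal}\n"
    full_prompt += f"\n\n{base_prompt}"
    return full_prompt

print(construct_full_prompt(
    "ResearchGPT",
    "an AI that surveys recent papers",
    ["Find new LLM papers", "Summarise each one"],
    "<command list prompt>",
))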


@@ -2,9 +2,31 @@ import requests
from bs4 import BeautifulSoup
from config import Config
from llm_utils import create_chat_completion
from urllib.parse import urlparse, urljoin
cfg = Config()
# Function to check if the URL is valid
def is_valid_url(url):
try:
result = urlparse(url)
return all([result.scheme, result.netloc])
except ValueError:
return False
# Function to sanitize the URL
def sanitize_url(url):
return urljoin(url, urlparse(url).path)
# Function to make a request with a specified timeout and handle exceptions
def make_request(url, timeout=10):
try:
response = requests.get(url, headers=cfg.user_agent_header, timeout=timeout)
response.raise_for_status()
return response
except requests.exceptions.RequestException as e:
return "Error: " + str(e)
# Define and check for local file address prefixes
def check_local_file_access(url):
local_prefixes = ['file:///', 'file://localhost', 'http://localhost', 'https://localhost']
@@ -12,22 +34,29 @@ def check_local_file_access(url):
def scrape_text(url):
"""Scrape text from a webpage"""
# Most basic check if the URL is valid:
# Basic check if the URL is valid
if not url.startswith('http'):
return "Error: Invalid URL"
# Restrict access to local files
if check_local_file_access(url):
return "Error: Access to local files is restricted"
try:
response = requests.get(url, headers=cfg.user_agent_header)
except requests.exceptions.RequestException as e:
return "Error: " + str(e)
# Validate the input URL
if not is_valid_url(url):
# Sanitize the input URL
sanitized_url = sanitize_url(url)
# Check if the response contains an HTTP error
if response.status_code >= 400:
return "Error: HTTP " + str(response.status_code) + " error"
# Make the request with a timeout and handle exceptions
response = make_request(sanitized_url)
if isinstance(response, str):
return response
else:
# Sanitize the input URL
sanitized_url = sanitize_url(url)
response = requests.get(sanitized_url, headers=cfg.user_agent_header)
soup = BeautifulSoup(response.text, "html.parser")
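
The three new helpers compose into a safer fetch path: validate, sanitize, then request with a timeout. A quick demonstration of what the stdlib calls actually return, which also shows that sanitize_url drops the query string and fragment:

from urllib.parse import urlparse, urljoin

url = "https://example.com/page?session=abc#top"
parts = urlparse(url)
print(all([parts.scheme, parts.netloc]))  # True, so is_valid_url accepts it
print(urljoin(url, parts.path))           # https://example.com/page

Because make_request returns an error string instead of raising, callers check isinstance(response, str) before parsing, exactly as the hunk above does.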


@@ -63,15 +63,15 @@ def chat_with_ai(
"""
model = cfg.fast_llm_model # TODO: Change model from hardcode to argument
# Reserve 1000 tokens for the response
if cfg.debug:
if cfg.debug_mode:
print(f"Token limit: {token_limit}")
send_token_limit = token_limit - 1000
relevant_memory = permanent_memory.get_relevant(str(full_message_history[-5:]), 10)
relevant_memory = permanent_memory.get_relevant(str(full_message_history[-9:]), 10)
if cfg.debug:
if cfg.debug_mode:
print('Memory Stats: ', permanent_memory.get_stats())
next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context(
@@ -110,7 +110,7 @@ def chat_with_ai(
# assert tokens_remaining >= 0, "Tokens remaining is negative. This should never happen, please submit a bug report at https://www.github.com/Torantulino/Auto-GPT"
# Debug print the current context
if cfg.debug:
if cfg.debug_mode:
print(f"Token limit: {token_limit}")
print(f"Send Token Count: {current_tokens_used}")
print(f"Tokens remaining for response: {tokens_remaining}")
@@ -141,6 +141,6 @@ def chat_with_ai(
return assistant_reply
except openai.error.RateLimitError:
# TODO: WHen we switch to langchain, this is built in
# TODO: When we switch to langchain, this is built in
print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
time.sleep(10)
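
The budgeting in this hunk reserves 1000 tokens of the model's context window for the reply and fills the rest with history and relevant memory. A minimal sketch of that arithmetic, assuming tiktoken for counting (the project has its own token-counting helper, and real counters also add a small per-message overhead for chat framing):

import tiktoken

def count_message_tokens(messages, model="gpt-3.5-turbo"):
    encoding = tiktoken.encoding_for_model(model)
    return sum(len(encoding.encode(m["content"])) for m in messages)

token_limit = 4000                       # model context window
send_token_limit = token_limit - 1000    # reserve 1000 tokens for the response
messages = [{"role": "user", "content": "Summarise the project status."}]
tokens_remaining = send_token_limit - count_message_tokens(messages)
print(tokens_remaining)  # budget left for more history and memory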


@@ -110,7 +110,7 @@ def execute_command(command_name, arguments):
elif command_name == "task_complete":
shutdown()
else:
return f"Unknown command '{command_name}'. Please refer to the 'COMMANDS' list for availabe commands and only respond in the specified JSON format."
return f"Unknown command '{command_name}'. Please refer to the 'COMMANDS' list for available commands and only respond in the specified JSON format."
# All errors, return "Error: + error message"
except Exception as e:
return "Error: " + str(e)


@@ -33,7 +33,7 @@ class Config(metaclass=Singleton):
def __init__(self):
"""Initialize the Config class"""
self.debug = False
self.debug_mode = False
self.continuous_mode = False
self.speak_mode = False
@@ -49,11 +49,15 @@ class Config(metaclass=Singleton):
self.openai_api_base = os.getenv("OPENAI_AZURE_API_BASE")
self.openai_api_version = os.getenv("OPENAI_AZURE_API_VERSION")
self.openai_deployment_id = os.getenv("OPENAI_AZURE_DEPLOYMENT_ID")
self.azure_chat_deployment_id = os.getenv("OPENAI_AZURE_CHAT_DEPLOYMENT_ID")
self.azure_embeddigs_deployment_id = os.getenv("OPENAI_AZURE_EMBEDDINGS_DEPLOYMENT_ID")
openai.api_type = "azure"
openai.api_base = self.openai_api_base
openai.api_version = self.openai_api_version
self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
self.elevenlabs_voice_1_id = os.getenv("ELEVENLABS_VOICE_1_ID")
self.elevenlabs_voice_2_id = os.getenv("ELEVENLABS_VOICE_2_ID")
self.use_mac_os_tts = False
self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS")
@@ -84,7 +88,7 @@ class Config(metaclass=Singleton):
self.redis_password = os.getenv("REDIS_PASSWORD", "")
self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == 'True'
self.memory_index = os.getenv("MEMORY_INDEX", 'auto-gpt')
# Note that indexes must be created on db 0 in redis, this is not configureable.
# Note that indexes must be created on db 0 in redis, this is not configurable.
self.memory_backend = os.getenv("MEMORY_BACKEND", 'local')
# Initialize the OpenAI API client
@@ -98,9 +102,6 @@ class Config(metaclass=Singleton):
"""Set the speak mode value."""
self.speak_mode = value
def set_debug_mode(self, value: bool):
self.debug_mode = value
def set_fast_llm_model(self, value: str):
"""Set the fast LLM model value."""
self.fast_llm_model = value
@@ -125,6 +126,14 @@ class Config(metaclass=Singleton):
"""Set the ElevenLabs API key value."""
self.elevenlabs_api_key = value
def set_elevenlabs_voice_1_id(self, value: str):
"""Set the ElevenLabs Voice 1 ID value."""
self.elevenlabs_voice_1_id = value
def set_elevenlabs_voice_2_id(self, value: str):
"""Set the ElevenLabs Voice 2 ID value."""
self.elevenlabs_voice_2_id = value
def set_google_api_key(self, value: str):
"""Set the Google API key value."""
self.google_api_key = value
@@ -143,4 +152,4 @@ class Config(metaclass=Singleton):
def set_debug_mode(self, value: bool):
"""Set the debug mode value."""
self.debug = value
self.debug_mode = value
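
The net effect of these config hunks: debug is renamed to debug_mode throughout, and the single Azure deployment ID is split into separate chat and embeddings deployments. A trimmed sketch of the Azure branch using the variable names from the hunk; the USE_AZURE gate is an assumption based on the surrounding class, and the azure_embeddigs_deployment_id attribute keeps the file's own spelling.

import os
import openai

class ConfigSketch:
    def __init__(self):
        self.debug_mode = False
        self.use_azure = os.getenv("USE_AZURE") == "True"  # assumed gate
        if self.use_azure:
            self.openai_api_base = os.getenv("OPENAI_AZURE_API_BASE")
            self.openai_api_version = os.getenv("OPENAI_AZURE_API_VERSION")
            self.azure_chat_deployment_id = os.getenv("OPENAI_AZURE_CHAT_DEPLOYMENT_ID")
            self.azure_embeddigs_deployment_id = os.getenv("OPENAI_AZURE_EMBEDDINGS_DEPLOYMENT_ID")
            openai.api_type = "azure"
            openai.api_base = self.openai_api_base
            openai.api_version = self.openai_api_version

    def set_debug_mode(self, value: bool):
        """Set the debug mode value."""
        self.debug_mode = value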


@@ -26,7 +26,7 @@ JSON_SCHEMA = """
"""
def fix_and_parse_json(
json_str: str,
try_to_fix_with_gpt: bool = True
) -> Union[str, Dict[Any, Any]]:
@@ -35,8 +35,8 @@ def fix_and_parse_json(
json_str = json_str.replace('\t', '')
return json.loads(json_str)
except json.JSONDecodeError as _: # noqa: F841
json_str = correct_json(json_str)
try:
json_str = correct_json(json_str)
return json.loads(json_str)
except json.JSONDecodeError as _: # noqa: F841
pass
@@ -53,7 +53,8 @@ def fix_and_parse_json(
last_brace_index = json_str.rindex("}")
json_str = json_str[:last_brace_index+1]
return json.loads(json_str)
except json.JSONDecodeError as e: # noqa: F841
# Can throw a ValueError if there is no "{" or "}" in the json_str
except (json.JSONDecodeError, ValueError) as e: # noqa: F841
if try_to_fix_with_gpt:
print("Warning: Failed to parse AI output, attempting to fix."
"\n If you see this warning frequently, it's likely that"
@@ -67,22 +68,22 @@ def fix_and_parse_json(
else:
# This allows the AI to react to the error message,
# which usually results in it correcting its ways.
print("Failed to fix ai output, telling the AI.")
print("Failed to fix AI output, telling the AI.")
return json_str
else:
raise e
def fix_json(json_str: str, schema: str) -> str:
"""Fix the given JSON string to make it parseable and fully complient with the provided schema."""
# Try to fix the JSON using gpt:
"""Fix the given JSON string to make it parseable and fully compliant with the provided schema."""
# Try to fix the JSON using GPT:
function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
args = [f"'''{json_str}'''", f"'''{schema}'''"]
description_string = "Fixes the provided JSON string to make it parseable"\
" and fully complient with the provided schema.\n If an object or"\
" and fully compliant with the provided schema.\n If an object or"\
" field specified in the schema isn't contained within the correct"\
" JSON, it is ommited.\n This function is brilliant at guessing"\
" JSON, it is omitted.\n This function is brilliant at guessing"\
" when the format is incorrect."
# If it doesn't already start with a "`", add one:
@@ -91,7 +92,7 @@ def fix_json(json_str: str, schema: str) -> str:
result_string = call_ai_function(
function_string, args, description_string, model=cfg.fast_llm_model
)
if cfg.debug:
if cfg.debug_mode:
print("------------ JSON FIX ATTEMPT ---------------")
print(f"Original JSON: {json_str}")
print("-----------")


@@ -88,7 +88,7 @@ def fix_invalid_escape(json_str: str, error_message: str) -> str:
json.loads(json_str)
return json_str
except json.JSONDecodeError as e:
if cfg.debug:
if cfg.debug_mode:
print('json loads error - fix invalid escape', e)
error_message = str(e)
return json_str
@@ -103,12 +103,12 @@ def correct_json(json_str: str) -> str:
"""
try:
if cfg.debug:
if cfg.debug_mode:
print("json", json_str)
json.loads(json_str)
return json_str
except json.JSONDecodeError as e:
if cfg.debug:
if cfg.debug_mode:
print('json loads error', e)
error_message = str(e)
if error_message.startswith('Invalid \\escape'):
@@ -119,7 +119,7 @@ def correct_json(json_str: str) -> str:
json.loads(json_str)
return json_str
except json.JSONDecodeError as e:
if cfg.debug:
if cfg.debug_mode:
print('json loads error - add quotes', e)
error_message = str(e)
if balanced_str := balance_braces(json_str):
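
balance_braces is used above but not shown in this diff. A minimal sketch of the idea, under the assumption that its job is to append any missing closing braces and re-validate (the real helper may differ in detail):

import json

def balance_braces(json_str: str):
    open_count = json_str.count("{")
    close_count = json_str.count("}")
    if open_count > close_count:
        json_str += "}" * (open_count - close_count)
    try:
        json.loads(json_str)
        return json_str
    except json.JSONDecodeError:
        return None

print(balance_braces('{"a": {"b": 1}'))  # {"a": {"b": 1}}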


@@ -9,7 +9,7 @@ def create_chat_completion(messages, model=None, temperature=None, max_tokens=No
"""Create a chat completion using the OpenAI API"""
if cfg.use_azure:
response = openai.ChatCompletion.create(
deployment_id=cfg.openai_deployment_id,
deployment_id=cfg.azure_chat_deployment_id,
model=model,
messages=messages,
temperature=temperature,
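
With this fix, chat completions go to the chat-specific Azure deployment while embeddings (see memory/base below) use their own. A trimmed sketch of the surrounding function against the openai 0.x API the project used at the time; passing cfg as a parameter is a simplification for self-containment, as the real module obtains its Config instance elsewhere.

import openai

def create_chat_completion(messages, cfg, model=None, temperature=None, max_tokens=None):
    """Create a chat completion using the OpenAI API."""
    if cfg.use_azure:
        response = openai.ChatCompletion.create(
            deployment_id=cfg.azure_chat_deployment_id,  # chat-specific deployment
            model=model,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
        )
    else:
        response = openai.ChatCompletion.create(
            model=model,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
        )
    return response.choices[0].message["content"]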


@@ -66,22 +66,54 @@ def print_to_console(
max_typing_speed = max_typing_speed * 0.95
print()
def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
if cfg.speak_mode and cfg.debug_mode:
speak.say_text("I have received an invalid JSON response from the OpenAI API. Trying to fix it now.")
print_to_console("Attempting to fix JSON by finding outermost brackets\n", Fore.RED, "")
try:
# Use regex to search for JSON objects
import regex
json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}")
json_match = json_pattern.search(json_string)
if json_match:
# Extract the valid JSON object from the string
json_string = json_match.group(0)
print_to_console("Apparently json was fixed.", Fore.GREEN,"")
if cfg.speak_mode and cfg.debug_mode:
speak.say_text("Apparently json was fixed.")
else:
raise ValueError("No valid JSON object found")
except (json.JSONDecodeError, ValueError) as e:
if cfg.speak_mode:
speak.say_text("Didn't work. I will have to ignore this response then.")
print_to_console("Error: Invalid JSON, setting it to empty JSON now.\n", Fore.RED, "")
json_string = {}
return json_string
def print_assistant_thoughts(assistant_reply):
"""Prints the assistant's thoughts to the console"""
global ai_name
global cfg
try:
# Parse and print Assistant response
assistant_reply_json = fix_and_parse_json(assistant_reply)
try:
# Parse and print Assistant response
assistant_reply_json = fix_and_parse_json(assistant_reply)
except json.JSONDecodeError as e:
print_to_console("Error: Invalid JSON in assistant thoughts\n", Fore.RED, assistant_reply)
assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply)
assistant_reply_json = fix_and_parse_json(assistant_reply_json)
# Check if assistant_reply_json is a string and attempt to parse it into a JSON object
if isinstance(assistant_reply_json, str):
try:
assistant_reply_json = json.loads(assistant_reply_json)
except json.JSONDecodeError as e:
print_to_console("Error: Invalid JSON\n", Fore.RED, assistant_reply)
assistant_reply_json = {}
print_to_console("Error: Invalid JSON in assistant thoughts\n", Fore.RED, assistant_reply)
assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply_json)
assistant_thoughts_reasoning = None
assistant_thoughts_plan = None
@@ -117,9 +149,12 @@ def print_assistant_thoughts(assistant_reply):
# Speak the assistant's thoughts
if cfg.speak_mode and assistant_thoughts_speak:
speak.say_text(assistant_thoughts_speak)
except json.decoder.JSONDecodeError:
return assistant_reply_json
except json.decoder.JSONDecodeError as e:
print_to_console("Error: Invalid JSON\n", Fore.RED, assistant_reply)
if cfg.speak_mode:
speak.say_text("I have received an invalid JSON response from the OpenAI API. I cannot ignore this response.")
# All other errors, return "Error: + error message"
except Exception as e:
@@ -170,7 +205,7 @@ def load_variables(config_file="config.yaml"):
documents = yaml.dump(config, file)
prompt = data.load_prompt()
prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""
prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as a LLM and pursue simple strategies with no legal complications."""
# Construct full prompt
full_prompt = f"You are {ai_name}, {ai_role}\n{prompt_start}\n\nGOALS:\n\n"
@@ -266,6 +301,7 @@ def prompt_user():
def parse_arguments():
"""Parses the arguments passed to the script"""
global cfg
cfg.set_debug_mode(False)
cfg.set_continuous_mode(False)
cfg.set_speak_mode(False)
@@ -274,6 +310,7 @@ def parse_arguments():
parser.add_argument('--speak', action='store_true', help='Enable Speak Mode')
parser.add_argument('--debug', action='store_true', help='Enable Debug Mode')
parser.add_argument('--gpt3only', action='store_true', help='Enable GPT3.5 Only Mode')
parser.add_argument('--gpt4only', action='store_true', help='Enable GPT4 Only Mode')
args = parser.parse_args()
if args.continuous:
@@ -288,13 +325,13 @@ def parse_arguments():
print_to_console("Speak Mode: ", Fore.GREEN, "ENABLED")
cfg.set_speak_mode(True)
if args.debug:
print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED")
cfg.set_debug_mode(True)
if args.gpt3only:
print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
cfg.set_smart_llm_model(cfg.fast_llm_model)
if args.gpt4only:
print_to_console("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
cfg.set_fast_llm_model(cfg.smart_llm_model)
if args.debug:
print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED")
@@ -337,7 +374,9 @@ while True:
# Get command name and arguments
try:
command_name, arguments = cmd.get_command(assistant_reply)
command_name, arguments = cmd.get_command(attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply))
if cfg.speak_mode:
speak.say_text(f"I want to execute {command_name}")
except Exception as e:
print_to_console("Error: \n", Fore.RED, str(e))
@@ -390,7 +429,7 @@ while True:
f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
# Execute command
if command_name.lower().startswith( "error" ):
if command_name is not None and command_name.lower().startswith( "error" ):
result = f"Command {command_name} threw the following error: " + arguments
elif command_name == "human_feedback":
result = f"Human feedback: {user_input}"
@@ -415,4 +454,3 @@ while True:
chat.create_chat_message(
"system", "Unable to execute command"))
print_to_console("SYSTEM: ", Fore.YELLOW, "Unable to execute command")


@@ -1,12 +1,16 @@
"""Base class for memory providers."""
import abc
from config import AbstractSingleton
from config import AbstractSingleton, Config
import openai
cfg = Config()
def get_ada_embedding(text):
text = text.replace("\n", " ")
return openai.Embedding.create(input=[text], model="text-embedding-ada-002")["data"][0]["embedding"]
if cfg.use_azure:
return openai.Embedding.create(input=[text], engine=cfg.azure_embeddigs_deployment_id, model="text-embedding-ada-002")["data"][0]["embedding"]
else:
return openai.Embedding.create(input=[text], model="text-embedding-ada-002")["data"][0]["embedding"]
class MemoryProviderSingleton(AbstractSingleton):
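
get_ada_embedding returns a plain list of floats (1536 dimensions for text-embedding-ada-002), and relevance lookups like permanent_memory.get_relevant broadly amount to vector-similarity scoring over those lists. A tiny illustration of that scoring with numpy, using shortened vectors for readability:

import numpy as np

a = np.array([0.1, 0.3, 0.5])  # embedding of a stored memory (shortened)
b = np.array([0.2, 0.1, 0.4])  # embedding of the query (shortened)
cosine = (a @ b) / (np.linalg.norm(a) * np.linalg.norm(b))
print(cosine)  # higher means more relevant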


@@ -54,8 +54,8 @@ class LocalCache(MemoryProviderSingleton):
vector = vector[np.newaxis, :]
self.data.embeddings = np.concatenate(
[
vector,
self.data.embeddings,
vector,
],
axis=0,
)
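
Why this reorder matters: row i of the embeddings matrix has to line up with the i-th stored text, and since new texts are appended to the end of the parallel list, the new vector must be concatenated after the existing rows; prepending it (the old order) shifted every prior row off by one. A small reproduction of the corrected behaviour:

import numpy as np

embeddings = np.zeros((0, 3))
for vec in ([1.0, 0.0, 0.0], [0.0, 1.0, 0.0]):
    vector = np.array(vec)[np.newaxis, :]
    embeddings = np.concatenate([embeddings, vector], axis=0)
print(embeddings)  # row 0 = first text's vector, row 1 = second's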


@@ -7,9 +7,21 @@ import gtts
import threading
from threading import Lock, Semaphore
# Default voice IDs
default_voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"]
# TODO: Nicer names for these ids
voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"]
# Retrieve custom voice IDs from the Config class
custom_voice_1 = cfg.elevenlabs_voice_1_id
custom_voice_2 = cfg.elevenlabs_voice_2_id
# Placeholder values that should be treated as empty
placeholders = {"your-voice-id"}
# Use custom voice IDs if provided and not placeholders, otherwise use default voice IDs
voices = [
custom_voice_1 if custom_voice_1 and custom_voice_1 not in placeholders else default_voices[0],
custom_voice_2 if custom_voice_2 and custom_voice_2 not in placeholders else default_voices[1]
]
tts_headers = {
"Content-Type": "application/json",
@@ -46,15 +58,21 @@ def gtts_speech(text):
playsound("speech.mp3", True)
os.remove("speech.mp3")
def macos_tts_speech(text):
os.system(f'say "{text}"')
def macos_tts_speech(text, voice_index=0):
if voice_index == 0:
os.system(f'say "{text}"')
else:
if voice_index == 1:
os.system(f'say -v "Ava (Premium)" "{text}"')
else:
os.system(f'say -v Samantha "{text}"')
def say_text(text, voice_index=0):
def speak():
if not cfg.elevenlabs_api_key:
if cfg.use_mac_os_tts == 'True':
macos_tts_speech(text)
macos_tts_speech(text, voice_index)
else:
gtts_speech(text)
else:
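
The voice-selection logic above, condensed: a configured ElevenLabs voice ID is used only if it is set and is not still the template's placeholder value; otherwise the bundled default for that slot applies. A standalone sketch:

default_voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"]
placeholders = {"your-voice-id"}

def pick_voice(custom_id, slot):
    if custom_id and custom_id not in placeholders:
        return custom_id
    return default_voices[slot]

print(pick_voice(None, 0))             # default voice 1
print(pick_voice("your-voice-id", 1))  # default voice 2 (placeholder ignored)
print(pick_voice("abc123", 0))         # abc123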


@@ -20,7 +20,7 @@ class Spinner:
sys.stdout.write(next(self.spinner) + " " + self.message + "\r")
sys.stdout.flush()
time.sleep(self.delay)
sys.stdout.write('\b' * (len(self.message) + 2))
sys.stdout.write('\r' + ' ' * (len(self.message) + 2) + '\r')
def __enter__(self):
"""Start the spinner"""