Merge branch 'master' into prompt-generator

Alrik Olson
2023-04-13 10:54:39 -07:00
25 changed files with 173 additions and 132 deletions

View File

@@ -32,7 +32,7 @@ jobs:
       - name: Lint with flake8
         continue-on-error: false
-        run: flake8 scripts/ tests/ --select E303,W293,W291,W292,E305
+        run: flake8 scripts/ tests/ --select E303,W293,W291,W292,E305,E231,E302
       - name: Run unittest tests with coverage
         run: |
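
Note: the two rules added to the `--select` list are E231 (missing whitespace after `,`, `;`, or `:`) and E302 (expected two blank lines before a top-level definition); the blank-line additions scattered through the file diffs below are E302 fixes. An illustrative snippet that trips both checks:

```python
def add(a,b):  # E231: missing whitespace after ','
    return a + b
def sub(a, b):  # E302: expected 2 blank lines, found 0
    return a - b
```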

View File

@@ -77,32 +77,32 @@ Optional:
 To install Auto-GPT, follow these steps:
-0. Make sure you have all the **requirements** above, if not, install/get them.
+1. Make sure you have all the **requirements** above, if not, install/get them.
 _The following commands should be executed in a CMD, Bash or Powershell window. To do this, go to a folder on your computer, click in the folder path at the top and type CMD, then press enter._
-1. Clone the repository:
+2. Clone the repository:
 For this step you need Git installed, but you can just download the zip file instead by clicking the button at the top of this page ☝️
 ```
 git clone https://github.com/Torantulino/Auto-GPT.git
 ```
-2. Navigate to the project directory:
+3. Navigate to the project directory:
 _(Type this into your CMD window, you're aiming to navigate the CMD window to the repository you just downloaded)_
 ```
 cd 'Auto-GPT'
 ```
-3. Install the required dependencies:
+4. Install the required dependencies:
 _(Again, type this into your CMD window)_
 ```
 pip install -r requirements.txt
 ```
-4. Rename `.env.template` to `.env` and fill in your `OPENAI_API_KEY`. If you plan to use Speech Mode, fill in your `ELEVEN_LABS_API_KEY` as well.
+5. Rename `.env.template` to `.env` and fill in your `OPENAI_API_KEY`. If you plan to use Speech Mode, fill in your `ELEVEN_LABS_API_KEY` as well.
 - Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys.
 - Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website.
 - If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and then:
@@ -348,11 +348,13 @@ coverage run -m unittest discover tests
 ## Run linter
-This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. To run the linter, run the following command:
+This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. We currently use the following rules: `E303,W293,W291,W292,E305,E231,E302`. See the [flake8 rules](https://www.flake8rules.com/) for more information.
+
+To run the linter, run the following command:
 ```
 flake8 scripts/ tests/
 # Or, if you want to run flake8 with the same configuration as the CI:
-flake8 scripts/ tests/ --select E303,W293,W291,W292,E305
+flake8 scripts/ tests/ --select E303,W293,W291,W292,E305,E231,E302
 ```

View File

@@ -1,3 +1,4 @@
+azure_api_type: azure_ad
 azure_api_base: your-base-url-for-azure
 azure_api_version: api-version-for-azure
 azure_model_map:
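
The new `azure_api_type` key appears to select the openai client's authentication mode for Azure: `azure_ad` requests Azure Active Directory token auth instead of the default API-key mode (`azure`). As the config.py diff below shows, the value flows through to the client's module-level setting, roughly:

```python
import openai

# Illustrative: with azure_api_type: azure_ad in azure.yaml and no
# OPENAI_API_TYPE override, config.py effectively ends up doing this.
openai.api_type = "azure_ad"
```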

View File

@@ -6,6 +6,7 @@ agents = {} # key, (task, full_message_history, model)
 # Create new GPT agent
 # TODO: Centralise use of create_chat_completion() to globally enforce token limit
+
 def create_agent(task, prompt, model):
     """Create a new agent and return its key"""
     global next_key

View File

@@ -2,6 +2,7 @@ import yaml
 import os
 from prompt import get_prompt
+
 class AIConfig:
     """
     A class object that contains the configuration information for the AI

View File

@@ -3,6 +3,8 @@ from config import Config
 cfg = Config()
 from llm_utils import create_chat_completion
+
+
 # This is a magic function that can do anything with no-code. See
 # https://github.com/Torantulino/AI-Functions for more info.
 def call_ai_function(function, args, description, model=None):

View File

@@ -9,6 +9,7 @@ import logging
 cfg = Config()
+
 def create_chat_message(role, content):
     """
     Create a chat message with the given role and content.

View File

@@ -24,6 +24,7 @@ def is_valid_int(value):
     except ValueError:
         return False
+
 def get_command(response):
     """Parse the response and return the command name and arguments"""
     try:
@@ -135,6 +136,7 @@ def google_search(query, num_results=8):
     return json.dumps(search_results, ensure_ascii=False, indent=4)
+
 def google_official_search(query, num_results=8):
     """Return the results of a google search using the official Google API"""
     from googleapiclient.discovery import build
@@ -171,6 +173,7 @@ def google_official_search(query, num_results=8):
     # Return the list of search result URLs
     return search_results_links
+
 def browse_website(url, question):
     """Browse a website and return the summary and links"""
     summary = get_text_summary(url, question)

View File

@@ -44,14 +44,13 @@ class Config(metaclass=Singleton):
         self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
         self.openai_api_key = os.getenv("OPENAI_API_KEY")
-        self.temperature = int(os.getenv("TEMPERATURE", "1"))
+        self.temperature = float(os.getenv("TEMPERATURE", "1"))
-        self.use_azure = False
         self.use_azure = os.getenv("USE_AZURE") == 'True'
         self.execute_local_commands = os.getenv('EXECUTE_LOCAL_COMMANDS', 'False') == 'True'
         if self.use_azure:
             self.load_azure_config()
-            openai.api_type = "azure"
+            openai.api_type = self.openai_api_type
             openai.api_base = self.openai_api_base
             openai.api_version = self.openai_api_version
@@ -73,7 +72,7 @@ class Config(metaclass=Singleton):
         # User agent headers to use when browsing web
         # Some websites might just completely deny request with an error code if no user agent was found.
-        self.user_agent_header = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"}
+        self.user_agent_header = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"}
         self.redis_host = os.getenv("REDIS_HOST", "localhost")
         self.redis_port = os.getenv("REDIS_PORT", "6379")
         self.redis_password = os.getenv("REDIS_PASSWORD", "")
@@ -121,8 +120,9 @@ class Config(metaclass=Singleton):
             config_params = yaml.load(file, Loader=yaml.FullLoader)
         except FileNotFoundError:
             config_params = {}
-        self.openai_api_base = config_params.get("azure_api_base", "")
-        self.openai_api_version = config_params.get("azure_api_version", "")
+        self.openai_api_type = os.getenv("OPENAI_API_TYPE", config_params.get("azure_api_type", "azure"))
+        self.openai_api_base = os.getenv("OPENAI_AZURE_API_BASE", config_params.get("azure_api_base", ""))
+        self.openai_api_version = os.getenv("OPENAI_AZURE_API_VERSION", config_params.get("azure_api_version", ""))
         self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", [])
 
     def set_continuous_mode(self, value: bool):
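
Two details worth noting here. First, `TEMPERATURE` is now parsed with `float()`; under `int()`, a setting such as `0.5` would crash, since `int("0.5")` raises `ValueError`. Second, each Azure setting now follows an override chain: environment variable first, then the yaml value, then a hardcoded default. A minimal sketch of that precedence, with illustrative values:

```python
import os

# e.g. what yaml.load() returned from azure.yaml; the value here is illustrative
config_params = {"azure_api_type": "azure_ad"}

# env var wins, then the yaml entry, then the default
api_type = os.getenv("OPENAI_API_TYPE", config_params.get("azure_api_type", "azure"))
print(api_type)  # "azure_ad" unless OPENAI_API_TYPE is set in the environment
```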

View File

@@ -67,6 +67,7 @@ def execute_python_file(file):
     except Exception as e:
         return f"Error: {str(e)}"
+
 def execute_shell(command_line):
     current_dir = os.getcwd()

View File

@@ -38,7 +38,7 @@ def write_to_file(filename, text):
         directory = os.path.dirname(filepath)
         if not os.path.exists(directory):
             os.makedirs(directory)
-        with open(filepath, "w") as f:
+        with open(filepath, "w", encoding='utf-8') as f:
             f.write(text)
         return "File written to successfully."
     except Exception as e:
@@ -65,6 +65,7 @@ def delete_file(filename):
     except Exception as e:
         return "Error: " + str(e)
+
 def search_files(directory):
     found_files = []
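
The explicit `encoding='utf-8'` matters because `open()` otherwise falls back to the platform's default encoding (commonly cp1252 on Windows), so agent output containing characters outside that codepage would raise `UnicodeEncodeError`. A minimal sketch, using an illustrative filename:

```python
text = "plan → execute"  # contains a non-ASCII character
# Without encoding="utf-8", this write can raise UnicodeEncodeError on
# systems whose default encoding cannot represent the arrow.
with open("demo.txt", "w", encoding="utf-8") as f:
    f.write(text)
```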

View File

@@ -11,6 +11,7 @@ cfg = Config()
 working_directory = "auto_gpt_workspace"
+
 def generate_image(prompt):
     filename = str(uuid.uuid4()) + ".jpg"

View File

@@ -4,6 +4,7 @@ cfg = Config()
 openai.api_key = cfg.openai_api_key
+
 # Overly simple abstraction until we create something better
 def create_chat_completion(messages, model=None, temperature=cfg.temperature, max_tokens=None)->str:
     """Create a chat completion using the OpenAI API"""

View File

@@ -157,6 +157,7 @@ class TypingConsoleHandler(logging.StreamHandler):
         except Exception:
             self.handleError(record)
+
 class ConsoleHandler(logging.StreamHandler):
     def emit(self, record):
         msg = self.format(record)
@@ -166,11 +167,11 @@ class ConsoleHandler(logging.StreamHandler):
             self.handleError(record)
 
-'''
-Allows to handle custom placeholders 'title_color' and 'message_no_color'.
-To use this formatter, make sure to pass 'color', 'title' as log extras.
-'''
 class AutoGptFormatter(logging.Formatter):
+    """
+    Allows to handle custom placeholders 'title_color' and 'message_no_color'.
+    To use this formatter, make sure to pass 'color', 'title' as log extras.
+    """
     def format(self, record: LogRecord) -> str:
         if (hasattr(record, 'color')):
             record.title_color = getattr(record, 'color') + getattr(record, 'title') + " " + Style.RESET_ALL
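
Moving the triple-quoted comment from above the class into the class body turns it into a proper docstring, so the text is attached to the class at runtime rather than being a discarded module-level string expression. A quick sketch of the difference:

```python
import logging

class AutoGptFormatter(logging.Formatter):
    """Handles custom placeholders 'title_color' and 'message_no_color'."""

# As a docstring, the text is introspectable:
print(AutoGptFormatter.__doc__)  # prints the line above
```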

View File

@@ -20,16 +20,18 @@ from prompt import get_prompt
 cfg = Config()
+
 def check_openai_api_key():
     """Check if the OpenAI API key is set in config.py or as an environment variable."""
     if not cfg.openai_api_key:
         print(
             Fore.RED +
-            "Please set your OpenAI API key in config.py or as an environment variable."
+            "Please set your OpenAI API key in .env or as an environment variable."
         )
         print("You can get your key from https://beta.openai.com/account/api-keys")
         exit(1)
+
 def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
     if cfg.speak_mode and cfg.debug_mode:
         speak.say_text("I have received an invalid JSON response from the OpenAI API. Trying to fix it now.")
@@ -58,6 +60,7 @@ def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
     return json_string
+
 def print_assistant_thoughts(assistant_reply):
     """Prints the assistant's thoughts to the console"""
     global ai_name
@@ -262,6 +265,7 @@ def prompt_user():
     config = AIConfig(ai_name, ai_role, ai_goals)
     return config
+
 def parse_arguments():
     """Parses the arguments passed to the script"""
     global cfg
@@ -316,118 +320,123 @@ def parse_arguments():
         cfg.memory_backend = chosen
 
-# TODO: fill in llm values here
-check_openai_api_key()
-parse_arguments()
-logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO)
-ai_name = ""
-prompt = construct_prompt()
-# print(prompt)
-# Initialize variables
-full_message_history = []
-result = None
-next_action_count = 0
-# Make a constant:
-user_input = "Determine which next command to use, and respond using the format specified above:"
-
-# Initialize memory and make sure it is empty.
-# this is particularly important for indexing and referencing pinecone memory
-memory = get_memory(cfg, init=True)
-print('Using memory of type: ' + memory.__class__.__name__)
-
-# Interaction Loop
-while True:
-    # Send message to AI, get response
-    with Spinner("Thinking... "):
-        assistant_reply = chat.chat_with_ai(
-            prompt,
-            user_input,
-            full_message_history,
-            memory,
-            cfg.fast_token_limit) # TODO: This hardcodes the model to use GPT3.5. Make this an argument
-
-    # Print Assistant thoughts
-    print_assistant_thoughts(assistant_reply)
-
-    # Get command name and arguments
-    try:
-        command_name, arguments = cmd.get_command(attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply))
-        if cfg.speak_mode:
-            speak.say_text(f"I want to execute {command_name}")
-    except Exception as e:
-        logger.error("Error: \n", str(e))
-
-    if not cfg.continuous_mode and next_action_count == 0:
-        ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
-        # Get key press: Prompt the user to press enter to continue or escape
-        # to exit
-        user_input = ""
-        logger.typewriter_log(
-            "NEXT ACTION: ",
-            Fore.CYAN,
-            f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
-        print(
-            f"Enter 'y' to authorise command, 'y -N' to run N continuous commands, 'n' to exit program, or enter feedback for {ai_name}...",
-            flush=True)
-        while True:
-            console_input = utils.clean_input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)
-            if console_input.lower().rstrip() == "y":
-                user_input = "GENERATE NEXT COMMAND JSON"
-                break
-            elif console_input.lower().startswith("y -"):
-                try:
-                    next_action_count = abs(int(console_input.split(" ")[1]))
-                    user_input = "GENERATE NEXT COMMAND JSON"
-                except ValueError:
-                    print("Invalid input format. Please enter 'y -n' where n is the number of continuous tasks.")
-                    continue
-                break
-            elif console_input.lower() == "n":
-                user_input = "EXIT"
-                break
-            else:
-                user_input = console_input
-                command_name = "human_feedback"
-                break
-
-        if user_input == "GENERATE NEXT COMMAND JSON":
-            logger.typewriter_log(
-                "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
-                Fore.MAGENTA,
-                "")
-        elif user_input == "EXIT":
-            print("Exiting...", flush=True)
-            break
-    else:
-        # Print command
-        logger.typewriter_log(
-            "NEXT ACTION: ",
-            Fore.CYAN,
-            f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
-
-    # Execute command
-    if command_name is not None and command_name.lower().startswith( "error" ):
-        result = f"Command {command_name} threw the following error: " + arguments
-    elif command_name == "human_feedback":
-        result = f"Human feedback: {user_input}"
-    else:
-        result = f"Command {command_name} returned: {cmd.execute_command(command_name, arguments)}"
-        if next_action_count > 0:
-            next_action_count -= 1
-
-    memory_to_add = f"Assistant Reply: {assistant_reply} " \
-        f"\nResult: {result} " \
-        f"\nHuman Feedback: {user_input} "
-
-    memory.add(memory_to_add)
-
-    # Check if there's a result from the command append it to the message
-    # history
-    if result is not None:
-        full_message_history.append(chat.create_chat_message("system", result))
-        logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
-    else:
-        full_message_history.append(
-            chat.create_chat_message(
-                "system", "Unable to execute command"))
-        logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command")
+def main():
+    global ai_name, memory
+    # TODO: fill in llm values here
+    check_openai_api_key()
+    parse_arguments()
+    logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO)
+    ai_name = ""
+    prompt = construct_prompt()
+    # print(prompt)
+    # Initialize variables
+    full_message_history = []
+    result = None
+    next_action_count = 0
+    # Make a constant:
+    user_input = "Determine which next command to use, and respond using the format specified above:"
+
+    # Initialize memory and make sure it is empty.
+    # this is particularly important for indexing and referencing pinecone memory
+    memory = get_memory(cfg, init=True)
+    print('Using memory of type: ' + memory.__class__.__name__)
+
+    # Interaction Loop
+    while True:
+        # Send message to AI, get response
+        with Spinner("Thinking... "):
+            assistant_reply = chat.chat_with_ai(
+                prompt,
+                user_input,
+                full_message_history,
+                memory,
+                cfg.fast_token_limit) # TODO: This hardcodes the model to use GPT3.5. Make this an argument
+
+        # Print Assistant thoughts
+        print_assistant_thoughts(assistant_reply)
+
+        # Get command name and arguments
+        try:
+            command_name, arguments = cmd.get_command(
+                attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply))
+            if cfg.speak_mode:
+                speak.say_text(f"I want to execute {command_name}")
+        except Exception as e:
+            logger.error("Error: \n", str(e))
+
+        if not cfg.continuous_mode and next_action_count == 0:
+            ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
+            # Get key press: Prompt the user to press enter to continue or escape
+            # to exit
+            user_input = ""
+            logger.typewriter_log(
+                "NEXT ACTION: ",
+                Fore.CYAN,
+                f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
+            print(
+                f"Enter 'y' to authorise command, 'y -N' to run N continuous commands, 'n' to exit program, or enter feedback for {ai_name}...",
+                flush=True)
+            while True:
+                console_input = utils.clean_input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)
+                if console_input.lower().rstrip() == "y":
+                    user_input = "GENERATE NEXT COMMAND JSON"
+                    break
+                elif console_input.lower().startswith("y -"):
+                    try:
+                        next_action_count = abs(int(console_input.split(" ")[1]))
+                        user_input = "GENERATE NEXT COMMAND JSON"
+                    except ValueError:
+                        print("Invalid input format. Please enter 'y -n' where n is the number of continuous tasks.")
+                        continue
+                    break
+                elif console_input.lower() == "n":
+                    user_input = "EXIT"
+                    break
+                else:
+                    user_input = console_input
+                    command_name = "human_feedback"
+                    break
+
+            if user_input == "GENERATE NEXT COMMAND JSON":
+                logger.typewriter_log(
+                    "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
+                    Fore.MAGENTA,
+                    "")
+            elif user_input == "EXIT":
+                print("Exiting...", flush=True)
+                break
+        else:
+            # Print command
+            logger.typewriter_log(
+                "NEXT ACTION: ",
+                Fore.CYAN,
+                f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
+
+        # Execute command
+        if command_name is not None and command_name.lower().startswith("error"):
+            result = f"Command {command_name} threw the following error: " + arguments
+        elif command_name == "human_feedback":
+            result = f"Human feedback: {user_input}"
+        else:
+            result = f"Command {command_name} returned: {cmd.execute_command(command_name, arguments)}"
+            if next_action_count > 0:
+                next_action_count -= 1
+
+        memory_to_add = f"Assistant Reply: {assistant_reply} " \
+            f"\nResult: {result} " \
+            f"\nHuman Feedback: {user_input} "
+
+        memory.add(memory_to_add)
+
+        # Check if there's a result from the command append it to the message
+        # history
+        if result is not None:
+            full_message_history.append(chat.create_chat_message("system", result))
+            logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
+        else:
+            full_message_history.append(
+                chat.create_chat_message(
+                    "system", "Unable to execute command"))
+            logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command")
+
+
+if __name__ == "__main__":
+    main()
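
Wrapping the former module-level script body in `main()` behind an import guard is the standard pattern: importing the module (for example, from a test) no longer starts the interaction loop as a side effect. A minimal sketch of the behavior:

```python
def main():
    print("interaction loop starts")  # stand-in for the real loop

if __name__ == "__main__":
    main()  # runs when executed directly; skipped when the module is imported
```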

View File

@@ -19,6 +19,7 @@ except ImportError:
     print("Pinecone not installed. Skipping import.")
     PineconeMemory = None
+
 def get_memory(cfg, init=False):
     memory = None
     if cfg.memory_backend == "pinecone":
@@ -44,6 +45,7 @@ def get_memory(cfg, init=False):
         memory.clear()
     return memory
+
 def get_supported_memory_backends():
     return supported_memory

View File

@@ -2,6 +2,7 @@ from typing import Optional, List, Any
 from memory.base import MemoryProviderSingleton
+
 class NoMemory(MemoryProviderSingleton):
     def __init__(self, cfg):
         """

View File

@@ -5,6 +5,7 @@ from memory.base import MemoryProviderSingleton, get_ada_embedding
 from logger import logger
 from colorama import Fore, Style
+
 class PineconeMemory(MemoryProviderSingleton):
     def __init__(self, cfg):
         pinecone_api_key = cfg.pinecone_api_key

View File

@@ -31,6 +31,7 @@ tts_headers = {
 mutex_lock = Lock() # Ensure only one sound is played at a time
 queue_semaphore = Semaphore(1) # The amount of sounds to queue before blocking the main thread
+
 def eleven_labs_speech(text, voice_index=0):
     """Speak text using elevenlabs.io's API"""
     tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format(
@@ -51,6 +52,7 @@ def eleven_labs_speech(text, voice_index=0):
         print("Response content:", response.content)
         return False
+
 def gtts_speech(text):
     tts = gtts.gTTS(text)
     with mutex_lock:
@@ -58,6 +60,7 @@ def gtts_speech(text):
         playsound("speech.mp3", True)
         os.remove("speech.mp3")
+
 def macos_tts_speech(text, voice_index=0):
     if voice_index == 0:
         os.system(f'say "{text}"')
@@ -67,6 +70,7 @@ def macos_tts_speech(text, voice_index=0):
     else:
         os.system(f'say -v Samantha "{text}"')
+
 def say_text(text, voice_index=0):
     def speak():

View File

@@ -1,6 +1,7 @@
 import tiktoken
 from typing import List, Dict
+
 def count_message_tokens(messages : List[Dict[str, str]], model : str = "gpt-3.5-turbo-0301") -> int:
     """
     Returns the number of tokens used by a list of messages.
@@ -41,6 +42,7 @@ def count_message_tokens(messages : List[Dict[str, str]], model : str = "gpt-3.5
     num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
     return num_tokens
+
 def count_string_tokens(string: str, model_name: str) -> int:
     """
     Returns the number of tokens in a text string.
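
Both helpers rely on tiktoken's model-aware encodings; a rough usage sketch mirroring what `count_string_tokens` does (the model name and text are illustrative):

```python
import tiktoken

encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
tokens = encoding.encode("Determine which next command to use.")
print(len(tokens))  # the number of tokens the model would see for this string
```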

View File

@@ -8,6 +8,7 @@ sys.path.append(str(Path(__file__).resolve().parent.parent.parent / 'scripts'))
 from config import Config
 from memory.local import LocalCache
+
 class TestLocalCache(unittest.TestCase):
     def random_string(self, length):

View File

@@ -4,6 +4,7 @@ import sys
 sys.path.append(os.path.abspath('../scripts'))
 from memory.local import LocalCache
+
 def MockConfig():
     return type('MockConfig', (object,), {
         'debug_mode': False,
@@ -12,6 +13,7 @@ def MockConfig():
         'memory_index': 'auto-gpt',
     })
+
 class TestLocalCache(unittest.TestCase):
     def setUp(self):

View File

@@ -1,6 +1,7 @@
 import unittest
 from scripts.config import Config
+
 class TestConfig(unittest.TestCase):
     def test_singleton(self):

View File

@@ -3,6 +3,7 @@ import tests.context
 from scripts.json_parser import fix_and_parse_json
+
 class TestParseJson(unittest.TestCase):
     def test_valid_json(self):
         # Test that a valid JSON string is parsed correctly
@@ -52,7 +53,7 @@ class TestParseJson(unittest.TestCase):
         good_obj = {
             "command": {
                 "name": "browse_website",
-                "args":{
+                "args": {
                     "url": "https://github.com/Torantulino/Auto-GPT"
                 }
             },
@@ -91,7 +92,7 @@ class TestParseJson(unittest.TestCase):
         good_obj = {
             "command": {
                 "name": "browse_website",
-                "args":{
+                "args": {
                     "url": "https://github.com/Torantulino/Auto-GPT"
                 }
             },

View File

@@ -5,6 +5,7 @@ import sys
 sys.path.append(os.path.abspath('../scripts'))
 from json_parser import fix_and_parse_json
+
 class TestParseJson(unittest.TestCase):
     def test_valid_json(self):
         # Test that a valid JSON string is parsed correctly
@@ -52,7 +53,7 @@ class TestParseJson(unittest.TestCase):
         good_obj = {
             "command": {
                 "name": "browse_website",
-                "args":{
+                "args": {
                     "url": "https://github.com/Torantulino/Auto-GPT"
                 }
             },
@@ -91,7 +92,7 @@ class TestParseJson(unittest.TestCase):
         good_obj = {
             "command": {
                 "name": "browse_website",
-                "args":{
+                "args": {
                     "url": "https://github.com/Torantulino/Auto-GPT"
                 }
             },