Merge branch 'Torantulino:master' into kinance-resolve-debug-config-conflict

Authored by kinance on 2023-04-11 08:49:43 +09:00; committed by GitHub.
11 changed files with 77 additions and 42 deletions

View File

@@ -34,7 +34,7 @@ class AIConfig:
@classmethod
def load(cls: object, config_file: str=SAVE_FILE) -> object:
"""
Returns class object with parameters (ai_name, ai_role, ai_goals) loaded from yaml file if yaml file exists,
else returns class with no parameters.
Parameters:
@@ -42,7 +42,7 @@ class AIConfig:
config_file (int): The path to the config yaml file. DEFAULT: "../ai_settings.yaml"
Returns:
cls (object): A instance of given cls object
"""
try:
@@ -61,11 +61,11 @@ class AIConfig:
"""
Saves the class parameters to the specified file yaml file path as a yaml file.
Parameters:
config_file(str): The path to the config yaml file. DEFAULT: "../ai_settings.yaml"
Returns:
None
"""
config = {"ai_name": self.ai_name, "ai_role": self.ai_role, "ai_goals": self.ai_goals}
@@ -76,7 +76,7 @@ class AIConfig:
"""
Returns a prompt to the user with the class information in an organized fashion.
Parameters:
None
Returns:
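
For orientation, a minimal usage sketch of the load/save round trip these docstrings describe; the file path and attribute names come from the hunk above, the module name and everything else are assumptions:

    from ai_config import AIConfig   # module name assumed from the class shown above

    config = AIConfig.load("../ai_settings.yaml")   # falls back to an empty config if the yaml file is missing
    config.save("../ai_settings.yaml")              # writes ai_name, ai_role, ai_goals back out as yaml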

View File

@@ -27,7 +27,7 @@ def evaluate_code(code: str) -> List[str]:
def improve_code(suggestions: List[str], code: str) -> str:
"""
A function that takes in code and suggestions and returns a response from create chat completion api call.
Parameters:
suggestions (List): A list of suggestions around what needs to be improved.
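
A hedged sketch of calling improve_code as its signature and docstring describe; the module name and the example inputs are assumptions:

    from ai_functions import improve_code   # module name assumed

    suggestions = ["Add type hints", "Validate the inputs"]   # invented suggestions
    code = "def add(a, b):\n    return a + b"
    improved = improve_code(suggestions, code)   # returns the create_chat_completion response as a string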

View File

@@ -5,12 +5,21 @@ from llm_utils import create_chat_completion
cfg = Config()
# Define and check for local file address prefixes
def check_local_file_access(url):
local_prefixes = ['file:///', 'file://localhost', 'http://localhost', 'https://localhost']
return any(url.startswith(prefix) for prefix in local_prefixes)
def scrape_text(url):
"""Scrape text from a webpage"""
# Most basic check if the URL is valid:
if not url.startswith('http'):
return "Error: Invalid URL"
# Restrict access to local files
if check_local_file_access(url):
return "Error: Access to local files is restricted"
try:
response = requests.get(url, headers=cfg.user_agent_header)
except requests.exceptions.RequestException as e:
@@ -126,4 +135,4 @@ def summarize_text(text, question):
max_tokens=300,
)
return final_summary
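
The new check_local_file_access guard can be exercised on its own; a quick sketch of the behaviour it encodes (module name and URLs are illustrative):

    from browse import check_local_file_access   # module name assumed

    assert check_local_file_access("file:///etc/passwd")            # local file scheme is refused by scrape_text
    assert check_local_file_access("http://localhost:8000/admin")   # localhost is refused as well
    assert not check_local_file_access("https://example.com")       # ordinary remote URLs still pass through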

View File

@@ -42,9 +42,6 @@ def get_command(response):
# Use an empty dictionary if 'args' field is not present in 'command' object
arguments = command.get("args", {})
- if not arguments:
- arguments = {}
return command_name, arguments
except json.decoder.JSONDecodeError:
return "Error:", "Invalid JSON"

View File

@@ -37,7 +37,7 @@ class Config(metaclass=Singleton):
self.continuous_mode = False
self.speak_mode = False
self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
@@ -46,15 +46,18 @@ class Config(metaclass=Singleton):
self.use_azure = False
self.use_azure = os.getenv("USE_AZURE") == 'True'
if self.use_azure:
- self.openai_api_base = os.getenv("OPENAI_API_BASE")
- self.openai_api_version = os.getenv("OPENAI_API_VERSION")
- self.openai_deployment_id = os.getenv("OPENAI_DEPLOYMENT_ID")
+ self.openai_api_base = os.getenv("OPENAI_AZURE_API_BASE")
+ self.openai_api_version = os.getenv("OPENAI_AZURE_API_VERSION")
+ self.openai_deployment_id = os.getenv("OPENAI_AZURE_DEPLOYMENT_ID")
openai.api_type = "azure"
openai.api_base = self.openai_api_base
openai.api_version = self.openai_api_version
self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
self.use_mac_os_tts = False
self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS")
self.google_api_key = os.getenv("GOOGLE_API_KEY")
self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID")
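
Because the Azure variables were renamed, an existing environment has to follow suit; a hedged sketch of setting the new names before the Config singleton is first created (all values are placeholders):

    import os

    os.environ["USE_AZURE"] = "True"
    os.environ["OPENAI_AZURE_API_BASE"] = "https://my-resource.openai.azure.com/"   # placeholder
    os.environ["OPENAI_AZURE_API_VERSION"] = "2023-03-15-preview"                   # placeholder
    os.environ["OPENAI_AZURE_DEPLOYMENT_ID"] = "my-gpt4-deployment"                 # placeholder

    from config import Config   # module name assumed
    cfg = Config()              # now reads the *_AZURE_* names and configures openai for Azure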

View File

@@ -1,6 +1,7 @@
import json
import random
import commands as cmd
import utils
from memory import get_memory
import data
import chat
@@ -16,9 +17,18 @@ from ai_config import AIConfig
import traceback
import yaml
import argparse
import logging
cfg = Config()
def configure_logging():
logging.basicConfig(filename='log.txt',
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.DEBUG)
return logging.getLogger('AutoGPT')
def check_openai_api_key():
"""Check if the OpenAI API key is set in config.py or as an environment variable."""
if not cfg.openai_api_key:
@@ -29,7 +39,6 @@ def check_openai_api_key():
print("You can get your key from https://beta.openai.com/account/api-keys")
exit(1)
def print_to_console(
title,
title_color,
@@ -39,10 +48,12 @@ def print_to_console(
max_typing_speed=0.01):
"""Prints text to the console with a typing effect"""
global cfg
global logger
if speak_text and cfg.speak_mode:
speak.say_text(f"{title}. {content}")
print(title_color + title + " " + Style.RESET_ALL, end="")
if content:
logger.info(title + ': ' + content)
if isinstance(content, list):
content = " ".join(content)
words = content.split()
@@ -133,12 +144,12 @@ def load_variables(config_file="config.yaml"):
# Prompt the user for input if config file is missing or empty values
if not ai_name:
- ai_name = input("Name your AI: ")
+ ai_name = utils.clean_input("Name your AI: ")
if ai_name == "":
ai_name = "Entrepreneur-GPT"
if not ai_role:
- ai_role = input(f"{ai_name} is: ")
+ ai_role = utils.clean_input(f"{ai_name} is: ")
if ai_role == "":
ai_role = "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth."
@@ -148,7 +159,7 @@ def load_variables(config_file="config.yaml"):
print("Enter nothing to load defaults, enter nothing when finished.")
ai_goals = []
for i in range(5):
- ai_goal = input(f"Goal {i+1}: ")
+ ai_goal = utils.clean_input(f"Goal {i+1}: ")
if ai_goal == "":
break
ai_goals.append(ai_goal)
@@ -181,7 +192,7 @@ def construct_prompt():
Fore.GREEN,
f"Would you like me to return to being {config.ai_name}?",
speak_text=True)
- should_continue = input(f"""Continue with the last settings?
+ should_continue = utils.clean_input(f"""Continue with the last settings?
Name: {config.ai_name}
Role: {config.ai_role}
Goals: {config.ai_goals}
@@ -216,7 +227,7 @@ def prompt_user():
"Name your AI: ",
Fore.GREEN,
"For example, 'Entrepreneur-GPT'")
- ai_name = input("AI Name: ")
+ ai_name = utils.clean_input("AI Name: ")
if ai_name == "":
ai_name = "Entrepreneur-GPT"
@@ -231,7 +242,7 @@ def prompt_user():
"Describe your AI's role: ",
Fore.GREEN,
"For example, 'an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.'")
- ai_role = input(f"{ai_name} is: ")
+ ai_role = utils.clean_input(f"{ai_name} is: ")
if ai_role == "":
ai_role = "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth."
@@ -243,7 +254,7 @@ def prompt_user():
print("Enter nothing to load defaults, enter nothing when finished.", flush=True)
ai_goals = []
for i in range(5):
- ai_goal = input(f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: ")
+ ai_goal = utils.clean_input(f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: ")
if ai_goal == "":
break
ai_goals.append(ai_goal)
@@ -281,7 +292,7 @@ def parse_arguments():
if args.debug:
print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED")
cfg.set_debug_mode(True)
if args.gpt3only:
print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
@@ -292,6 +303,7 @@ def parse_arguments():
# TODO: fill in llm values here
check_openai_api_key()
cfg = Config()
logger = configure_logging()
parse_arguments()
ai_name = ""
prompt = construct_prompt()
@@ -341,7 +353,7 @@ while True:
f"Enter 'y' to authorise command, 'y -N' to run N continuous commands, 'n' to exit program, or enter feedback for {ai_name}...",
flush=True)
while True:
- console_input = input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)
+ console_input = utils.clean_input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)
if console_input.lower() == "y":
user_input = "GENERATE NEXT COMMAND JSON"
break
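
With configure_logging in place, every print_to_console call with content also lands in log.txt; given the format and datefmt strings above, a line comes out roughly like this (message text invented):

    logger = configure_logging()
    logger.info("THOUGHTS: " + "I should start by browsing for market data")
    # log.txt then gains a line shaped like:
    # 08:49:43,123 AutoGPT INFO THOUGHTS: I should start by browsing for market data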

View File

@@ -39,9 +39,15 @@ def gtts_speech(text):
playsound("speech.mp3")
os.remove("speech.mp3")
def macos_tts_speech(text):
os.system(f'say "{text}"')
def say_text(text, voice_index=0):
if not cfg.elevenlabs_api_key:
gtts_speech(text)
if cfg.use_mac_os_tts == 'True':
macos_tts_speech(text)
else:
gtts_speech(text)
else:
success = eleven_labs_speech(text, voice_index)
if not success:
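
The macOS branch is opt-in via USE_MAC_OS_TTS; a minimal sketch of enabling it (macOS only, since it shells out to `say`; module name assumed):

    import os
    os.environ["USE_MAC_OS_TTS"] = "True"   # must be set before the Config singleton is first built

    import speak                             # module name assumed
    speak.say_text("Hello from Auto-GPT")    # with no ElevenLabs key set, this now uses the macOS voice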

scripts/utils.py (new file, 8 additions)
View File

@@ -0,0 +1,8 @@
def clean_input(prompt: str=''):
try:
return input(prompt)
except KeyboardInterrupt:
print("You interrupted Auto-GPT")
print("Quitting...")
exit(0)
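
Usage note: the input() calls in main.py were swapped for this wrapper, so pressing Ctrl+C at any prompt now prints the quit message and exits with status 0 instead of dumping a KeyboardInterrupt traceback; for example:

    from utils import clean_input

    name = clean_input("Name your AI: ")   # Ctrl+C here quits cleanly rather than raising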