Mirror of https://github.com/aljazceru/Auto-GPT.git (synced 2025-12-17 22:14:28 +01:00)
Code review changes
@@ -2,7 +2,7 @@ FROM python:3.11
 
 WORKDIR /app
 COPY scripts/ /app
 COPY requirements.txt /app/requirements.txt
 RUN pip install -r requirements.txt
 
 CMD ["python", "main.py"]
@@ -13,7 +13,6 @@ class AIConfig:
     # Soon this will go in a folder where it remembers more stuff about the run(s)
     SAVE_FILE = "last_run_ai_settings.yaml"
 
-
     @classmethod
     def load(cls, config_file=SAVE_FILE):
         """Load variables from yaml file if it exists, otherwise use defaults."""
@@ -29,7 +28,6 @@ class AIConfig:
 
         return cls(ai_name, ai_role, ai_goals)
 
-
     def save(self, config_file=SAVE_FILE):
         """Save variables to yaml file."""
        config = {"ai_name": self.ai_name, "ai_role": self.ai_role, "ai_goals": self.ai_goals}
@@ -1,5 +1,5 @@
 from typing import List, Optional
-from json import dumps
+import json
 from config import Config
 from call_ai_function import call_ai_function
 from json_parser import fix_and_parse_json
@@ -23,7 +23,7 @@ def improve_code(suggestions: List[str], code: str) -> str:
     function_string = (
         "def generate_improved_code(suggestions: List[str], code: str) -> str:"
     )
-    args = [dumps(suggestions), code]
+    args = [json.dumps(suggestions), code]
     description_string = """Improves the provided code based on the suggestions provided, making no other changes."""
 
     result_string = call_ai_function(function_string, args, description_string)
@@ -36,7 +36,7 @@ def write_tests(code: str, focus: List[str]) -> str:
     function_string = (
         "def create_test_cases(code: str, focus: Optional[str] = None) -> str:"
     )
-    args = [code, dumps(focus)]
+    args = [code, json.dumps(focus)]
     description_string = """Generates test cases for the existing code, focusing on specific areas if required."""
 
     result_string = call_ai_function(function_string, args, description_string)
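This import swap is the pattern the whole review applies: import the module, then qualify each call site. A small illustration of why that reads better (the sample data is made up):

```python
import json

suggestions = ["add type hints", "handle the empty-list case"]

# With the module import, the call site says where dumps() comes from,
# and a local variable named `dumps` can no longer shadow it.
args = [json.dumps(suggestions)]
print(args)  # ['["add type hints", "handle the empty-list case"]']
```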
@@ -1,14 +1,13 @@
-from requests import get
+import requests
 from bs4 import BeautifulSoup
 from config import Config
 from llm_utils import create_chat_completion
 
 cfg = Config()
 
-
 def scrape_text(url):
     """Scrape text from a webpage"""
-    response = get(url)
+    response = requests.get(url)
 
     # Check if the response contains an HTTP error
     if response.status_code >= 400:
@@ -30,26 +29,22 @@ def scrape_text(url):
 def extract_hyperlinks(soup):
     """Extract hyperlinks from a BeautifulSoup object"""
     hyperlinks = []
-
     for link in soup.find_all('a', href=True):
         hyperlinks.append((link.text, link['href']))
-
     return hyperlinks
 
 
 def format_hyperlinks(hyperlinks):
     """Format hyperlinks into a list of strings"""
     formatted_links = []
-
     for link_text, link_url in hyperlinks:
         formatted_links.append(f"{link_text} ({link_url})")
-
     return formatted_links
 
 
 def scrape_links(url):
     """Scrape hyperlinks from a webpage"""
-    response = get(url)
+    response = requests.get(url)
 
     # Check if the response contains an HTTP error
     if response.status_code >= 400:
@@ -72,7 +67,6 @@ def split_text(text, max_length=8192):
     current_chunk = []
 
     for paragraph in paragraphs:
-
         if current_length + len(paragraph) + 1 <= max_length:
             current_chunk.append(paragraph)
             current_length += len(paragraph) + 1
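For orientation, the fetch-and-check shape shared by scrape_text and scrape_links reduces to something like the sketch below; the helper name is illustrative, and the real functions do more cleanup than get_text alone.

```python
import requests
from bs4 import BeautifulSoup

def fetch_page_text(url):
    """Fetch a page and return its visible text, or an error string."""
    response = requests.get(url)
    # Any 4xx/5xx status is reported instead of parsed
    if response.status_code >= 400:
        return f"Error: HTTP {response.status_code} error"
    soup = BeautifulSoup(response.text, "html.parser")
    return soup.get_text(separator="\n", strip=True)
```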
@@ -1,8 +1,8 @@
 from config import Config
-from llm_utils import create_chat_completion
 cfg = Config()
 
+from llm_utils import create_chat_completion
 # This is a magic function that can do anything with no-code. See
 # https://github.com/Torantulino/AI-Functions for more info.
 def call_ai_function(function, args, description, model=cfg.smart_llm_model):
@@ -1,11 +1,11 @@
-from time import sleep
+import time
 import openai
 from dotenv import load_dotenv
 from config import Config
 import token_counter
 from llm_utils import create_chat_completion
-cfg = Config()
 
+cfg = Config()
 
 def create_chat_message(role, content):
     """
@@ -48,10 +48,8 @@ def chat_with_ai(
     """
     model = cfg.fast_llm_model # TODO: Change model from hardcode to argument
     # Reserve 1000 tokens for the response
-
     if debug:
         print(f"Token limit: {token_limit}")
-
     send_token_limit = token_limit - 1000
 
     current_context = [
@@ -73,7 +71,6 @@ def chat_with_ai(
         message_to_add = full_message_history[next_message_to_add_index]
 
         tokens_to_add = token_counter.count_message_tokens([message_to_add], model)
-
         if current_tokens_used + tokens_to_add > send_token_limit:
             break
 
@@ -99,16 +96,13 @@ def chat_with_ai(
        print(f"Send Token Count: {current_tokens_used}")
        print(f"Tokens remaining for response: {tokens_remaining}")
        print("------------ CONTEXT SENT TO AI ---------------")
-
        for message in current_context:
            # Skip printing the prompt
-
            if message["role"] == "system" and message["content"] == prompt:
                continue
            print(
                f"{message['role'].capitalize()}: {message['content']}")
            print()
-
        print("----------- END OF CONTEXT ----------------")
 
    # TODO: use a model defined elsewhere, so that model can contain temperature and other settings we care about
@@ -130,4 +124,4 @@ def chat_with_ai(
    except openai.error.RateLimitError:
        # TODO: WHen we switch to langchain, this is built in
        print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
-        sleep(10)
+        time.sleep(10)
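The except branch above sleeps and retries when the API rate-limits. A self-contained sketch of that loop against the pre-1.0 openai client the diff targets (the helper name and retry cap are illustrative, not from the commit):

```python
import time
import openai  # pre-1.0 client, matching the diff's openai.error usage

def completion_with_backoff(messages, model, max_attempts=3):
    """Retry a chat completion when the API reports a rate limit."""
    for _ in range(max_attempts):
        try:
            return openai.ChatCompletion.create(model=model, messages=messages)
        except openai.error.RateLimitError:
            print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
            time.sleep(10)
    raise RuntimeError("API still rate limited after retries")
```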
@@ -1,7 +1,7 @@
 import browse
 import json
 import memory as mem
-from datetime import datetime
+import datetime
 import agent_manager as agents
 import speak
 from config import Config
@@ -110,7 +110,7 @@ def execute_command(command_name, arguments):
 def get_datetime():
     """Return the current date and time"""
     return "Current date and time: " + \
-        datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+        datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
 
 
 def google_search(query, num_results=8):
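get_datetime is the subtle case of the import change: after `import datetime`, the module owns the bare name and the class is reached as `datetime.datetime`. A quick check:

```python
import datetime

# datetime (the module) now owns the name; datetime.datetime is the class.
stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print("Current date and time: " + stamp)
```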
@@ -44,52 +44,42 @@ class Config(metaclass=Singleton):
         # Initialize the OpenAI API client
         openai.api_key = self.openai_api_key
 
-
     def set_continuous_mode(self, value: bool):
         """Set the continuous mode value."""
         self.continuous_mode = value
 
-
     def set_speak_mode(self, value: bool):
         """Set the speak mode value."""
         self.speak_mode = value
 
-
     def set_fast_llm_model(self, value: str):
         """Set the fast LLM model value."""
         self.fast_llm_model = value
 
-
     def set_smart_llm_model(self, value: str):
         """Set the smart LLM model value."""
         self.smart_llm_model = value
 
-
     def set_fast_token_limit(self, value: int):
         """Set the fast token limit value."""
         self.fast_token_limit = value
 
-
     def set_smart_token_limit(self, value: int):
         """Set the smart token limit value."""
         self.smart_token_limit = value
 
-
     def set_openai_api_key(self, value: str):
         """Set the OpenAI API key value."""
         self.openai_api_key = value
 
-
     def set_elevenlabs_api_key(self, value: str):
         """Set the ElevenLabs API key value."""
         self.elevenlabs_api_key = value
 
-
     def set_google_api_key(self, value: str):
         """Set the Google API key value."""
         self.google_api_key = value
 
-
     def set_custom_search_engine_id(self, value: str):
         """Set the custom search engine ID value."""
         self.custom_search_engine_id = value
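The hunk header shows Config built on a Singleton metaclass, which is why every module-level `cfg = Config()` in this commit shares one instance. One common shape for such a metaclass, sketched as an assumption since its definition is outside this diff:

```python
class Singleton(type):
    """Metaclass that returns one shared instance per class."""
    _instances = {}

    def __call__(cls, *args, **kwargs):
        # Construct the instance only on first call; reuse it afterwards
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]

class Config(metaclass=Singleton):
    def __init__(self):
        self.continuous_mode = False

a, b = Config(), Config()
assert a is b  # both names point at the same instance
```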
@@ -1,4 +1,4 @@
-from os import path
+import os
 from pathlib import Path
 SRC_DIR = Path(__file__).parent
 
@@ -6,7 +6,7 @@ def load_prompt():
     """Load the prompt from data/prompt.txt"""
     try:
         # get directory of this file:
-        file_dir = Path(path.dirname(path.realpath(__file__)))
+        file_dir = Path(os.path.dirname(os.path.realpath(__file__)))
         data_dir = file_dir / "data"
         prompt_file = data_dir / "prompt.txt"
         # Load the promt from data/prompt.txt
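Both spellings in load_prompt resolve the directory of the current file; the commit keeps the os.path form but routes it through the module. For comparison (not part of the diff), pathlib can do it in one expression:

```python
import os
from pathlib import Path

file_dir = Path(os.path.dirname(os.path.realpath(__file__)))
same_dir = Path(__file__).resolve().parent  # pathlib equivalent
assert file_dir == same_dir
```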
@@ -1,5 +1,5 @@
 import docker
-from os import path
+import os
 
 
 def execute_python_file(file):
@@ -11,9 +11,9 @@ def execute_python_file(file):
     if not file.endswith(".py"):
         return "Error: Invalid file type. Only .py files are allowed."
 
-    file_path = path.join(workspace_folder, file)
+    file_path = os.path.join(workspace_folder, file)
 
-    if not path.isfile(file_path):
+    if not os.path.isfile(file_path):
         return f"Error: File '{file}' does not exist."
 
     try:
@@ -36,10 +36,8 @@ def write_to_file(filename, text):
     try:
         filepath = safe_join(working_directory, filename)
         directory = os.path.dirname(filepath)
-
         if not os.path.exists(directory):
             os.makedirs(directory)
-
         with open(filepath, "w") as f:
             f.write(text)
         return "File written to successfully."
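write_to_file keeps its create-directories-then-write shape, just without the blank lines. A standalone sketch, assuming the path carries a directory component as the safe_join result in the diff does:

```python
import os

def write_text(filepath, text):
    """Create missing parent directories, then write the file."""
    directory = os.path.dirname(filepath)
    if not os.path.exists(directory):
        os.makedirs(directory)
    with open(filepath, "w") as f:
        f.write(text)
    return "File written to successfully."

print(write_text("workspace/notes/todo.txt", "refactor imports"))
```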
@@ -1,7 +1,6 @@
 import json
 from call_ai_function import call_ai_function
 from config import Config
-
 cfg = Config()
 
 def fix_and_parse_json(json_str: str, try_to_fix_with_gpt: bool = True):
@@ -38,18 +37,15 @@ def fix_and_parse_json(json_str: str, try_to_fix_with_gpt: bool = True):
             json_str = json_str[:last_brace_index+1]
             return json.loads(json_str)
     except Exception as e:
-
         if try_to_fix_with_gpt:
             print(f"Warning: Failed to parse AI output, attempting to fix.\n If you see this warning frequently, it's likely that your prompt is confusing the AI. Try changing it up slightly.")
             # Now try to fix this up using the ai_functions
             ai_fixed_json = fix_json(json_str, json_schema, False)
-
             if ai_fixed_json != "failed":
                 return json.loads(ai_fixed_json)
             else:
                 print(f"Failed to fix ai output, telling the AI.") # This allows the AI to react to the error message, which usually results in it correcting its ways.
                 return json_str
-
         else:
             raise e
 
@@ -63,11 +59,9 @@ def fix_json(json_str: str, schema: str, debug=False) -> str:
    # If it doesn't already start with a "`", add one:
    if not json_str.startswith("`"):
        json_str = "```json\n" + json_str + "\n```"
-
    result_string = call_ai_function(
        function_string, args, description_string, model=cfg.fast_llm_model
    )
-
    if debug:
        print("------------ JSON FIX ATTEMPT ---------------")
        print(f"Original JSON: {json_str}")
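The first recovery step in fix_and_parse_json simply truncates everything after the final closing brace before retrying json.loads. In isolation (the helper name is illustrative):

```python
import json

def trim_to_last_brace(json_str):
    """Drop trailing chatter after the final '}' and parse what remains."""
    last_brace_index = json_str.rindex("}")
    json_str = json_str[:last_brace_index + 1]
    return json.loads(json_str)

print(trim_to_last_brace('{"command": "browse"} Hope that helps!'))
```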
@@ -1,6 +1,5 @@
 import openai
 from config import Config
-
 cfg = Config()
 
 openai.api_key = cfg.openai_api_key
@@ -27,24 +27,17 @@ def print_to_console(
         max_typing_speed=0.01):
     """Prints text to the console with a typing effect"""
     global cfg
-
     if speak_text and cfg.speak_mode:
         speak.say_text(f"{title}. {content}")
-
     print(title_color + title + " " + Style.RESET_ALL, end="")
-
     if content:
-
         if isinstance(content, list):
             content = " ".join(content)
         words = content.split()
-
         for i, word in enumerate(words):
             print(word, end="", flush=True)
-
             if i < len(words) - 1:
                 print(" ", end="", flush=True)
-
             typing_speed = random.uniform(min_typing_speed, max_typing_speed)
             time.sleep(typing_speed)
             # type faster after each word
@@ -88,7 +81,6 @@ def print_assistant_thoughts(assistant_reply):
     if assistant_thoughts_plan:
         print_to_console("PLAN:", Fore.YELLOW, "")
         # If it's a list, join it into a string
-
         if isinstance(assistant_thoughts_plan, list):
             assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
         elif isinstance(assistant_thoughts_plan, dict):
@@ -96,7 +88,6 @@ def print_assistant_thoughts(assistant_reply):
 
         # Split the input_string using the newline character and dashes
         lines = assistant_thoughts_plan.split('\n')
-
         for line in lines:
             line = line.lstrip("- ")
             print_to_console("- ", Fore.GREEN, line.strip())
@@ -131,13 +122,11 @@ def load_variables(config_file="config.yaml"):
     # Prompt the user for input if config file is missing or empty values
     if not ai_name:
         ai_name = input("Name your AI: ")
-
         if ai_name == "":
             ai_name = "Entrepreneur-GPT"
 
     if not ai_role:
         ai_role = input(f"{ai_name} is: ")
-
         if ai_role == "":
             ai_role = "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth."
 
@@ -146,20 +135,16 @@ def load_variables(config_file="config.yaml"):
     print("For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'")
     print("Enter nothing to load defaults, enter nothing when finished.")
     ai_goals = []
-
     for i in range(5):
         ai_goal = input(f"Goal {i+1}: ")
-
         if ai_goal == "":
             break
         ai_goals.append(ai_goal)
-
     if len(ai_goals) == 0:
         ai_goals = ["Increase net worth", "Grow Twitter Account", "Develop and manage multiple businesses autonomously"]
 
     # Save variables to yaml file
     config = {"ai_name": ai_name, "ai_role": ai_role, "ai_goals": ai_goals}
-
     with open(config_file, "w") as file:
         documents = yaml.dump(config, file)
 
@@ -168,7 +153,6 @@ def load_variables(config_file="config.yaml"):
 
     # Construct full prompt
     full_prompt = f"You are {ai_name}, {ai_role}\n{prompt_start}\n\nGOALS:\n\n"
-
     for i, goal in enumerate(ai_goals):
         full_prompt += f"{i+1}. {goal}\n"
 
@@ -179,7 +163,6 @@ def load_variables(config_file="config.yaml"):
 def construct_prompt():
     """Construct the prompt for the AI to respond to"""
     config = AIConfig.load()
-
     if config.ai_name:
         print_to_console(
             f"Welcome back! ",
@@ -191,7 +174,6 @@ def construct_prompt():
 Role: {config.ai_role}
 Goals: {config.ai_goals}
 Continue (y/n): """)
-
         if should_continue.lower() == "n":
             config = AIConfig()
 
@@ -222,9 +204,7 @@ def prompt_user():
         "Name your AI: ",
         Fore.GREEN,
         "For example, 'Entrepreneur-GPT'")
-
     ai_name = input("AI Name: ")
-
     if ai_name == "":
         ai_name = "Entrepreneur-GPT"
 
@@ -240,7 +220,6 @@ def prompt_user():
         Fore.GREEN,
         "For example, 'an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.'")
     ai_role = input(f"{ai_name} is: ")
-
     if ai_role == "":
         ai_role = "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth."
 
@@ -249,18 +228,13 @@ def prompt_user():
         "Enter up to 5 goals for your AI: ",
         Fore.GREEN,
         "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'")
-
     print("Enter nothing to load defaults, enter nothing when finished.", flush=True)
     ai_goals = []
-
     for i in range(5):
         ai_goal = input(f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: ")
-
         if ai_goal == "":
             break
-
         ai_goals.append(ai_goal)
-
     if len(ai_goals) == 0:
         ai_goals = ["Increase net worth", "Grow Twitter Account",
                     "Develop and manage multiple businesses autonomously"]
@@ -268,7 +242,6 @@ def prompt_user():
     config = AIConfig(ai_name, ai_role, ai_goals)
     return config
 
-
 def parse_arguments():
     """Parses the arguments passed to the script"""
     global cfg
@@ -347,7 +320,6 @@ while True:
             flush=True)
     while True:
         console_input = input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)
-
         if console_input.lower() == "y":
             user_input = "GENERATE NEXT COMMAND JSON"
             break
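print_to_console's word loop with a shrinking random delay is what produces the typing effect; stripped of the colorama and speech hooks it reduces to roughly this sketch (the 0.95 decay factor is an assumption implied by the trailing comment, not shown in the hunk):

```python
import random
import time

def type_out(content, min_typing_speed=0.05, max_typing_speed=0.01):
    """Print text word by word, accelerating as it goes."""
    words = content.split()
    for i, word in enumerate(words):
        print(word, end="", flush=True)
        if i < len(words) - 1:
            print(" ", end="", flush=True)
        typing_speed = random.uniform(min_typing_speed, max_typing_speed)
        time.sleep(typing_speed)
        # type faster after each word
        min_typing_speed = min_typing_speed * 0.95
        max_typing_speed = max_typing_speed * 0.95
    print()

type_out("Welcome back! I am ready to continue.")
```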
@@ -2,7 +2,6 @@ import os
 from playsound import playsound
 import requests
 from config import Config
-
 cfg = Config()
 import gtts
 
@@ -20,7 +19,8 @@ def eleven_labs_speech(text, voice_index=0):
     tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format(
         voice_id=voices[voice_index])
     formatted_message = {"text": text}
-    response = requests.post(tts_url, headers=tts_headers, json=formatted_message)
+    response = requests.post(
+        tts_url, headers=tts_headers, json=formatted_message)
 
     if response.status_code == 200:
         with open("speech.mpeg", "wb") as f:
@@ -14,7 +14,6 @@ class Spinner:
         self.running = False
         self.spinner_thread = None
 
-
     def spin(self):
         """Spin the spinner"""
         while self.running:
@@ -23,14 +22,12 @@ class Spinner:
             time.sleep(self.delay)
             sys.stdout.write('\b' * (len(self.message) + 2))
 
-
     def __enter__(self):
         """Start the spinner"""
         self.running = True
         self.spinner_thread = threading.Thread(target=self.spin)
         self.spinner_thread.start()
 
-
     def __exit__(self, exc_type, exc_value, exc_traceback):
         """Stop the spinner"""
         self.running = False
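Pieced together, Spinner is a small context manager that animates on a background thread. A runnable reconstruction under that reading (the cycle characters and the join-on-exit are assumptions beyond what the hunks show):

```python
import itertools
import sys
import threading
import time

class Spinner:
    """Context-manager spinner that animates while a block runs."""

    def __init__(self, message="Loading ", delay=0.1):
        self.spinner = itertools.cycle(['-', '/', '|', '\\'])
        self.message = message
        self.delay = delay
        self.running = False
        self.spinner_thread = None

    def spin(self):
        """Spin the spinner"""
        while self.running:
            sys.stdout.write(next(self.spinner) + " " + self.message)
            sys.stdout.flush()
            time.sleep(self.delay)
            # Back up over the spinner char, the space, and the message
            sys.stdout.write('\b' * (len(self.message) + 2))

    def __enter__(self):
        """Start the spinner"""
        self.running = True
        self.spinner_thread = threading.Thread(target=self.spin)
        self.spinner_thread.start()

    def __exit__(self, exc_type, exc_value, exc_traceback):
        """Stop the spinner"""
        self.running = False
        self.spinner_thread.join()

with Spinner("Thinking... "):
    time.sleep(1)
```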
@@ -1,7 +1,6 @@
 import tiktoken
 from typing import List, Dict
 
-
 def count_message_tokens(messages : List[Dict[str, str]], model : str = "gpt-3.5-turbo-0301") -> int:
     """
     Returns the number of tokens used by a list of messages.
@@ -18,7 +17,6 @@ def count_message_tokens(messages : List[Dict[str, str]], model : str = "gpt-3.5
     except KeyError:
         print("Warning: model not found. Using cl100k_base encoding.")
         encoding = tiktoken.get_encoding("cl100k_base")
-
     if model == "gpt-3.5-turbo":
         # !Node: gpt-3.5-turbo may change over time. Returning num tokens assuming gpt-3.5-turbo-0301.")
         return count_message_tokens(messages, model="gpt-3.5-turbo-0301")
@@ -34,19 +32,15 @@ def count_message_tokens(messages : List[Dict[str, str]], model : str = "gpt-3.5
     else:
         raise NotImplementedError(f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""")
     num_tokens = 0
-
     for message in messages:
         num_tokens += tokens_per_message
-
         for key, value in message.items():
             num_tokens += len(encoding.encode(value))
-
             if key == "name":
                 num_tokens += tokens_per_name
     num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
     return num_tokens
 
-
 def count_string_tokens(string: str, model_name: str) -> int:
     """
     Returns the number of tokens in a text string.
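count_string_tokens, whose docstring closes the diff, is the plain-string counterpart of the message counter: look up the model's encoding and count encoded tokens. A sketch of that counterpart:

```python
import tiktoken

def count_string_tokens(string: str, model_name: str = "gpt-3.5-turbo") -> int:
    """Return the number of tokens the model's encoding assigns to a string."""
    encoding = tiktoken.encoding_for_model(model_name)
    return len(encoding.encode(string))

print(count_string_tokens("Hello, world!"))  # small integer, e.g. 4
```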