Mirror of https://github.com/aljazceru/Auto-GPT.git (synced 2025-12-17 22:14:28 +01:00)
Make compatible with gpt-3.5
I made the JSON parsing more forgiving and improved the prompt, using things I learned from Koobah/Auto-GPT.
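To make "more forgiving" concrete: below is a minimal sketch of the idea, assuming the usual gpt-3.5 failure modes (markdown code fences and chatty text wrapped around the JSON object). forgiving_parse_json is a hypothetical stand-in for illustration only, not the actual fix_and_parse_json imported from json_parser in the diff.

# Hypothetical helper; the real fix_and_parse_json may recover differently.
import json
import re

def forgiving_parse_json(reply: str) -> dict:
    """Try strict parsing first, then strip common model artifacts."""
    try:
        return json.loads(reply)
    except json.JSONDecodeError:
        pass

    # Drop markdown fences such as ```json ... ```
    cleaned = re.sub(r"```(?:json)?", "", reply).strip()

    # Keep only the outermost {...} block, discarding text around it
    start, end = cleaned.find("{"), cleaned.rfind("}")
    if start != -1 and end > start:
        cleaned = cleaned[start:end + 1]

    return json.loads(cleaned)  # still raises JSONDecodeError if unrecoverable

if __name__ == "__main__":
    messy = 'Sure! Here is the JSON:\n```json\n{"thoughts": {"plan": "- step 1"}}\n```'
    print(forgiving_parse_json(messy))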
scripts/main.py (122 changed lines)
@@ -11,16 +11,15 @@ import speak
from enum import Enum, auto
import sys
from config import Config

from json_parser import fix_and_parse_json
from ai_config import AIConfig
import traceback
import yaml

class Argument(Enum):
    CONTINUOUS_MODE = "continuous-mode"
    SPEAK_MODE = "speak-mode"

# normally 6000 for gpt-4
TOKEN_LIMIT=4000


def print_to_console(
        title,
        title_color,
@@ -53,7 +52,7 @@ def print_assistant_thoughts(assistant_reply):
    global cfg
    try:
        # Parse and print Assistant response
        assistant_reply_json = json.loads(assistant_reply)
        assistant_reply_json = fix_and_parse_json(assistant_reply)

        assistant_thoughts = assistant_reply_json.get("thoughts")
        if assistant_thoughts:
@@ -80,8 +79,13 @@ def print_assistant_thoughts(assistant_reply):
        if assistant_thoughts_plan:
            print_to_console("PLAN:", Fore.YELLOW, "")
            if assistant_thoughts_plan:

                # Split the input_string using the newline character and dash
                # If it's a list, join it into a string
                if isinstance(assistant_thoughts_plan, list):
                    assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
                elif isinstance(assistant_thoughts_plan, dict):
                    assistant_thoughts_plan = str(assistant_thoughts_plan)
                # Split the input_string using the newline character and dash

                lines = assistant_thoughts_plan.split('\n')

                # Iterate through the lines and print each one with a bullet
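The isinstance handling above is there because gpt-3.5 does not always return the "plan" field as a string: it can come back as a list or a dict. A standalone illustration of the same normalization, using a hypothetical print_plan helper rather than the inline code in print_assistant_thoughts:

def print_plan(assistant_thoughts_plan) -> None:
    # Normalize list or dict plans to a newline-separated string first
    if isinstance(assistant_thoughts_plan, list):
        assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
    elif isinstance(assistant_thoughts_plan, dict):
        assistant_thoughts_plan = str(assistant_thoughts_plan)

    # Then print each line as a bullet, as the loop further down does
    for line in assistant_thoughts_plan.split("\n"):
        print(f"- {line.strip().lstrip('-').strip()}")

print_plan(["- research the market", "- register a domain"])
print_plan("- write tweet\n- schedule post")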
@@ -103,11 +107,89 @@ def print_assistant_thoughts(assistant_reply):
        print_to_console("Error: Invalid JSON\n", Fore.RED, assistant_reply)
    # All other errors, return "Error: + error message"
    except Exception as e:
        print_to_console("Error: \n", Fore.RED, str(e))
        call_stack = traceback.format_exc()
        print_to_console("Error: \n", Fore.RED, call_stack)

def load_variables(config_file="config.yaml"):
    # Load variables from yaml file if it exists
    try:
        with open(config_file) as file:
            config = yaml.load(file, Loader=yaml.FullLoader)
        ai_name = config.get("ai_name")
        ai_role = config.get("ai_role")
        ai_goals = config.get("ai_goals")
    except FileNotFoundError:
        ai_name = ""
        ai_role = ""
        ai_goals = []

    # Prompt the user for input if config file is missing or empty values
    if not ai_name:
        ai_name = input("Name your AI: ")
        if ai_name == "":
            ai_name = "Entrepreneur-GPT"

    if not ai_role:
        ai_role = input(f"{ai_name} is: ")
        if ai_role == "":
            ai_role = "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth."

    if not ai_goals:
        print("Enter up to 5 goals for your AI: ")
        print("For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'")
        print("Enter nothing to load defaults, enter nothing when finished.")
        ai_goals = []
        for i in range(5):
            ai_goal = input(f"Goal {i+1}: ")
            if ai_goal == "":
                break
            ai_goals.append(ai_goal)
        if len(ai_goals) == 0:
            ai_goals = ["Increase net worth", "Grow Twitter Account", "Develop and manage multiple businesses autonomously"]

    # Save variables to yaml file
    config = {"ai_name": ai_name, "ai_role": ai_role, "ai_goals": ai_goals}
    with open(config_file, "w") as file:
        documents = yaml.dump(config, file)

    prompt = data.load_prompt()
    prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""

    # Construct full prompt
    full_prompt = f"You are {ai_name}, {ai_role}\n{prompt_start}\n\nGOALS:\n\n"
    for i, goal in enumerate(ai_goals):
        full_prompt += f"{i+1}. {goal}\n"

    full_prompt += f"\n\n{prompt}"
    return full_prompt


def construct_prompt():
    config = AIConfig.load()
    if config.ai_name:
        print_to_console(
            f"Welcome back, {config.ai_name}!",
            Fore.GREEN,
            "Let's continue our journey.",
            speak_text=True)
        should_continue = input(f"Continue with the last settings? (Settings: {config.ai_name}, {config.ai_role}, {config.ai_goals}) (Y/n): ")
        if should_continue.lower() == "n":
            config = AIConfig()

    if not config.ai_name:
        config = prompt_user()
        config.save()

    # Get rid of this global:
    global ai_name
    ai_name = config.ai_name

    full_prompt = config.construct_full_prompt()
    return full_prompt


def prompt_user():
    ai_name = ""
    # Construct the prompt
    print_to_console(
        "Welcome to Auto-GPT! ",
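load_variables above round-trips its settings through config.yaml with PyYAML. For reference, a small sketch of that round trip; the values are purely illustrative:

# Illustrative only: shows the config.yaml shape load_variables reads and writes.
import yaml

settings = {
    "ai_name": "Entrepreneur-GPT",
    "ai_role": "an AI designed to autonomously develop and run businesses.",
    "ai_goals": ["Increase net worth", "Grow Twitter Account"],
}

with open("config.yaml", "w") as file:
    yaml.dump(settings, file)

with open("config.yaml") as file:
    loaded = yaml.load(file, Loader=yaml.FullLoader)

assert loaded == settings
print(yaml.dump(settings))  # the on-disk YAML layout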
@@ -155,19 +237,8 @@ def construct_prompt():
        ai_goals = ["Increase net worth", "Grow Twitter Account",
                    "Develop and manage multiple businesses autonomously"]

    prompt = data.load_prompt()
    prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""

    # Construct full prompt
    full_prompt = f"You are {ai_name}, {ai_role}\n{prompt_start}\n\nGOALS:\n\n"
    for i, goal in enumerate(ai_goals):
        full_prompt += f"{i+1}. {goal}\n"

    full_prompt += f"\n\n{prompt}"
    return full_prompt

# Check if the python script was executed with arguments, get those arguments

    config = AIConfig(ai_name, ai_role, ai_goals)
    return config

def parse_arguments():
    global cfg
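construct_prompt and prompt_user now delegate persistence and prompt assembly to ai_config.AIConfig. The diff only shows how that class is called (load, save, construct_full_prompt, and a three-argument constructor), so the following is a minimal sketch consistent with that usage, not the real ai_config.py; the ai_settings.yaml file name in particular is an assumption:

# Sketch of an AIConfig shaped like the calls in this diff; details assumed.
import yaml

class AIConfig:
    SAVE_FILE = "ai_settings.yaml"  # assumed location

    def __init__(self, ai_name="", ai_role="", ai_goals=None):
        self.ai_name = ai_name
        self.ai_role = ai_role
        self.ai_goals = ai_goals or []

    @classmethod
    def load(cls, config_file=SAVE_FILE):
        # Missing file -> empty config, so construct_prompt falls back to prompt_user()
        try:
            with open(config_file) as file:
                params = yaml.load(file, Loader=yaml.FullLoader) or {}
        except FileNotFoundError:
            params = {}
        return cls(params.get("ai_name", ""),
                   params.get("ai_role", ""),
                   params.get("ai_goals", []))

    def save(self, config_file=SAVE_FILE):
        params = {"ai_name": self.ai_name,
                  "ai_role": self.ai_role,
                  "ai_goals": self.ai_goals}
        with open(config_file, "w") as file:
            yaml.dump(params, file)

    def construct_full_prompt(self):
        # The real version presumably also appends the command prompt from data.load_prompt()
        prompt_start = ("Your decisions must always be made independently "
                        "without seeking user assistance.")
        full_prompt = f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
        for i, goal in enumerate(self.ai_goals):
            full_prompt += f"{i+1}. {goal}\n"
        return full_prompt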
@@ -185,6 +256,8 @@ def parse_arguments():
        print_to_console("Speak Mode: ", Fore.GREEN, "ENABLED")
        cfg.set_speak_mode(True)

# TODO: Better argument parsing:
# TODO: fill in llm values here

cfg = Config()
@@ -194,9 +267,10 @@ prompt = construct_prompt()
print(prompt)
# Initialize variables
full_message_history = []
token_limit = TOKEN_LIMIT # The maximum number of tokens allowed in the API call
token_limit = cfg.thinking_token_limit # The maximum number of tokens allowed in the API call
result = None
user_input = "GENERATE NEXT COMMAND JSON"
# Make a constant:
user_input = "Determine which next command to use, and respond using the format specified above:"

# Interaction Loop
while True:
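The interaction loop now takes its token budget from the Config object instead of the hard-coded TOKEN_LIMIT, which makes it easier to run against gpt-3.5's smaller context (4000 tokens here versus the 6000 noted for gpt-4 near the top of the diff). A hypothetical sketch of how Config could expose that value; the real config.py may define thinking_token_limit differently:

# Hypothetical sketch; the defaults follow the "# normally 6000 for gpt-4"
# and TOKEN_LIMIT=4000 lines shown earlier in this diff.
class Config:
    def __init__(self):
        self.speak_mode = False
        self.thinking_token_limit = 4000  # 4000 for gpt-3.5, ~6000 for gpt-4

    def set_speak_mode(self, value: bool) -> None:
        self.speak_mode = value

    def set_thinking_token_limit(self, value: int) -> None:  # assumed setter
        self.thinking_token_limit = value

cfg = Config()
token_limit = cfg.thinking_token_limit  # as used at the top of the loop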