diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0b90b55d..366aaf67 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -32,11 +32,11 @@ jobs: - name: Lint with flake8 continue-on-error: false - run: flake8 scripts/ tests/ --select E303,W293,W291,W292,E305,E231,E302 + run: flake8 autogpt/ tests/ --select E303,W293,W291,W292,E305,E231,E302 - name: Run unittest tests with coverage run: | - coverage run --source=scripts -m unittest discover tests + coverage run --source=autogpt -m unittest discover tests - name: Generate coverage report run: | diff --git a/.gitignore b/.gitignore index b0be8967..5a2ce371 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,7 @@ -scripts/keys.py -scripts/*json -scripts/node_modules/ -scripts/__pycache__/keys.cpython-310.pyc +autogpt/keys.py +autogpt/*json +autogpt/node_modules/ +autogpt/__pycache__/keys.cpython-310.pyc package-lock.json *.pyc auto_gpt_workspace/* diff --git a/Dockerfile b/Dockerfile index e776664e..3ae1ac12 100644 --- a/Dockerfile +++ b/Dockerfile @@ -17,7 +17,7 @@ COPY --chown=appuser:appuser requirements.txt . RUN pip install --no-cache-dir --user -r requirements.txt # Copy the application files -COPY --chown=appuser:appuser scripts/ . +COPY --chown=appuser:appuser autogpt/ ./autogpt # Set the entrypoint -ENTRYPOINT ["python", "main.py"] \ No newline at end of file +ENTRYPOINT ["python", "-m", "autogpt"] diff --git a/README.md b/README.md index 27150fa2..fcf0cc3f 100644 --- a/README.md +++ b/README.md @@ -119,11 +119,11 @@ pip install -r requirements.txt ## 🔧 Usage -1. Run the `main.py` Python script in your terminal: +1. Run the `autogpt` Python module in your terminal: _(Type this into your CMD window)_ ``` -python scripts/main.py +python -m autogpt ``` 2. After each of action, enter 'y' to authorise command, 'y -N' to run N continuous commands, 'n' to exit program, or enter additional feedback for the AI. 
@@ -136,7 +136,21 @@ You will find activity and error logs in the folder `./output/logs` To output debug logs: ``` -python scripts/main.py --debug +python -m autogpt --debug +``` + +### Docker + +You can also build this into a docker image and run it: + +``` +docker build -t autogpt . +docker run -it --env-file=./.env -v $PWD/auto_gpt_workspace:/app/auto_gpt_workspace autogpt +``` + +You can pass extra arguments, for instance, running with `--gpt3only` and `--continuous` mode: +``` +docker run -it --env-file=./.env -v $PWD/auto_gpt_workspace:/app/auto_gpt_workspace autogpt --gpt3only --continuous ``` ### Command Line Arguments Here are some common arguments you can use when running Auto-GPT: @@ -152,7 +166,7 @@ Here are some common arguments you can use when running Auto-GPT: Use this to use TTS for Auto-GPT ``` -python scripts/main.py --speak +python -m autogpt --speak ``` ## 🔍 Google API Keys Configuration @@ -328,10 +342,10 @@ Continuous mode is not recommended. It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorise. Use at your own risk. -1. Run the `main.py` Python script in your terminal: +1. Run the `autogpt` python module in your terminal: ``` -python scripts/main.py --continuous +python -m autogpt --continuous ``` @@ -342,7 +356,7 @@ python scripts/main.py --continuous If you don't have access to the GPT4 api, this mode will allow you to use Auto-GPT! ``` -python scripts/main.py --gpt3only +python -m autogpt --gpt3only ``` It is recommended to use a virtual machine for tasks that require high security measures to prevent any potential harm to the main computer's system and data. @@ -415,8 +429,8 @@ This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. 
We To run the linter, run the following command: ``` -flake8 scripts/ tests/ +flake8 autogpt/ tests/ # Or, if you want to run flake8 with the same configuration as the CI: -flake8 scripts/ tests/ --select E303,W293,W291,W292,E305,E231,E302 +flake8 autogpt/ tests/ --select E303,W293,W291,W292,E305,E231,E302 ``` diff --git a/scripts/__init__.py b/autogpt/__init__.py similarity index 100% rename from scripts/__init__.py rename to autogpt/__init__.py diff --git a/autogpt/__main__.py b/autogpt/__main__.py new file mode 100644 index 00000000..7f59266c --- /dev/null +++ b/autogpt/__main__.py @@ -0,0 +1,465 @@ +import json +import random +from autogpt import commands as cmd +from autogpt import utils +from autogpt.memory import get_memory, get_supported_memory_backends +from autogpt import chat +from colorama import Fore, Style +from autogpt.spinner import Spinner +import time +from autogpt import speak +from autogpt.config import Config +from autogpt.json_parser import fix_and_parse_json +from autogpt.ai_config import AIConfig +import traceback +import yaml +import argparse +from autogpt.logger import logger +import logging +from autogpt.prompt import get_prompt + +cfg = Config() + + +def check_openai_api_key(): + """Check if the OpenAI API key is set in config.py or as an environment variable.""" + if not cfg.openai_api_key: + print( + Fore.RED + + "Please set your OpenAI API key in .env or as an environment variable." + ) + print("You can get your key from https://beta.openai.com/account/api-keys") + exit(1) + + +def attempt_to_fix_json_by_finding_outermost_brackets(json_string): + if cfg.speak_mode and cfg.debug_mode: + speak.say_text("I have received an invalid JSON response from the OpenAI API. 
Trying to fix it now.") + logger.typewriter_log("Attempting to fix JSON by finding outermost brackets\n") + + try: + # Use regex to search for JSON objects + import regex + json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}") + json_match = json_pattern.search(json_string) + + if json_match: + # Extract the valid JSON object from the string + json_string = json_match.group(0) + logger.typewriter_log(title="Apparently json was fixed.", title_color=Fore.GREEN) + if cfg.speak_mode and cfg.debug_mode: + speak.say_text("Apparently json was fixed.") + else: + raise ValueError("No valid JSON object found") + + except (json.JSONDecodeError, ValueError) as e: + if cfg.speak_mode: + speak.say_text("Didn't work. I will have to ignore this response then.") + logger.error("Error: Invalid JSON, setting it to empty JSON now.\n") + json_string = {} + + return json_string + + +def print_assistant_thoughts(assistant_reply): + """Prints the assistant's thoughts to the console""" + global ai_name + global cfg + try: + try: + # Parse and print Assistant response + assistant_reply_json = fix_and_parse_json(assistant_reply) + except json.JSONDecodeError as e: + logger.error("Error: Invalid JSON in assistant thoughts\n", assistant_reply) + assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply) + assistant_reply_json = fix_and_parse_json(assistant_reply_json) + + # Check if assistant_reply_json is a string and attempt to parse it into a JSON object + if isinstance(assistant_reply_json, str): + try: + assistant_reply_json = json.loads(assistant_reply_json) + except json.JSONDecodeError as e: + logger.error("Error: Invalid JSON\n", assistant_reply) + assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply_json) + + assistant_thoughts_reasoning = None + assistant_thoughts_plan = None + assistant_thoughts_speak = None + assistant_thoughts_criticism = None + assistant_thoughts = assistant_reply_json.get("thoughts", {}) + 
assistant_thoughts_text = assistant_thoughts.get("text") + + if assistant_thoughts: + assistant_thoughts_reasoning = assistant_thoughts.get("reasoning") + assistant_thoughts_plan = assistant_thoughts.get("plan") + assistant_thoughts_criticism = assistant_thoughts.get("criticism") + assistant_thoughts_speak = assistant_thoughts.get("speak") + + logger.typewriter_log(f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text) + logger.typewriter_log("REASONING:", Fore.YELLOW, assistant_thoughts_reasoning) + + if assistant_thoughts_plan: + logger.typewriter_log("PLAN:", Fore.YELLOW, "") + # If it's a list, join it into a string + if isinstance(assistant_thoughts_plan, list): + assistant_thoughts_plan = "\n".join(assistant_thoughts_plan) + elif isinstance(assistant_thoughts_plan, dict): + assistant_thoughts_plan = str(assistant_thoughts_plan) + + # Split the input_string using the newline character and dashes + lines = assistant_thoughts_plan.split('\n') + for line in lines: + line = line.lstrip("- ") + logger.typewriter_log("- ", Fore.GREEN, line.strip()) + + logger.typewriter_log("CRITICISM:", Fore.YELLOW, assistant_thoughts_criticism) + # Speak the assistant's thoughts + if cfg.speak_mode and assistant_thoughts_speak: + speak.say_text(assistant_thoughts_speak) + + return assistant_reply_json + except json.decoder.JSONDecodeError as e: + logger.error("Error: Invalid JSON\n", assistant_reply) + if cfg.speak_mode: + speak.say_text("I have received an invalid JSON response from the OpenAI API. 
I cannot ignore this response.") + + # All other errors, return "Error: + error message" + except Exception as e: + call_stack = traceback.format_exc() + logger.error("Error: \n", call_stack) + + +def construct_prompt(): + """Construct the prompt for the AI to respond to""" + config = AIConfig.load(cfg.ai_settings_file) + if cfg.skip_reprompt and config.ai_name: + logger.typewriter_log("Name :", Fore.GREEN, config.ai_name) + logger.typewriter_log("Role :", Fore.GREEN, config.ai_role) + logger.typewriter_log("Goals:", Fore.GREEN, config.ai_goals) + elif config.ai_name: + logger.typewriter_log( + f"Welcome back! ", + Fore.GREEN, + f"Would you like me to return to being {config.ai_name}?", + speak_text=True) + should_continue = utils.clean_input(f"""Continue with the last settings? +Name: {config.ai_name} +Role: {config.ai_role} +Goals: {config.ai_goals} +Continue (y/n): """) + if should_continue.lower() == "n": + config = AIConfig() + + if not config.ai_name: + config = prompt_user() + config.save() + + # Get rid of this global: + global ai_name + ai_name = config.ai_name + + full_prompt = config.construct_full_prompt() + return full_prompt + + +def prompt_user(): + """Prompt the user for input""" + ai_name = "" + # Construct the prompt + logger.typewriter_log( + "Welcome to Auto-GPT! ", + Fore.GREEN, + "Enter the name of your AI and its role below. 
Entering nothing will load defaults.", + speak_text=True) + + # Get AI Name from User + logger.typewriter_log( + "Name your AI: ", + Fore.GREEN, + "For example, 'Entrepreneur-GPT'") + ai_name = utils.clean_input("AI Name: ") + if ai_name == "": + ai_name = "Entrepreneur-GPT" + + logger.typewriter_log( + f"{ai_name} here!", + Fore.LIGHTBLUE_EX, + "I am at your service.", + speak_text=True) + + # Get AI Role from User + logger.typewriter_log( + "Describe your AI's role: ", + Fore.GREEN, + "For example, 'an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.'") + ai_role = utils.clean_input(f"{ai_name} is: ") + if ai_role == "": + ai_role = "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth." + + # Enter up to 5 goals for the AI + logger.typewriter_log( + "Enter up to 5 goals for your AI: ", + Fore.GREEN, + "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'") + print("Enter nothing to load defaults, enter nothing when finished.", flush=True) + ai_goals = [] + for i in range(5): + ai_goal = utils.clean_input(f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: ") + if ai_goal == "": + break + ai_goals.append(ai_goal) + if len(ai_goals) == 0: + ai_goals = ["Increase net worth", "Grow Twitter Account", + "Develop and manage multiple businesses autonomously"] + + config = AIConfig(ai_name, ai_role, ai_goals) + return config + + +def parse_arguments(): + """Parses the arguments passed to the script""" + global cfg + cfg.set_debug_mode(False) + cfg.set_continuous_mode(False) + cfg.set_speak_mode(False) + + parser = argparse.ArgumentParser(description='Process arguments.') + parser.add_argument('--continuous', '-c', action='store_true', help='Enable Continuous Mode') + parser.add_argument('--continuous-limit', '-l', type=int, dest="continuous_limit", help='Defines the number of times to run in continuous mode') + 
parser.add_argument('--speak', action='store_true', help='Enable Speak Mode') + parser.add_argument('--debug', action='store_true', help='Enable Debug Mode') + parser.add_argument('--gpt3only', action='store_true', help='Enable GPT3.5 Only Mode') + parser.add_argument('--gpt4only', action='store_true', help='Enable GPT4 Only Mode') + parser.add_argument('--use-memory', '-m', dest="memory_type", help='Defines which Memory backend to use') + parser.add_argument('--skip-reprompt', '-y', dest='skip_reprompt', action='store_true', help='Skips the re-prompting messages at the beginning of the script') + parser.add_argument('--ai-settings', '-C', dest='ai_settings_file', help="Specifies which ai_settings.yaml file to use, will also automatically skip the re-prompt.") + args = parser.parse_args() + + if args.debug: + logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED") + cfg.set_debug_mode(True) + + if args.continuous: + logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED") + logger.typewriter_log( + "WARNING: ", + Fore.RED, + "Continuous mode is not recommended. It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorise. 
Use at your own risk.") + cfg.set_continuous_mode(True) + + if args.continuous_limit: + logger.typewriter_log( + "Continuous Limit: ", + Fore.GREEN, + f"{args.continuous_limit}") + cfg.set_continuous_limit(args.continuous_limit) + + # Check if continuous limit is used without continuous mode + if args.continuous_limit and not args.continuous: + parser.error("--continuous-limit can only be used with --continuous") + + if args.speak: + logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED") + cfg.set_speak_mode(True) + + if args.gpt3only: + logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED") + cfg.set_smart_llm_model(cfg.fast_llm_model) + + if args.gpt4only: + logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED") + cfg.set_fast_llm_model(cfg.smart_llm_model) + + if args.memory_type: + supported_memory = get_supported_memory_backends() + chosen = args.memory_type + if not chosen in supported_memory: + logger.typewriter_log("ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED: ", Fore.RED, f'{supported_memory}') + logger.typewriter_log(f"Defaulting to: ", Fore.YELLOW, cfg.memory_backend) + else: + cfg.memory_backend = chosen + + if args.skip_reprompt: + logger.typewriter_log("Skip Re-prompt: ", Fore.GREEN, "ENABLED") + cfg.skip_reprompt = True + + if args.ai_settings_file: + file = args.ai_settings_file + + # Validate file + (validated, message) = utils.validate_yaml_file(file) + if not validated: + logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message) + logger.double_check() + exit(1) + + logger.typewriter_log("Using AI Settings File:", Fore.GREEN, file) + cfg.ai_settings_file = file + cfg.skip_reprompt = True + + +def main(): + global ai_name, memory + # TODO: fill in llm values here + check_openai_api_key() + parse_arguments() + logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO) + ai_name = "" + prompt = construct_prompt() + # print(prompt) + # Initialize variables + full_message_history = [] + result = None 
+ next_action_count = 0 + # Make a constant: + user_input = "Determine which next command to use, and respond using the format specified above:" + # Initialize memory and make sure it is empty. + # this is particularly important for indexing and referencing pinecone memory + memory = get_memory(cfg, init=True) + print('Using memory of type: ' + memory.__class__.__name__) + agent = Agent( + ai_name=ai_name, + memory=memory, + full_message_history=full_message_history, + next_action_count=next_action_count, + prompt=prompt, + user_input=user_input + ) + agent.start_interaction_loop() + + +class Agent: + """Agent class for interacting with Auto-GPT. + + Attributes: + ai_name: The name of the agent. + memory: The memory object to use. + full_message_history: The full message history. + next_action_count: The number of actions to execute. + prompt: The prompt to use. + user_input: The user input. + + """ + def __init__(self, + ai_name, + memory, + full_message_history, + next_action_count, + prompt, + user_input): + self.ai_name = ai_name + self.memory = memory + self.full_message_history = full_message_history + self.next_action_count = next_action_count + self.prompt = prompt + self.user_input = user_input + + def start_interaction_loop(self): + # Interaction Loop + loop_count = 0 + while True: + # Discontinue if continuous limit is reached + loop_count += 1 + if cfg.continuous_mode and cfg.continuous_limit > 0 and loop_count > cfg.continuous_limit: + logger.typewriter_log("Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}") + break + + # Send message to AI, get response + with Spinner("Thinking... "): + assistant_reply = chat.chat_with_ai( + self.prompt, + self.user_input, + self.full_message_history, + self.memory, + cfg.fast_token_limit) # TODO: This hardcodes the model to use GPT3.5. 
Make this an argument + + # Print Assistant thoughts + print_assistant_thoughts(assistant_reply) + + # Get command name and arguments + try: + command_name, arguments = cmd.get_command( + attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply)) + if cfg.speak_mode: + speak.say_text(f"I want to execute {command_name}") + except Exception as e: + logger.error("Error: \n", str(e)) + + if not cfg.continuous_mode and self.next_action_count == 0: + ### GET USER AUTHORIZATION TO EXECUTE COMMAND ### + # Get key press: Prompt the user to press enter to continue or escape + # to exit + self.user_input = "" + logger.typewriter_log( + "NEXT ACTION: ", + Fore.CYAN, + f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}") + print( + f"Enter 'y' to authorise command, 'y -N' to run N continuous commands, 'n' to exit program, or enter feedback for {self.ai_name}...", + flush=True) + while True: + console_input = utils.clean_input(Fore.MAGENTA + "Input:" + Style.RESET_ALL) + if console_input.lower().rstrip() == "y": + self.user_input = "GENERATE NEXT COMMAND JSON" + break + elif console_input.lower().startswith("y -"): + try: + self.next_action_count = abs(int(console_input.split(" ")[1])) + self.user_input = "GENERATE NEXT COMMAND JSON" + except ValueError: + print("Invalid input format. 
Please enter 'y -n' where n is the number of continuous tasks.") + continue + break + elif console_input.lower() == "n": + self.user_input = "EXIT" + break + else: + self.user_input = console_input + command_name = "human_feedback" + break + + if self.user_input == "GENERATE NEXT COMMAND JSON": + logger.typewriter_log( + "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=", + Fore.MAGENTA, + "") + elif self.user_input == "EXIT": + print("Exiting...", flush=True) + break + else: + # Print command + logger.typewriter_log( + "NEXT ACTION: ", + Fore.CYAN, + f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}") + + # Execute command + if command_name is not None and command_name.lower().startswith("error"): + result = f"Command {command_name} threw the following error: " + arguments + elif command_name == "human_feedback": + result = f"Human feedback: {self.user_input}" + else: + result = f"Command {command_name} returned: {cmd.execute_command(command_name, arguments)}" + if self.next_action_count > 0: + self.next_action_count -= 1 + + memory_to_add = f"Assistant Reply: {assistant_reply} " \ + f"\nResult: {result} " \ + f"\nHuman Feedback: {self.user_input} " + + self.memory.add(memory_to_add) + + # Check if there's a result from the command append it to the message + # history + if result is not None: + self.full_message_history.append(chat.create_chat_message("system", result)) + logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result) + else: + self.full_message_history.append( + chat.create_chat_message( + "system", "Unable to execute command")) + logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command") + + +if __name__ == "__main__": + main() diff --git a/autogpt/agent.py b/autogpt/agent.py new file mode 100644 index 00000000..d28769a9 --- /dev/null +++ b/autogpt/agent.py @@ -0,0 +1,245 @@ +import autogpt.commands as cmd +import json +import traceback +from colorama import Style + 
+from colorama import Fore, Style + +from autogpt import chat, speak, utils +from autogpt.config import Config +from autogpt.logger import logger +from autogpt.json_parser import fix_and_parse_json +from autogpt.spinner import Spinner + + +class Agent: + """Agent class for interacting with Auto-GPT. + + Attributes: + ai_name: The name of the agent. + memory: The memory object to use. + full_message_history: The full message history. + next_action_count: The number of actions to execute. + prompt: The prompt to use. + user_input: The user input. + + """ + def __init__(self, + ai_name, + memory, + full_message_history, + next_action_count, + prompt, + user_input): + self.ai_name = ai_name + self.memory = memory + self.full_message_history = full_message_history + self.next_action_count = next_action_count + self.prompt = prompt + self.user_input = user_input + + def start_interaction_loop(self): + # Interaction Loop + cfg = Config() + loop_count = 0 + while True: + # Discontinue if continuous limit is reached + loop_count += 1 + if cfg.continuous_mode and cfg.continuous_limit > 0 and loop_count > cfg.continuous_limit: + logger.typewriter_log("Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}") + break + + # Send message to AI, get response + with Spinner("Thinking... "): + assistant_reply = chat.chat_with_ai( + self.prompt, + self.user_input, + self.full_message_history, + self.memory, + cfg.fast_token_limit) # TODO: This hardcodes the model to use GPT3.5. 
Make this an argument + + # Print Assistant thoughts + print_assistant_thoughts(assistant_reply) + + # Get command name and arguments + try: + command_name, arguments = cmd.get_command( + attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply)) + if cfg.speak_mode: + speak.say_text(f"I want to execute {command_name}") + except Exception as e: + logger.error("Error: \n", str(e)) + + if not cfg.continuous_mode and self.next_action_count == 0: + ### GET USER AUTHORIZATION TO EXECUTE COMMAND ### + # Get key press: Prompt the user to press enter to continue or escape + # to exit + self.user_input = "" + logger.typewriter_log( + "NEXT ACTION: ", + Fore.CYAN, + f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}") + print( + f"Enter 'y' to authorise command, 'y -N' to run N continuous commands, 'n' to exit program, or enter feedback for {self.ai_name}...", + flush=True) + while True: + console_input = utils.clean_input(Fore.MAGENTA + "Input:" + Style.RESET_ALL) + if console_input.lower().rstrip() == "y": + self.user_input = "GENERATE NEXT COMMAND JSON" + break + elif console_input.lower().startswith("y -"): + try: + self.next_action_count = abs(int(console_input.split(" ")[1])) + self.user_input = "GENERATE NEXT COMMAND JSON" + except ValueError: + print("Invalid input format. 
Please enter 'y -n' where n is the number of continuous tasks.") + continue + break + elif console_input.lower() == "n": + self.user_input = "EXIT" + break + else: + self.user_input = console_input + command_name = "human_feedback" + break + + if self.user_input == "GENERATE NEXT COMMAND JSON": + logger.typewriter_log( + "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=", + Fore.MAGENTA, + "") + elif self.user_input == "EXIT": + print("Exiting...", flush=True) + break + else: + # Print command + logger.typewriter_log( + "NEXT ACTION: ", + Fore.CYAN, + f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}") + + # Execute command + if command_name is not None and command_name.lower().startswith("error"): + result = f"Command {command_name} threw the following error: " + arguments + elif command_name == "human_feedback": + result = f"Human feedback: {self.user_input}" + else: + result = f"Command {command_name} returned: {cmd.execute_command(command_name, arguments)}" + if self.next_action_count > 0: + self.next_action_count -= 1 + + memory_to_add = f"Assistant Reply: {assistant_reply} " \ + f"\nResult: {result} " \ + f"\nHuman Feedback: {self.user_input} " + + self.memory.add(memory_to_add) + + # Check if there's a result from the command append it to the message + # history + if result is not None: + self.full_message_history.append(chat.create_chat_message("system", result)) + logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result) + else: + self.full_message_history.append( + chat.create_chat_message( + "system", "Unable to execute command")) + logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command") + + +def attempt_to_fix_json_by_finding_outermost_brackets(json_string): + cfg = Config() + if cfg.speak_mode and cfg.debug_mode: + speak.say_text("I have received an invalid JSON response from the OpenAI API. 
Trying to fix it now.") + logger.typewriter_log("Attempting to fix JSON by finding outermost brackets\n") + + try: + # Use regex to search for JSON objects + import regex + json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}") + json_match = json_pattern.search(json_string) + + if json_match: + # Extract the valid JSON object from the string + json_string = json_match.group(0) + logger.typewriter_log(title="Apparently json was fixed.", title_color=Fore.GREEN) + if cfg.speak_mode and cfg.debug_mode: + speak.say_text("Apparently json was fixed.") + else: + raise ValueError("No valid JSON object found") + + except (json.JSONDecodeError, ValueError) as e: + if cfg.speak_mode: + speak.say_text("Didn't work. I will have to ignore this response then.") + logger.error("Error: Invalid JSON, setting it to empty JSON now.\n") + json_string = {} + + return json_string + + +def print_assistant_thoughts(assistant_reply): + """Prints the assistant's thoughts to the console""" + global ai_name + global cfg + cfg = Config() + try: + try: + # Parse and print Assistant response + assistant_reply_json = fix_and_parse_json(assistant_reply) + except json.JSONDecodeError as e: + logger.error("Error: Invalid JSON in assistant thoughts\n", assistant_reply) + assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply) + assistant_reply_json = fix_and_parse_json(assistant_reply_json) + + # Check if assistant_reply_json is a string and attempt to parse it into a JSON object + if isinstance(assistant_reply_json, str): + try: + assistant_reply_json = json.loads(assistant_reply_json) + except json.JSONDecodeError as e: + logger.error("Error: Invalid JSON\n", assistant_reply) + assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply_json) + + assistant_thoughts_reasoning = None + assistant_thoughts_plan = None + assistant_thoughts_speak = None + assistant_thoughts_criticism = None + assistant_thoughts = 
assistant_reply_json.get("thoughts", {}) + assistant_thoughts_text = assistant_thoughts.get("text") + + if assistant_thoughts: + assistant_thoughts_reasoning = assistant_thoughts.get("reasoning") + assistant_thoughts_plan = assistant_thoughts.get("plan") + assistant_thoughts_criticism = assistant_thoughts.get("criticism") + assistant_thoughts_speak = assistant_thoughts.get("speak") + + logger.typewriter_log(f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text) + logger.typewriter_log("REASONING:", Fore.YELLOW, assistant_thoughts_reasoning) + + if assistant_thoughts_plan: + logger.typewriter_log("PLAN:", Fore.YELLOW, "") + # If it's a list, join it into a string + if isinstance(assistant_thoughts_plan, list): + assistant_thoughts_plan = "\n".join(assistant_thoughts_plan) + elif isinstance(assistant_thoughts_plan, dict): + assistant_thoughts_plan = str(assistant_thoughts_plan) + + # Split the input_string using the newline character and dashes + lines = assistant_thoughts_plan.split('\n') + for line in lines: + line = line.lstrip("- ") + logger.typewriter_log("- ", Fore.GREEN, line.strip()) + + logger.typewriter_log("CRITICISM:", Fore.YELLOW, assistant_thoughts_criticism) + # Speak the assistant's thoughts + if cfg.speak_mode and assistant_thoughts_speak: + speak.say_text(assistant_thoughts_speak) + + return assistant_reply_json + except json.decoder.JSONDecodeError as e: + logger.error("Error: Invalid JSON\n", assistant_reply) + if cfg.speak_mode: + speak.say_text("I have received an invalid JSON response from the OpenAI API. 
I cannot ignore this response.") + + # All other errors, return "Error: + error message" + except Exception as e: + call_stack = traceback.format_exc() + logger.error("Error: \n", call_stack) diff --git a/scripts/agent_manager.py b/autogpt/agent_manager.py similarity index 97% rename from scripts/agent_manager.py rename to autogpt/agent_manager.py index 191ab838..2722b4be 100644 --- a/scripts/agent_manager.py +++ b/autogpt/agent_manager.py @@ -1,4 +1,4 @@ -from llm_utils import create_chat_completion +from autogpt.llm_utils import create_chat_completion next_key = 0 agents = {} # key, (task, full_message_history, model) diff --git a/scripts/ai_config.py b/autogpt/ai_config.py similarity index 98% rename from scripts/ai_config.py rename to autogpt/ai_config.py index 89a4e07e..73011206 100644 --- a/scripts/ai_config.py +++ b/autogpt/ai_config.py @@ -1,6 +1,6 @@ import yaml import os -from prompt import get_prompt +from autogpt.prompt import get_prompt class AIConfig: diff --git a/scripts/ai_functions.py b/autogpt/ai_functions.py similarity index 96% rename from scripts/ai_functions.py rename to autogpt/ai_functions.py index f4ee79cd..b6e3df48 100644 --- a/scripts/ai_functions.py +++ b/autogpt/ai_functions.py @@ -1,7 +1,7 @@ from typing import List import json -from config import Config -from call_ai_function import call_ai_function +from autogpt.config import Config +from autogpt.call_ai_function import call_ai_function cfg = Config() diff --git a/scripts/browse.py b/autogpt/browse.py similarity index 97% rename from scripts/browse.py rename to autogpt/browse.py index ef22de03..32e74fea 100644 --- a/scripts/browse.py +++ b/autogpt/browse.py @@ -1,8 +1,8 @@ import requests from bs4 import BeautifulSoup -from memory import get_memory -from config import Config -from llm_utils import create_chat_completion +from autogpt.memory import get_memory +from autogpt.config import Config +from autogpt.llm_utils import create_chat_completion from urllib.parse import urlparse, 
urljoin cfg = Config() diff --git a/scripts/call_ai_function.py b/autogpt/call_ai_function.py similarity index 90% rename from scripts/call_ai_function.py rename to autogpt/call_ai_function.py index 940eacfe..5bcd76af 100644 --- a/scripts/call_ai_function.py +++ b/autogpt/call_ai_function.py @@ -1,8 +1,7 @@ -from config import Config - +from autogpt.config import Config cfg = Config() -from llm_utils import create_chat_completion +from autogpt.llm_utils import create_chat_completion # This is a magic function that can do anything with no-code. See diff --git a/scripts/chat.py b/autogpt/chat.py similarity index 97% rename from scripts/chat.py rename to autogpt/chat.py index 5392e438..6b901ffa 100644 --- a/scripts/chat.py +++ b/autogpt/chat.py @@ -1,10 +1,10 @@ import time import openai from dotenv import load_dotenv -from config import Config -import token_counter -from llm_utils import create_chat_completion -from logger import logger +from autogpt.config import Config +from autogpt import token_counter +from autogpt.llm_utils import create_chat_completion +from autogpt.logger import logger import logging cfg = Config() diff --git a/scripts/commands.py b/autogpt/commands.py similarity index 95% rename from scripts/commands.py rename to autogpt/commands.py index 43f5ae42..7bcdaa6c 100644 --- a/scripts/commands.py +++ b/autogpt/commands.py @@ -1,15 +1,15 @@ -import browse +from autogpt import browse import json -from memory import get_memory +from autogpt.memory import get_memory import datetime -import agent_manager as agents -import speak -from config import Config -import ai_functions as ai -from file_operations import read_file, write_to_file, append_to_file, delete_file, search_files -from execute_code import execute_python_file, execute_shell -from json_parser import fix_and_parse_json -from image_gen import generate_image +import autogpt.agent_manager as agents +from autogpt import speak +from autogpt.config import Config +import autogpt.ai_functions as ai 
+from autogpt.file_operations import read_file, write_to_file, append_to_file, delete_file, search_files +from autogpt.execute_code import execute_python_file, execute_shell +from autogpt.json_parser import fix_and_parse_json +from autogpt.image_gen import generate_image from duckduckgo_search import ddg from googleapiclient.discovery import build from googleapiclient.errors import HttpError diff --git a/scripts/config.py b/autogpt/config.py similarity index 100% rename from scripts/config.py rename to autogpt/config.py diff --git a/scripts/data_ingestion.py b/autogpt/data_ingestion.py similarity index 95% rename from scripts/data_ingestion.py rename to autogpt/data_ingestion.py index 9addc34b..f8753240 100644 --- a/scripts/data_ingestion.py +++ b/autogpt/data_ingestion.py @@ -1,8 +1,8 @@ import argparse import logging -from config import Config -from memory import get_memory -from file_operations import ingest_file, search_files +from autogpt.config import Config +from autogpt.memory import get_memory +from autogpt.file_operations import ingest_file, search_files cfg = Config() diff --git a/scripts/execute_code.py b/autogpt/execute_code.py similarity index 100% rename from scripts/execute_code.py rename to autogpt/execute_code.py diff --git a/scripts/file_operations.py b/autogpt/file_operations.py similarity index 100% rename from scripts/file_operations.py rename to autogpt/file_operations.py diff --git a/scripts/image_gen.py b/autogpt/image_gen.py similarity index 97% rename from scripts/image_gen.py rename to autogpt/image_gen.py index 6c27df3f..cc5112e3 100644 --- a/scripts/image_gen.py +++ b/autogpt/image_gen.py @@ -2,7 +2,7 @@ import requests import io import os.path from PIL import Image -from config import Config +from autogpt.config import Config import uuid import openai from base64 import b64decode diff --git a/scripts/json_parser.py b/autogpt/json_parser.py similarity index 95% rename from scripts/json_parser.py rename to autogpt/json_parser.py index 
29995629..36555d5f 100644 --- a/scripts/json_parser.py +++ b/autogpt/json_parser.py @@ -1,9 +1,9 @@ import json from typing import Any, Dict, Union -from call_ai_function import call_ai_function -from config import Config -from json_utils import correct_json -from logger import logger +from autogpt.call_ai_function import call_ai_function +from autogpt.config import Config +from autogpt.json_utils import correct_json +from autogpt.logger import logger cfg = Config() diff --git a/scripts/json_utils.py b/autogpt/json_utils.py similarity index 99% rename from scripts/json_utils.py rename to autogpt/json_utils.py index 80aab192..8493f094 100644 --- a/scripts/json_utils.py +++ b/autogpt/json_utils.py @@ -1,6 +1,6 @@ import re import json -from config import Config +from autogpt.config import Config cfg = Config() diff --git a/scripts/llm_utils.py b/autogpt/llm_utils.py similarity index 98% rename from scripts/llm_utils.py rename to autogpt/llm_utils.py index 731acae2..24f47cc6 100644 --- a/scripts/llm_utils.py +++ b/autogpt/llm_utils.py @@ -1,7 +1,7 @@ import time import openai from colorama import Fore -from config import Config +from autogpt.config import Config cfg = Config() diff --git a/scripts/logger.py b/autogpt/logger.py similarity index 98% rename from scripts/logger.py rename to autogpt/logger.py index 4c7e588f..096d0891 100644 --- a/scripts/logger.py +++ b/autogpt/logger.py @@ -8,9 +8,9 @@ from colorama import Fore from colorama import Style -import speak -from config import Config -from config import Singleton +from autogpt import speak +from autogpt.config import Config +from autogpt.config import Singleton cfg = Config() diff --git a/scripts/memory/__init__.py b/autogpt/memory/__init__.py similarity index 87% rename from scripts/memory/__init__.py rename to autogpt/memory/__init__.py index 9b53d8d2..5cd5767f 100644 --- a/scripts/memory/__init__.py +++ b/autogpt/memory/__init__.py @@ -1,19 +1,18 @@ -from memory.local import LocalCache -from memory.no_memory 
import NoMemory - +from autogpt.memory.local import LocalCache +from autogpt.memory.no_memory import NoMemory # List of supported memory backends # Add a backend to this list if the import attempt is successful supported_memory = ['local', 'no_memory'] try: - from memory.redismem import RedisMemory + from autogpt.memory.redismem import RedisMemory supported_memory.append('redis') except ImportError: print("Redis not installed. Skipping import.") RedisMemory = None try: - from memory.pinecone import PineconeMemory + from autogpt.memory.pinecone import PineconeMemory supported_memory.append('pinecone') except ImportError: print("Pinecone not installed. Skipping import.") diff --git a/scripts/memory/base.py b/autogpt/memory/base.py similarity index 93% rename from scripts/memory/base.py rename to autogpt/memory/base.py index 4dbf6791..6b1f083c 100644 --- a/scripts/memory/base.py +++ b/autogpt/memory/base.py @@ -1,6 +1,6 @@ """Base class for memory providers.""" import abc -from config import AbstractSingleton, Config +from autogpt.config import AbstractSingleton, Config import openai cfg = Config() diff --git a/scripts/memory/local.py b/autogpt/memory/local.py similarity index 97% rename from scripts/memory/local.py rename to autogpt/memory/local.py index b0afacf6..23f632df 100644 --- a/scripts/memory/local.py +++ b/autogpt/memory/local.py @@ -3,7 +3,7 @@ import orjson from typing import Any, List, Optional import numpy as np import os -from memory.base import MemoryProviderSingleton, get_ada_embedding +from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding EMBED_DIM = 1536 diff --git a/scripts/memory/no_memory.py b/autogpt/memory/no_memory.py similarity index 96% rename from scripts/memory/no_memory.py rename to autogpt/memory/no_memory.py index 830982f9..37f00085 100644 --- a/scripts/memory/no_memory.py +++ b/autogpt/memory/no_memory.py @@ -1,6 +1,6 @@ from typing import Optional, List, Any -from memory.base import MemoryProviderSingleton +from 
autogpt.memory.base import MemoryProviderSingleton class NoMemory(MemoryProviderSingleton): diff --git a/scripts/memory/pinecone.py b/autogpt/memory/pinecone.py similarity index 94% rename from scripts/memory/pinecone.py rename to autogpt/memory/pinecone.py index 20a905b3..877e0b3d 100644 --- a/scripts/memory/pinecone.py +++ b/autogpt/memory/pinecone.py @@ -1,8 +1,9 @@ +from autogpt.config import Config, Singleton import pinecone -from memory.base import MemoryProviderSingleton, get_ada_embedding -from logger import logger +from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding +from autogpt.logger import logger from colorama import Fore, Style diff --git a/scripts/memory/redismem.py b/autogpt/memory/redismem.py similarity index 97% rename from scripts/memory/redismem.py rename to autogpt/memory/redismem.py index 49045dd8..febfd3a8 100644 --- a/scripts/memory/redismem.py +++ b/autogpt/memory/redismem.py @@ -6,8 +6,8 @@ from redis.commands.search.query import Query from redis.commands.search.indexDefinition import IndexDefinition, IndexType import numpy as np -from memory.base import MemoryProviderSingleton, get_ada_embedding -from logger import logger +from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding +from autogpt.logger import logger from colorama import Fore, Style diff --git a/scripts/prompt.py b/autogpt/prompt.py similarity index 98% rename from scripts/prompt.py rename to autogpt/prompt.py index 188603a3..4ea70749 100644 --- a/scripts/prompt.py +++ b/autogpt/prompt.py @@ -1,4 +1,4 @@ -from promptgenerator import PromptGenerator +from autogpt.promptgenerator import PromptGenerator def get_prompt(): diff --git a/scripts/promptgenerator.py b/autogpt/promptgenerator.py similarity index 100% rename from scripts/promptgenerator.py rename to autogpt/promptgenerator.py diff --git a/scripts/speak.py b/autogpt/speak.py similarity index 99% rename from scripts/speak.py rename to autogpt/speak.py index 3afa591d..9fadaa0e 
100644 --- a/scripts/speak.py +++ b/autogpt/speak.py @@ -1,7 +1,7 @@ import os from playsound import playsound import requests -from config import Config +from autogpt.config import Config cfg = Config() import gtts import threading diff --git a/scripts/spinner.py b/autogpt/spinner.py similarity index 100% rename from scripts/spinner.py rename to autogpt/spinner.py diff --git a/scripts/token_counter.py b/autogpt/token_counter.py similarity index 100% rename from scripts/token_counter.py rename to autogpt/token_counter.py diff --git a/scripts/utils.py b/autogpt/utils.py similarity index 100% rename from scripts/utils.py rename to autogpt/utils.py diff --git a/docker-compose.yml b/docker-compose.yml index af086f05..79f20bb5 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -8,7 +8,7 @@ services: - redis build: ./ volumes: - - "./scripts:/app" + - "./autogpt:/app" - ".env:/app/.env" profiles: ["exclude-from-up"] diff --git a/main.py b/main.py index 656c34ec..160addc3 100644 --- a/main.py +++ b/main.py @@ -1 +1 @@ -from scripts.main import main +from autogpt import main diff --git a/scripts/main.py b/scripts/main.py index a12f9c7f..b451e977 100644 --- a/scripts/main.py +++ b/scripts/main.py @@ -1,465 +1,9 @@ -import json -import random -import commands as cmd -import utils -from memory import get_memory, get_supported_memory_backends -import chat -from colorama import Fore, Style -from spinner import Spinner -import time -import speak -from config import Config -from json_parser import fix_and_parse_json -from ai_config import AIConfig -import traceback -import yaml -import argparse -from logger import logger -import logging -from prompt import get_prompt +from colorama import init, Style -cfg = Config() +# Initialize colorama +init(autoreset=True) - -def check_openai_api_key(): - """Check if the OpenAI API key is set in config.py or as an environment variable.""" - if not cfg.openai_api_key: - print( - Fore.RED + - "Please set your OpenAI API key in .env or 
as an environment variable." - ) - print("You can get your key from https://beta.openai.com/account/api-keys") - exit(1) - - -def attempt_to_fix_json_by_finding_outermost_brackets(json_string): - if cfg.speak_mode and cfg.debug_mode: - speak.say_text("I have received an invalid JSON response from the OpenAI API. Trying to fix it now.") - logger.typewriter_log("Attempting to fix JSON by finding outermost brackets\n") - - try: - # Use regex to search for JSON objects - import regex - json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}") - json_match = json_pattern.search(json_string) - - if json_match: - # Extract the valid JSON object from the string - json_string = json_match.group(0) - logger.typewriter_log(title="Apparently json was fixed.", title_color=Fore.GREEN) - if cfg.speak_mode and cfg.debug_mode: - speak.say_text("Apparently json was fixed.") - else: - raise ValueError("No valid JSON object found") - - except (json.JSONDecodeError, ValueError) as e: - if cfg.speak_mode: - speak.say_text("Didn't work. 
I will have to ignore this response then.") - logger.error("Error: Invalid JSON, setting it to empty JSON now.\n") - json_string = {} - - return json_string - - -def print_assistant_thoughts(assistant_reply): - """Prints the assistant's thoughts to the console""" - global ai_name - global cfg - try: - try: - # Parse and print Assistant response - assistant_reply_json = fix_and_parse_json(assistant_reply) - except json.JSONDecodeError as e: - logger.error("Error: Invalid JSON in assistant thoughts\n", assistant_reply) - assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply) - assistant_reply_json = fix_and_parse_json(assistant_reply_json) - - # Check if assistant_reply_json is a string and attempt to parse it into a JSON object - if isinstance(assistant_reply_json, str): - try: - assistant_reply_json = json.loads(assistant_reply_json) - except json.JSONDecodeError as e: - logger.error("Error: Invalid JSON\n", assistant_reply) - assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply_json) - - assistant_thoughts_reasoning = None - assistant_thoughts_plan = None - assistant_thoughts_speak = None - assistant_thoughts_criticism = None - assistant_thoughts = assistant_reply_json.get("thoughts", {}) - assistant_thoughts_text = assistant_thoughts.get("text") - - if assistant_thoughts: - assistant_thoughts_reasoning = assistant_thoughts.get("reasoning") - assistant_thoughts_plan = assistant_thoughts.get("plan") - assistant_thoughts_criticism = assistant_thoughts.get("criticism") - assistant_thoughts_speak = assistant_thoughts.get("speak") - - logger.typewriter_log(f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text) - logger.typewriter_log("REASONING:", Fore.YELLOW, assistant_thoughts_reasoning) - - if assistant_thoughts_plan: - logger.typewriter_log("PLAN:", Fore.YELLOW, "") - # If it's a list, join it into a string - if isinstance(assistant_thoughts_plan, list): - assistant_thoughts_plan 
= "\n".join(assistant_thoughts_plan) - elif isinstance(assistant_thoughts_plan, dict): - assistant_thoughts_plan = str(assistant_thoughts_plan) - - # Split the input_string using the newline character and dashes - lines = assistant_thoughts_plan.split('\n') - for line in lines: - line = line.lstrip("- ") - logger.typewriter_log("- ", Fore.GREEN, line.strip()) - - logger.typewriter_log("CRITICISM:", Fore.YELLOW, assistant_thoughts_criticism) - # Speak the assistant's thoughts - if cfg.speak_mode and assistant_thoughts_speak: - speak.say_text(assistant_thoughts_speak) - - return assistant_reply_json - except json.decoder.JSONDecodeError as e: - logger.error("Error: Invalid JSON\n", assistant_reply) - if cfg.speak_mode: - speak.say_text("I have received an invalid JSON response from the OpenAI API. I cannot ignore this response.") - - # All other errors, return "Error: + error message" - except Exception as e: - call_stack = traceback.format_exc() - logger.error("Error: \n", call_stack) - - -def construct_prompt(): - """Construct the prompt for the AI to respond to""" - config = AIConfig.load(cfg.ai_settings_file) - if cfg.skip_reprompt and config.ai_name: - logger.typewriter_log("Name :", Fore.GREEN, config.ai_name) - logger.typewriter_log("Role :", Fore.GREEN, config.ai_role) - logger.typewriter_log("Goals:", Fore.GREEN, config.ai_goals) - elif config.ai_name: - logger.typewriter_log( - f"Welcome back! ", - Fore.GREEN, - f"Would you like me to return to being {config.ai_name}?", - speak_text=True) - should_continue = utils.clean_input(f"""Continue with the last settings? 
-Name: {config.ai_name} -Role: {config.ai_role} -Goals: {config.ai_goals} -Continue (y/n): """) - if should_continue.lower() == "n": - config = AIConfig() - - if not config.ai_name: - config = prompt_user() - config.save() - - # Get rid of this global: - global ai_name - ai_name = config.ai_name - - full_prompt = config.construct_full_prompt() - return full_prompt - - -def prompt_user(): - """Prompt the user for input""" - ai_name = "" - # Construct the prompt - logger.typewriter_log( - "Welcome to Auto-GPT! ", - Fore.GREEN, - "Enter the name of your AI and its role below. Entering nothing will load defaults.", - speak_text=True) - - # Get AI Name from User - logger.typewriter_log( - "Name your AI: ", - Fore.GREEN, - "For example, 'Entrepreneur-GPT'") - ai_name = utils.clean_input("AI Name: ") - if ai_name == "": - ai_name = "Entrepreneur-GPT" - - logger.typewriter_log( - f"{ai_name} here!", - Fore.LIGHTBLUE_EX, - "I am at your service.", - speak_text=True) - - # Get AI Role from User - logger.typewriter_log( - "Describe your AI's role: ", - Fore.GREEN, - "For example, 'an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.'") - ai_role = utils.clean_input(f"{ai_name} is: ") - if ai_role == "": - ai_role = "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth." 
- - # Enter up to 5 goals for the AI - logger.typewriter_log( - "Enter up to 5 goals for your AI: ", - Fore.GREEN, - "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'") - print("Enter nothing to load defaults, enter nothing when finished.", flush=True) - ai_goals = [] - for i in range(5): - ai_goal = utils.clean_input(f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: ") - if ai_goal == "": - break - ai_goals.append(ai_goal) - if len(ai_goals) == 0: - ai_goals = ["Increase net worth", "Grow Twitter Account", - "Develop and manage multiple businesses autonomously"] - - config = AIConfig(ai_name, ai_role, ai_goals) - return config - - -def parse_arguments(): - """Parses the arguments passed to the script""" - global cfg - cfg.set_debug_mode(False) - cfg.set_continuous_mode(False) - cfg.set_speak_mode(False) - - parser = argparse.ArgumentParser(description='Process arguments.') - parser.add_argument('--continuous', '-c', action='store_true', help='Enable Continuous Mode') - parser.add_argument('--continuous-limit', '-l', type=int, dest="continuous_limit", help='Defines the number of times to run in continuous mode') - parser.add_argument('--speak', action='store_true', help='Enable Speak Mode') - parser.add_argument('--debug', action='store_true', help='Enable Debug Mode') - parser.add_argument('--gpt3only', action='store_true', help='Enable GPT3.5 Only Mode') - parser.add_argument('--gpt4only', action='store_true', help='Enable GPT4 Only Mode') - parser.add_argument('--use-memory', '-m', dest="memory_type", help='Defines which Memory backend to use') - parser.add_argument('--skip-reprompt', '-y', dest='skip_reprompt', action='store_true', help='Skips the re-prompting messages at the beginning of the script') - parser.add_argument('--ai-settings', '-C', dest='ai_settings_file', help="Specifies which ai_settings.yaml file to use, will also automatically skip the re-prompt.") - args = parser.parse_args() - - if 
args.debug: - logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED") - cfg.set_debug_mode(True) - - if args.continuous: - logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED") - logger.typewriter_log( - "WARNING: ", - Fore.RED, - "Continuous mode is not recommended. It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorise. Use at your own risk.") - cfg.set_continuous_mode(True) - - if args.continuous_limit: - logger.typewriter_log( - "Continuous Limit: ", - Fore.GREEN, - f"{args.continuous_limit}") - cfg.set_continuous_limit(args.continuous_limit) - - # Check if continuous limit is used without continuous mode - if args.continuous_limit and not args.continuous: - parser.error("--continuous-limit can only be used with --continuous") - - if args.speak: - logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED") - cfg.set_speak_mode(True) - - if args.gpt3only: - logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED") - cfg.set_smart_llm_model(cfg.fast_llm_model) - - if args.gpt4only: - logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED") - cfg.set_fast_llm_model(cfg.smart_llm_model) - - if args.memory_type: - supported_memory = get_supported_memory_backends() - chosen = args.memory_type - if not chosen in supported_memory: - logger.typewriter_log("ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED: ", Fore.RED, f'{supported_memory}') - logger.typewriter_log(f"Defaulting to: ", Fore.YELLOW, cfg.memory_backend) - else: - cfg.memory_backend = chosen - - if args.skip_reprompt: - logger.typewriter_log("Skip Re-prompt: ", Fore.GREEN, "ENABLED") - cfg.skip_reprompt = True - - if args.ai_settings_file: - file = args.ai_settings_file - - # Validate file - (validated, message) = utils.validate_yaml_file(file) - if not validated: - logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message) - logger.double_check() - exit(1) - - logger.typewriter_log("Using AI Settings 
File:", Fore.GREEN, file) - cfg.ai_settings_file = file - cfg.skip_reprompt = True - - -def main(): - global ai_name, memory - # TODO: fill in llm values here - check_openai_api_key() - parse_arguments() - logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO) - ai_name = "" - prompt = construct_prompt() - # print(prompt) - # Initialize variables - full_message_history = [] - result = None - next_action_count = 0 - # Make a constant: - user_input = "Determine which next command to use, and respond using the format specified above:" - # Initialize memory and make sure it is empty. - # this is particularly important for indexing and referencing pinecone memory - memory = get_memory(cfg, init=True) - print('Using memory of type: ' + memory.__class__.__name__) - agent = Agent( - ai_name=ai_name, - memory=memory, - full_message_history=full_message_history, - next_action_count=next_action_count, - prompt=prompt, - user_input=user_input - ) - agent.start_interaction_loop() - - -class Agent: - """Agent class for interacting with Auto-GPT. - - Attributes: - ai_name: The name of the agent. - memory: The memory object to use. - full_message_history: The full message history. - next_action_count: The number of actions to execute. - prompt: The prompt to use. - user_input: The user input. 
- - """ - def __init__(self, - ai_name, - memory, - full_message_history, - next_action_count, - prompt, - user_input): - self.ai_name = ai_name - self.memory = memory - self.full_message_history = full_message_history - self.next_action_count = next_action_count - self.prompt = prompt - self.user_input = user_input - - def start_interaction_loop(self): - # Interaction Loop - loop_count = 0 - while True: - # Discontinue if continuous limit is reached - loop_count += 1 - if cfg.continuous_mode and cfg.continuous_limit > 0 and loop_count > cfg.continuous_limit: - logger.typewriter_log("Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}") - break - - # Send message to AI, get response - with Spinner("Thinking... "): - assistant_reply = chat.chat_with_ai( - self.prompt, - self.user_input, - self.full_message_history, - self.memory, - cfg.fast_token_limit) # TODO: This hardcodes the model to use GPT3.5. Make this an argument - - # Print Assistant thoughts - print_assistant_thoughts(assistant_reply) - - # Get command name and arguments - try: - command_name, arguments = cmd.get_command( - attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply)) - if cfg.speak_mode: - speak.say_text(f"I want to execute {command_name}") - except Exception as e: - logger.error("Error: \n", str(e)) - - if not cfg.continuous_mode and self.next_action_count == 0: - ### GET USER AUTHORIZATION TO EXECUTE COMMAND ### - # Get key press: Prompt the user to press enter to continue or escape - # to exit - self.user_input = "" - logger.typewriter_log( - "NEXT ACTION: ", - Fore.CYAN, - f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}") - print( - f"Enter 'y' to authorise command, 'y -N' to run N continuous commands, 'n' to exit program, or enter feedback for {self.ai_name}...", - flush=True) - while True: - console_input = utils.clean_input(Fore.MAGENTA + "Input:" + Style.RESET_ALL) - if console_input.lower().rstrip() 
== "y": - self.user_input = "GENERATE NEXT COMMAND JSON" - break - elif console_input.lower().startswith("y -"): - try: - self.next_action_count = abs(int(console_input.split(" ")[1])) - self.user_input = "GENERATE NEXT COMMAND JSON" - except ValueError: - print("Invalid input format. Please enter 'y -n' where n is the number of continuous tasks.") - continue - break - elif console_input.lower() == "n": - self.user_input = "EXIT" - break - else: - self.user_input = console_input - command_name = "human_feedback" - break - - if self.user_input == "GENERATE NEXT COMMAND JSON": - logger.typewriter_log( - "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=", - Fore.MAGENTA, - "") - elif self.user_input == "EXIT": - print("Exiting...", flush=True) - break - else: - # Print command - logger.typewriter_log( - "NEXT ACTION: ", - Fore.CYAN, - f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}") - - # Execute command - if command_name is not None and command_name.lower().startswith("error"): - result = f"Command {command_name} threw the following error: " + arguments - elif command_name == "human_feedback": - result = f"Human feedback: {self.user_input}" - else: - result = f"Command {command_name} returned: {cmd.execute_command(command_name, arguments)}" - if self.next_action_count > 0: - self.next_action_count -= 1 - - memory_to_add = f"Assistant Reply: {assistant_reply} " \ - f"\nResult: {result} " \ - f"\nHuman Feedback: {self.user_input} " - - self.memory.add(memory_to_add) - - # Check if there's a result from the command append it to the message - # history - if result is not None: - self.full_message_history.append(chat.create_chat_message("system", result)) - logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result) - else: - self.full_message_history.append( - chat.create_chat_message( - "system", "Unable to execute command")) - logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command") - - 
-if __name__ == "__main__": - main() +# Use the bold ANSI style +print(f"""{Style.BRIGHT}Please run: +python -m autogpt +""") diff --git a/tests.py b/tests.py index 4dbfdd46..487e0038 100644 --- a/tests.py +++ b/tests.py @@ -1,8 +1,8 @@ import unittest if __name__ == "__main__": - # Load all tests from the 'scripts/tests' package - suite = unittest.defaultTestLoader.discover('scripts/tests') + # Load all tests from the 'autogpt/tests' package + suite = unittest.defaultTestLoader.discover('autogpt/tests') # Run the tests unittest.TextTestRunner().run(suite) diff --git a/tests/integration/memory_tests.py b/tests/integration/memory_tests.py index d0c30962..ea96c4c2 100644 --- a/tests/integration/memory_tests.py +++ b/tests/integration/memory_tests.py @@ -3,16 +3,15 @@ import random import string import sys from pathlib import Path -# Add the parent directory of the 'scripts' folder to the Python path -sys.path.append(str(Path(__file__).resolve().parent.parent.parent / 'scripts')) -from config import Config -from memory.local import LocalCache +from autogpt.config import Config +from autogpt.memory.local import LocalCache class TestLocalCache(unittest.TestCase): def random_string(self, length): - return ''.join(random.choice(string.ascii_letters) for _ in range(length)) + return ''.join( + random.choice(string.ascii_letters) for _ in range(length)) def setUp(self): cfg = cfg = Config() diff --git a/tests/local_cache_test.py b/tests/local_cache_test.py index 0352624e..601b11d8 100644 --- a/tests/local_cache_test.py +++ b/tests/local_cache_test.py @@ -1,8 +1,7 @@ import os import sys -# Probably a better way: -sys.path.append(os.path.abspath('../scripts')) -from memory.local import LocalCache + +from autogpt.memory.local import LocalCache def MockConfig(): diff --git a/tests/promptgenerator_tests.py b/tests/promptgenerator_tests.py index 181fdea6..aac70b5e 100644 --- a/tests/promptgenerator_tests.py +++ b/tests/promptgenerator_tests.py @@ -3,9 +3,7 @@ import unittest 
import sys import os -# Add the path to the "scripts" directory to import the PromptGenerator module -sys.path.append(os.path.abspath("../scripts")) -from promptgenerator import PromptGenerator +from autogpt.promptgenerator import PromptGenerator # Create a test class for the PromptGenerator, subclassed from unittest.TestCase diff --git a/tests/test_config.py b/tests/test_config.py index ba8381e1..af5fb2a8 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -1,5 +1,5 @@ import unittest -from scripts.config import Config +from autogpt.config import Config class TestConfig(unittest.TestCase): diff --git a/tests/test_json_parser.py b/tests/test_json_parser.py index c403c73d..c9d5e14b 100644 --- a/tests/test_json_parser.py +++ b/tests/test_json_parser.py @@ -1,7 +1,7 @@ import unittest import tests.context -from scripts.json_parser import fix_and_parse_json +from autogpt.json_parser import fix_and_parse_json class TestParseJson(unittest.TestCase): diff --git a/tests/unit/json_tests.py b/tests/unit/json_tests.py index 4f326721..d2612345 100644 --- a/tests/unit/json_tests.py +++ b/tests/unit/json_tests.py @@ -1,9 +1,6 @@ import unittest -import os -import sys -# Probably a better way: -sys.path.append(os.path.abspath('../scripts')) -from json_parser import fix_and_parse_json + +from autogpt.json_parser import fix_and_parse_json class TestParseJson(unittest.TestCase): diff --git a/tests/unit/test_browse_scrape_text.py b/tests/unit/test_browse_scrape_text.py index 9385cde7..f98e8628 100644 --- a/tests/unit/test_browse_scrape_text.py +++ b/tests/unit/test_browse_scrape_text.py @@ -3,7 +3,7 @@ import requests -from scripts.browse import scrape_text +from autogpt.browse import scrape_text """ Code Analysis