diff --git a/.env.template b/.env.template
index c4a5ec72..479730e8 100644
--- a/.env.template
+++ b/.env.template
@@ -1,6 +1,7 @@
 PINECONE_API_KEY=your-pinecone-api-key
 PINECONE_ENV=your-pinecone-region
 OPENAI_API_KEY=your-openai-api-key
+TEMPERATURE=1
 ELEVENLABS_API_KEY=your-elevenlabs-api-key
 ELEVENLABS_VOICE_1_ID=your-voice-id
 ELEVENLABS_VOICE_2_ID=your-voice-id
@@ -9,6 +10,7 @@ FAST_LLM_MODEL=gpt-3.5-turbo
 GOOGLE_API_KEY=
 CUSTOM_SEARCH_ENGINE_ID=
 USE_AZURE=False
+EXECUTE_LOCAL_COMMANDS=False
 IMAGE_PROVIDER=dalle
 HUGGINGFACE_API_TOKEN=
 USE_MAC_OS_TTS=False
diff --git a/scripts/commands.py b/scripts/commands.py
index 92d46ae1..3966e86a 100644
--- a/scripts/commands.py
+++ b/scripts/commands.py
@@ -7,7 +7,7 @@ import speak
 from config import Config
 import ai_functions as ai
 from file_operations import read_file, write_to_file, append_to_file, delete_file, search_files
-from execute_code import execute_python_file
+from execute_code import execute_python_file, execute_shell
 from json_parser import fix_and_parse_json
 from image_gen import generate_image
 from duckduckgo_search import ddg
@@ -103,6 +103,11 @@ def execute_command(command_name, arguments):
             return ai.write_tests(arguments["code"], arguments.get("focus"))
         elif command_name == "execute_python_file": # Add this command
             return execute_python_file(arguments["file"])
+        elif command_name == "execute_shell":
+            if cfg.execute_local_commands:
+                return execute_shell(arguments["command_line"])
+            else:
+                return "You are not allowed to run local shell commands. To execute shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' in your config. Do not attempt to bypass the restriction."
         elif command_name == "generate_image":
             return generate_image(arguments["prompt"])
         elif command_name == "do_nothing":
diff --git a/scripts/config.py b/scripts/config.py
index 77b06676..e4fa29ac 100644
--- a/scripts/config.py
+++ b/scripts/config.py
@@ -44,8 +44,11 @@ class Config(metaclass=Singleton):
         self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
 
         self.openai_api_key = os.getenv("OPENAI_API_KEY")
+        self.temperature = float(os.getenv("TEMPERATURE", "1"))
         self.use_azure = False
         self.use_azure = os.getenv("USE_AZURE") == 'True'
+        self.execute_local_commands = os.getenv('EXECUTE_LOCAL_COMMANDS', 'False') == 'True'
+
         if self.use_azure:
             self.load_azure_config()
             openai.api_type = "azure"
diff --git a/scripts/data/prompt.txt b/scripts/data/prompt.txt
index fc68f3ae..ffb9eb50 100644
--- a/scripts/data/prompt.txt
+++ b/scripts/data/prompt.txt
@@ -22,9 +22,10 @@ COMMANDS:
 16. Get Improved Code: "improve_code", args: "suggestions": "<list_of_suggestions>", "code": "<full_code_string>"
 17. Write Tests: "write_tests", args: "code": "<full_code_string>", "focus": "<list_of_focus_areas>"
 18. Execute Python File: "execute_python_file", args: "file": "<file>"
-19. Task Complete (Shutdown): "task_complete", args: "reason": "<reason>"
-20. Generate Image: "generate_image", args: "prompt": "<prompt>"
-21. Do Nothing: "do_nothing", args: ""
+19. Execute Shell Command, non-interactive commands only: "execute_shell", args: "command_line": "<command_line>".
+20. Task Complete (Shutdown): "task_complete", args: "reason": "<reason>"
+21. Generate Image: "generate_image", args: "prompt": "<prompt>"
+22. Do Nothing: "do_nothing", args: ""
 
 RESOURCES:
 
diff --git a/scripts/execute_code.py b/scripts/execute_code.py
index a8f90911..2c92903c 100644
--- a/scripts/execute_code.py
+++ b/scripts/execute_code.py
@@ -1,17 +1,20 @@
 import docker
 import os
+import subprocess
+
+
+WORKSPACE_FOLDER = "auto_gpt_workspace"
 
 
 def execute_python_file(file):
     """Execute a Python file in a Docker container and return the output"""
-    workspace_folder = "auto_gpt_workspace"
 
-    print (f"Executing file '{file}' in workspace '{workspace_folder}'")
+    print (f"Executing file '{file}' in workspace '{WORKSPACE_FOLDER}'")
 
     if not file.endswith(".py"):
         return "Error: Invalid file type. Only .py files are allowed."
 
-    file_path = os.path.join(workspace_folder, file)
+    file_path = os.path.join(WORKSPACE_FOLDER, file)
 
     if not os.path.isfile(file_path):
         return f"Error: File '{file}' does not exist."
@@ -19,14 +22,31 @@ def execute_python_file(file):
 
     try:
         client = docker.from_env()
+        image_name = 'python:3.10'
+        try:
+            client.images.get(image_name)
+            print(f"Image '{image_name}' found locally")
+        except docker.errors.ImageNotFound:
+            print(f"Image '{image_name}' not found locally, pulling from Docker Hub")
+            # Use the low-level API to stream the pull response
+            low_level_client = docker.APIClient()
+            for line in low_level_client.pull(image_name, stream=True, decode=True):
+                # Print the status and progress, if available
+                status = line.get('status')
+                progress = line.get('progress')
+                if status and progress:
+                    print(f"{status}: {progress}")
+                elif status:
+                    print(status)
+
         # You can replace 'python:3.8' with the desired Python image/version
         # You can find available Python images on Docker Hub:
         # https://hub.docker.com/_/python
         container = client.containers.run(
-            'python:3.10',
+            image_name,
             f'python {file}',
             volumes={
-                os.path.abspath(workspace_folder): {
+                os.path.abspath(WORKSPACE_FOLDER): {
                     'bind': '/workspace',
                     'mode': 'ro'}},
             working_dir='/workspace',
@@ -46,3 +66,22 @@
 
     except Exception as e:
         return f"Error: {str(e)}"
+
+def execute_shell(command_line):
+
+    current_dir = os.getcwd()
+
+    if WORKSPACE_FOLDER not in current_dir: # Change dir into workspace if necessary
+        work_dir = os.path.join(os.getcwd(), WORKSPACE_FOLDER)
+        os.chdir(work_dir)
+
+    print (f"Executing command '{command_line}' in working directory '{os.getcwd()}'")
+
+    result = subprocess.run(command_line, capture_output=True, shell=True)
+    output = f"STDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}"
+
+    # Change back to whatever the prior working dir was
+
+    os.chdir(current_dir)
+
+    return output
diff --git a/scripts/llm_utils.py b/scripts/llm_utils.py
index c49aeb09..35cc5ce0 100644
--- a/scripts/llm_utils.py
+++ b/scripts/llm_utils.py
@@ -5,7 +5,7 @@ cfg = Config()
 openai.api_key = cfg.openai_api_key
 
 # Overly simple abstraction until we create something better
-def create_chat_completion(messages, model=None, temperature=None, max_tokens=None)->str:
+def create_chat_completion(messages, model=None, temperature=cfg.temperature, max_tokens=None)->str:
     """Create a chat completion using the OpenAI API"""
     if cfg.use_azure:
         response = openai.ChatCompletion.create(
diff --git a/scripts/main.py b/scripts/main.py
index 7a4a32d4..81f560b2 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -318,7 +318,6 @@ def parse_arguments():
 
 # TODO: fill in llm values here
 check_openai_api_key()
-cfg = Config()
 parse_arguments()
 logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO)
 ai_name = ""
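
Illustration (not part of the patch): a minimal, self-contained sketch of how the new EXECUTE_LOCAL_COMMANDS gate and execute_shell() fit together. It reuses the names the patch introduces but deviates in two labeled places: it passes text=True so stdout/stderr come back as str (the patched code returns raw bytes), and it creates the workspace folder if it is missing.

import os
import subprocess

WORKSPACE_FOLDER = "auto_gpt_workspace"

# Config-style gate: anything other than the exact string 'True' disables shell access.
execute_local_commands = os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True"


def execute_shell(command_line):
    """Run a shell command inside the workspace, then restore the prior cwd."""
    current_dir = os.getcwd()
    if WORKSPACE_FOLDER not in current_dir:
        work_dir = os.path.join(current_dir, WORKSPACE_FOLDER)
        os.makedirs(work_dir, exist_ok=True)  # assumption: create the folder if missing
        os.chdir(work_dir)
    try:
        # text=True is a deviation from the patch, which returns raw bytes.
        result = subprocess.run(command_line, capture_output=True, shell=True, text=True)
        return f"STDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}"
    finally:
        # Change back to whatever the prior working dir was, even on error.
        os.chdir(current_dir)


if __name__ == "__main__":
    if execute_local_commands:
        print(execute_shell("echo hello"))
    else:
        print("Shell commands disabled; set EXECUTE_LOCAL_COMMANDS=True to enable.")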