diff --git a/.env.template b/.env.template
index c64d8502..525cd61c 100644
--- a/.env.template
+++ b/.env.template
@@ -1,6 +1,14 @@
+PINECONE_API_KEY=your-pinecone-api-key
+PINECONE_ENV=your-pinecone-region
 OPENAI_API_KEY=your-openai-api-key
 ELEVENLABS_API_KEY=your-elevenlabs-api-key
 SMART_LLM_MODEL="gpt-4"
 FAST_LLM_MODEL="gpt-3.5-turbo"
 GOOGLE_API_KEY=
-CUSTOM_SEARCH_ENGINE_ID=
\ No newline at end of file
+CUSTOM_SEARCH_ENGINE_ID=
+USE_AZURE=False
+OPENAI_API_BASE=your-base-url-for-azure
+OPENAI_API_VERSION=api-version-for-azure
+OPENAI_DEPLOYMENT_ID=deployment-id-for-azure
+IMAGE_PROVIDER=dalle
+HUGGINGFACE_API_TOKEN=
\ No newline at end of file
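As a quick sanity check of the new template keys above, here is a minimal sketch of how they get read at runtime — the variable names come from this template, and the parsing mirrors what `scripts/config.py` does later in this diff (treat the snippet itself as illustrative, not part of the patch):

```python
import os
from dotenv import load_dotenv  # python-dotenv, already pinned in requirements.txt

load_dotenv()  # reads the renamed .env file from the working directory

# New keys introduced by this change
pinecone_api_key = os.getenv("PINECONE_API_KEY")
pinecone_region = os.getenv("PINECONE_ENV")
image_provider = os.getenv("IMAGE_PROVIDER", "dalle")
use_azure = os.getenv("USE_AZURE") == "True"  # stored as the string "True"/"False"

if use_azure:
    # These three are only meaningful when USE_AZURE=True
    api_base = os.getenv("OPENAI_API_BASE")
    api_version = os.getenv("OPENAI_API_VERSION")
    deployment_id = os.getenv("OPENAI_DEPLOYMENT_ID")
```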
diff --git a/.github/ISSUE_TEMPLATE/1.bug.yml b/.github/ISSUE_TEMPLATE/1.bug.yml
new file mode 100644
index 00000000..cf49ab5f
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/1.bug.yml
@@ -0,0 +1,39 @@
+name: Bug report 🐛
+description: Create a bug report for Auto-GPT.
+labels: ['status: needs triage']
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Please provide a searchable summary of the issue in the title above ⬆️.
+
+        Thanks for contributing by creating an issue! ❤️
+  - type: checkboxes
+    attributes:
+      label: Duplicates
+      description: Please [search the history](https://github.com/Torantulino/Auto-GPT/issues) to see if an issue already exists for the same problem.
+      options:
+        - label: I have searched the existing issues
+          required: true
+  - type: textarea
+    attributes:
+      label: Steps to reproduce 🕹
+      description: |
+        **⚠️ Issues that we can't reproduce will be closed.**
+  - type: textarea
+    attributes:
+      label: Current behavior 😯
+      description: Describe what happens instead of the expected behavior.
+  - type: textarea
+    attributes:
+      label: Expected behavior 🤔
+      description: Describe what should happen.
+  - type: textarea
+    attributes:
+      label: Your prompt 📝
+      description: |
+        Please provide the prompt you are using. You can find your last-used prompt in last_run_ai_settings.yaml.
+      value: |
+        ```yaml
+        # Paste your prompt here
+        ```
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE/2.feature.yml b/.github/ISSUE_TEMPLATE/2.feature.yml
new file mode 100644
index 00000000..0ea882ef
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/2.feature.yml
@@ -0,0 +1,29 @@
+name: Feature request 🚀
+description: Suggest a new idea for Auto-GPT.
+labels: ['status: needs triage']
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Please provide a searchable summary of the issue in the title above ⬆️.
+
+        Thanks for contributing by creating an issue! ❤️
+  - type: checkboxes
+    attributes:
+      label: Duplicates
+      description: Please [search the history](https://github.com/Torantulino/Auto-GPT/issues) to see if an issue already exists for the same problem.
+      options:
+        - label: I have searched the existing issues
+          required: true
+  - type: textarea
+    attributes:
+      label: Summary 💡
+      description: Describe how it should work.
+  - type: textarea
+    attributes:
+      label: Examples 🌈
+      description: Provide a link to other implementations, or screenshots of the expected behavior.
+  - type: textarea
+    attributes:
+      label: Motivation 🔦
+      description: What are you trying to accomplish? How has the lack of this feature affected you? Providing context helps us come up with a solution that is more useful in the real world.
\ No newline at end of file
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 00000000..cb8ce34a
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,18 @@
+### Background
+
+
+### Changes
+
+
+### Test Plan
+
+
+### Change Safety
+
+- [ ] I have added tests to cover my changes
+- [ ] I have considered potential risks and mitigations for my changes
+
diff --git a/.gitignore b/.gitignore
index 6b8f00b5..7091a872 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,8 +4,8 @@ scripts/node_modules/
 scripts/__pycache__/keys.cpython-310.pyc
 package-lock.json
 *.pyc
-scripts/auto_gpt_workspace/*
+auto_gpt_workspace/*
 *.mpeg
 .env
-last_run_ai_settings.yaml
-outputs/*
\ No newline at end of file
+outputs/*
+ai_settings.yaml
\ No newline at end of file
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 00000000..001d5547
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,56 @@
+
+To contribute to this GitHub project, you can follow these steps:
+
+1. Fork the repository you want to contribute to by clicking the "Fork" button on the project page.
+
+2. Clone the repository to your local machine using the following command:
+
+```
+git clone https://github.com/Torantulino/Auto-GPT
+```
+3. Create a new branch for your changes using the following command:
+
+```
+git checkout -b "branch-name"
+```
+4. Make your changes to the code or documentation.
+- Example: Improve User Interface or Add Documentation.
+
+
+5. Add the changes to the staging area using the following command:
+```
+git add .
+```
+
+6. Commit the changes with a meaningful commit message using the following command:
+```
+git commit -m "your commit message"
+```
+7. Push the changes to your forked repository using the following command:
+```
+git push origin branch-name
+```
+8. Go to the GitHub website and navigate to your forked repository.
+
+9. Click the "New pull request" button.
+
+10. Select the branch you just pushed to and the branch you want to merge into on the original repository.
+
+11. Add a description of your changes and click the "Create pull request" button.
+
+12. Wait for the project maintainer to review your changes and provide feedback.
+
+13. Make any necessary changes based on feedback and repeat steps 5-12 until your changes are accepted and merged into the main project.
+
+14. Once your changes are merged, you can update your forked repository and local copy of the repository with the following commands:
+
+```
+git fetch upstream
+git checkout master
+git merge upstream/master
+```
+Finally, delete the branch you created with the following command:
+```
+git branch -d branch-name
+```
+That's it, you made it 🐣⭐⭐
diff --git a/README.md b/README.md
index 5848c585..ba80818d 100644
--- a/README.md
+++ b/README.md
@@ -9,7 +9,7 @@ Auto-GPT is an experimental open-source application showcasing the capabilities
 
 https://user-images.githubusercontent.com/22963551/228855501-2f5777cf-755b-4407-a643-c7299e5b6419.mp4
 
-## 💖 Help Fund Auto-GPT's Development
+
 If you can spare a coffee, you can help to cover the API costs of developing Auto-GPT and help push the boundaries of fully autonomous AI!
 A full day of development can easily cost as much as $20 in API costs, which for a free project is quite limiting.
 
@@ -17,13 +17,13 @@ Your support is greatly appreciated
-Development of this free, open-source project is made possible by all the contributors and sponsors. If you'd like to sponsor this project and have your avatar or company logo appear below click here. 💖
+Development of this free, open-source project is made possible by all the contributors and sponsors. If you'd like to sponsor this project and have your avatar or company logo appear below click here.
+
+
-
-
@@ -42,6 +42,7 @@
   - [Setting up environment variables](#setting-up-environment-variables)
 - [💀 Continuous Mode ⚠️](#-continuous-mode-️)
 - [GPT3.5 ONLY Mode](#gpt35-only-mode)
+- [🖼 Image Generation](#image-generation)
 - [⚠️ Limitations](#️-limitations)
 - [🛡 Disclaimer](#-disclaimer)
 - [🐦 Connect with Us on Twitter](#-connect-with-us-on-twitter)
@@ -56,8 +57,9 @@ Your support is greatly appreciated
 - 🗃️ File storage and summarization with GPT-3.5
 
 ## 📋 Requirements
-- [Python 3.7 or later](https://www.tutorialspoint.com/how-to-install-python-in-windows)
+- [Python 3.8 or later](https://www.tutorialspoint.com/how-to-install-python-in-windows)
 - OpenAI API key
+- Pinecone API key
 
 Optional:
 - ElevenLabs Key (If you want the AI to speak)
@@ -91,6 +93,7 @@ pip install -r requirements.txt
 4. Rename `.env.template` to `.env` and fill in your `OPENAI_API_KEY`. If you plan to use Speech Mode, fill in your `ELEVEN_LABS_API_KEY` as well.
   - Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys.
   - Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website.
+  - If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and provide the `OPENAI_API_BASE`, `OPENAI_API_VERSION` and `OPENAI_DEPLOYMENT_ID` values as explained here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section
 
 ## 🔧 Usage
 
@@ -138,6 +141,70 @@ export CUSTOM_SEARCH_ENGINE_ID="YOUR_CUSTOM_SEARCH_ENGINE_ID"
 
 ```
 
+## Redis Setup
+
+Install Docker Desktop.
+
+Run:
+```
+docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest
+```
+See https://hub.docker.com/r/redis/redis-stack-server for setting a password and additional configuration.
+
+Set the following environment variables:
+```
+MEMORY_BACKEND=redis
+REDIS_HOST=localhost
+REDIS_PORT=6379
+REDIS_PASSWORD=
+```
+
+Note that this setup is not intended to face the internet and is not secure; do not expose Redis to the internet without a password, or ideally not at all.
+
+You can optionally set
+
+```
+WIPE_REDIS_ON_START=False
+```
+
+to persist memory stored in Redis.
+
+You can specify the memory index for Redis using the following:
+
+```
+MEMORY_INDEX=whatever
+```
+
+## 🌲 Pinecone API Key Setup
+
+Pinecone enables vector-based memory, so a vast memory can be stored and only relevant memories are loaded for the agent at any given time.
+
+1. Go to app.pinecone.io and make an account if you don't already have one.
+2. Choose the `Starter` plan to avoid being charged.
+3. Find your API key and region under the default project in the left sidebar.
+
+### Setting up environment variables
+
+For Windows users:
+```
+setx PINECONE_API_KEY "YOUR_PINECONE_API_KEY"
+setx PINECONE_ENV "Your pinecone region" # something like: us-east4-gcp
+```
+For macOS and Linux users:
+```
+export PINECONE_API_KEY="YOUR_PINECONE_API_KEY"
+export PINECONE_ENV="Your pinecone region" # something like: us-east4-gcp
+```
+
+Or you can set them in the `.env` file.
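To confirm the Pinecone credentials work before running Auto-GPT, a minimal sketch using the pinned `pinecone-client==2.2.1` may help — the index name `auto-gpt` mirrors the `MEMORY_INDEX` default added to `scripts/config.py` below, and the embedding dimension of 1536 (OpenAI's text embeddings) is an assumption worth verifying:

```python
import os
import pinecone  # pinecone-client==2.2.1, pinned in requirements.txt

# Credentials as described in the setup steps above
pinecone.init(
    api_key=os.getenv("PINECONE_API_KEY"),
    environment=os.getenv("PINECONE_ENV"),  # e.g. "us-east4-gcp"
)

# 1536 matches OpenAI's text-embedding dimension (assumed here)
if "auto-gpt" not in pinecone.list_indexes():
    pinecone.create_index("auto-gpt", dimension=1536, metric="cosine")

index = pinecone.Index("auto-gpt")
print(index.describe_index_stats())  # should show an empty index on first run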
+
+## View Memory Usage
+
+1. View memory usage by using the `--debug` flag :)
+
+
 ## 💀 Continuous Mode ⚠️
 Run the AI **without** user authorisation, 100% automated.
 Continuous mode is not recommended.
@@ -156,6 +223,15 @@ If you don't have access to the GPT4 api, this mode will allow you to use Auto-G
 ```
 python scripts/main.py --gpt3only
 ```
 
+## 🖼 Image Generation
+By default, Auto-GPT uses DALL-E for image generation. To use Stable Diffusion, a [HuggingFace API Token](https://huggingface.co/settings/tokens) is required.
+
+Once you have a token, set these variables in your `.env`:
+```
+IMAGE_PROVIDER=sd
+HUGGINGFACE_API_TOKEN="YOUR_HUGGINGFACE_API_TOKEN"
+```
+
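For reference, the Stable Diffusion path amounts to calling the HuggingFace Inference API with the token above. A minimal sketch follows — the exact model and endpoint used by `scripts/image_gen.py` are assumptions, since that file is not shown in this diff:

```python
import io
import os

import requests
from PIL import Image  # Pillow, newly added to requirements.txt

# Assumed model; image_gen.py may point at a different one
API_URL = "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"
headers = {"Authorization": f"Bearer {os.getenv('HUGGINGFACE_API_TOKEN')}"}

# The Inference API returns raw image bytes for text-to-image models
response = requests.post(API_URL, headers=headers, json={"inputs": "a lighthouse at dawn"})
response.raise_for_status()

image = Image.open(io.BytesIO(response.content))
image.save("lighthouse.png")
```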
 
 ## ⚠️ Limitations
 This experiment aims to showcase the potential of GPT-4 but comes with some limitations:
 
diff --git a/ai_settings.yaml b/ai_settings.yaml
new file mode 100644
index 00000000..b37ba849
--- /dev/null
+++ b/ai_settings.yaml
@@ -0,0 +1,7 @@
+ai_goals:
+- Increase net worth.
+- Develop and manage multiple businesses autonomously.
+- Play to your strengths as a Large Language Model.
+ai_name: Entrepreneur-GPT
+ai_role: an AI designed to autonomously develop and run businesses with the sole goal
+  of increasing your net worth.
diff --git a/requirements.txt b/requirements.txt
index 158e9324..6a9ba643 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,7 @@
 beautifulsoup4
 colorama==0.4.6
 openai==0.27.2
-playsound==1.3.0
+playsound==1.2.2
 python-dotenv==1.0.0
 pyyaml==6.0
 readability-lxml==0.8.1
@@ -10,4 +10,8 @@ tiktoken==0.3.3
 gTTS==2.3.1
 docker
 duckduckgo-search
-google-api-python-client #(https://developers.google.com/custom-search/v1/overview)
+google-api-python-client #(https://developers.google.com/custom-search/v1/overview)
+pinecone-client==2.2.1
+redis
+orjson
+Pillow
diff --git a/scripts/ai_config.py b/scripts/ai_config.py
index de214463..14b45887 100644
--- a/scripts/ai_config.py
+++ b/scripts/ai_config.py
@@ -1,6 +1,6 @@
 import yaml
 import data
-
+import os
 
 class AIConfig:
     """Class to store the AI's name, role, and goals."""
@@ -11,7 +11,7 @@
         self.ai_goals = ai_goals
 
     # Soon this will go in a folder where it remembers more stuff about the run(s)
-    SAVE_FILE = "last_run_ai_settings.yaml"
+    SAVE_FILE = os.path.join(os.path.dirname(__file__), '..', 'ai_settings.yaml')
 
     @classmethod
     def load(cls, config_file=SAVE_FILE):
diff --git a/scripts/browse.py b/scripts/browse.py
index 89e41af4..214ccadb 100644
--- a/scripts/browse.py
+++ b/scripts/browse.py
@@ -7,7 +7,7 @@ cfg = Config()
 
 def scrape_text(url):
     """Scrape text from a webpage"""
-    response = requests.get(url)
+    response = requests.get(url, headers=cfg.user_agent_header)
 
     # Check if the response contains an HTTP error
     if response.status_code >= 400:
@@ -43,8 +43,8 @@ def format_hyperlinks(hyperlinks):
 
 
 def scrape_links(url):
-    """Scrape hyperlinks from a webpage"""
-    response = requests.get(url)
+    """Scrape links from a webpage"""
+    response = requests.get(url, headers=cfg.user_agent_header)
 
     # Check if the response contains an HTTP error
     if response.status_code >= 400:
diff --git a/scripts/chat.py b/scripts/chat.py
index 89a03509..f4cf2299 100644
--- a/scripts/chat.py
+++ b/scripts/chat.py
@@ -21,6 +21,22 @@ def create_chat_message(role, content):
     return {"role": role, "content": content}
 
 
+def generate_context(prompt, relevant_memory, full_message_history, model):
+    current_context = [
+        create_chat_message(
+            "system", prompt),
+        create_chat_message(
+            "system", f"The current time and date is {time.strftime('%c')}"),
+        create_chat_message(
+            "system", f"This reminds you of these events from your past:\n{relevant_memory}\n\n")]
+
+    # Add messages from the full message history until we reach the token limit
+    next_message_to_add_index = len(full_message_history) - 1
+    insertion_index = len(current_context)
+    # Count the currently used tokens
+    current_tokens_used = token_counter.count_message_tokens(current_context, model)
+    return next_message_to_add_index, current_tokens_used, insertion_index, current_context
+
+
 # TODO: Change debug from hardcode to argument
 def chat_with_ai(
@@ -40,7 +56,7 @@ def chat_with_ai(
         prompt (str): The prompt explaining the rules to the AI.
         user_input (str): The input from the user.
         full_message_history (list): The list of all messages sent between the user and the AI.
-        permanent_memory (list): The list of items in the AI's permanent memory.
+        permanent_memory (Obj): The memory object containing the permanent memory.
         token_limit (int): The maximum number of tokens allowed in the API call.
 
     Returns:
@@ -52,18 +68,20 @@
             print(f"Token limit: {token_limit}")
 
         send_token_limit = token_limit - 1000
 
-        current_context = [
-            create_chat_message(
-                "system", prompt), create_chat_message(
-                "system", f"Permanent memory: {permanent_memory}")]
+        relevant_memory = permanent_memory.get_relevant(str(full_message_history[-5:]), 10)
 
-        # Add messages from the full message history until we reach the token limit
-        next_message_to_add_index = len(full_message_history) - 1
-        current_tokens_used = 0
-        insertion_index = len(current_context)
+        if debug:
+            print('Memory Stats: ', permanent_memory.get_stats())
+
+        next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context(
+            prompt, relevant_memory, full_message_history, model)
+
+        while current_tokens_used > 2500:
+            # remove memories until we are under 2500 tokens
+            relevant_memory = relevant_memory[1:]
+            next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context(
+                prompt, relevant_memory, full_message_history, model)
 
-        # Count the currently used tokens
-        current_tokens_used = token_counter.count_message_tokens(current_context, model)
         current_tokens_used += token_counter.count_message_tokens([create_chat_message("user", user_input)], model)  # Account for user input (appended later)
 
         while next_message_to_add_index >= 0:
@@ -79,7 +97,7 @@
 
             # Count the currently used tokens
             current_tokens_used += tokens_to_add
-            
+
             # Move to the next most recent message in the full message history
             next_message_to_add_index -= 1
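Note that `chat.py` now treats `permanent_memory` as an object rather than a list. The concrete class is not part of this diff, so as a reading aid, here is a rough sketch of the interface the code above relies on — the names are inferred from the call sites and are an assumption, not the actual implementation:

```python
from abc import ABC, abstractmethod


class MemoryProvider(ABC):  # hypothetical name
    """Interface implied by the chat.py call sites above."""

    @abstractmethod
    def add(self, data: str) -> str:
        """Store a new memory; returns a status string."""

    @abstractmethod
    def get_relevant(self, data: str, num_relevant: int) -> list:
        """Return the num_relevant memories most similar to `data`."""

    @abstractmethod
    def get_stats(self):
        """Return backend statistics, printed when --debug is on."""
```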
diff --git a/scripts/commands.py b/scripts/commands.py
index 38912b48..6d264d54 100644
--- a/scripts/commands.py
+++ b/scripts/commands.py
@@ -1,14 +1,15 @@
 import browse
 import json
-import memory as mem
+from memory import get_memory
 import datetime
 import agent_manager as agents
 import speak
 from config import Config
 import ai_functions as ai
-from file_operations import read_file, write_to_file, append_to_file, delete_file
+from file_operations import read_file, write_to_file, append_to_file, delete_file, search_files
 from execute_code import execute_python_file
 from json_parser import fix_and_parse_json
+from image_gen import generate_image
 from duckduckgo_search import ddg
 from googleapiclient.discovery import build
 from googleapiclient.errors import HttpError
@@ -16,6 +17,13 @@ from googleapiclient.errors import HttpError
 cfg = Config()
 
 
+def is_valid_int(value):
+    try:
+        int(value)
+        return True
+    except ValueError:
+        return False
+
+
 def get_command(response):
     """Parse the response and return the command name and arguments"""
     try:
@@ -46,10 +54,12 @@ def get_command(response):
 
 
 def execute_command(command_name, arguments):
-    """Execute the command and return the response"""
+    """Execute the command and return the result"""
+    memory = get_memory(cfg)
+
     try:
         if command_name == "google":
-
+            # Check if the Google API key is set and use the official search method
             # If the API key is not set or has only whitespaces, use the unofficial search method
             if cfg.google_api_key and (cfg.google_api_key.strip() if cfg.google_api_key else None):
                 return google_official_search(arguments["input"])
             else:
                 return google_search(arguments["input"])
         elif command_name == "memory_add":
-            return commit_memory(arguments["string"])
-        elif command_name == "memory_del":
-            return delete_memory(arguments["key"])
-        elif command_name == "memory_ovr":
-            return overwrite_memory(arguments["key"], arguments["string"])
+            return memory.add(arguments["string"])
         elif command_name == "start_agent":
             return start_agent(
                 arguments["name"],
@@ -85,6 +91,8 @@
             return append_to_file(arguments["file"], arguments["text"])
         elif command_name == "delete_file":
             return delete_file(arguments["file"])
+        elif command_name == "search_files":
+            return search_files(arguments["directory"])
         elif command_name == "browse_website":
             return browse_website(arguments["url"], arguments["question"])
         # TODO: Change these to take in a file rather than pasted code, if
@@ -98,10 +106,12 @@
             return ai.write_tests(arguments["code"], arguments.get("focus"))
         elif command_name == "execute_python_file":  # Add this command
             return execute_python_file(arguments["file"])
+        elif command_name == "generate_image":
+            return generate_image(arguments["prompt"])
         elif command_name == "task_complete":
             shutdown()
         else:
-            return f"Unknown command {command_name}"
+            return f"Unknown command '{command_name}'. Please refer to the 'COMMANDS' list for available commands and only respond in the specified JSON format."
     # All errors, return "Error: + error message"
     except Exception as e:
         return "Error: " + str(e)
@@ -205,14 +215,28 @@ def delete_memory(key):
 
 def overwrite_memory(key, string):
     """Overwrite a memory with a given key and string"""
-    if int(key) >= 0 and key < len(mem.permanent_memory):
-        _text = "Overwriting memory with key " + \
-            str(key) + " and string " + string
+    # Check if the key is a valid integer
+    if is_valid_int(key):
+        key_int = int(key)
+        # Check if the integer key is within the range of the permanent_memory list
+        if 0 <= key_int < len(mem.permanent_memory):
+            _text = "Overwriting memory with key " + str(key) + " and string " + string
+            # Overwrite the memory slot with the given integer key and string
+            mem.permanent_memory[key_int] = string
+            print(_text)
+            return _text
+        else:
+            print(f"Invalid key '{key}', out of range.")
+            return None
+    # Check if the key is a valid string
+    elif isinstance(key, str):
+        _text = "Overwriting memory with key " + key + " and string " + string
+        # Overwrite the memory slot with the given string key and string
+        mem.permanent_memory[key] = string
+        print(_text)
+        return _text
     else:
-        print("Invalid key, cannot overwrite memory.")
+        print(f"Invalid key '{key}', must be an integer or a string.")
         return None
 
@@ -249,13 +273,20 @@ def start_agent(name, task, prompt, model=cfg.fast_llm_model):
 
 def message_agent(key, message):
     """Message an agent with a given key and message"""
     global cfg
-    agent_response = agents.message_agent(key, message)
+
+    # Check if the key is a valid integer
+    if is_valid_int(key):
+        agent_response = agents.message_agent(int(key), message)
+    # Check if the key is a valid string
+    elif isinstance(key, str):
+        agent_response = agents.message_agent(key, message)
+    else:
+        return "Invalid key, must be an integer or a string."
 
     # Speak response
     if cfg.speak_mode:
         speak.say_text(agent_response, 1)
-
-    return f"Agent {key} responded: {agent_response}"
+    return agent_response
 
 
 def list_agents():
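`execute_command` above swaps the old list-based memory commands for a memory object obtained via `get_memory(cfg)`. That factory lives in a `memory` module not shown in this diff; a rough, self-contained sketch of the dispatch it presumably performs over the `MEMORY_BACKEND` setting (all names hypothetical):

```python
class LocalCache:
    """Stand-in for the assumed default, in-process backend."""

    def __init__(self, cfg):
        self.data = []

    def add(self, text):
        self.data.append(text)
        return f"Committing memory with string {text}"

    def get_relevant(self, query, num_relevant):
        return self.data[-num_relevant:]  # naive stand-in for similarity search

    def get_stats(self):
        return {"items": len(self.data)}


def get_memory(cfg):
    """Pick a backend from cfg.memory_backend (MEMORY_BACKEND in .env)."""
    if cfg.memory_backend == "redis":
        pass  # would return a Redis-backed store (see Redis Setup above)
    elif cfg.memory_backend == "pinecone":
        pass  # would return a Pinecone-backed store
    return LocalCache(cfg)  # this sketch always falls back to local
```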
diff --git a/scripts/config.py b/scripts/config.py
index cf8fe7e7..03f1d5df 100644
--- a/scripts/config.py
+++ b/scripts/config.py
@@ -1,10 +1,12 @@
+import abc
 import os
 import openai
 from dotenv import load_dotenv
 # Load environment variables from .env file
 load_dotenv()
 
-class Singleton(type):
+
+class Singleton(abc.ABCMeta, type):
     """
     Singleton metaclass for ensuring only one instance of a class.
     """
@@ -20,13 +22,18 @@ class Singleton(type):
         return cls._instances[cls]
 
 
+class AbstractSingleton(abc.ABC, metaclass=Singleton):
+    pass
+
+
 class Config(metaclass=Singleton):
     """
     Configuration class to store the state of bools for different scripts access.
     """
 
     def __init__(self):
-        """Initialize the configuration class."""
+        """Initialize the Config class"""
+        self.debug = False
         self.continuous_mode = False
         self.speak_mode = False
         # TODO - make these models be self-contained, using langchain, so we can configure them once and call it good
@@ -36,11 +43,38 @@
         self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
 
         self.openai_api_key = os.getenv("OPENAI_API_KEY")
+        self.use_azure = False
+        self.use_azure = os.getenv("USE_AZURE") == 'True'
+        if self.use_azure:
+            self.openai_api_base = os.getenv("OPENAI_API_BASE")
+            self.openai_api_version = os.getenv("OPENAI_API_VERSION")
+            self.openai_deployment_id = os.getenv("OPENAI_DEPLOYMENT_ID")
+            openai.api_type = "azure"
+            openai.api_base = self.openai_api_base
+            openai.api_version = self.openai_api_version
+
         self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
 
         self.google_api_key = os.getenv("GOOGLE_API_KEY")
         self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID")
 
+        self.pinecone_api_key = os.getenv("PINECONE_API_KEY")
+        self.pinecone_region = os.getenv("PINECONE_ENV")
+
+        self.image_provider = os.getenv("IMAGE_PROVIDER")
+        self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN")
+
+        # User agent headers to use when browsing web
+        # Some websites might just completely deny request with an error code if no user agent was found.
+        self.user_agent_header = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"}
+        self.redis_host = os.getenv("REDIS_HOST", "localhost")
+        self.redis_port = os.getenv("REDIS_PORT", "6379")
+        self.redis_password = os.getenv("REDIS_PASSWORD", "")
+        self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == 'True'
+        self.memory_index = os.getenv("MEMORY_INDEX", 'auto-gpt')
+        # Note that indexes must be created on db 0 in redis, this is not configurable.
+
+        self.memory_backend = os.getenv("MEMORY_BACKEND", 'local')
 
         # Initialize the OpenAI API client
         openai.api_key = self.openai_api_key
@@ -81,5 +115,17 @@ class Config(metaclass=Singleton):
         self.google_api_key = value
 
     def set_custom_search_engine_id(self, value: str):
-        """Set the custom search engine ID value."""
-        self.custom_search_engine_id = value
\ No newline at end of file
+        """Set the custom search engine id value."""
+        self.custom_search_engine_id = value
+
+    def set_pinecone_api_key(self, value: str):
+        """Set the Pinecone API key value."""
+        self.pinecone_api_key = value
+
+    def set_pinecone_region(self, value: str):
+        """Set the Pinecone region value."""
+        self.pinecone_region = value
+
+    def set_debug_mode(self, value: bool):
+        """Set the debug mode value."""
+        self.debug = value
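The `Singleton` metaclass now also inherits from `abc.ABCMeta`, so `AbstractSingleton` subclasses can combine abstract methods with single-instance behavior. A minimal self-contained demonstration of the pattern — using standalone toy classes, not the actual `Config`:

```python
import abc


class Singleton(abc.ABCMeta, type):
    """Metaclass that caches one instance per class, as in config.py."""
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]


class AbstractSingleton(abc.ABC, metaclass=Singleton):
    pass


class Settings(AbstractSingleton):  # toy example, not the real Config
    def __init__(self):
        self.debug = False


a, b = Settings(), Settings()
a.debug = True
print(b.debug)  # True — both names refer to the same cached instance
print(a is b)   # True
```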
diff --git a/scripts/data.py b/scripts/data.py
index e0840dd9..cd41f313 100644
--- a/scripts/data.py
+++ b/scripts/data.py
@@ -1,16 +1,15 @@
 import os
 from pathlib import Path
 
-SRC_DIR = Path(__file__).parent
 
 def load_prompt():
     """Load the prompt from data/prompt.txt"""
     try:
         # get directory of this file:
-        file_dir = Path(os.path.dirname(os.path.realpath(__file__)))
-        data_dir = file_dir / "data"
-        prompt_file = data_dir / "prompt.txt"
-        # Load the promt from data/prompt.txt
-        with open(SRC_DIR/ "data/prompt.txt", "r") as prompt_file:
+        file_dir = Path(__file__).parent
+        prompt_file_path = file_dir / "data" / "prompt.txt"
+
+        # Load the prompt from data/prompt.txt
+        with open(prompt_file_path, "r") as prompt_file:
             prompt = prompt_file.read()
             return prompt
 
diff --git a/scripts/data/prompt.txt b/scripts/data/prompt.txt
index a93e783e..77a449de 100644
--- a/scripts/data/prompt.txt
+++ b/scripts/data/prompt.txt
@@ -1,17 +1,15 @@
 CONSTRAINTS:
 
-1. ~4000 word limit for memory. Your memory is short, so immediately save important information to long term memory and code to files.
-2. No user assistance
-3. Exclusively use the commands listed in double quotes e.g. "command name"
+1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.
+2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.
+3. No user assistance
+4. Exclusively use the commands listed in double quotes e.g. "command name"
 
 COMMANDS:
 
 1. Google Search: "google", args: "input": "