diff --git a/.github/ISSUE_TEMPLATE/1.bug.yml b/.github/ISSUE_TEMPLATE/1.bug.yml index cf49ab5f..e2404c76 100644 --- a/.github/ISSUE_TEMPLATE/1.bug.yml +++ b/.github/ISSUE_TEMPLATE/1.bug.yml @@ -7,7 +7,19 @@ body: value: | Please provide a searchable summary of the issue in the title above ⬆️. - Thanks for contributing by creating an issue! ❤️ + ⚠️ SUPER-busy repo, please help the volunteer maintainers. + The less time we spend here, the more time we spend building AutoGPT. + + Please help us help you: + - Does it work on `stable` branch (https://github.com/Torantulino/Auto-GPT/tree/stable)? + - Does it work on current `master` (https://github.com/Torantulino/Auto-GPT/tree/master)? + - Search for existing issues, "add comment" is tidier than "new issue" + - Ask on our Discord (https://discord.gg/autogpt) + - Provide relevant info: + - Provide commit-hash (`git rev-parse HEAD` gets it) + - If it's a pip/packages issue, provide pip version, python version + - If it's a crash, provide traceback. + - type: checkboxes attributes: label: Duplicates @@ -32,8 +44,8 @@ body: attributes: label: Your prompt 📝 description: | - Please provide the prompt you are using. You can find your last-used prompt in last_run_ai_settings.yaml. + If applicable please provide the prompt you are using. You can find your last-used prompt in last_run_ai_settings.yaml. 
value: | ```yaml # Paste your prompt here - ``` \ No newline at end of file + ``` diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/ci.yml similarity index 84% rename from .github/workflows/unit_tests.yml rename to .github/workflows/ci.yml index 5973dd02..070df794 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/ci.yml @@ -1,4 +1,4 @@ -name: Unit Tests +name: Python CI on: push: @@ -30,6 +30,10 @@ jobs: python -m pip install --upgrade pip pip install -r requirements.txt + - name: Lint with flake8 + continue-on-error: false + run: flake8 scripts/ tests/ --select E303,W293,W291,W292,E305 + - name: Run unittest tests with coverage run: | coverage run --source=scripts -m unittest discover tests diff --git a/.gitignore b/.gitignore index aa0dceaa..cfa3b08b 100644 --- a/.gitignore +++ b/.gitignore @@ -18,4 +18,4 @@ log.txt # Coverage reports .coverage coverage.xml -htmlcov/ \ No newline at end of file +htmlcov/ diff --git a/Dockerfile b/Dockerfile index 146a3747..4d264c88 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,9 +1,7 @@ -FROM python:3.11 - +FROM python:3.11-slim +ENV PIP_NO_CACHE_DIR=yes WORKDIR /app -COPY scripts/ /app -COPY requirements.txt /app - +COPY requirements.txt . RUN pip install -r requirements.txt - -CMD ["python", "main.py"] +COPY scripts/ . +ENTRYPOINT ["python", "main.py"] diff --git a/README.md b/README.md index 2900daa9..c9ef9d5c 100644 --- a/README.md +++ b/README.md @@ -96,9 +96,10 @@ pip install -r requirements.txt ``` 4. Rename `.env.template` to `.env` and fill in your `OPENAI_API_KEY`. If you plan to use Speech Mode, fill in your `ELEVEN_LABS_API_KEY` as well. - - Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys. - - Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website. 
- - If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and provide the `OPENAI_AZURE_API_BASE`, `OPENAI_AZURE_API_VERSION` and `OPENAI_AZURE_DEPLOYMENT_ID` values as explained here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section. Additionally you need separate deployments for both embeddings and chat. Add their ID values to `OPENAI_AZURE_CHAT_DEPLOYMENT_ID` and `OPENAI_AZURE_EMBEDDINGS_DEPLOYMENT_ID` respectively + +- Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys. +- Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website. +- If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and provide the `OPENAI_AZURE_API_BASE`, `OPENAI_AZURE_API_VERSION` and `OPENAI_AZURE_DEPLOYMENT_ID` values as explained here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section. Additionally you need separate deployments for both embeddings and chat. Add their ID values to `OPENAI_AZURE_CHAT_DEPLOYMENT_ID` and `OPENAI_AZURE_EMBEDDINGS_DEPLOYMENT_ID` respectively ## 🔧 Usage @@ -113,9 +114,11 @@ python scripts/main.py 3. To exit the program, type "exit" and press Enter. ### Logs -You will find activity and error logs in the folder ```./logs``` + +You will find activity and error logs in the folder `./logs` To output debug logs: + ``` python scripts/main.py --debug ``` @@ -331,3 +334,14 @@ To run tests and see coverage, run the following command: ``` coverage run -m unittest discover tests ``` + +## Run linter + +This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. 
To run the linter, run the following command: + +``` +flake8 scripts/ tests/ + +# Or, if you want to run flake8 with the same configuration as the CI: +flake8 scripts/ tests/ --select E303,W293,W291,W292,E305 +``` \ No newline at end of file diff --git a/main.py b/main.py index 5f044237..656c34ec 100644 --- a/main.py +++ b/main.py @@ -1 +1 @@ -from scripts.main import main \ No newline at end of file +from scripts.main import main diff --git a/requirements.txt b/requirements.txt index b196c3d7..b864c1d3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,4 +15,5 @@ pinecone-client==2.2.1 redis orjson Pillow -coverage \ No newline at end of file +coverage +flake8 \ No newline at end of file diff --git a/scripts/ai_functions.py b/scripts/ai_functions.py index 8ad77441..782bb558 100644 --- a/scripts/ai_functions.py +++ b/scripts/ai_functions.py @@ -45,8 +45,6 @@ def improve_code(suggestions: List[str], code: str) -> str: result_string = call_ai_function(function_string, args, description_string) return result_string - - def write_tests(code: str, focus: List[str]) -> str: """ A function that takes in code and focus topics and returns a response from create chat completion api call. 
diff --git a/scripts/browse.py b/scripts/browse.py index c3fc0662..b936c5b1 100644 --- a/scripts/browse.py +++ b/scripts/browse.py @@ -41,7 +41,7 @@ def scrape_text(url): # Restrict access to local files if check_local_file_access(url): return "Error: Access to local files is restricted" - + # Validate the input URL if not is_valid_url(url): # Sanitize the input URL diff --git a/scripts/config.py b/scripts/config.py index 09f5276e..4f60aa72 100644 --- a/scripts/config.py +++ b/scripts/config.py @@ -62,7 +62,7 @@ class Config(metaclass=Singleton): self.use_mac_os_tts = False self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS") - + self.google_api_key = os.getenv("GOOGLE_API_KEY") self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID") diff --git a/scripts/file_operations.py b/scripts/file_operations.py index c6066ef9..7b48c134 100644 --- a/scripts/file_operations.py +++ b/scripts/file_operations.py @@ -80,4 +80,4 @@ def search_files(directory): relative_path = os.path.relpath(os.path.join(root, file), working_directory) found_files.append(relative_path) - return found_files \ No newline at end of file + return found_files diff --git a/scripts/logger.py b/scripts/logger.py index a609e602..5c7d68bb 100644 --- a/scripts/logger.py +++ b/scripts/logger.py @@ -159,6 +159,7 @@ class ConsoleHandler(logging.StreamHandler): except Exception: self.handleError(record) + ''' Allows to handle custom placeholders 'title_color' and 'message_no_color'. To use this formatter, make sure to pass 'color', 'title' as log extras. 
diff --git a/scripts/main.py b/scripts/main.py index 90033afb..e0ddc9fb 100644 --- a/scripts/main.py +++ b/scripts/main.py @@ -391,7 +391,7 @@ while True: flush=True) while True: console_input = utils.clean_input(Fore.MAGENTA + "Input:" + Style.RESET_ALL) - if console_input.lower() == "y": + if console_input.lower().rstrip() == "y": user_input = "GENERATE NEXT COMMAND JSON" break elif console_input.lower().startswith("y -"): diff --git a/scripts/memory/__init__.py b/scripts/memory/__init__.py index 2900353e..a07f9fd8 100644 --- a/scripts/memory/__init__.py +++ b/scripts/memory/__init__.py @@ -44,6 +44,7 @@ def get_memory(cfg, init=False): def get_supported_memory_backends(): return supported_memory + __all__ = [ "get_memory", "LocalCache", diff --git a/scripts/memory/local.py b/scripts/memory/local.py index 372b59c4..b0afacf6 100644 --- a/scripts/memory/local.py +++ b/scripts/memory/local.py @@ -28,10 +28,20 @@ class LocalCache(MemoryProviderSingleton): def __init__(self, cfg) -> None: self.filename = f"{cfg.memory_index}.json" if os.path.exists(self.filename): - with open(self.filename, 'rb') as f: - loaded = orjson.loads(f.read()) - self.data = CacheContent(**loaded) + try: + with open(self.filename, 'r+b') as f: + file_content = f.read() + if not file_content.strip(): + file_content = b'{}' + f.write(file_content) + + loaded = orjson.loads(file_content) + self.data = CacheContent(**loaded) + except orjson.JSONDecodeError: + print(f"Error: The file '{self.filename}' is not in JSON format.") + self.data = CacheContent() else: + print(f"Warning: The file '{self.filename}' does not exist. 
Local memory would not be saved to a file.") self.data = CacheContent() def add(self, text: str): diff --git a/scripts/speak.py b/scripts/speak.py index 2cc6a558..64054e3c 100644 --- a/scripts/speak.py +++ b/scripts/speak.py @@ -61,7 +61,7 @@ def gtts_speech(text): def macos_tts_speech(text, voice_index=0): if voice_index == 0: os.system(f'say "{text}"') - else: + else: if voice_index == 1: os.system(f'say -v "Ava (Premium)" "{text}"') else: @@ -79,7 +79,7 @@ def say_text(text, voice_index=0): success = eleven_labs_speech(text, voice_index) if not success: gtts_speech(text) - + queue_semaphore.release() queue_semaphore.acquire(True) diff --git a/tests/context.py b/tests/context.py index 2adb9dd6..b668c8dc 100644 --- a/tests/context.py +++ b/tests/context.py @@ -2,4 +2,4 @@ import sys import os sys.path.insert(0, os.path.abspath( - os.path.join(os.path.dirname(__file__), '../scripts'))) \ No newline at end of file + os.path.join(os.path.dirname(__file__), '../scripts'))) diff --git a/tests/integration/memory_tests.py b/tests/integration/memory_tests.py index ed444d91..5f1611be 100644 --- a/tests/integration/memory_tests.py +++ b/tests/integration/memory_tests.py @@ -45,5 +45,6 @@ class TestLocalCache(unittest.TestCase): self.assertEqual(len(relevant_texts), k) self.assertIn(self.example_texts[1], relevant_texts) + if __name__ == '__main__': unittest.main() diff --git a/tests/local_cache_test.py b/tests/local_cache_test.py new file mode 100644 index 00000000..d1f1ef08 --- /dev/null +++ b/tests/local_cache_test.py @@ -0,0 +1,53 @@ +import os +import sys +import unittest +# Probably a better way: +sys.path.append(os.path.abspath('../scripts')) +from memory.local import LocalCache + +def MockConfig(): + return type('MockConfig', (object,), { + 'debug_mode': False, + 'continuous_mode': False, + 'speak_mode': False, + 'memory_index': 'auto-gpt', + }) + +class TestLocalCache(unittest.TestCase): + + def setUp(self): + self.cfg = MockConfig() + self.cache = LocalCache(self.cfg) + + def 
test_add(self): + text = "Sample text" + self.cache.add(text) + self.assertIn(text, self.cache.data.texts) + + def test_clear(self): + self.cache.clear() + self.assertEqual(self.cache.data, [""]) + + def test_get(self): + text = "Sample text" + self.cache.add(text) + result = self.cache.get(text) + self.assertEqual(result, [text]) + + def test_get_relevant(self): + text1 = "Sample text 1" + text2 = "Sample text 2" + self.cache.add(text1) + self.cache.add(text2) + result = self.cache.get_relevant(text1, 1) + self.assertEqual(result, [text1]) + + def test_get_stats(self): + text = "Sample text" + self.cache.add(text) + stats = self.cache.get_stats() + self.assertEqual(stats, (1, self.cache.data.embeddings.shape)) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_browse_scrape_text.py b/tests/test_browse_scrape_text.py index 5ecd7407..775eefcd 100644 --- a/tests/test_browse_scrape_text.py +++ b/tests/test_browse_scrape_text.py @@ -33,8 +33,6 @@ Additional aspects: - The function uses a generator expression to split the text into lines and chunks, which can improve performance for large amounts of text. """ - - class TestScrapeText: # Tests that scrape_text() returns the expected text when given a valid URL. diff --git a/tests/test_json_parser.py b/tests/test_json_parser.py index 4561659e..b8cb2680 100644 --- a/tests/test_json_parser.py +++ b/tests/test_json_parser.py @@ -66,8 +66,6 @@ class TestParseJson(unittest.TestCase): # Assert that this raises an exception: self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj) - - def test_invalid_json_leading_sentence_with_gpt(self): # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False json_str = """I will first need to browse the repository (https://github.com/Torantulino/Auto-GPT) and identify any potential bugs that need fixing. I will use the "browse_website" command for this. 
@@ -108,6 +106,5 @@ class TestParseJson(unittest.TestCase): self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj) - if __name__ == '__main__': unittest.main() diff --git a/tests/unit/json_tests.py b/tests/unit/json_tests.py index fdac9c2f..1edbaeaf 100644 --- a/tests/unit/json_tests.py +++ b/tests/unit/json_tests.py @@ -68,8 +68,6 @@ class TestParseJson(unittest.TestCase): # Assert that this raises an exception: self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj) - - def test_invalid_json_leading_sentence_with_gpt(self): # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False json_str = """I will first need to browse the repository (https://github.com/Torantulino/Auto-GPT) and identify any potential bugs that need fixing. I will use the "browse_website" command for this. @@ -110,6 +108,5 @@ class TestParseJson(unittest.TestCase): self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj) - if __name__ == '__main__': unittest.main()