Merge branch 'master' into continuous-mode-limit

Joseph C. Miller, II
2023-04-12 15:50:05 -06:00
22 changed files with 122 additions and 38 deletions

View File

@@ -7,7 +7,19 @@ body:
       value: |
         Please provide a searchable summary of the issue in the title above ⬆️.
 
         Thanks for contributing by creating an issue! ❤️
+        ⚠️ SUPER-busy repo, please help the volunteer maintainers.
+        The less time we spend here, the more time we spend building AutoGPT.
+
+        Please help us help you:
+        - Does it work on `stable` branch (https://github.com/Torantulino/Auto-GPT/tree/stable)?
+        - Does it work on current `master` (https://github.com/Torantulino/Auto-GPT/tree/master)?
+        - Search for existing issues, "add comment" is tidier than "new issue"
+        - Ask on our Discord (https://discord.gg/autogpt)
+        - Provide relevant info:
+          - Provide commit-hash (`git rev-parse HEAD` gets it)
+          - If it's a pip/packages issue, provide pip version, python version
+          - If it's a crash, provide traceback.
   - type: checkboxes
     attributes:
       label: Duplicates
@@ -32,7 +44,7 @@ body:
     attributes:
       label: Your prompt 📝
       description: |
-        Please provide the prompt you are using. You can find your last-used prompt in last_run_ai_settings.yaml.
+        If applicable please provide the prompt you are using. You can find your last-used prompt in last_run_ai_settings.yaml.
       value: |
         ```yaml
         # Paste your prompt here

View File

@@ -1,4 +1,4 @@
-name: Unit Tests
+name: Python CI
 
 on:
   push:
@@ -30,6 +30,10 @@ jobs:
         python -m pip install --upgrade pip
         pip install -r requirements.txt
 
+    - name: Lint with flake8
+      continue-on-error: false
+      run: flake8 scripts/ tests/ --select E303,W293,W291,W292,E305
+
     - name: Run unittest tests with coverage
       run: |
        coverage run --source=scripts -m unittest discover tests
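The lint step gates CI on a narrow, whitespace-only subset of flake8 rather than the full rule set, and `continue-on-error: false` makes any finding fail the build. An illustrative sketch (not from the repo) of what each selected code flags:

```
# Illustrative Python file; comments note the flake8 code each spot would raise.

def add(a, b):
    return a + b  # a trailing space at the end of a line raises W291
# a line containing only spaces (not truly empty) raises W293



def sub(a, b):  # the three blank lines above raise E303 (more than two)
    return a - b
result = add(1, 2)  # E305: expected two blank lines after a function definition
# W292 is raised when the last line of the file has no terminating newline
```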

View File

@@ -1,9 +1,7 @@
FROM python:3.11
FROM python:3.11-slim
ENV PIP_NO_CACHE_DIR=yes
WORKDIR /app
COPY scripts/ /app
COPY requirements.txt /app
COPY requirements.txt .
RUN pip install -r requirements.txt
CMD ["python", "main.py"]
COPY scripts/ .
ENTRYPOINT ["python", "main.py"]

View File

@@ -96,9 +96,10 @@ pip install -r requirements.txt
 ```
 
 4. Rename `.env.template` to `.env` and fill in your `OPENAI_API_KEY`. If you plan to use Speech Mode, fill in your `ELEVEN_LABS_API_KEY` as well.
-  - Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys.
-  - Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website.
-  - If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and provide the `OPENAI_AZURE_API_BASE`, `OPENAI_AZURE_API_VERSION` and `OPENAI_AZURE_DEPLOYMENT_ID` values as explained here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section. Additionally you need separate deployments for both embeddings and chat. Add their ID values to `OPENAI_AZURE_CHAT_DEPLOYMENT_ID` and `OPENAI_AZURE_EMBEDDINGS_DEPLOYMENT_ID` respectively
+    - Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys.
+    - Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website.
+    - If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and provide the `OPENAI_AZURE_API_BASE`, `OPENAI_AZURE_API_VERSION` and `OPENAI_AZURE_DEPLOYMENT_ID` values as explained here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section. Additionally you need separate deployments for both embeddings and chat. Add their ID values to `OPENAI_AZURE_CHAT_DEPLOYMENT_ID` and `OPENAI_AZURE_EMBEDDINGS_DEPLOYMENT_ID` respectively
+
 ## 🔧 Usage
@@ -113,9 +114,11 @@ python scripts/main.py
 3. To exit the program, type "exit" and press Enter.
 
 ### Logs
-You will find activity and error logs in the folder ```./logs```
+
+You will find activity and error logs in the folder `./logs`
+
 To output debug logs:
 ```
 python scripts/main.py --debug
 ```
@@ -331,3 +334,14 @@ To run tests and see coverage, run the following command:
 ```
 coverage run -m unittest discover tests
 ```
+
+## Run linter
+
+This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. To run the linter, run the following command:
+
+```
+flake8 scripts/ tests/
+
+# Or, if you want to run flake8 with the same configuration as the CI:
+flake8 scripts/ tests/ --select E303,W293,W291,W292,E305
+```

View File

@@ -16,3 +16,4 @@ redis
 orjson
 Pillow
 coverage
+flake8

View File

@@ -45,8 +45,6 @@ def improve_code(suggestions: List[str], code: str) -> str:
     result_string = call_ai_function(function_string, args, description_string)
     return result_string
 
-
-
 def write_tests(code: str, focus: List[str]) -> str:
     """
     A function that takes in code and focus topics and returns a response from create chat completion api call.

View File

@@ -159,6 +159,7 @@ class ConsoleHandler(logging.StreamHandler):
         except Exception:
             self.handleError(record)
 
+
 '''
 Allows to handle custom placeholders 'title_color' and 'message_no_color'.
 To use this formatter, make sure to pass 'color', 'title' as log extras.
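A minimal runnable sketch of the calling convention this docstring describes; the formatter class below is a stand-in written for illustration, not the repo's actual formatter:

```
import logging

from colorama import Fore, Style


class DemoTitleFormatter(logging.Formatter):
    # Stand-in formatter: builds 'title_color' from the 'color' and 'title' extras.
    def format(self, record):
        record.title_color = getattr(record, "color", "") + getattr(record, "title", "")
        if getattr(record, "color", ""):
            record.title_color += Style.RESET_ALL
        return super().format(record)


logger = logging.getLogger("demo")
handler = logging.StreamHandler()
handler.setFormatter(DemoTitleFormatter("%(title_color)s %(message)s"))
logger.addHandler(handler)

# Per the docstring: pass 'color' and 'title' as log extras.
logger.warning("Rate limit reached", extra={"color": Fore.YELLOW, "title": "API:"})
```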

View File

@@ -391,7 +391,7 @@ while True:
               flush=True)
         while True:
             console_input = utils.clean_input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)
-            if console_input.lower() == "y":
+            if console_input.lower().rstrip() == "y":
                 user_input = "GENERATE NEXT COMMAND JSON"
                 break
             elif console_input.lower().startswith("y -"):
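The `.rstrip()` makes the y/n authorization check tolerant of trailing whitespace. A two-line sketch of the behavior difference:

```
console_input = "y "  # user hit space before Enter
print(console_input.lower() == "y")           # False: the trailing space breaks the exact match
print(console_input.lower().rstrip() == "y")  # True: trailing whitespace is ignored
```

Leading whitespace (`" y"`) would still fail the match; `.strip()` would cover both ends.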

View File

@@ -44,6 +44,7 @@ def get_memory(cfg, init=False):
 def get_supported_memory_backends():
     return supported_memory
 
+
 __all__ = [
     "get_memory",
     "LocalCache",

View File

@@ -28,10 +28,20 @@ class LocalCache(MemoryProviderSingleton):
     def __init__(self, cfg) -> None:
         self.filename = f"{cfg.memory_index}.json"
         if os.path.exists(self.filename):
-            with open(self.filename, 'rb') as f:
-                loaded = orjson.loads(f.read())
-                self.data = CacheContent(**loaded)
+            try:
+                with open(self.filename, 'w+b') as f:
+                    file_content = f.read()
+                    if not file_content.strip():
+                        file_content = b'{}'
+                        f.write(file_content)
+
+                    loaded = orjson.loads(file_content)
+                    self.data = CacheContent(**loaded)
+            except orjson.JSONDecodeError:
+                print(f"Error: The file '{self.filename}' is not in JSON format.")
+                self.data = CacheContent()
         else:
+            print(f"Warning: The file '{self.filename}' does not exist. Local memory would not be saved to a file.")
             self.data = CacheContent()
 
     def add(self, text: str):
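One caveat worth flagging on this hunk: mode `'w+b'` truncates the file the moment it is opened, so the subsequent `f.read()` always returns `b''` and any previously saved cache is wiped, with `b'{}'` written in its place. A sketch of a non-destructive variant (hypothetical helper, not what the commit ships):

```
import orjson


def load_cache_file(filename: str) -> dict:
    # Hypothetical: read an existing JSON cache without truncating it first.
    # 'r+b' keeps existing bytes readable ('w+b' would truncate to zero length);
    # callers are expected to have checked os.path.exists(filename) already.
    with open(filename, 'r+b') as f:
        file_content = f.read()
        if not file_content.strip():
            file_content = b'{}'
            f.write(file_content)
    return orjson.loads(file_content)
```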

View File

@@ -45,5 +45,6 @@ class TestLocalCache(unittest.TestCase):
         self.assertEqual(len(relevant_texts), k)
         self.assertIn(self.example_texts[1], relevant_texts)
 
+
 if __name__ == '__main__':
     unittest.main()

tests/local_cache_test.py (new file, 52 lines)
View File

@@ -0,0 +1,52 @@
+import os
+import sys
+import unittest
+
+# Probably a better way:
+sys.path.append(os.path.abspath('../scripts'))
+from memory.local import LocalCache
+
+def MockConfig():
+    return type('MockConfig', (object,), {
+        'debug_mode': False,
+        'continuous_mode': False,
+        'speak_mode': False,
+        'memory_index': 'auto-gpt',
+    })
+
+class TestLocalCache(unittest.TestCase):
+    def setUp(self):
+        self.cfg = MockConfig()
+        self.cache = LocalCache(self.cfg)
+
+    def test_add(self):
+        text = "Sample text"
+        self.cache.add(text)
+        self.assertIn(text, self.cache.data.texts)
+
+    def test_clear(self):
+        self.cache.clear()
+        self.assertEqual(self.cache.data, [""])
+
+    def test_get(self):
+        text = "Sample text"
+        self.cache.add(text)
+        result = self.cache.get(text)
+        self.assertEqual(result, [text])
+
+    def test_get_relevant(self):
+        text1 = "Sample text 1"
+        text2 = "Sample text 2"
+        self.cache.add(text1)
+        self.cache.add(text2)
+        result = self.cache.get_relevant(text1, 1)
+        self.assertEqual(result, [text1])
+
+    def test_get_stats(self):
+        text = "Sample text"
+        self.cache.add(text)
+        stats = self.cache.get_stats()
+        self.assertEqual(stats, (1, self.cache.data.embeddings.shape))
+
+if __name__ == '__main__':
+    unittest.main()
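Note that `MockConfig()` returns a new class object built with the three-argument `type()` call, not an instance; attribute lookups such as `cfg.memory_index` resolve on the class itself, which is all `LocalCache` needs. A tiny sketch of why that works:

```
# type(name, bases, namespace) builds a class; simple attribute access
# works on the class object itself, so no instantiation is required.
Cfg = type('MockConfig', (object,), {'memory_index': 'auto-gpt'})
print(Cfg.memory_index)    # 'auto-gpt': resolves on the class...
print(Cfg().memory_index)  # ...and on instances alike
```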

View File

@@ -33,8 +33,6 @@ Additional aspects:
 - The function uses a generator expression to split the text into lines and chunks, which can improve performance for large amounts of text.
 """
 
-
-
 class TestScrapeText:
     # Tests that scrape_text() returns the expected text when given a valid URL.

View File

@@ -66,8 +66,6 @@ class TestParseJson(unittest.TestCase):
         # Assert that this raises an exception:
         self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj)
 
-
-
     def test_invalid_json_leading_sentence_with_gpt(self):
         # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False
         json_str = """I will first need to browse the repository (https://github.com/Torantulino/Auto-GPT) and identify any potential bugs that need fixing. I will use the "browse_website" command for this.
 
@@ -108,6 +106,5 @@ class TestParseJson(unittest.TestCase):
         self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj)
 
-
 if __name__ == '__main__':
     unittest.main()

View File

@@ -68,8 +68,6 @@ class TestParseJson(unittest.TestCase):
         # Assert that this raises an exception:
         self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj)
 
-
-
     def test_invalid_json_leading_sentence_with_gpt(self):
         # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False
         json_str = """I will first need to browse the repository (https://github.com/Torantulino/Auto-GPT) and identify any potential bugs that need fixing. I will use the "browse_website" command for this.
 
@@ -110,6 +108,5 @@ class TestParseJson(unittest.TestCase):
         self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj)
 
-
 if __name__ == '__main__':
     unittest.main()